Compare commits
No commits in common. "b1f60bc5016aa2102b1c390b950509503dca1e30" and "2ba837470ef31dd015300a803559023910f57ac6" have entirely different histories.
@@ -0,0 +1,90 @@

# Zebra cargo configuration

# Flags that apply to all Zebra crates and configurations
[target.'cfg(all())']
rustflags = [
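    # Note: in the flags below, -D denies a lint (build error), -W warns, and -A allows (silences) it.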
    # Zebra standard lints for Rust 1.65+

    # High-risk code
    "-Dunsafe_code",
    "-Dnon_ascii_idents",

    # Potential bugs
    #
    # If we deny these lints, we could be excluded from Crater builds:
    # https://www.reddit.com/r/rust/comments/f5xpib/psa_denywarnings_is_actively_harmful/

    # Compatibility
    "-Wrust_2021_compatibility",
    "-Wnonstandard_style",
    "-Wfuture_incompatible",

    # Async code
    "-Wclippy::await_holding_lock",
    "-Wclippy::await_holding_refcell_ref",

    # Pointers
    "-Wclippy::cast_ptr_alignment",
    "-Wclippy::fn_to_numeric_cast_any",

    # Integers
    "-Wclippy::checked_conversions",
    "-Wclippy::implicit_saturating_sub",
    "-Wclippy::invalid_upcast_comparisons",
    "-Wclippy::range_minus_one",
    "-Wclippy::range_plus_one",
    "-Wclippy::unnecessary_cast",

    # Incomplete code
    "-Wclippy::dbg_macro",
    "-Wclippy::todo",

    # Manual debugging output.
    # Use tracing::trace!() or tracing::debug!() instead.
    "-Wclippy::print_stdout",
    "-Wclippy::print_stderr",
    "-Wclippy::dbg_macro",

    # Code styles we want to accept
    "-Aclippy::try_err",

    # Panics
    "-Wclippy::fallible_impl_from",
    "-Wclippy::unwrap_in_result",

    # Documentation
    "-Wmissing_docs",

    # TODOs:
    # Fix this lint eventually.
    "-Aclippy::result_large_err",

    # `cargo fix` might help do these fixes,
    # or add a config.toml to sub-directories which should allow these lints,
    # or try allowing the lint in the specific module (lib.rs doesn't seem to work in some cases)
    #
    # lint configs that don't work:
    # - allowing these lints in lib.rs (command-line warn overrides allow in lib.rs?)
    # - adding a [target.'cfg(not(test))'] rustflags config (it runs on test code anyway)

    # fix code that triggers these lints,
    # or disable the lint for that code (or for all test code)
    #
    #"-Wclippy::cast_lossless", # 30 non-test warnings, a few test warnings
    #"-Wclippy::cast_possible_truncation", # 40 non-test warnings, 20 test warnings
    #"-Wclippy::cast_possible_wrap", # 13 test warnings (fixed outside tests)
    #"-Wclippy::cast_precision_loss", # 25 non-test warnings, 10 test warnings
    #"-Wclippy::cast_sign_loss", # 6 non-test warnings, 15 test warnings

    # fix hidden lifetime parameters
    #"-Wrust_2018_idioms",
]

[build]
rustdocflags = [
    # The -A and -W settings must be the same as the `RUSTDOCFLAGS` in:
    # https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/docs-deploy-firebase.yml#L68

    # Links in public docs can point to private items.
    "-Arustdoc::private_intra_doc_links",
]
@@ -0,0 +1,3 @@

[codespell]
ignore-words-list=crate,Sur,inout,Groth,groth,re-use,
exclude-file=book/mermaid.min.js
@@ -0,0 +1,23 @@

# Before the docker CLI sends the context to the docker daemon, it looks for a file
# named .dockerignore in the root directory of the context. If this file exists, the
# CLI modifies the context to exclude files and directories that match patterns in it.
#
# You may want to specify which files to include in the context, rather than which
# to exclude. To achieve this, specify * as the first pattern, followed by one or
# more ! exception patterns.
#
# https://docs.docker.com/engine/reference/builder/#dockerignore-file

# Exclude everything:
#
*

# Now un-exclude required files and folders:
#
!.cargo
!*.toml
!*.lock
!tower-*
!zebra-*
!zebrad
!docker/entrypoint.sh
@@ -0,0 +1,21 @@

{
  "projects": {
    "default": "zfnd-prod-zebra"
  },
  "targets": {
    "zfnd-prod-zebra": {
      "hosting": {
        "docs-book": [
          "zebra-docs-book"
        ],
        "docs-external": [
          "zebra-docs-external"
        ],
        "docs-internal": [
          "zebra-docs-internal"
        ]
      }
    }
  },
  "etags": {}
}
@@ -0,0 +1 @@

**/Cargo.lock linguist-generated=false
@@ -0,0 +1,46 @@

# Unless a later match takes precedence,
# this team will be requested for review when someone opens a pull request.
#
# We use a single team here, because if we use two teams, GitHub assigns two reviewers.
* @ZcashFoundation/general-rust-reviewers

# Frequently Modified Code
#
# This code is currently being modified in most PRs,
# so we assign reviews to the largest group of reviewers,
# to stop GitHub assigning multiple reviewers
#
# lightwalletd epic
/zebrad/src/commands/start.rs @ZcashFoundation/general-rust-reviewers

# Network and Async Code
/tower-batch-control/ @ZcashFoundation/network-reviewers
/tower-fallback/ @ZcashFoundation/network-reviewers
/zebra-network/ @ZcashFoundation/network-reviewers
/zebra-node-services/ @ZcashFoundation/network-reviewers
/zebra-tests/src/mock_service.rs @ZcashFoundation/network-reviewers
/zebra-tests/src/service_extensions.rs @ZcashFoundation/network-reviewers
/zebra-tests/src/transcript.rs @ZcashFoundation/network-reviewers
/zebrad/src/components/ @ZcashFoundation/network-reviewers

# Cryptographic Code
/zebra-consensus/src/primitives/ @ZcashFoundation/cryptographic-reviewers
/zebra-chain/src/primitives/ @ZcashFoundation/cryptographic-reviewers
/zebra-chain/src/orchard/ @ZcashFoundation/cryptographic-reviewers
/zebra-chain/src/sapling/ @ZcashFoundation/cryptographic-reviewers
/zebra-chain/src/sprout/ @ZcashFoundation/cryptographic-reviewers
/zebra-chain/src/transparent/ @ZcashFoundation/cryptographic-reviewers
/zebra-chain/src/history_tree.rs @ZcashFoundation/cryptographic-reviewers
/zebra-chain/src/history_tree/ @ZcashFoundation/cryptographic-reviewers

# Devops Code
/.github/workflows/ @ZcashFoundation/devops-reviewers
/.github/mergify.yml @ZcashFoundation/devops-reviewers
/docker/ @ZcashFoundation/devops-reviewers
cloudbuild.yaml @ZcashFoundation/devops-reviewers
codecov.yml @ZcashFoundation/devops-reviewers
firebase.json @ZcashFoundation/devops-reviewers
katex-header.html @ZcashFoundation/devops-reviewers

# Unsafe Code
/zebra-script/ @ZcashFoundation/unsafe-rust-reviewers
@@ -0,0 +1,73 @@

name: '🐛 Bug report'
description: Create a report to help us improve
title: 'bug: '
labels: [C-bug, S-needs-triage]
body:
  - type: markdown
    attributes:
      value: |
        Thank you for taking the time to report a bug in Zebra!

        Please fill out the sections below to help us reproduce and fix the bug.
        If you have a question, please ask on [Discord](https://discord.gg/fP2JGmhm) or [GitHub Discussions](https://github.com/ZcashFoundation/zebra/discussions).
  - type: textarea
    id: what-happened
    attributes:
      label: What happened?
      description: Also tell us, what did you expect to happen?
      value: '
        I expected to see this happen:


        Instead, this happened:
        '
    validations:
      required: true
  - type: textarea
    id: reproduce
    attributes:
      label: What were you doing when the issue happened?
      description: Copy and paste the exact commands or code here.
      placeholder: 'Behavior or code sample that causes the bug'
    validations:
      required: false
  - type: textarea
    id: logs
    attributes:
      label: Zebra logs
      description: Copy and paste the last 100 Zebra log lines, or upload the full logs to https://gist.github.com/ and add a link to them here.
      placeholder: 'Copy and paste the logs here'
    validations:
      required: false
  - type: input
    id: zebrad-version
    attributes:
      label: Zebra Version
      description: 'For bugs in `zebrad`, run `zebrad --version`.'
      placeholder: 'zebrad 1.0.0-placeholder'
    validations:
      required: false
  - type: checkboxes
    id: os
    attributes:
      label: Which operating systems does the issue happen on?
      description: You may select more than one.
      options:
        - label: Linux
        - label: macOS
        - label: Windows
        - label: Other OS
  - type: input
    id: os-details
    attributes:
      label: OS details
      description: 'Linux, macOS, BSD: the output of `uname -a`; Windows: version and 32-bit or 64-bit; Other OS: name and version'
    validations:
      required: false
  - type: textarea
    id: anything-else
    attributes:
      label: Additional information
      description: Is there anything else that could help us solve this issue?
    validations:
      required: false
@@ -0,0 +1,8 @@

blank_issues_enabled: true
contact_links:
  - name: 💬 Zcash Community Support
    url: https://forum.zcashcommunity.com/
    about: You're invited to ask questions about the ecosystem, the community, and Zebra
  - name: ❓ General Questions about Zebra
    url: https://github.com/ZcashFoundation/zebra/discussions/categories/q-a
    about: Please ask and answer questions about Zebra in discussion threads
@@ -0,0 +1,67 @@

---
name: '🚦 DevOps Report'
description: Issues related to the Zebra build, test, or release process.
title: 'devops: '
labels: [A-devops, C-bug, S-needs-triage]
body:
  - type: markdown
    attributes:
      value: |
        Thank you for taking the time to report a bug in Zebra!

        Please fill out the sections below to help us reproduce and fix the bug.
        If you have a question, please ask on [Discord](https://discord.gg/fP2JGmhm) or [GitHub Discussions](https://github.com/ZcashFoundation/zebra/discussions).
  - type: textarea
    id: description
    attributes:
      label: Describe the issue or request
      description: What is the problem? A clear and concise description of the bug.
    validations:
      required: true
  - type: textarea
    id: expected
    attributes:
      label: Expected Behavior
      description: |
        What did you expect to happen?
    validations:
      required: false
  - type: textarea
    id: current
    attributes:
      label: Current Behavior
      description: |
        What actually happened?

        Please include full errors, uncaught exceptions, stack traces, and relevant logs.
        Links to the faulty logs in GitHub Actions or other places are also welcome.
    validations:
      required: false
  - type: textarea
    id: solution
    attributes:
      label: Possible Solution
      description: |
        Suggest a fix, or a reason for the bug.
    validations:
      required: false
  - type: textarea
    id: context
    attributes:
      label: Additional Information/Context
      description: |
        Anything else that might be relevant for troubleshooting this bug. Providing context helps us come up with a solution that is most useful for the community.
    validations:
      required: false
  - type: input
    id: on-prs
    attributes:
      label: Is this happening on PRs?
    validations:
      required: false
  - type: input
    id: on-main
    attributes:
      label: Is this happening on the main branch?
    validations:
      required: false
@@ -0,0 +1,57 @@

---
name: "🚧 Change request"
description: Suggest a feature or change for this project
title: 'feature: '
labels: [C-enhancement, S-needs-triage]
body:
  - type: markdown
    attributes:
      value: |
        Thank you for taking the time to suggest a feature or change for Zebra!

        Please fill out the sections below to help us understand your request.
        If you have a question, please ask on [Discord](https://discord.gg/fP2JGmhm) or [GitHub Discussions](https://github.com/ZcashFoundation/zebra/discussions).
  - type: textarea
    id: motivation
    attributes:
      label: Motivation
      description: |
        Is your feature request related to a problem?
        How does this change improve Zebra?
    validations:
      required: true
  - type: textarea
    id: specs
    attributes:
      label: Specifications
      description: |
        If this change is based on consensus rules, quote them, and link to the Zcash spec or ZIP: https://zips.z.cash/#nu5-zips
        If this changes network behaviour, quote and link to the Bitcoin network reference: https://developer.bitcoin.org/reference/p2p_networking.html
    validations:
      required: false
  - type: textarea
    id: complexity
    attributes:
      label: Complex Code or Requirements
      description: |
        Does this change concurrency, unsafe code, or complex consensus rules?
        If it does, explain how we will implement, review, and test it.
    validations:
      required: false
  - type: textarea
    id: tests
    attributes:
      label: Testing
      description: |
        How can we check that this change does what we want it to do?
    validations:
      required: false
  - type: textarea
    id: related
    attributes:
      label: Related Work
      description: |
        Is this change related to other features or tickets?
    validations:
      required: false
@@ -0,0 +1,20 @@

---
name: '🔓 Private Security Issue'
description: Zebra team use only
title: 'Security Issue #NNN'
labels: [C-security, S-needs-triage]
body:
  - type: markdown
    attributes:
      value: |
        This ticket is a public placeholder for a security issue that the Zebra team is fixing privately.
        The issue number is chosen by our internal tracker; it is not meaningful.

        Zebra developers must discuss the details of this issue using secure channels.
        Please do not discuss this issue in public.
  - type: textarea
    id: issue
    attributes:
      label: Description
      description: |
        Any relevant information about the issue
@@ -0,0 +1,59 @@

---
name: "🚀 Zebra Release"
about: 'Zebra team use only'
title: 'Publish next Zebra release: (version)'
labels: 'A-release, C-trivial, P-Medium :zap:'
assignees: ''

---

# Prepare for the Release

These release steps can be done a week before the release, in separate PRs.
They can be skipped for urgent releases.

## State Full Sync Test

To check consensus correctness, we want to test that the state format is valid after a full sync. (Format upgrades are tested in CI on each PR.)

- [ ] Make sure there has been [at least one successful full sync test](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-integration-tests-gcp.yml?query=event%3Aschedule) since the last state change, or
- [ ] Start a manual workflow run with a Zebra and `lightwalletd` full sync.

State format changes can be made in `zebra-state` or `zebra-chain`. The state format can be changed by data that is sent to the state, data created within the state using `zebra-chain`, or serialization formats in `zebra-state` or `zebra-chain`.

After the test has been started, or if it has finished already:
- [ ] Ask for a state code freeze in Slack. The freeze lasts until the release has been published.

## Checkpoints

For performance and security, we want to update the Zebra checkpoints in every release.
- [ ] You can copy the latest checkpoints from CI by following [the zebra-checkpoints README](https://github.com/ZcashFoundation/zebra/blob/main/zebra-utils/README.md#zebra-checkpoints).

## Missed Dependency Updates

Sometimes `dependabot` misses some dependency updates, or we have accidentally turned them off.

This step can be skipped if there is a large pending dependency upgrade. (For example, shared ECC crates.)

Here's how we make sure we got everything:
- [ ] Run `cargo update` on the latest `main` branch, and keep the output (see the sketch after this list)
- [ ] If needed, [add duplicate dependency exceptions to deny.toml](https://github.com/ZcashFoundation/zebra/blob/main/book/src/dev/continuous-integration.md#fixing-duplicate-dependencies-in-check-denytoml-bans)
- [ ] If needed, remove resolved duplicate dependencies from `deny.toml`
- [ ] Open a separate PR with the changes
- [ ] Add the output of `cargo update` to that PR as a comment
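A minimal sketch of the first and last steps (the log file name here is illustrative, not part of the checklist):

```sh
# Update dependencies on the latest main branch, keeping the output for the PR comment.
git checkout main && git pull
cargo update 2>&1 | tee cargo-update.log
```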

# Prepare and Publish the Release

Follow the steps in the [release checklist](https://github.com/ZcashFoundation/zebra/blob/main/.github/PULL_REQUEST_TEMPLATE/release-checklist.md) to prepare the release:

Release PR:
- [ ] Update Changelog
- [ ] Update README
- [ ] Update Zebra Versions
- [ ] Update End of Support Height

Publish Release:
- [ ] Create & Test GitHub Pre-Release
- [ ] Publish GitHub Release
- [ ] Publish Rust Crates
- [ ] Publish Docker Images
@@ -0,0 +1,84 @@

---
name: "📋 Usability Testing Plan"
about: Create a Usability Testing Plan
title: 'Usability Testing Plan'
labels: C-research
assignees: ''

---

# Usability Testing Plan

<!-- For an example of a usability testing plan see #1501. This and other comments should be removed as you write the plan. -->

## Purpose of test and research questions

<!-- The research question should be focused, narrow, specific, answerable, feasible and open, e.g. "what", "why" and "how" -->

## Participants

<!-- How many people will take part in your usability study? -->

### Recruitment criteria

<!-- What characteristics should your research participants have in order to answer your research questions? -->

### Recruitment strategy

<!-- Where and how will you find your participants? How will you compensate them for their time and feedback? -->

## Protocol

### Method

<!-- Explain whether the tests will be co-located or remote, moderated or unmoderated, and who will attend the sessions (e.g. a moderator and a note-taker) -->

### Test environment, equipment and logistics

<!-- Describe the software artifact that you will present to participants (e.g. a test instance, an alpha version, a prototype, printed mock-ups...), and the technical setup (e.g. what type of device you will use, what recording equipment and software, and any peripherals such as a mouse or keyboard) -->

## Task(s)

<!-- For each task, provide a brief description, a criterion for success (what needs to happen for the participant to successfully complete the task), and the scenario you will provide to participants -->

### Description

<!-- What does the task entail? e.g. downloading, building and running zebrad -->

### Scenario

<!-- This is a little story you can tell your users about what they are trying to do and why -->

### Success criteria

<!-- What needs to happen for the participant to successfully complete the task -->


## Session Outline and timing

<!-- The following sections provide some space to plan out the script and tasks for your participants -->

### 1. Introduction to the session (5')

<!-- Here you can write the script to: welcome the participant, explain the activity and the technical setup, get consent, etc. -->

### 2. Introductory Interview (10')

<!-- Here you can write the script to gather some insight into the participant and their context -->

### 3. Tasks (25')

<!-- Here you can write the tasks -->

### 4. Post-test debriefing (5')

<!-- Here you can write the script for the closing interview and list any other short activities you want to run during the final minutes, e.g. a user satisfaction questionnaire, collecting general feedback from the participant, etc. -->

## Data to be collected and findings

<!-- List the data you will collect during the study (e.g. screen + audio recordings, task success rates, etc.) as well as how you will present and share findings (e.g. report, wiki page, presentation, etc.) -->

## Required documentation

<!-- List the documents you will need to produce and bring to the usability testing sessions, e.g. consent forms, usability testing script, questionnaires, etc. -->
@@ -0,0 +1,51 @@

name: "Upgrade zcashd dependencies"
description: "Specific steps for upgrading dependencies shared with zcashd. Zebra developers only."
title: 'Upgrade dependencies for zcashd (version)'
labels: [A-dependencies, A-script, S-needs-triage]
body:
  - type: markdown
    attributes:
      value: |
        ## Motivation

        `zcashd` crate dependency upgrades need to be done together, along with an upgrade to `zcash_script`.
        We don't have upgrade instructions yet, but there's a draft in #6532.

        The upgrade steps are:
        - [ ] upgrade and release `zcash_script`
        - [ ] upgrade Zebra's ECC and `zcash_script` dependencies in the same PR
  - type: textarea
    id: versions
    attributes:
      label: Versions
      description: |
        What versions of `zcashd` and the ECC crates do we need to upgrade to?
    validations:
      required: true
  - type: textarea
    id: complexity
    attributes:
      label: Complex Code or Requirements
      description: |
        Does this upgrade change unsafe code or complex consensus rules?
        If it does, explain how we will implement, review, and test it.
    validations:
      required: false
  - type: textarea
    id: tests
    attributes:
      label: Testing
      description: |
        How can we check that this change does what we want it to do?
    validations:
      required: false
  - type: textarea
    id: related
    attributes:
      label: Related Work
      description: |
        Is this change related to other features or tickets?
    validations:
      required: false
@@ -0,0 +1,180 @@

---
name: 'Release Checklist Template'
about: 'Checklist to create and publish a Zebra release'
title: 'Release Zebra (version)'
labels: 'A-release, C-trivial, P-Critical :ambulance:'
assignees: ''

---

# Prepare for the Release

- [ ] Make sure there has been [at least one successful full sync test](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-integration-tests-gcp.yml?query=event%3Aschedule) since the last state change, or start a manual full sync.
- [ ] Make sure the PRs with the new checkpoint hashes and missed dependencies are already merged.
      (See the release ticket checklist for details.)


# Summarise Release Changes

These steps can be done a few days before the release, in the same PR:

## Change Log

**Important**: Any merge into `main` deletes any edits to the draft changelog.
Once you are ready to tag a release, copy the draft changelog into `CHANGELOG.md`.

We use [the Release Drafter workflow](https://github.com/marketplace/actions/release-drafter) to automatically create a [draft changelog](https://github.com/ZcashFoundation/zebra/releases). We follow the [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) format.

To create the final change log:
- [ ] Copy the **latest** draft changelog into `CHANGELOG.md` (there can be multiple draft releases)
- [ ] Delete any trivial changes
  - [ ] Put the list of deleted changelog entries in a PR comment to make reviewing easier
- [ ] Combine duplicate changes
- [ ] Edit change descriptions so they will make sense to Zebra users
- [ ] Check the category for each change
  - Prefer the "Fix" category if you're not sure

## README

README updates can be skipped for urgent releases.

Update the README to:
- [ ] Remove any "Known Issues" that have been fixed since the last release.
- [ ] Update the "Build and Run Instructions" with any new dependencies.
      Check for changes in the `Dockerfile` since the last tag: `git diff <previous-release-tag> docker/Dockerfile`.
- [ ] If Zebra has started using newer Rust language features or standard library APIs, update the known working Rust version in the README, book, and `Cargo.toml`s

You can use a command like:
```sh
fastmod --fixed-strings '1.58' '1.65'
```

## Create the Release PR

- [ ] Push the updated changelog and README into a new branch,
      for example: `bump-v1.0.0` - this needs to be different from the tag name
- [ ] Create a release PR by adding `&template=release-checklist.md` to the comparing URL ([Example](https://github.com/ZcashFoundation/zebra/compare/bump-v1.0.0?expand=1&template=release-checklist.md)).
- [ ] Freeze the [`batched` queue](https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/queues) using Mergify.
- [ ] Mark all the release PRs as `Critical` priority, so they go in the `urgent` Mergify queue.
- [ ] Mark all non-release PRs with `do-not-merge`, because Mergify checks approved PRs against every commit, even when a queue is frozen.


# Update Versions and End of Support

## Update Zebra Version

### Choose a Release Level

Zebra follows [semantic versioning](https://semver.org). Semantic versions look like: MAJOR.MINOR.PATCH[-TAG.PRE-RELEASE]

Choose a release level for `zebrad`. Release levels are based on user-visible changes from the changelog:
- Mainnet Network Upgrades are `major` releases
- significant new features or behaviour changes; changes to RPCs, command-line, or configs; and deprecations or removals are `minor` releases
- otherwise, it is a `patch` release

Zebra's Rust API doesn't have any support or stability guarantees, so we keep all the `zebra-*` and `tower-*` crates on a beta `pre-release` version.

### Update Crate Versions

If you're publishing crates for the first time, [log in to crates.io](https://github.com/ZcashFoundation/zebra/blob/doc-crate-own/book/src/dev/crate-owners.md#logging-in-to-cratesio),
and make sure you're a member of the owners group.

Check that the release will work:
- [ ] Update crate versions, commit the changes to the release branch, and do a release dry-run:

```sh
# Update everything except for alpha crates and zebrad:
cargo release version --verbose --execute --allow-branch '*' --workspace --exclude zebrad --exclude zebra-scan --exclude zebra-grpc beta
# Due to a bug in cargo-release, we need to pass exact versions for alpha crates:
cargo release version --verbose --execute --allow-branch '*' --package zebra-scan 0.1.0-alpha.4
cargo release version --verbose --execute --allow-branch '*' --package zebra-grpc 0.1.0-alpha.2
# Update zebrad:
cargo release version --verbose --execute --allow-branch '*' --package zebrad patch # [ major | minor | patch ]
# Continue with the release process:
cargo release replace --verbose --execute --allow-branch '*' --package zebrad
cargo release commit --verbose --execute --allow-branch '*'
```

Crate publishing is [automatically checked in CI](https://github.com/ZcashFoundation/zebra/actions/workflows/release-crates-io.yml) using "dry run" mode. However, due to a bug in `cargo-release`, we need to pass exact versions for the alpha crates:

- [ ] Update the `zebra-scan` and `zebra-grpc` alpha crates in the [release-crates-dry-run workflow script](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/scripts/release-crates-dry-run.sh)
- [ ] Push the above version changes to the release branch.
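For reference, `cargo release` defaults to a dry run when `--execute` is omitted, so a local sketch of the same publish check (nothing is uploaded) is:

```sh
# Dry-run the workspace publish; cargo-release only prints what it would do.
cargo release publish --verbose --workspace
```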

## Update End of Support

The end of support height is calculated from the current blockchain height:
- [ ] Find where the Zcash blockchain tip is now by using a [Zcash explorer](https://zcashblockexplorer.com/blocks) or other tool.
- [ ] Replace `ESTIMATED_RELEASE_HEIGHT` in [`end_of_support.rs`](https://github.com/ZcashFoundation/zebra/blob/main/zebrad/src/components/sync/end_of_support.rs) with the height you estimate the release will be tagged.

<details>

<summary>Optional: calculate the release tagging height</summary>

- Add `1152` blocks for each day until the release
- For example, if the release is in 3 days, add `1152 * 3` to the current Mainnet block height

</details>
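As a quick sketch of that arithmetic (the tip height below is a made-up example; read the real value from a block explorer):

```sh
# Zcash Mainnet produces roughly 1152 blocks per day (75-second target spacing).
CURRENT_TIP=2400000
DAYS_UNTIL_RELEASE=3
echo "ESTIMATED_RELEASE_HEIGHT=$(( CURRENT_TIP + 1152 * DAYS_UNTIL_RELEASE ))"
```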

## Update the Release PR

- [ ] Push the version increments and the release constants to the release branch.


# Publish the Zebra Release

## Create the GitHub Pre-Release

- [ ] Wait for all the release PRs to be merged
- [ ] Create a new release using the draft release as a base, by clicking the Edit icon in the [draft release](https://github.com/ZcashFoundation/zebra/releases)
- [ ] Set the tag name to the version tag,
      for example: `v1.0.0`
- [ ] Set the release to target the `main` branch
- [ ] Set the release title to `Zebra ` followed by the version tag,
      for example: `Zebra 1.0.0`
- [ ] Replace the prepopulated draft changelog in the release description with the final changelog you created,
      starting just _after_ the title `## [Zebra ...` of the current version being released,
      and ending just _before_ the title of the previous release.
- [ ] Mark the release as 'pre-release', until it has been built and tested
- [ ] Publish the pre-release to GitHub using "Publish Release"
- [ ] Delete all the [draft releases from the list of releases](https://github.com/ZcashFoundation/zebra/releases)

## Test the Pre-Release

- [ ] Wait until the Docker binaries have been built on `main`, and the quick tests have passed:
  - [ ] [ci-unit-tests-docker.yml](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-unit-tests-docker.yml?query=branch%3Amain)
  - [ ] [ci-integration-tests-gcp.yml](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-integration-tests-gcp.yml?query=branch%3Amain)
- [ ] Wait until the [pre-release deployment machines have successfully launched](https://github.com/ZcashFoundation/zebra/actions/workflows/cd-deploy-nodes-gcp.yml?query=event%3Arelease)

## Publish Release

- [ ] [Publish the release to GitHub](https://github.com/ZcashFoundation/zebra/releases) by disabling 'pre-release', then clicking "Set as the latest release"

## Publish Crates

- [ ] [Run `cargo login`](https://github.com/ZcashFoundation/zebra/blob/doc-crate-own/book/src/dev/crate-owners.md#logging-in-to-cratesio)
- [ ] Run `cargo clean` in the zebra repo (optional)
- [ ] Publish the crates to crates.io: `cargo release publish --verbose --workspace --execute`
- [ ] Check that Zebra can be installed from `crates.io`:
      `cargo install --locked --force --version 1.minor.patch zebrad && ~/.cargo/bin/zebrad`
      and put the output in a comment on the PR.

## Publish Docker Images

- [ ] Wait for [the Docker images to be published successfully](https://github.com/ZcashFoundation/zebra/actions/workflows/release-binaries.yml?query=event%3Arelease).
- [ ] Un-freeze the [`batched` queue](https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/queues) using Mergify.
- [ ] Remove `do-not-merge` from the PRs you added it to

## Release Failures

If building or running fails after tagging:

<details>

<summary>Tag a new release, following these instructions...</summary>

1. Fix the bug that caused the failure
2. Start a new `patch` release
3. Skip the **Release Preparation**, and start at the **Release Changes** step
4. Update `CHANGELOG.md` with details about the fix
5. Follow the release checklist for the new Zebra version

</details>
@@ -0,0 +1,46 @@

version: 2
updates:
  # Rust section
  - package-ecosystem: cargo
    directory: '/'
    # serde, clap, and other dependencies sometimes have multiple updates in a week
    schedule:
      interval: weekly
      day: monday
      timezone: America/New_York
    # Limit dependabot to 1 PR per reviewer
    open-pull-requests-limit: 6
    labels:
      - 'C-trivial'
      - 'A-rust'
      - 'A-dependencies'
      - 'P-Low :snowflake:'
    groups:
      ecc:
        patterns:
          # deliberately include zcash_script (even though it is maintained by ZF)
          - "zcash_*|orchard|halo2*|incrementalmerkletree|bridgetree|equihash"
      prod:
        dependency-type: "production"
        exclude-patterns:
          - "zcash_*|orchard|halo2*|incrementalmerkletree|bridgetree|equihash"
      dev:
        dependency-type: "development"
  # Devops section
  - package-ecosystem: github-actions
    directory: '/'
    schedule:
      # tj-actions/changed-files often updates daily, which is too much for us
      interval: weekly
      day: wednesday
      timezone: America/New_York
    open-pull-requests-limit: 4
    labels:
      - 'C-trivial'
      - 'A-devops'
      - 'A-dependencies'
      - 'P-Low :snowflake:'
    groups:
      devops:
        patterns:
          - "*"
@@ -0,0 +1,99 @@

# Automatic merging of approved PRs for Zebra
#
# This file can be edited and validated using:
# https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/config-editor

queue_rules:
  - name: urgent
    # Allow Mergify to update/rebase the original pull request if possible to check its mergeability,
    # without creating a draft PR if it isn't needed
    allow_inplace_checks: True
    allow_checks_interruption: False
    speculative_checks: 1
    batch_size: 8
    # Wait a short time to embark hotfixes together in a merge train
    batch_max_wait_time: "2 minutes"
    conditions:
      # Mergify automatically applies status check, approval, and conversation rules,
      # which are the same as the GitHub main branch protection rules
      # https://docs.mergify.com/conditions/#about-branch-protection
      - base=main

  - name: batched
    allow_inplace_checks: True
    allow_checks_interruption: True
    speculative_checks: 1
    batch_size: 20
    # Wait for about 10% of the time it takes Rust PRs to run CI (3h)
    batch_max_wait_time: "20 minutes"
    conditions:
      - base=main

# These rules are checked in order; the first one to be satisfied applies
pull_request_rules:
  - name: move to urgent queue when CI passes with multiple reviews
    conditions:
      # This queue handles a PR if it:
      # has multiple approving reviewers
      - "#approved-reviews-by>=2"
      # is labeled with Critical priority
      - 'label~=^P-Critical'
      # and satisfies the standard merge conditions:
      # targets main
      - base=main
      # is not in draft
      - -draft
      # does not include the do-not-merge label
      - label!=do-not-merge
    actions:
      queue:
        name: urgent
        method: squash

  - name: move to urgent queue when CI passes with 1 review
    conditions:
      # This queue handles a PR if it:
      # has at least one approving reviewer (branch protection rule)
      # does not need extra reviews
      - 'label!=extra-reviews'
      # is labeled with Critical priority
      - 'label~=^P-Critical'
      # and satisfies the standard merge conditions:
      - base=main
      - -draft
      - label!=do-not-merge
    actions:
      queue:
        name: urgent
        method: squash

  - name: move to batched queue when CI passes with multiple reviews
    conditions:
      # This queue handles a PR if it:
      # has multiple approving reviewers
      - "#approved-reviews-by>=2"
      # is labeled with any other priority (rules are checked in order)
      # and satisfies the standard merge conditions:
      - base=main
      - -draft
      - label!=do-not-merge
    actions:
      queue:
        name: batched
        method: squash

  - name: move to batched queue when CI passes with 1 review
    conditions:
      # This queue handles a PR if it:
      # has at least one approving reviewer (branch protection rule)
      # does not need extra reviews
      - 'label!=extra-reviews'
      # is labeled with any other priority (rules are checked in order)
      # and satisfies the standard merge conditions:
      - base=main
      - -draft
      - label!=do-not-merge
    actions:
      queue:
        name: batched
        method: squash
@@ -0,0 +1,75 @@

## Motivation

<!--
Thank you for your Pull Request.
Does it close any issues?
-->

_What are the most important goals of the ticket or PR?_


### PR Author Checklist

#### Check before marking the PR as ready for review:
- [ ] Will the PR name make sense to users?
- [ ] Does the PR have a priority label?
- [ ] Have you added or updated tests?
- [ ] Is the documentation up to date?

##### For significant changes:
- [ ] Is there a summary in the CHANGELOG?
- [ ] Can these changes be split into multiple PRs?

_If a checkbox isn't relevant to the PR, mark it as done._

### Specifications

<!--
If this PR changes consensus rules, quote them, and link to the Zcash spec or ZIP:
https://zips.z.cash/#nu5-zips
-->


### Complex Code or Requirements

<!--
Does this PR change concurrency, unsafe code, or complex consensus rules?
If it does, label this PR with `extra-reviews`.
-->


## Solution

<!--
Summarize the changes in this PR.
-->


### Testing

<!--
Which tests were changed or added in this PR? Were there manual tests?
-->


## Review

<!--
Is this PR blocking any other work?
If you want specific reviewers for this PR, tag them here.
-->


### Reviewer Checklist

Check before approving the PR:
- [ ] Does the PR scope match the ticket?
- [ ] Are there enough tests to make sure it works? Do the tests cover the PR motivation?
- [ ] Are all the PR blockers dealt with?
      PR blockers can be dealt with in new tickets or PRs.

_And check the PR Author checklist is complete._

## Follow Up Work

<!--
Is there anything missing from the solution?
-->
@@ -0,0 +1,183 @@

# Configuration for automatic Zebra CHANGELOGs and PR tagging
#
# Based on:
# https://github.com/marketplace/actions/release-drafter#example

# Automatically label PRs based on their branch, title, or changed files.
# This helps categorise PRs in the CHANGELOG.
autolabeler:
  - label: 'C-security'
    branch:
      - '/secur/i'
    title:
      - '/secur/i'
      - '/crash/i'
      - '/destr/i'
      - '/unsafe/i'
  - label: 'C-deprecated'
    branch:
      - '/deprecat/i'
    title:
      - '/deprecat/i'
  - label: 'extra-reviews'
    branch:
      - '/remov/i'
      - '/deprecat/i'
    title:
      - '/remov/i'
      - '/deprecat/i'
      - '/crash/i'
      - '/destr/i'
      - '/unsafe/i'
  - label: 'C-feature'
    branch:
      - '/feat/i'
    title:
      - '/feat/i'
  - label: 'C-bug'
    branch:
      - '/bug/i'
    title:
      - '/bug/i'
  # Changes that are almost always trivial for users
  - label: 'C-trivial'
    branch:
      - '/clean/i'
      - '/chore/i'
      - '/clippy/i'
      - '/test/i'
    title:
      - '/clean/i'
      - '/chore/i'
      - '/clippy/i'
      - '/test/i'
      - '/(ci)/i'
      - '/(cd)/i'
      - '/job/i'
      - '/patch/i'
      - '/actions/i'
    files:
      # Regular changes that don't need to go in the CHANGELOG
      - 'CHANGELOG.md'
      - 'zebra-consensus/src/checkpoint/*-checkpoints.txt'
      # Developer-only changes
      - '.gitignore'
      - '.dockerignore'
      # Test-only changes
      - 'zebra-test'
      - '.cargo/config.toml'
      - 'clippy.toml'
      # CI-only changes
      - '.github'
      - '.codespellrc'
      - 'codecov.yml'
      - 'deny.toml'

# The release name, tag, and settings for the draft CHANGELOG.
name-template: 'Zebra $RESOLVED_VERSION'
tag-template: 'v$RESOLVED_VERSION'
tag-prefix: 'v'
prerelease: true

# Categories in rough order of importance to users.
# Based on https://keepachangelog.com/en/1.0.0/
category-template: '### $TITLE'
categories:
  - title: 'Security'
    labels:
      - 'C-security'
      # Other labels that are usually security issues
      - 'I-invalid-data'
      - 'I-consensus'
      - 'I-crash'
      - 'I-destructive'
      - 'I-hang'
      - 'I-lose-funds'
      - 'I-privacy'
      - 'I-remote-node-overload'
      - 'I-unbounded-growth'
      - 'I-memory-safety'
  - title: 'Removed'
    labels:
      - 'C-removal'
  - title: 'Deprecated'
    labels:
      - 'C-deprecation'
    # TODO: when release drafter has per-category templates, add this to the Deprecated category template:
    # 'These features might be removed in Zebra $NEXT_MINOR_VERSION'
  - title: 'Added'
    labels:
      - 'C-feature'
  - title: 'Changed'
    labels:
      - 'C-enhancement'
  - title: 'Fixed'
    labels:
      - 'C-bug'
      # Other labels that are usually bugs
      - 'I-build-fail'
      - 'I-integration-fail'
      - 'I-panic'
  # TODO: if we're happy with the trivial PRs, use "exclude-labels:" instead
  - title: 'Trivial *TODO:* put this in a PR comment, not the CHANGELOG'
    labels:
      - 'C-trivial'
      - 'C-cleanup'

# The next release's $RESOLVED_VERSION, based on the labels of the PRs in the release.
#
# In Zebra, we use major versions for mainnet network upgrades,
# and minor versions for less significant breaking changes.
version-resolver:
  # We increment the major release version manually
  #major:
  #  labels:
  #    # labels cannot be an empty list or contain empty strings
  #    - # network upgrade release PRs
  minor:
    labels:
      - 'C-feature'
      - 'C-breaking'
      - 'C-removal'
      - 'C-deprecation'
  # We increment the patch version for every release
  default: patch

# How PR names get turned into CHANGELOG entries.
change-template: '- $TITLE ([#$NUMBER]($URL))'
sort-by: title
sort-direction: ascending
# Characters escaped when converting PR titles to CHANGELOG entries.
# Add ` to disable code blocks.
change-title-escapes: '\<*_&#@'
# Strip PR series numbers, leading spaces, and conventional commit prefixes from PR titles.
replacers:
  - search: '/- [0-9\. ]*([a-zA-Z0-9\(\)!]+:)?/'
    replace: '- '
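    # e.g. this strips a series number and conventional commit prefix like
    # '1. feat(ci):' from an entry like '- 1. feat(ci): Add a new job'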

# The list of contributors to each release.
exclude-contributors:
  - 'dependabot' # 'dependabot[bot]'
  - 'mergifyio' # 'mergify[bot]'

# The template for the draft CHANGELOG.
template: |
  ## [Zebra $RESOLVED_VERSION](https://github.com/ZcashFoundation/zebra/releases/tag/v$RESOLVED_VERSION) - *TODO*: date

  This release *TODO*: a summary of the significant user-visible changes in the release

  ### Breaking Changes

  This release has the following breaking changes:
  - *TODO*: Check the `Removed` and `Deprecated` sections for any breaking changes
  - *TODO*: Add a short description of the user impact of each breaking change, and any actions users need to take

  $CHANGES

  ### Contributors

  Thank you to everyone who contributed to this release, we couldn't make Zebra without you:
  $CONTRIBUTORS


# the trailing newlines in the template are deliberate
@@ -0,0 +1,38 @@

# Workflow patches for skipping Google Cloud CD deployments on PRs from external repositories.
name: Deploy Nodes to GCP

# Run on PRs from external repositories, let them pass, and then Mergify will check them.
# GitHub doesn't support filtering workflows by source branch names, so we have to do it for each
# job.
on:
  pull_request:

# IMPORTANT
#
# The job names in `cd-deploy-nodes-gcp.yml`, `cd-deploy-nodes-gcp.patch.yml` and
# `cd-deploy-nodes-gcp.patch-external.yml` must be kept in sync.
jobs:
  # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable)
  build:
    name: Build CD Docker / Build images
    # Only run on PRs from external repositories, skipping ZF branches and tags.
    if: ${{ startsWith(github.event_name, 'pull') && github.event.pull_request.head.repo.fork }}
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'

  test-configuration-file:
    name: Test CD default Docker config file / Test default-conf in Docker
    # This dependency allows all these jobs to depend on a single condition, making it easier to
    # change.
    needs: build
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'

  test-zebra-conf-path:
    name: Test CD custom Docker config file / Test custom-conf in Docker
    needs: build
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'
@@ -0,0 +1,47 @@

# Workflow patches for skipping Google Cloud CD deployments, when Rust code or dependencies aren't
# modified in a PR.
name: Deploy Nodes to GCP

# Run on PRs with unmodified code and dependency files.
on:
  pull_request:
    paths-ignore:
      # code and tests
      - '**/*.rs'
      # hard-coded checkpoints and proptest regressions
      - '**/*.txt'
      # dependencies
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      # configuration files
      - '.cargo/config.toml'
      - '**/clippy.toml'
      # workflow definitions
      - 'docker/**'
      - '.dockerignore'
      - '.github/workflows/cd-deploy-nodes-gcp.yml'
      - '.github/workflows/sub-build-docker-image.yml'

# IMPORTANT
#
# The job names in `cd-deploy-nodes-gcp.yml`, `cd-deploy-nodes-gcp.patch.yml` and
# `cd-deploy-nodes-gcp.patch-external.yml` must be kept in sync.
jobs:
  # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable)
  build:
    name: Build CD Docker / Build images
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'

  test-configuration-file:
    name: Test CD default Docker config file / Test default-conf in Docker
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'

  test-zebra-conf-path:
    name: Test CD custom Docker config file / Test custom-conf in Docker
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'
@@ -0,0 +1,379 @@

# Google Cloud node deployments and tests that run when Rust code or dependencies are modified,
# but only on PRs from the ZcashFoundation/zebra repository.
# (External PRs are tested/deployed by mergify.)
#
# 1. `versioning`: Extracts the major version from the release semver. Useful for segregating instances based on major versions.
# 2. `build`: Builds a Docker image named `zebrad` with the necessary tags derived from Git.
# 3. `test-configuration-file`: Validates Zebra using the default config with the latest version.
# 4. `test-configuration-file-testnet`: Tests the Docker image for the testnet configuration.
# 5. `test-zebra-conf-path`: Verifies Zebra with a custom Docker config file.
# 6. `deploy-nodes`: Deploys Managed Instance Groups (MiGs) for Mainnet and Testnet. If triggered by main branch pushes, it always replaces the MiG. For releases, MiGs are replaced only if deploying the same major version; otherwise, a new major version is deployed.
# 7. `deploy-instance`: Deploys a single node in a specified GCP zone for testing specific commits. Instances from this job aren't auto-replaced or deleted.
#
# The overall goal is to ensure that Zebra nodes are consistently deployed, tested, and managed on GCP.
name: Deploy Nodes to GCP

# Ensures that only one workflow task will run at a time. Previous deployments, if
# already in process, won't get cancelled. Instead, we let the first one complete,
# then queue the latest pending workflow, cancelling any workflows in between.
#
# Since the different event types each use a different Managed Instance Group or instance,
# we can run different event types concurrently.
#
# For pull requests, we only run the tests from this workflow, and don't do any deployments.
# So an in-progress pull request gets cancelled, just like other tests.
concurrency:
  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.ref }}
  cancel-in-progress: ${{ github.event_name == 'pull_request' }}

on:
  workflow_dispatch:
    inputs:
      network:
        default: 'Mainnet'
        description: 'Network to deploy: Mainnet or Testnet'
        required: true
      log_file:
        default: ''
        description: 'Log to a file path rather than standard output'
      no_cache:
        description: 'Disable the Docker cache for this build'
        required: false
        type: boolean
        default: false

  # TODO: Temporarily disabled to reduce network load, see #6894.
  #push:
  #  # Skip main branch updates where Rust code and dependencies aren't modified.
  #  branches:
  #    - main
  #  paths:
  #    # code and tests
  #    - '**/*.rs'
  #    # hard-coded checkpoints and proptest regressions
  #    - '**/*.txt'
  #    # dependencies
  #    - '**/Cargo.toml'
  #    - '**/Cargo.lock'
  #    # configuration files
  #    - '.cargo/config.toml'
  #    - '**/clippy.toml'
  #    # workflow definitions
  #    - 'docker/**'
  #    - '.dockerignore'
  #    - '.github/workflows/cd-deploy-nodes-gcp.yml'
  #    - '.github/workflows/sub-build-docker-image.yml'

  # Only runs the Docker image tests, doesn't deploy any instances
  pull_request:
    # Skip PRs where Rust code and dependencies aren't modified.
    paths:
      # code and tests
      - '**/*.rs'
      # hard-coded checkpoints and proptest regressions
      - '**/*.txt'
      # dependencies
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      # configuration files
      - '.cargo/config.toml'
      - '**/clippy.toml'
      # workflow definitions
      - 'docker/**'
      - '.dockerignore'
      - '.github/workflows/cd-deploy-nodes-gcp.yml'
      - '.github/workflows/sub-build-docker-image.yml'

  release:
    types:
      - published

# IMPORTANT
#
# The job names in `cd-deploy-nodes-gcp.yml`, `cd-deploy-nodes-gcp.patch.yml` and
# `cd-deploy-nodes-gcp.patch-external.yml` must be kept in sync.
jobs:
  # If a release was made, we want to extract the first part of the semver from the
  # tag_name.
  #
  # Generates the following output to pass to subsequent jobs:
  # - If our semver is `v1.3.0`, the resulting output from this job would be `v1`
  #
  # Note: We just use the first part of the version to replace old instances, and change
  # it when a major version is released, to keep new and old major versions segregated.
  versioning:
    name: Versioning
    runs-on: ubuntu-latest
    outputs:
      major_version: ${{ steps.set.outputs.major_version }}
    if: ${{ github.event_name == 'release' }}
    steps:
      - name: Getting Zebrad Version
        id: get
        uses: actions/github-script@v7.0.1
        with:
          result-encoding: string
          script: |
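            // Note: substring(0,2) keeps 'v' plus a single character, so this
            // assumes single-digit major versions (e.g. 'v1.3.0' -> 'v1').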
            return context.payload.release.tag_name.substring(0,2)
      - name: Setting API Version
        id: set
        run: echo "major_version=${{ steps.get.outputs.result }}" >> "$GITHUB_OUTPUT"

  # Each time this workflow is executed, a build will be triggered to create a new image
  # with the corresponding tags using information from Git
  #
  # The image will be commonly named `zebrad:<short-hash | github-ref | semver>`
  build:
    name: Build CD Docker
    # Skip PRs from external repositories, let them pass, and then Mergify will check them.
    # This workflow also runs on release tags; the event name check will run it on releases.
    if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }}
    uses: ./.github/workflows/sub-build-docker-image.yml
    with:
      dockerfile_path: ./docker/Dockerfile
      dockerfile_target: runtime
      image_name: zebrad
      no_cache: ${{ inputs.no_cache || false }}
      rust_log: info

  # Test that Zebra works using the default config with the latest Zebra version.
  test-configuration-file:
    name: Test CD default Docker config file
    needs: build
    uses: ./.github/workflows/sub-test-zebra-config.yml
    with:
      test_id: 'default-conf'
      docker_image: ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }}
      grep_patterns: '-e "net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter"'
      test_variables: '-e NETWORK'
      network: 'Mainnet'

  # Test reconfiguring the docker image for testnet.
  test-configuration-file-testnet:
    name: Test CD testnet Docker config file
    needs: build
    # Make sure Zebra can sync the genesis block on testnet
    uses: ./.github/workflows/sub-test-zebra-config.yml
    with:
      test_id: 'testnet-conf'
      docker_image: ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }}
      grep_patterns: '-e "net.*=.*Test.*estimated progress to chain tip.*Genesis" -e "net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter"'
      test_variables: '-e NETWORK'
      network: 'Testnet'

  # Test that Zebra works using $ZEBRA_CONF_PATH config
  test-zebra-conf-path:
    name: Test CD custom Docker config file
    needs: build
    uses: ./.github/workflows/sub-test-zebra-config.yml
    with:
      test_id: 'custom-conf'
      docker_image: ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }}
      grep_patterns: '-e "loaded zebrad config.*config_path.*=.*v1.0.0-rc.2.toml"'
      test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v1.0.0-rc.2.toml"'
      network: ${{ inputs.network || vars.ZCASH_NETWORK }}

  # Deploy Managed Instance Groups (MiGs) for Mainnet and Testnet,
  # with one node in the configured GCP region.
  #
  # Separate Mainnet and Testnet MiGs are deployed whenever there are:
  # - pushes to the main branch, or
  # - version releases of Zebra.
  #
  # Once this workflow is triggered:
  # - by pushes to main: the MiG is always replaced,
  # - by releases: the MiG is only replaced if the same major version is being deployed,
  #   otherwise a new major version is deployed in a new MiG.
  #
  # Runs:
  # - on every push to the `main` branch
  # - on every release, when it's published
  deploy-nodes:
    strategy:
      matrix:
        network: [Mainnet, Testnet]
    name: Deploy ${{ matrix.network }} nodes
    needs: [ build, versioning, test-configuration-file, test-zebra-conf-path ]
    runs-on: ubuntu-latest
    timeout-minutes: 60
    permissions:
      contents: 'read'
      id-token: 'write'
    if: ${{ !cancelled() && !failure() && ((github.event_name == 'push' && github.ref_name == 'main') || github.event_name == 'release') }}

    steps:
      - uses: actions/checkout@v4.1.2
        with:
          persist-credentials: false

      - name: Inject slug/short variables
        uses: rlespinasse/github-slug-action@v4
        with:
          short-length: 7

      # Makes the Zcash network name lowercase.
      #
      # Labels in GCP are required to be in lowercase, but the blockchain network
      # uses sentence case, so we need to downcase the network.
      #
      # Passes the lowercase network to subsequent steps using the $NETWORK env variable.
      - name: Downcase network name for labels
        run: |
          NETWORK_CAPS="${{ matrix.network }}"
|
||||
echo "NETWORK=${NETWORK_CAPS,,}" >> "$GITHUB_ENV"
|
||||
|
||||
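      # A quick illustration of the Bash `${var,,}` lowercasing expansion used
      # above (hypothetical value): with NETWORK_CAPS="Mainnet",
      # "${NETWORK_CAPS,,}" expands to "mainnet", which is what GCP labels need.
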
      # Setup gcloud CLI
      - name: Authenticate to Google Cloud
        id: auth
        uses: google-github-actions/auth@v2.1.2
        with:
          retries: '3'
          workload_identity_provider: '${{ vars.GCP_WIF }}'
          service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}'

      - name: Set up Cloud SDK
        uses: google-github-actions/setup-gcloud@v2.1.0

      # TODO: we should implement the fixes from https://github.com/ZcashFoundation/zebra/pull/5670 here,
      # but the implementation fails because it requires the disk names, contrary to what the official documentation states.
      - name: Create instance template for ${{ matrix.network }}
        run: |
          gcloud compute instance-templates create-with-container zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK} \
          --boot-disk-size 300GB \
          --boot-disk-type=pd-ssd \
          --image-project=cos-cloud \
          --image-family=cos-stable \
          --user-output-enabled \
          --metadata google-logging-enabled=true,google-logging-use-fluentbit=true,google-monitoring-enabled=true \
          --container-stdin \
          --container-tty \
          --container-image ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} \
          --container-env "NETWORK=${{ matrix.network }},LOG_FILE=${{ vars.CD_LOG_FILE }},LOG_COLOR=false,SENTRY_DSN=${{ vars.SENTRY_DSN }}" \
          --create-disk=name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},device-name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},auto-delete=yes,size=300GB,type=pd-ssd,mode=rw \
          --container-mount-disk=mount-path='/var/cache/zebrad-cache',name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},mode=rw \
          --machine-type ${{ vars.GCP_SMALL_MACHINE }} \
          --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \
          --service-account ${{ vars.GCP_DEPLOYMENTS_SA }} \
          --scopes cloud-platform \
          --labels=app=zebrad,environment=prod,network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }} \
          --tags zebrad

      # Check if our destination instance group exists already
      - name: Check if ${{ matrix.network }} instance group exists
        id: does-group-exist
        continue-on-error: true
        run: |
          gcloud compute instance-groups list | grep "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${NETWORK}" | grep "${{ vars.GCP_REGION }}"

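      # How the check above works: `grep` exits non-zero when nothing matches,
      # so a missing group makes this step's outcome 'failure' (tolerated by
      # `continue-on-error: true`), and the create/update steps below branch
      # on that outcome.
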
      # Deploy new managed instance group using the new instance template
      - name: Create managed instance group for ${{ matrix.network }}
        if: steps.does-group-exist.outcome == 'failure'
        run: |
          gcloud compute instance-groups managed create \
          "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${NETWORK}" \
          --template "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" \
          --health-check zebrad-tracing-filter \
          --initial-delay 30 \
          --region "${{ vars.GCP_REGION }}" \
          --size 1

      # Rolls out an update to the existing group using the new instance template
      - name: Update managed instance group for ${{ matrix.network }}
        if: steps.does-group-exist.outcome == 'success'
        run: |
          gcloud compute instance-groups managed rolling-action start-update \
          "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${NETWORK}" \
          --version template="zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" \
          --region "${{ vars.GCP_REGION }}"

  # This job handles the deployment of a single node in the configured GCP zone,
  # when an instance is required to test a specific commit.
  #
  # Runs:
  # - on request, using workflow_dispatch with regenerate-disks
  #
  # Note: these instances are not automatically replaced or deleted
  deploy-instance:
    name: Deploy single ${{ inputs.network }} instance
    needs: [ build, test-configuration-file, test-zebra-conf-path ]
    runs-on: ubuntu-latest
    timeout-minutes: 30
    permissions:
      contents: 'read'
      id-token: 'write'
    if: github.event_name == 'workflow_dispatch'

    steps:
      - uses: actions/checkout@v4.1.2
        with:
          persist-credentials: false

      - name: Inject slug/short variables
        uses: rlespinasse/github-slug-action@v4
        with:
          short-length: 7

      # Makes the Zcash network name lowercase.
      #
      # Labels in GCP are required to be in lowercase, but the blockchain network
      # uses sentence case, so we need to downcase the network.
      #
      # Passes the lowercase network to subsequent steps using the $NETWORK env variable.
      - name: Downcase network name for labels
        run: |
          NETWORK_CAPS="${{ inputs.network }}"
          echo "NETWORK=${NETWORK_CAPS,,}" >> "$GITHUB_ENV"

      # Setup gcloud CLI
      - name: Authenticate to Google Cloud
        id: auth
        uses: google-github-actions/auth@v2.1.2
        with:
          retries: '3'
          workload_identity_provider: '${{ vars.GCP_WIF }}'
          service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}'

      - name: Set up Cloud SDK
        uses: google-github-actions/setup-gcloud@v2.1.0

      # Create instance template from container image
      - name: Manual deploy of a single ${{ inputs.network }} instance running zebrad
        run: |
          gcloud compute instances create-with-container "zebrad-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" \
          --boot-disk-size 300GB \
          --boot-disk-type=pd-ssd \
          --image-project=cos-cloud \
          --image-family=cos-stable \
          --user-output-enabled \
          --metadata google-logging-enabled=true,google-logging-use-fluentbit=true,google-monitoring-enabled=true \
          --container-stdin \
          --container-tty \
          --container-image ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} \
          --container-env "NETWORK=${{ inputs.network }},LOG_FILE=${{ inputs.log_file }},LOG_COLOR=false,SENTRY_DSN=${{ vars.SENTRY_DSN }}" \
          --create-disk=name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},device-name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},auto-delete=yes,size=300GB,type=pd-ssd,mode=rw \
          --container-mount-disk=mount-path='/var/cache/zebrad-cache',name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},mode=rw \
          --machine-type ${{ vars.GCP_SMALL_MACHINE }} \
          --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \
          --service-account ${{ vars.GCP_DEPLOYMENTS_SA }} \
          --labels=app=zebrad,environment=qa,network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }} \
          --tags zebrad \
          --zone ${{ vars.GCP_ZONE }}

  failure-issue:
    name: Open or update issues for release failures
    # When a new job is added to this workflow, add it to this list.
    needs: [ versioning, build, deploy-nodes, deploy-instance ]
    # Only open tickets for failed or cancelled jobs that are not coming from PRs.
    # (PR statuses are already reported in the PR jobs list, and checked by Mergify.)
    if: (failure() && github.event.pull_request == null) || (cancelled() && github.event.pull_request == null)
    runs-on: ubuntu-latest
    steps:
      - uses: jayqi/failed-build-issue-action@v1
        with:
          title-template: "{{refname}} branch CI failed: {{eventName}} in {{workflow}}"
          # New failures open an issue with this label.
          label-name: S-ci-fail-release-auto-issue
          # If there is already an open issue with this label, any failures become comments on that issue.
          always-create-new-issue: false
          github-token: ${{ secrets.GITHUB_TOKEN }}
@ -0,0 +1,140 @@
# This workflow is designed to delete old Google Cloud Platform (GCP) resources to save on costs.
#
# 1. Deletes specific instances in GCP older than a defined number of days.
# 2. Deletes instance templates older than a set number of days.
# 3. Deletes older disks not currently in use, with certain ones prefixed by commit hashes or "zebrad-".
# 4. Deletes cache images from GCP, retaining a specified number of the latest images for certain types like zebrad checkpoint cache, zebrad tip cache, and lightwalletd + zebrad tip cache.
# 5. Deletes unused artifacts from Google Artifact Registry older than a defined number of hours while retaining the latest few.
#
# It uses the gcloud CLI for most of its operations and also leverages specific GitHub Actions like the gcr-cleaner for deleting old images from the Google Artifact Registry.
# The workflow is scheduled to run daily at 0700 UTC.
name: Delete GCP resources

on:
  # Run daily, when most devs aren't working.
  # 0700 UTC is after AEST working hours but before ET working hours.
  schedule:
    - cron: "0 7 * * *"
  workflow_dispatch:

env:
  # Delete all resources created before $DELETE_INSTANCE_DAYS days ago.
  # We keep this short to reduce CPU, RAM, and storage costs.
  DELETE_INSTANCE_DAYS: 3
  # Delete all other resources created before $DELETE_AGE_DAYS days ago.
  # We keep this short to reduce storage costs.
  DELETE_AGE_DAYS: 2
  # But keep the latest $KEEP_LATEST_IMAGE_COUNT images of each type.
  # We keep this small to reduce storage costs.
  KEEP_LATEST_IMAGE_COUNT: 2
  # Delete all artifacts in the registry created before $DELETE_IMAGE_HOURS hours ago.
  # We keep this long enough that PRs still on the same commit can re-run with the same image.
  DELETE_IMAGE_HOURS: 504h # 21 days

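# The `h` suffix matters: DELETE_IMAGE_HOURS is passed directly to the
# gcr-cleaner `-grace` flag below, which takes a duration, so `504h` means
# "keep anything pushed within the last 21 days".
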
jobs:
  delete-resources:
    name: Delete old GCP resources
    runs-on: ubuntu-latest
    permissions:
      contents: 'read'
      id-token: 'write'
    steps:
      - uses: actions/checkout@v4.1.2
        with:
          persist-credentials: false

      # Setup gcloud CLI
      - name: Authenticate to Google Cloud
        id: auth
        uses: google-github-actions/auth@v2.1.2
        with:
          retries: '3'
          workload_identity_provider: '${{ vars.GCP_WIF }}'
          service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}'

      - name: Set up Cloud SDK
        uses: google-github-actions/setup-gcloud@v2.1.0

      # Deletes all mainnet and testnet instances older than $DELETE_INSTANCE_DAYS days.
      #
      # We only delete instances that end in 7 or more hex characters,
      # to avoid deleting managed instance groups and manually created instances.
      #
      # ${INSTANCE_AND_ZONE} expands to:
      #   <instance-name> --zone=<zone-name>
      # so it can't be shell-quoted.
      - name: Delete old instances
        run: |
          ./.github/workflows/scripts/gcp-delete-old-instances.sh
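      # A rough sketch of the kind of query a cleanup script like this can use
      # (hypothetical; the real logic lives in the script above):
      #   CUTOFF_DATE=$(date -d "-$DELETE_INSTANCE_DAYS days" +%Y-%m-%d)
      #   gcloud compute instances list \
      #     --filter="name ~ '^zebrad-' AND creationTimestamp < '$CUTOFF_DATE'" \
      #     --format='value(name,zone)'
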
      # Deletes all the instance templates older than $DELETE_AGE_DAYS days.
      - name: Delete old instance templates
        run: |
          ./.github/workflows/scripts/gcp-delete-old-templates.sh

      # Deletes all mainnet and testnet disks older than $DELETE_AGE_DAYS days.
      #
      # Disks that are attached to an instance template can't be deleted, so it is safe to try to delete all disks here.
      #
      # ${DISK_AND_LOCATION} expands to:
      #   <disk-name> --[zone|region]=<location-name>
      # so it can't be shell-quoted.
      - name: Delete old disks
        run: |
          ./.github/workflows/scripts/gcp-delete-old-disks.sh

      # Deletes mainnet and testnet cache images older than $DELETE_AGE_DAYS days.
      #
      # Keeps all images younger than $DELETE_AGE_DAYS.
      # Also keeps $KEEP_LATEST_IMAGE_COUNT older images of each type, for each network:
      # - zebrad checkpoint cache
      # - zebrad tip cache
      # - lightwalletd + zebrad tip cache
      #
      # TODO:
      # - refactor out repeated shell script code
      - name: Delete old cache images
        run: |
          ./.github/workflows/scripts/gcp-delete-old-cache-images.sh

  # We're using a generic approach here, which allows multiple registries to be included,
  # even those not related to GCP, which is reason enough to create a separate job.
  #
  # The same artifacts are used for both mainnet and testnet.
  clean-registries:
    name: Delete unused artifacts in registry
    runs-on: ubuntu-latest
    permissions:
      contents: 'read'
      id-token: 'write'
    steps:
      - uses: actions/checkout@v4.1.2
        with:
          persist-credentials: false

      # Setup gcloud CLI
      - name: Authenticate to Google Cloud
        id: auth
        uses: google-github-actions/auth@v2.1.2
        with:
          retries: '3'
          workload_identity_provider: '${{ vars.GCP_WIF }}'
          service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}'
          token_format: 'access_token'

      - name: Login to Google Artifact Registry
        uses: docker/login-action@v3.1.0
        with:
          registry: us-docker.pkg.dev
          username: oauth2accesstoken
          password: ${{ steps.auth.outputs.access_token }}

      # Deletes all images older than $DELETE_IMAGE_HOURS.
      - uses: 'docker://us-docker.pkg.dev/gcr-cleaner/gcr-cleaner/gcr-cleaner-cli'
        continue-on-error: true # TODO: remove after fixing https://github.com/ZcashFoundation/zebra/issues/5933
        # Refer to the official documentation to understand available arguments:
        # https://github.com/GoogleCloudPlatform/gcr-cleaner
        with:
          args: >-
            -repo=us-docker.pkg.dev/${{ vars.GCP_PROJECT }}/zebra/zebrad-test
            -grace=${{ env.DELETE_IMAGE_HOURS }}
            -keep=${{ env.KEEP_LATEST_IMAGE_COUNT }}
@ -0,0 +1,71 @@
name: Build crates individually

# We need to keep the `matrix` job in this workflow as-is, as we need the results
# to actually match the same `build` job names from the original file.
on:
  pull_request:
    paths-ignore:
      # production code and test code
      - '**/*.rs'
      # dependencies
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      # configuration files
      - '.cargo/config.toml'
      - '**/clippy.toml'
      # workflow definitions
      - '.github/workflows/ci-build-crates.yml'

jobs:
  matrix:
    name: Generate crates matrix
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
    steps:
      - uses: actions/checkout@v4.1.2

      # Setup Rust with stable toolchain and minimal profile
      - name: Setup Rust
        run: |
          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal

      # This step is meant to dynamically create a JSON containing the values of each crate
      # available in this repo in the root directory. We use `cargo tree` to accomplish this task.
      #
      # The result from `cargo tree` is then transformed into JSON values between double quotes,
      # separated by commas, then added to a `crates.txt` and assigned to a $JSON_CRATES variable.
      #
      # A JSON object is created and assigned to a $MATRIX variable, which is used to create an output
      # named `matrix`, which is then used as the input in following steps,
      # using `${{ fromJson(needs.matrix.outputs.matrix) }}`
      - id: set-matrix
        name: Dynamically build crates JSON
        run: |
          TEMP_DIR=$(mktemp -d)
          echo "$(cargo tree --depth 0 --edges no-normal,no-dev,no-build,no-proc-macro --prefix none | cut -d ' ' -f1 | sed '/^$/d' | awk '{ printf "\"%s\",\n", $0 }' | sed '$ s/.$//')" > $TEMP_DIR/crates.txt
          MATRIX=$( (
            echo '{ "crate" : ['
            echo "$(cat $TEMP_DIR/crates.txt)"
            echo " ]}"
          ) | jq -c .)
          echo $MATRIX
          echo $MATRIX | jq .
          echo "matrix=$MATRIX" >> "$GITHUB_OUTPUT"

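      # For illustration, with two example crate names: if `cargo tree` prints
      # `tower-batch-control` and `zebrad`, crates.txt contains:
      #   "tower-batch-control",
      #   "zebrad"
      # and $MATRIX becomes {"crate":["tower-batch-control","zebrad"]}.
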
  check-matrix:
    name: Check crates matrix
    runs-on: ubuntu-latest
    needs: [ matrix ]
    steps:
      - run: 'echo "No job required"'

  build:
    name: Build ${{ matrix.crate }} crate
    needs: [ matrix, check-matrix ]
    runs-on: ubuntu-latest
    strategy:
      matrix: ${{ fromJson(needs.matrix.outputs.matrix) }}

    steps:
      - run: 'echo "No job required"'
@ -0,0 +1,183 @@
# This workflow facilitates the individual building of Rust crates present in the repository.
# 1. A matrix is generated dynamically to identify each crate in the repository.
# 2. This matrix is checked for validity.
# 3. Each identified crate undergoes three build processes:
#    - With no features.
#    - With the default features.
#    - With all the features enabled.
# 4. In case of build failures outside of pull requests, an issue is either opened or updated
#    in the repository to report the failure.
# Throughout the workflow, various setup steps ensure the correct environment and tools are present.
name: Build crates individually

# Ensures that only one workflow task will run at a time. Previous builds, if
# already in process, will get cancelled. Only the latest commit will be allowed
# to run, cancelling any workflows in between.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

on:
  workflow_dispatch:
  push:
    branches:
      - main
    paths:
      # production code and test code
      - '**/*.rs'
      # dependencies
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      # configuration files
      - '.cargo/config.toml'
      - '**/clippy.toml'
      # workflow definitions
      - '.github/workflows/ci-build-crates.yml'
  pull_request:
    paths:
      # production code and test code
      - '**/*.rs'
      # dependencies
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      # configuration files
      - '.cargo/config.toml'
      - '**/clippy.toml'
      # workflow definitions
      - '.github/workflows/ci-build-crates.yml'

env:
  CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }}
  RUST_LOG: ${{ vars.RUST_LOG }}
  RUST_BACKTRACE: ${{ vars.RUST_BACKTRACE }}
  RUST_LIB_BACKTRACE: ${{ vars.RUST_LIB_BACKTRACE }}
  COLORBT_SHOW_HIDDEN: ${{ vars.COLORBT_SHOW_HIDDEN }}

jobs:
  matrix:
    name: Generate crates matrix
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
    steps:
      - uses: actions/checkout@v4.1.2
      - uses: r7kamura/rust-problem-matchers@v1.4.0

      # Setup Rust with stable toolchain and minimal profile
      - name: Setup Rust
        run: |
          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal

      # This step is meant to dynamically create a JSON containing the values of each crate
      # available in this repo in the root directory. We use `cargo tree` to accomplish this task.
      #
      # The result from `cargo tree` is then sorted so the longest job (zebrad) runs first,
      # transformed to JSON values between double quotes, and separated by commas,
      # then added to a `crates.txt`.
      #
      # A JSON object is created and assigned to a $MATRIX variable, which is used to create an
      # output named `matrix`, which is then used as the input in following steps,
      # using `${{ fromJson(needs.matrix.outputs.matrix) }}`
      - id: set-matrix
        name: Dynamically build crates JSON
        run: |
          TEMP_DIR=$(mktemp -d)
          cargo tree --depth 0 --edges no-normal,no-dev,no-build,no-proc-macro --prefix none | cut -d ' ' -f1 | sed '/^$/d' | LC_ALL=C sort --reverse | awk '{ printf "\"%s\",\n", $0 }' | sed '$ s/.$//' > $TEMP_DIR/crates.txt
          MATRIX=$( (
            echo '{ "crate" : ['
            echo "$(cat $TEMP_DIR/crates.txt)"
            echo " ]}"
          ) | jq -c .)
          echo $MATRIX
          echo $MATRIX | jq .
          echo "matrix=$MATRIX" >> "$GITHUB_OUTPUT"

  check-matrix:
    name: Check crates matrix
    runs-on: ubuntu-latest
    needs: [ matrix ]
    steps:
      - name: Install json2yaml
        run: |
          sudo npm install -g json2yaml

      - name: Check matrix definition
        run: |
          matrix='${{ needs.matrix.outputs.matrix }}'
          echo $matrix
          echo $matrix | jq .
          echo $matrix | json2yaml

  build:
    name: Build ${{ matrix.crate }} crate
    timeout-minutes: 90
    needs: [ matrix, check-matrix ]
    # Some of these builds take more than 14 GB of disk space.
    runs-on: ubuntu-latest-m
    strategy:
      # avoid rate-limit errors by only launching a few of these jobs at a time,
      # but still finish in a similar time to the longest tests
      max-parallel: 4
      fail-fast: true
      matrix: ${{ fromJson(needs.matrix.outputs.matrix) }}

    steps:
      - uses: actions/checkout@v4.1.2
        with:
          persist-credentials: false
      - uses: r7kamura/rust-problem-matchers@v1.4.0

      - name: Install Protoc
        uses: arduino/setup-protoc@v3.0.0
        with:
          # TODO: increase to the latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed
          version: '23.x'
          repo-token: ${{ secrets.GITHUB_TOKEN }}

      # Setup Rust with stable toolchain and minimal profile
      - name: Setup Rust
        run: |
          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal

      # We could use `features: ['', '--all-features', '--no-default-features']` as a matrix argument,
      # but it's faster to run these commands sequentially, so they can re-use the local cargo cache.
      #
      # Some Zebra crates do not have any features, and most don't have any default features.
      # Some targets activate features, but we still need to be able to build without them.
      - name: Build ${{ matrix.crate }} crate with default features
        run: |
          cargo clippy --package ${{ matrix.crate }} -- -D warnings
          cargo build --package ${{ matrix.crate }}

      - name: Build ${{ matrix.crate }} crate with no default features and all targets
        run: |
          cargo clippy --package ${{ matrix.crate }} --no-default-features --all-targets -- -D warnings
          cargo build --package ${{ matrix.crate }} --no-default-features --all-targets

      - name: Build ${{ matrix.crate }} crate with default features and all targets
        run: |
          cargo clippy --package ${{ matrix.crate }} --all-targets -- -D warnings
          cargo build --package ${{ matrix.crate }} --all-targets

      - name: Build ${{ matrix.crate }} crate with all features and all targets
        run: |
          cargo clippy --package ${{ matrix.crate }} --all-features --all-targets -- -D warnings
          cargo build --package ${{ matrix.crate }} --all-features --all-targets

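      # To reproduce a single matrix entry locally, substitute a crate name for
      # the matrix variable (zebrad is just an example):
      #   cargo clippy --package zebrad --all-features --all-targets -- -D warnings
      #   cargo build --package zebrad --all-features --all-targets
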
  failure-issue:
    name: Open or update issues for individual crate build failures
    # When a new job is added to this workflow, add it to this list.
    needs: [ matrix, build ]
    # Only open tickets for failed or cancelled jobs that are not coming from PRs.
    # (PR statuses are already reported in the PR jobs list, and checked by Mergify.)
    if: (failure() && github.event.pull_request == null) || (cancelled() && github.event.pull_request == null)
    runs-on: ubuntu-latest
    steps:
      - uses: jayqi/failed-build-issue-action@v1
        with:
          title-template: "{{refname}} branch CI failed: {{eventName}} in {{workflow}}"
          # New failures open an issue with this label.
          label-name: S-ci-fail-build-crates-auto-issue
          # If there is already an open issue with this label, any failures become comments on that issue.
          always-create-new-issue: false
          github-token: ${{ secrets.GITHUB_TOKEN }}
@ -0,0 +1,23 @@
name: Coverage

on:
  pull_request:
    paths-ignore:
      - '**/*.rs'
      - '**/*.txt'
      - '**/*.snap'
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      # configuration files
      - '.cargo/config.toml'
      - '**/clippy.toml'
      - 'codecov.yml'
      - '.github/workflows/ci-coverage.yml'

jobs:
  coverage:
    name: Coverage on stable
    runs-on: ubuntu-latest

    steps:
      - run: 'echo "No build required"'
@ -0,0 +1,106 @@
# This workflow calculates the test coverage for the Rust codebase.
# 1. The code is checked out.
# 2. Rust with the stable toolchain, minimal profile, and llvm-tools-preview component is set up.
# 3. Necessary tools like 'cargo-llvm-cov' are installed.
# 4. Proptest is minimized for efficient coverage test runs.
# 5. Tests are run without producing a report to gather coverage information.
# 6. A coverage report (lcov format) is generated based on the gathered information.
# 7. Finally, this report is uploaded to Codecov for visualization and analysis.
name: Coverage

# Ensures that only one workflow task will run at a time. Previous builds, if
# already in process, will get cancelled. Only the latest commit will be allowed
# to run, cancelling any workflows in between.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

on:
  workflow_dispatch:

  # we build Rust caches on main,
  # so they can be shared by all branches:
  # https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache
  push:
    branches:
      - main
    paths:
      # code and tests
      - '**/*.rs'
      # hard-coded checkpoints and proptest regressions
      - '**/*.txt'
      # test data snapshots
      - '**/*.snap'
      # dependencies
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      # configuration files
      - '.cargo/config.toml'
      - '**/clippy.toml'
      # workflow definitions
      - 'codecov.yml'
      - '.github/workflows/ci-coverage.yml'

  pull_request:
    paths:
      - '**/*.rs'
      - '**/*.txt'
      - '**/*.snap'
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      - '.cargo/config.toml'
      - '**/clippy.toml'
      - 'codecov.yml'
      - '.github/workflows/ci-coverage.yml'

env:
  CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }}
  RUST_LOG: ${{ vars.RUST_LOG }}
  RUST_BACKTRACE: ${{ vars.RUST_BACKTRACE }}
  RUST_LIB_BACKTRACE: ${{ vars.RUST_LIB_BACKTRACE }}
  COLORBT_SHOW_HIDDEN: ${{ vars.COLORBT_SHOW_HIDDEN }}

jobs:
  coverage:
    name: Coverage on stable
    # The large timeout is to accommodate:
    # - stable builds (typically 50-90 minutes)
    timeout-minutes: 120
    runs-on: ubuntu-latest-xl

    steps:
      - uses: actions/checkout@v4.1.2
        with:
          persist-credentials: false

      # Setup Rust with stable toolchain and minimal profile
      - name: Setup Rust
        run: |
          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal --component=llvm-tools-preview

      - name: Install cargo-llvm-cov cargo command
        run: cargo install cargo-llvm-cov

      - name: Skip network tests on Ubuntu
        # Ubuntu runners don't have reliable network or DNS during test steps.
        shell: bash
        run: echo "ZEBRA_SKIP_NETWORK_TESTS=1" >> $GITHUB_ENV

      - name: Minimise proptest cases in Coverage tests
        # We set cases to 1, because some tests already run 1 case by default.
        # We set maximum shrink iterations to 0, because we don't expect failures in these tests.
        #
        # Coverage tests are much slower than other tests, particularly in hot loops.
        shell: bash
        run: |
          echo "PROPTEST_CASES=1" >> $GITHUB_ENV
          echo "PROPTEST_MAX_SHRINK_ITERS=0" >> $GITHUB_ENV

      - name: Run Zebra tests
        run: cargo llvm-cov --lcov --no-report

      - name: Generate coverage report
        run: cargo llvm-cov --lcov --no-run --output-path lcov.info

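      # The same two-phase pattern works locally: the first command runs the
      # tests and keeps the raw coverage profiles, the second merges them into
      # an lcov report without re-running anything:
      #   cargo llvm-cov --lcov --no-report
      #   cargo llvm-cov --lcov --no-run --output-path lcov.info
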
      - name: Upload coverage report to Codecov
        uses: codecov/codecov-action@v4.3.0
@ -0,0 +1,115 @@
# Workflow patches for skipping Google Cloud integration test CI on PRs from external repositories.
name: Integration Tests on GCP

# Run on PRs from external repositories, let them pass, and then Mergify will check them.
# GitHub doesn't support filtering workflows by source branch names, so we have to do it for each
# job.
on:
  pull_request:

# IMPORTANT
#
# The job names in `ci-integration-tests-gcp.yml`, `ci-integration-tests-gcp.patch.yml` and
# `ci-integration-tests-gcp.patch-external.yml` must be kept in sync.
jobs:
  # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable)
  get-available-disks:
    name: Check if cached state disks exist for Mainnet / Check if cached state disks exist
    # Only run on PRs from external repositories.
    if: ${{ startsWith(github.event_name, 'pull') && github.event.pull_request.head.repo.fork }}
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'

  build:
    name: Build CI Docker / Build images
    # This dependency allows all these jobs to depend on a single condition, making it easier to
    # change.
    needs: get-available-disks
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'

  test-stateful-sync:
    name: Zebra checkpoint update / Run sync-past-checkpoint test
    needs: get-available-disks
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'

  test-update-sync:
    name: Zebra tip update / Run update-to-tip test
    needs: get-available-disks
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'

  checkpoints-mainnet:
    name: Generate checkpoints mainnet / Run checkpoints-mainnet test
    needs: get-available-disks
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'

  lightwalletd-rpc-test:
    name: Zebra tip JSON-RPC / Run fully-synced-rpc test
    needs: get-available-disks
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'

  lightwalletd-transactions-test:
    name: lightwalletd tip send / Run lwd-send-transactions test
    needs: get-available-disks
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'

  get-block-template-test:
    name: get block template / Run get-block-template test
    needs: get-available-disks
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'

  submit-block-test:
    name: submit block / Run submit-block test
    needs: get-available-disks
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'

  scan-start-where-left-test:
    name: Scan starts where left / Run scan-start-where-left test
    needs: get-available-disks
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'

  scan-task-commands:
    name: scan task commands / Run scan-task-commands test
    needs: get-available-disks
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'

  lightwalletd-full-sync:
    name: lightwalletd tip / Run lwd-full-sync test
    needs: get-available-disks
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'

  lightwalletd-update-sync:
    name: lightwalletd tip update / Run lwd-update-sync test
    needs: get-available-disks
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'

  lightwalletd-grpc-test:
    name: lightwalletd GRPC tests / Run lwd-grpc-wallet test
    needs: get-available-disks
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'
@ -0,0 +1,105 @@
# Workflow patches for skipping Google Cloud integration test CI when Rust code or dependencies
# aren't modified in a PR.
name: Integration Tests on GCP

# Run on PRs with unmodified code and dependency files.
on:
  pull_request:
    paths-ignore:
      # code and tests
      - '**/*.rs'
      # hard-coded checkpoints and proptest regressions
      - '**/*.txt'
      # test data snapshots
      - '**/*.snap'
      # dependencies
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      # configuration files
      - '.cargo/config.toml'
      - '**/clippy.toml'
      # workflow definitions
      - 'docker/**'
      - '.dockerignore'
      - '.github/workflows/ci-unit-tests-docker.yml'
      - '.github/workflows/sub-deploy-integration-tests-gcp.yml'
      - '.github/workflows/sub-find-cached-disks.yml'
      - '.github/workflows/sub-build-docker-image.yml'

# IMPORTANT
#
# The job names in `ci-integration-tests-gcp.yml`, `ci-integration-tests-gcp.patch.yml` and
# `ci-integration-tests-gcp.patch-external.yml` must be kept in sync.
jobs:
  # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable)
  get-available-disks:
    name: Check if cached state disks exist for Mainnet / Check if cached state disks exist
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'

  build:
    name: Build CI Docker / Build images
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'

  test-stateful-sync:
    name: Zebra checkpoint update / Run sync-past-checkpoint test
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'

  test-update-sync:
    name: Zebra tip update / Run update-to-tip test
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'

  checkpoints-mainnet:
    name: Generate checkpoints mainnet / Run checkpoints-mainnet test
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'

  lightwalletd-rpc-test:
    name: Zebra tip JSON-RPC / Run fully-synced-rpc test
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'

  lightwalletd-transactions-test:
    name: lightwalletd tip send / Run lwd-send-transactions test
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'

  get-block-template-test:
    name: get block template / Run get-block-template test
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'

  submit-block-test:
    name: submit block / Run submit-block test
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'

  lightwalletd-full-sync:
    name: lightwalletd tip / Run lwd-full-sync test
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'

  lightwalletd-update-sync:
    name: lightwalletd tip update / Run lwd-update-sync test
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'

  lightwalletd-grpc-test:
    name: lightwalletd GRPC tests / Run lwd-grpc-wallet test
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'
@ -0,0 +1,695 @@
# Google Cloud integration tests that run when Rust code or dependencies are modified,
# but only on PRs from the ZcashFoundation/zebra repository. (External PRs are tested by Mergify.)
#
# Specific conditions and dependencies are set for each job to ensure they are executed in the correct sequence and under the right circumstances.
# Each test has a description of the conditions under which it runs.
name: Integration Tests on GCP

# Ensures that only one workflow task will run at a time. Previous builds, if
# already in process, will get cancelled. Only the latest commit will be allowed
# to run, cancelling any workflows in between.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

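# For a PR from a branch called `my-branch` (hypothetical name), the group above
# resolves to "Integration Tests on GCP-my-branch", so a newer push to the same
# branch cancels the still-running workflow for the older push.
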
on:
  schedule:
    # Run this job every Friday at mid-day UTC
    # This is limited to the Zebra and lightwalletd Full Sync jobs
    # TODO: we should move this behavior to a separate workflow
    - cron: '0 12 * * 5'

  workflow_dispatch:
    inputs:
      network:
        default: 'Mainnet'
        description: 'Network to deploy: Mainnet or Testnet'
        required: true
      regenerate-disks:
        type: boolean
        default: false
        description: 'Just run a Zebra checkpoint sync and update checkpoint disks'
        required: true
      run-full-sync:
        type: boolean
        default: false
        description: 'Just run a Zebra full sync on `network`, and update tip disks'
        required: true
      run-lwd-sync:
        type: boolean
        default: false
        description: 'Just run a lightwalletd full sync and update tip disks'
        required: true
      force_save_to_disk:
        required: false
        type: boolean
        default: false
        description: 'Force tests to always create a cached state disk, if they already create disks'
      no_cache:
        description: 'Disable the Docker cache for this build'
        required: false
        type: boolean
        default: false

  pull_request:
    # Skip PRs where Rust code and dependencies aren't modified.
    paths:
      # code and tests
      - '**/*.rs'
      # hard-coded checkpoints and proptest regressions
      - '**/*.txt'
      # test data snapshots
      - '**/*.snap'
      # dependencies
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      # configuration files
      - '.cargo/config.toml'
      - '**/clippy.toml'
      # workflow definitions
      - 'docker/**'
      - '.github/workflows/ci-integration-tests-gcp.yml'
      - '.github/workflows/sub-deploy-integration-tests-gcp.yml'
      - '.github/workflows/sub-build-docker-image.yml'
      - '.github/workflows/sub-find-cached-disks.yml'

  push:
    # Skip main branch updates where Rust code and dependencies aren't modified.
    branches:
      - main
    paths:
      # code and tests
      - '**/*.rs'
      # hard-coded checkpoints and proptest regressions
      - '**/*.txt'
      # test data snapshots
      - '**/*.snap'
      # dependencies
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      # configuration files
      - '.cargo/config.toml'
      - '**/clippy.toml'
      # workflow definitions
      - 'docker/**'
      - '.dockerignore'
      - '.github/workflows/ci-integration-tests-gcp.yml'
      - '.github/workflows/sub-deploy-integration-tests-gcp.yml'
      - '.github/workflows/sub-find-cached-disks.yml'
      - '.github/workflows/sub-build-docker-image.yml'

# IMPORTANT
#
# The job names in `ci-integration-tests-gcp.yml`, `ci-integration-tests-gcp.patch.yml` and
# `ci-integration-tests-gcp.patch-external.yml` must be kept in sync.
jobs:
  # to also run a job on Mergify head branches,
  # add `|| (github.event_name == 'push' && startsWith(github.head_ref, 'mergify/merge-queue/'))`:
  # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#running-your-workflow-based-on-the-head-or-base-branch-of-a-pull-request-1

  # Check if the cached state disks used by the tests are available for the default network.
  #
  # The default network is mainnet unless a manually triggered workflow or repository variable
  # is configured differently.
  #
  # The outputs for this job have the same names as the workflow outputs in sub-find-cached-disks.yml
  get-available-disks:
    name: Check if cached state disks exist for ${{ inputs.network || vars.ZCASH_NETWORK }}
    # Skip PRs from external repositories, let them pass, and then Mergify will check them
    if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }}
    uses: ./.github/workflows/sub-find-cached-disks.yml
    with:
      network: ${{ inputs.network || vars.ZCASH_NETWORK }}

  # Check if the cached state disks used by the tests are available for testnet.
  #
  # The outputs for this job have the same names as the workflow outputs in sub-find-cached-disks.yml
  # Some outputs are ignored, because we don't run those jobs on testnet.
  get-available-disks-testnet:
    name: Check if cached state disks exist for testnet
    if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }}
    uses: ./.github/workflows/sub-find-cached-disks.yml
    with:
      network: 'Testnet'

  # Build the docker image used by the tests.
  #
  # The default network in the Zebra config in the image is mainnet, unless a manually triggered
  # workflow or repository variable is configured differently. Testnet jobs change that config to
  # testnet when running the image.
  build:
    name: Build CI Docker
    if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }}
    uses: ./.github/workflows/sub-build-docker-image.yml
    with:
      dockerfile_path: ./docker/Dockerfile
      dockerfile_target: tests
      image_name: ${{ vars.CI_IMAGE_NAME }}
      no_cache: ${{ inputs.no_cache || false }}
      rust_backtrace: full
      rust_lib_backtrace: full
      rust_log: info

  # zebrad cached checkpoint state tests

  # Regenerate mandatory checkpoint Zebra cached state disks.
  #
  # Runs:
  # - on every PR update, but only if there's no available disk matching the actual state version from constants.rs
  # - on request, using workflow_dispatch with regenerate-disks
  #
  # Note: the output from get-available-disks should match with the caller workflow inputs
  regenerate-stateful-disks:
    name: Zebra checkpoint
    needs: [ build, get-available-disks ]
    uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
    if: ${{ !fromJSON(needs.get-available-disks.outputs.zebra_checkpoint_disk) || github.event.inputs.regenerate-disks == 'true' }}
    with:
      app_name: zebrad
      test_id: sync-to-checkpoint
      test_description: Test sync up to mandatory checkpoint
      test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_DISK_REBUILD=1 -e ZEBRA_FORCE_USE_COLOR=1'
      needs_zebra_state: false
      saves_to_disk: true
      force_save_to_disk: ${{ inputs.force_save_to_disk || false }}
      disk_suffix: checkpoint
      height_grep_text: 'flushing database to disk .*height.*=.*Height.*\('
    secrets: inherit
    # We want to prevent multiple checkpoint syncs running at the same time,
    # but we don't want to cancel running syncs on `main` if a new PR gets merged,
    # because we might never get a finished sync.
    #
    # See the concurrency comment on the zebrad test-full-sync job for details.
    concurrency:
      group: ${{ github.workflow }}-manual-${{ format('{0}', github.event.inputs.regenerate-disks == 'true') }}-regenerate-stateful-disks
      cancel-in-progress: false

# Test that Zebra syncs and fully validates a few thousand blocks from a cached mandatory checkpoint disk
|
||||
#
|
||||
# If the state version has changed, waits for the new cached state to be created.
|
||||
# Otherwise, if the state rebuild was skipped, runs immediately after the build job.
|
||||
test-stateful-sync:
|
||||
name: Zebra checkpoint update
|
||||
needs: [ regenerate-stateful-disks, get-available-disks ]
|
||||
uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
|
||||
if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_checkpoint_disk) || needs.regenerate-stateful-disks.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
|
||||
with:
|
||||
app_name: zebrad
|
||||
test_id: sync-past-checkpoint
|
||||
test_description: Test full validation sync from a cached state
|
||||
test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_CHECKPOINT_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1'
|
||||
needs_zebra_state: true
|
||||
saves_to_disk: false
|
||||
disk_suffix: checkpoint
|
||||
secrets: inherit
|
||||
|
||||
# zebrad cached tip state tests
|
||||
|
||||
# Test that Zebra can run a full sync on mainnet,
|
||||
# and regenerate chain tip Zebra cached state disks.
|
||||
#
|
||||
# This test always runs on mainnet.
|
||||
#
|
||||
# Runs:
|
||||
# - on schedule, as defined at the top of the workflow
|
||||
# - on every PR update, but only if the state version in constants.rs has no cached disk
|
||||
# - in manual workflow runs, when run-full-sync is 'true' and network is 'Mainnet'
|
||||
#
|
||||
# Note: the output from get-available-disks should match with the caller workflow inputs
|
||||
test-full-sync:
|
||||
name: Zebra tip
|
||||
needs: [ build, get-available-disks ]
|
||||
uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
|
||||
if: ${{ github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet') }}
|
||||
with:
|
||||
app_name: zebrad
|
||||
test_id: full-sync
|
||||
test_description: Test a full sync up to the tip
|
||||
# The value of FULL_SYNC_MAINNET_TIMEOUT_MINUTES is currently ignored.
|
||||
# TODO: update the test to use {{ input.network }} instead?
|
||||
test_variables: '-e NETWORK=Mainnet -e FULL_SYNC_MAINNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1'
|
||||
# This test runs for longer than 6 hours, so it needs multiple jobs
|
||||
is_long_test: true
|
||||
needs_zebra_state: false
|
||||
saves_to_disk: true
|
||||
force_save_to_disk: ${{ inputs.force_save_to_disk || false }}
|
||||
disk_suffix: tip
|
||||
height_grep_text: 'current_height.*=.*Height.*\('
|
||||
secrets: inherit
|
||||
# We want to prevent multiple full zebrad syncs running at the same time,
|
||||
# but we don't want to cancel running syncs on `main` if a new PR gets merged,
|
||||
# because we might never get a finished sync.
|
||||
#
|
||||
# Instead, we let the first sync complete, then queue the latest pending sync, cancelling any syncs in between.
|
||||
# (As the general workflow concurrency group just gets matched in Pull Requests,
|
||||
# it has no impact on this job.)
|
||||
#
|
||||
# TODO:
|
||||
# - allow multiple manual syncs on a branch by adding '-${{ github.run_id }}' when github.event.inputs.run-full-sync is true
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}−manual-${{ format('{0}', github.event.inputs.run-full-sync == 'true') }}-test-full-sync
|
||||
cancel-in-progress: false
|
||||
|
||||
# Test that Zebra can sync to the chain tip, using a cached Zebra tip state,
|
||||
# without launching `lightwalletd`.
|
||||
#
|
||||
# Runs:
|
||||
# - after every PR is merged to `main`
|
||||
# - on every PR update
|
||||
#
|
||||
# If the state version has changed, waits for the new cached state to be created.
|
||||
# Otherwise, if the state rebuild was skipped, runs immediately after the build job.
|
||||
test-update-sync:
|
||||
name: Zebra tip update
|
||||
needs: [ test-full-sync, get-available-disks ]
|
||||
uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
|
||||
if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
|
||||
with:
|
||||
app_name: zebrad
|
||||
test_id: update-to-tip
|
||||
test_description: Test syncing to tip with a Zebra tip state
|
||||
test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_UPDATE_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache'
|
||||
needs_zebra_state: true
|
||||
# update the disk on every PR, to increase CI speed
|
||||
saves_to_disk: true
|
||||
force_save_to_disk: ${{ inputs.force_save_to_disk || false }}
|
||||
disk_suffix: tip
|
||||
root_state_path: '/var/cache'
|
||||
zebra_state_dir: 'zebrad-cache'
|
||||
height_grep_text: 'current_height.*=.*Height.*\('
|
||||
secrets: inherit
|
||||
|
||||
# zebra mainnet checkpoint generation tests
|
||||
|
||||
# Test that Zebra can generate mainnet checkpoints after syncing to the chain tip,
|
||||
# using a cached Zebra tip state,
|
||||
#
|
||||
# This test always runs on mainnet.
|
||||
#
|
||||
# Runs:
|
||||
# - after every PR is merged to `main`
|
||||
# - on every PR update
|
||||
#
|
||||
# If the state version has changed, waits for the new cached state to be created.
|
||||
# Otherwise, if the state rebuild was skipped, runs immediately after the build job.
|
||||
checkpoints-mainnet:
|
||||
name: Generate checkpoints mainnet
|
||||
needs: [ test-full-sync, get-available-disks ]
|
||||
uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
|
||||
if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
|
||||
with:
|
||||
app_name: zebrad
|
||||
test_id: checkpoints-mainnet
|
||||
test_description: Generate Zebra checkpoints on mainnet
|
||||
# TODO: update the test to use {{ input.network }} instead?
|
||||
test_variables: '-e NETWORK=Mainnet -e GENERATE_CHECKPOINTS_MAINNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache'
|
||||
needs_zebra_state: true
|
||||
# test-update-sync updates the disk on every PR, so we don't need to do it here
|
||||
saves_to_disk: false
|
||||
disk_suffix: tip
|
||||
root_state_path: '/var/cache'
|
||||
zebra_state_dir: 'zebrad-cache'
|
||||
height_grep_text: 'current_height.*=.*Height.*\('
|
||||
secrets: inherit
|
||||
|
||||

  # zebra testnet checkpoint generation tests
  #
  # These tests will fail when testnet is unstable, so they should not be required to merge.
  #
  # TODO: ignore failures on testnet, so any failures don't appear in the GitHub interface.

  # Test that Zebra can run a full testnet sync, and regenerate chain tip Zebra cached state disks.
  # This job always runs on testnet, regardless of any inputs or variable settings.
  #
  # Runs:
  # - on schedule, as defined at the top of the workflow
  # - on every PR update, but only if the state version in constants.rs has no cached disk
  # - in manual workflow runs, when run-full-sync is 'true' and network is 'Testnet'
  #
  # Note: the output from get-available-disks-testnet should match the caller workflow inputs
  test-full-sync-testnet:
    name: Zebra tip on testnet
    needs: [ build, get-available-disks-testnet ]
    uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
    if: ${{ (github.event_name == 'schedule' && vars.SCHEDULE_TESTNET_FULL_SYNC == 'true') || !fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && (inputs.network || vars.ZCASH_NETWORK) == 'Testnet') }}
    with:
      app_name: zebrad
      test_id: full-sync-testnet
      test_description: Test a full sync up to the tip on testnet
      # The value of FULL_SYNC_TESTNET_TIMEOUT_MINUTES is currently ignored.
      test_variables: '-e NETWORK=Testnet -e FULL_SYNC_TESTNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1'
      network: "Testnet"
      # A full testnet sync could take 2-10 hours in April 2023.
      # The time varies a lot due to the small number of nodes.
      is_long_test: true
      needs_zebra_state: false
      saves_to_disk: true
      force_save_to_disk: ${{ inputs.force_save_to_disk || false }}
      disk_suffix: tip
      height_grep_text: 'current_height.*=.*Height.*\('
    secrets: inherit
    # We want to prevent multiple full zebrad syncs running at the same time,
    # but we don't want to cancel running syncs on `main` if a new PR gets merged,
    # because we might never get a finished sync.
    #
    # Instead, we let the first sync complete, then queue the latest pending sync, cancelling any syncs in between.
    # (The general workflow concurrency group only matches in pull requests,
    # so it has no impact on this job.)
    #
    # TODO:
    # - allow multiple manual syncs on a branch by adding '-${{ github.run_id }}' when github.event.inputs.run-full-sync is true
    concurrency:
      group: ${{ github.workflow }}−manual-${{ format('{0}', github.event.inputs.run-full-sync == 'true') }}-test-full-sync-testnet
      cancel-in-progress: false
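
  # Sketch of how the group name above evaluates (values illustrative): for a
  # scheduled run, github.event.inputs.run-full-sync is empty, so the
  # format('{0}', ...) call yields 'false' and the group becomes something like
  #   '<workflow name>−manual-false-test-full-sync-testnet'
  # All non-manual runs therefore share one queue, while manual full-sync runs
  # ('...−manual-true-...') queue separately.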

  # Test that Zebra can generate testnet checkpoints after syncing to the chain tip,
  # using a cached Zebra tip state.
  #
  # This test always runs on testnet.
  #
  # Runs:
  # - after every PR is merged to `main`
  # - on every PR update
  #
  # If the state version has changed, waits for the new cached state to be created.
  # Otherwise, if the state rebuild was skipped, runs immediately after the build job.
  checkpoints-testnet:
    name: Generate checkpoints testnet
    needs: [ test-full-sync-testnet, get-available-disks-testnet ]
    uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
    if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || needs.test-full-sync-testnet.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
    with:
      app_name: zebrad
      test_id: checkpoints-testnet
      test_description: Generate Zebra checkpoints on testnet
      test_variables: '-e NETWORK=Testnet -e GENERATE_CHECKPOINTS_TESTNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache'
      network: "Testnet"
      needs_zebra_state: true
      # update the disk on every PR, to increase CI speed
      # we don't have a test-update-sync-testnet job, so we need to update the disk here
      saves_to_disk: true
      force_save_to_disk: ${{ inputs.force_save_to_disk || false }}
      disk_suffix: tip
      root_state_path: '/var/cache'
      zebra_state_dir: 'zebrad-cache'
      height_grep_text: 'zebra_tip_height.*=.*Height.*\('
    secrets: inherit
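
  # Note on height_grep_text: the reusable deploy workflow greps the test logs
  # with this pattern to recover the reached block height for the cached disk.
  # For example, 'current_height.*=.*Height.*\(' matches zebrad log lines shaped
  # like `current_height = Height(2434969)`, and 'zebra_tip_height.*=.*Height.*\('
  # matches `zebra_tip_height = Height(...)` lines from the checkpoint generator.
  # (The heights and exact log shapes shown here are illustrative.)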

  # lightwalletd cached tip state tests

  # Test full sync of lightwalletd with a Zebra tip state
  #
  # Runs:
  # - on schedule, as defined at the top of the workflow
  # - on every PR update, but only if the state version in constants.rs has no cached disk
  # - in manual workflow runs, when run-lwd-sync is 'true' and network is 'Mainnet' (the network is required by the test-full-sync job)
  #
  # If the state version has changed, waits for the new cached state to be created.
  # Otherwise, if the state rebuild was skipped, runs immediately after the build job.
  lightwalletd-full-sync:
    name: lightwalletd tip
    needs: [ test-full-sync, get-available-disks ]
    uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
    # Currently the lightwalletd tests only work on Mainnet
    if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && (github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || github.event.inputs.run-lwd-sync == 'true' ) }}
    with:
      app_name: lightwalletd
      test_id: lwd-full-sync
      test_description: Test lightwalletd full sync
      test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_FULL_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache'
      # This test runs for longer than 6 hours, so it needs multiple jobs
      is_long_test: true
      needs_zebra_state: true
      needs_lwd_state: false
      saves_to_disk: true
      force_save_to_disk: ${{ inputs.force_save_to_disk || false }}
      disk_prefix: lwd-cache
      disk_suffix: tip
      root_state_path: '/var/cache'
      zebra_state_dir: 'zebrad-cache'
      lwd_state_dir: 'lwd-cache'
      height_grep_text: 'Waiting for block: '
    secrets: inherit
    # We want to prevent multiple lightwalletd full syncs running at the same time,
    # but we don't want to cancel running syncs on `main` if a new PR gets merged,
    # because we might never get a finished sync.
    #
    # See the concurrency comment on the zebrad test-full-sync job for details.
    concurrency:
      group: ${{ github.workflow }}−manual-${{ format('{0}', github.event.inputs.run-lwd-sync == 'true') }}-lightwalletd-full-sync
      cancel-in-progress: false
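
  # For lightwalletd, the height pattern above matches its sync progress logs,
  # e.g. a line like `Waiting for block: 2500000` (height illustrative), so the
  # cached lwd-cache disk is also named after the reached height.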

  # Test update sync of lightwalletd with a lightwalletd and Zebra tip state
  # Runs:
  # - after every PR is merged to `main`
  # - on every PR update
  #
  # If the state version has changed, waits for the new cached states to be created.
  # Otherwise, if the state rebuild was skipped, runs immediately after the build job.
  lightwalletd-update-sync:
    name: lightwalletd tip update
    needs: [ lightwalletd-full-sync, get-available-disks ]
    uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
    if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
    with:
      app_name: lightwalletd
      test_id: lwd-update-sync
      test_description: Test lightwalletd update sync with both states
      test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_UPDATE_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache'
      needs_zebra_state: true
      needs_lwd_state: true
      saves_to_disk: true
      force_save_to_disk: ${{ inputs.force_save_to_disk || false }}
      disk_prefix: lwd-cache
      disk_suffix: tip
      root_state_path: '/var/cache'
      zebra_state_dir: 'zebrad-cache'
      lwd_state_dir: 'lwd-cache'
      height_grep_text: 'Waiting for block: '
    secrets: inherit

  # Test that Zebra can answer a synthetic RPC call, using a cached Zebra tip state
  #
  # Runs:
  # - after every PR is merged to `main`
  # - on every PR update
  #
  # If the state version has changed, waits for the new cached state to be created.
  # Otherwise, if the state rebuild was skipped, runs immediately after the build job.
  #
  # TODO: move this job below the rest of the mainnet jobs that just use Zebra cached state
  lightwalletd-rpc-test:
    name: Zebra tip JSON-RPC
    needs: [ test-full-sync, get-available-disks ]
    uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
    if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
    with:
      app_name: lightwalletd
      test_id: fully-synced-rpc
      test_description: Test lightwalletd RPC with a Zebra tip state
      test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_RPC_CALL=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache'
      needs_zebra_state: true
      saves_to_disk: false
      disk_suffix: tip
      root_state_path: '/var/cache'
      zebra_state_dir: 'zebrad-cache'
    secrets: inherit

  # Test that Zebra can handle a lightwalletd send transaction RPC call, using a cached Zebra tip state
  #
  # Runs:
  # - after every PR is merged to `main`
  # - on every PR update
  #
  # If the state version has changed, waits for the new cached states to be created.
  # Otherwise, if the state rebuild was skipped, runs immediately after the build job.
  lightwalletd-transactions-test:
    name: lightwalletd tip send
    needs: [ lightwalletd-full-sync, get-available-disks ]
    uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
    if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
    with:
      app_name: lightwalletd
      test_id: lwd-send-transactions
      test_description: Test sending transactions via lightwalletd
      test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_TRANSACTIONS=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache'
      needs_zebra_state: true
      needs_lwd_state: true
      saves_to_disk: false
      disk_suffix: tip
      root_state_path: '/var/cache'
      zebra_state_dir: 'zebrad-cache'
      lwd_state_dir: 'lwd-cache'
    secrets: inherit

  # Test that Zebra can handle gRPC wallet calls, using a cached Zebra tip state
  #
  # Runs:
  # - after every PR is merged to `main`
  # - on every PR update
  #
  # If the state version has changed, waits for the new cached states to be created.
  # Otherwise, if the state rebuild was skipped, runs immediately after the build job.
  lightwalletd-grpc-test:
    name: lightwalletd GRPC tests
    needs: [ lightwalletd-full-sync, get-available-disks ]
    uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
    if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
    with:
      app_name: lightwalletd
      test_id: lwd-grpc-wallet
      test_description: Test gRPC calls via lightwalletd
      test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_GRPC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache'
      needs_zebra_state: true
      needs_lwd_state: true
      saves_to_disk: false
      disk_suffix: tip
      root_state_path: '/var/cache'
      zebra_state_dir: 'zebrad-cache'
      lwd_state_dir: 'lwd-cache'
    secrets: inherit

  ## getblocktemplate-rpcs using cached Zebra state on mainnet
  #
  # TODO: move these below the rest of the mainnet jobs that just use Zebra cached state

  # Test that Zebra can handle a getblocktemplate RPC call, using a cached Zebra tip state
  #
  # Runs:
  # - after every PR is merged to `main`
  # - on every PR update
  #
  # If the state version has changed, waits for the new cached states to be created.
  # Otherwise, if the state rebuild was skipped, runs immediately after the build job.
  get-block-template-test:
    name: get block template
    needs: [ test-full-sync, get-available-disks ]
    uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
    if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
    with:
      app_name: zebrad
      test_id: get-block-template
      test_description: Test getblocktemplate RPC method via Zebra's rpc server
      test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_GET_BLOCK_TEMPLATE=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache'
      needs_zebra_state: true
      needs_lwd_state: false
      saves_to_disk: false
      disk_suffix: tip
      root_state_path: '/var/cache'
      zebra_state_dir: 'zebrad-cache'
    secrets: inherit

  # Test that Zebra can handle a submit block RPC call, using a cached Zebra tip state
  #
  # Runs:
  # - after every PR is merged to `main`
  # - on every PR update
  #
  # If the state version has changed, waits for the new cached states to be created.
  # Otherwise, if the state rebuild was skipped, runs immediately after the build job.
  submit-block-test:
    name: submit block
    needs: [test-full-sync, get-available-disks]
    uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
    if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
    with:
      app_name: zebrad
      test_id: submit-block
      test_description: Test submitting blocks via Zebra's rpc server
      test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SUBMIT_BLOCK=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache"
      needs_zebra_state: true
      needs_lwd_state: false
      saves_to_disk: false
      disk_suffix: tip
      root_state_path: "/var/cache"
      zebra_state_dir: "zebrad-cache"
    secrets: inherit

  # Test that the scanner can continue scanning where it was left when zebrad restarts.
  #
  # Runs:
  # - after every PR is merged to `main`
  # - on every PR update
  #
  # If the state version has changed, waits for the new cached states to be created.
  # Otherwise, if the state rebuild was skipped, runs immediately after the build job.
  scan-start-where-left-test:
    name: Scan starts where left
    needs: [test-full-sync, get-available-disks]
    uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
    if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
    with:
      app_name: zebrad
      test_id: scan-start-where-left
      test_description: Test that the scanner can continue scanning where it was left when zebrad restarts.
      test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SCAN_START_WHERE_LEFT=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache"
      needs_zebra_state: true
      needs_lwd_state: false
      saves_to_disk: true
      disk_suffix: tip
      root_state_path: "/var/cache"
      zebra_state_dir: "zebrad-cache"
    secrets: inherit

  # Test that the scan task registers keys, deletes keys, and subscribes to results for keys while running.
  #
  # Runs:
  # - after every PR is merged to `main`
  # - on every PR update
  #
  # If the state version has changed, waits for the new cached states to be created.
  # Otherwise, if the state rebuild was skipped, runs immediately after the build job.
  scan-task-commands-test:
    name: scan task commands
    needs: [test-full-sync, get-available-disks]
    uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
    if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
    with:
      app_name: zebrad
      test_id: scan-task-commands
      test_description: Test that the scan task registers keys, deletes keys, and subscribes to results for keys while running.
      test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SCAN_TASK_COMMANDS=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache"
      needs_zebra_state: true
      needs_lwd_state: false
      saves_to_disk: false
      disk_suffix: tip
      root_state_path: "/var/cache"
      zebra_state_dir: "zebrad-cache"
    secrets: inherit

  failure-issue:
    name: Open or update issues for main branch failures
    # When a new test is added to this workflow, add it to this list.
    #
    # This list is for reliable tests that are run on the `main` branch.
    # Testnet jobs are not in this list, because we expect testnet to fail occasionally.
    needs:
      [
        regenerate-stateful-disks,
        test-full-sync,
        lightwalletd-full-sync,
        test-stateful-sync,
        test-update-sync,
        checkpoints-mainnet,
        lightwalletd-update-sync,
        lightwalletd-rpc-test,
        lightwalletd-transactions-test,
        lightwalletd-grpc-test,
        get-block-template-test,
        submit-block-test,
        scan-start-where-left-test,
        scan-task-commands-test
      ]
    # Only open tickets for failed scheduled jobs, manual workflow runs, or `main` branch merges.
    # (PR statuses are already reported in the PR jobs list, and checked by Mergify.)
    if: (failure() && github.event.pull_request == null) || (cancelled() && github.event.pull_request == null)
    runs-on: ubuntu-latest
    steps:
      - uses: jayqi/failed-build-issue-action@v1
        with:
          title-template: "{{refname}} branch CI failed: {{eventName}} in {{workflow}}"
          # New failures open an issue with this label.
          label-name: S-ci-fail-main-branch-auto-issue
          # If there is already an open issue with this label, any failures become comments on that issue.
          always-create-new-issue: false
          github-token: ${{ secrets.GITHUB_TOKEN }}
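
  # Example of an issue title rendered from the template above (values depend
  # on the triggering event; this one is illustrative):
  #   "main branch CI failed: schedule in <workflow name>"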
@ -0,0 +1,17 @@
name: Lint

on:
  pull_request:

jobs:
  clippy:
    name: Clippy
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'

  fmt:
    name: Rustfmt
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'
@ -0,0 +1,173 @@
# This workflow conducts various linting checks for a Rust-based project.
# 1. Determines if Rust or workflow files have been modified.
# 2. Runs the Clippy linter on Rust files, producing annotations and failing on warnings.
# 3. Ensures Rust code formatting complies with 'rustfmt' standards.
# 4. Lints GitHub Actions workflow files for common issues.
# 5. Checks for common spelling errors in the codebase.
# The workflow is designed to maintain code quality and consistency, running checks conditionally based on the changed files.
name: Lint

# Ensures that only one workflow task will run at a time. Previous builds, if
# already in progress, will get cancelled. Only the latest commit will be allowed
# to run, cancelling any workflows in between.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true
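
# Sketch of how this group evaluates (branch name illustrative): for a PR from
# branch 'my-feature', the group is 'Lint-my-feature', so a new push to that
# branch cancels the previous Lint run. For non-PR events, head_ref is empty
# and run_id makes each group unique, so nothing is cancelled.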

on:
  # we build Rust caches on main, so they can be shared by all branches:
  # https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache
  push:
    branches:
      - main
  pull_request:

env:
  CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }}
  RUST_LOG: ${{ vars.RUST_LOG }}
  RUST_BACKTRACE: ${{ vars.RUST_BACKTRACE }}
  RUST_LIB_BACKTRACE: ${{ vars.RUST_LIB_BACKTRACE }}
  COLORBT_SHOW_HIDDEN: ${{ vars.COLORBT_SHOW_HIDDEN }}

jobs:
  changed-files:
    runs-on: ubuntu-latest
    name: Checks changed-files
    outputs:
      rust: ${{ steps.changed-files-rust.outputs.any_changed == 'true' }}
      workflows: ${{ steps.changed-files-workflows.outputs.any_changed == 'true' }}
    steps:
      - uses: actions/checkout@v4.1.2
        with:
          persist-credentials: false
          fetch-depth: 0

      - name: Rust files
        id: changed-files-rust
        uses: tj-actions/changed-files@v44.0.1
        with:
          files: |
            **/*.rs
            **/Cargo.toml
            **/Cargo.lock
            clippy.toml
            .cargo/config.toml
            .github/workflows/ci-lint.yml

      - name: Workflow files
        id: changed-files-workflows
        uses: tj-actions/changed-files@v44.0.1
        with:
          files: |
            .github/workflows/*.yml

  clippy:
    name: Clippy
    timeout-minutes: 45
    runs-on: ubuntu-latest
    needs: changed-files
    if: ${{ needs.changed-files.outputs.rust == 'true' }}

    steps:
      - uses: actions/checkout@v4.1.2
        with:
          persist-credentials: false

      - name: Install the latest version of Protoc
        uses: arduino/setup-protoc@v3.0.0
        with:
          # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed
          version: '23.x'
          repo-token: ${{ secrets.GITHUB_TOKEN }}

      - name: Check workflow permissions
        id: check_permissions
        uses: scherermichael-oss/action-has-permission@1.0.6
        with:
          required-permission: write
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      # Setup Rust with stable toolchain and default profile
      - name: Setup Rust
        run: |
          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=default

      - uses: Swatinem/rust-cache@v2.7.3
        with:
          shared-key: "clippy-cargo-lock"

      # TODO: keep this action until we find a better solution
      - name: Run clippy action to produce annotations
        uses: actions-rs/clippy-check@v1.0.7
        if: ${{ steps.check_permissions.outputs.has-permission }}
        with:
          # GitHub displays the clippy job and its results as separate entries
          name: Clippy (stable) Results
          token: ${{ secrets.GITHUB_TOKEN }}
          args: --workspace --all-features --all-targets -- -D warnings

      - name: Run clippy manually without annotations
        if: ${{ !steps.check_permissions.outputs.has-permission }}
        run: cargo clippy --workspace --all-features --all-targets -- -D warnings
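
      # The same invocation should reproduce this check locally from the repo
      # root (a sketch; it assumes protoc and a stable Rust toolchain are installed):
      #   cargo clippy --workspace --all-features --all-targets -- -D warnings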

  fmt:
    name: Rustfmt
    timeout-minutes: 30
    runs-on: ubuntu-latest
    needs: changed-files
    if: ${{ needs.changed-files.outputs.rust == 'true' }}

    steps:
      - uses: actions/checkout@v4.1.2
        with:
          persist-credentials: false
      - uses: r7kamura/rust-problem-matchers@v1.4.0

      - name: Install the latest version of Protoc
        uses: arduino/setup-protoc@v3.0.0
        with:
          # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed
          version: '23.x'
          repo-token: ${{ secrets.GITHUB_TOKEN }}

      # Setup Rust with stable toolchain and default profile
      - name: Setup Rust
        run: |
          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=default

      # We don't cache `fmt` outputs because the job is quick,
      # and we want to use the limited GitHub actions cache space for slower jobs.
      #- uses: Swatinem/rust-cache@v2.7.3

      - run: |
          cargo fmt --all -- --check

  actionlint:
    runs-on: ubuntu-latest
    continue-on-error: true
    needs: changed-files
    if: ${{ needs.changed-files.outputs.workflows == 'true' }}
    steps:
      - uses: actions/checkout@v4.1.2
      - name: actionlint
        uses: reviewdog/action-actionlint@v1.44.0
        with:
          level: warning
          fail_on_error: false
      # This is failing with a JSON schema error, see #8028 for details.
      #- name: validate-dependabot
      #  # This gives an error when run on PRs from external repositories, so we skip it.
      #  # If this is a PR, check that the PR source is a local branch. Always runs on non-PRs.
      #  if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }}
      #  uses: marocchino/validate-dependabot@v2.1.0

  codespell:
    runs-on: ubuntu-latest
    needs: changed-files
    steps:
      - uses: actions/checkout@v4.1.2
      - uses: codespell-project/actions-codespell@v2.0
        with:
          only_warn: 1
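
      # `only_warn: 1` makes the action report spelling mistakes as warnings
      # without failing the job. A rough local equivalent (assuming the
      # codespell CLI is installed) is running `codespell` from the repo root.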
@ -0,0 +1,65 @@
# Workflow patches for skipping Google Cloud unit test CI on PRs from external repositories.
name: Docker Unit Tests

# Run on PRs from external repositories, let them pass, and then Mergify will check them.
# GitHub doesn't support filtering workflows by source branch names, so we have to do it for each
# job.
on:
  pull_request:

# IMPORTANT
#
# The job names in `ci-unit-tests-docker.yml`, `ci-unit-tests-docker.patch.yml` and
# `ci-unit-tests-docker.patch-external.yml` must be kept in sync.
jobs:
  build:
    name: Build CI Docker / Build images
    # Only run on PRs from external repositories.
    if: ${{ startsWith(github.event_name, 'pull') && github.event.pull_request.head.repo.fork }}
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'

  test-all:
    name: Test all
    # This dependency allows all these jobs to depend on a single condition, making it easier to
    # change.
    needs: build
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'

  test-fake-activation-heights:
    name: Test with fake activation heights
    needs: build
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'

  test-empty-sync:
    name: Test checkpoint sync from empty state
    needs: build
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'

  test-lightwalletd-integration:
    name: Test integration with lightwalletd
    needs: build
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'

  test-configuration-file:
    name: Test CI default Docker config file / Test default-conf in Docker
    needs: build
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'

  test-zebra-conf-path:
    name: Test CI custom Docker config file / Test custom-conf in Docker
    needs: build
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'
@ -0,0 +1,73 @@
# Workflow patches for skipping unit test CI when Rust code or dependencies aren't modified in a PR.
name: Docker Unit Tests

# Run on PRs with unmodified code and dependency files.
on:
  pull_request:
    paths-ignore:
      # code and tests
      - '**/*.rs'
      # hard-coded checkpoints and proptest regressions
      - '**/*.txt'
      # test data snapshots
      - '**/*.snap'
      # dependencies
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      # configuration files
      - '.cargo/config.toml'
      - '**/clippy.toml'
      # workflow definitions
      - 'docker/**'
      - '.dockerignore'
      - '.github/workflows/ci-unit-tests-docker.yml'
      - '.github/workflows/sub-deploy-integration-tests-gcp.yml'
      - '.github/workflows/sub-find-cached-disks.yml'
      - '.github/workflows/sub-build-docker-image.yml'

# IMPORTANT
#
# The job names in `ci-unit-tests-docker.yml`, `ci-unit-tests-docker.patch.yml` and
# `ci-unit-tests-docker.patch-external.yml` must be kept in sync.
jobs:
  build:
    name: Build CI Docker / Build images
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'

  test-all:
    name: Test all
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'

  test-fake-activation-heights:
    name: Test with fake activation heights
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'

  test-empty-sync:
    name: Test checkpoint sync from empty state
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'

  test-lightwalletd-integration:
    name: Test integration with lightwalletd
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'

  test-configuration-file:
    name: Test CI default Docker config file / Test default-conf in Docker
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'

  test-zebra-conf-path:
    name: Test CI custom Docker config file / Test custom-conf in Docker
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'
@ -0,0 +1,281 @@
# Google Cloud unit tests that run when Rust code or dependencies are modified,
# but only on PRs from the ZcashFoundation/zebra repository. (External PRs are tested by mergify.)
#
# This workflow is designed for running various unit tests within Docker containers.
# Jobs:
# 1. Builds a Docker image for tests, adaptable to the specified network (Mainnet or Testnet).
# 2. 'test-all': Executes all Zebra tests, including normally ignored ones, in a Docker environment.
# 3. 'test-fake-activation-heights': Runs state tests with fake activation heights, isolating its build products.
# 4. 'test-empty-sync': Tests Zebra's ability to sync and checkpoint from an empty state.
# 5. 'test-lightwalletd-integration': Validates integration with 'lightwalletd' starting from an empty state.
# 6. 'test-configuration-file': Assesses the default Docker configuration for Zebra.
# 7. 'test-configuration-file-testnet': Checks the Docker image reconfiguration for the Testnet.
# 8. 'test-zebra-conf-path': Tests Zebra using a custom Docker configuration.
name: Docker Unit Tests

# Ensures that only one workflow task will run at a time. Previous builds, if
# already in progress, will get cancelled. Only the latest commit will be allowed
# to run, cancelling any workflows in between.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

on:
  workflow_dispatch:
    inputs:
      network:
        default: 'Mainnet'
        description: 'Network to deploy: Mainnet or Testnet'
        required: true
      no_cache:
        description: 'Disable the Docker cache for this build'
        required: false
        type: boolean
        default: false

  pull_request:
    # Skip PRs where Rust code and dependencies aren't modified.
    paths:
      # code and tests
      - '**/*.rs'
      # hard-coded checkpoints and proptest regressions
      - '**/*.txt'
      # test data snapshots
      - '**/*.snap'
      # dependencies
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      # configuration files
      - '.cargo/config.toml'
      - '**/clippy.toml'
      # workflow definitions
      - 'docker/**'
      - '.github/workflows/ci-unit-tests-docker.yml'
      - '.github/workflows/sub-deploy-integration-tests-gcp.yml'
      - '.github/workflows/sub-build-docker-image.yml'
      - '.github/workflows/sub-find-cached-disks.yml'
      - '.github/workflows/sub-test-zebra-config.yml'

  push:
    branches:
      - main
    # Skip main branch updates where Rust code and dependencies aren't modified.
    paths:
      # code and tests
      - '**/*.rs'
      # hard-coded checkpoints and proptest regressions
      - '**/*.txt'
      # test data snapshots
      - '**/*.snap'
      # dependencies
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      # configuration files
      - '.cargo/config.toml'
      - '**/clippy.toml'
      # workflow definitions
      - 'docker/**'
      - '.dockerignore'
      - '.github/workflows/ci-unit-tests-docker.yml'
      - '.github/workflows/sub-deploy-integration-tests-gcp.yml'
      - '.github/workflows/sub-find-cached-disks.yml'
      - '.github/workflows/sub-build-docker-image.yml'

env:
  RUST_LOG: ${{ vars.RUST_LOG }}
  RUST_BACKTRACE: ${{ vars.RUST_BACKTRACE }}
  RUST_LIB_BACKTRACE: ${{ vars.RUST_LIB_BACKTRACE }}
  COLORBT_SHOW_HIDDEN: ${{ vars.COLORBT_SHOW_HIDDEN }}
  CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }}

# IMPORTANT
#
# The job names in `ci-unit-tests-docker.yml`, `ci-unit-tests-docker.patch.yml` and
# `ci-unit-tests-docker.patch-external.yml` must be kept in sync.
jobs:
  # Build the docker image used by the tests.
  #
  # The default network in the Zebra config in the image is mainnet, unless a manually triggered
  # workflow or repository variable is configured differently. Testnet jobs change that config to
  # testnet when running the image.
  build:
    name: Build CI Docker
    # Skip PRs from external repositories, let them pass, and then Mergify will check them
    if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }}
    uses: ./.github/workflows/sub-build-docker-image.yml
    with:
      dockerfile_path: ./docker/Dockerfile
      dockerfile_target: tests
      image_name: ${{ vars.CI_IMAGE_NAME }}
      no_cache: ${{ inputs.no_cache || false }}
      rust_backtrace: full
      rust_lib_backtrace: full
      rust_log: info
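
  # The build job exposes the pushed image's digest as `image_digest`, and the
  # test jobs below pull the image by that digest rather than by tag, so every
  # job in a run tests exactly the same build. The effect is equivalent to
  # (registry path and digest illustrative):
  #   docker pull <GAR_BASE>/<CI_IMAGE_NAME>@sha256:abc123...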

  # Run all the zebra tests, including tests that are ignored by default.
  #
  # - We activate the gRPC feature to avoid recompiling `zebrad`, but we don't actually run any gRPC tests.
  test-all:
    name: Test all
    timeout-minutes: 180
    runs-on: ubuntu-latest-xl
    needs: build
    steps:
      - uses: r7kamura/rust-problem-matchers@v1.4.0

      - name: Inject slug/short variables
        uses: rlespinasse/github-slug-action@v4
        with:
          short-length: 7

      # Run unit, basic acceptance tests, and ignored tests, only showing command output if the test fails.
      #
      # If some tests hang, add "-- --nocapture" for just that test, or for all the tests.
      #
      - name: Run zebrad tests
        env:
          NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }}
        run: |
          docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }}
          docker run --tty -e NETWORK -e RUN_ALL_TESTS=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }}

      # Run unit, basic acceptance tests, and ignored tests with experimental features.
      #
      - name: Run zebrad tests with experimental features
        env:
          NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }}
        run: |
          docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }}
          docker run --tty -e NETWORK -e RUN_ALL_EXPERIMENTAL_TESTS=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }}

  # Run state tests with fake activation heights.
  #
  # This test changes zebra-chain's activation heights,
  # which can recompile all the Zebra crates,
  # so we want its build products to be cached separately.
  #
  # Also, we don't want to accidentally use the fake heights in other tests.
  #
  # (We activate the test features to avoid recompiling dependencies, but we don't actually run any gRPC tests.)
  test-fake-activation-heights:
    name: Test with fake activation heights
    timeout-minutes: 60
    runs-on: ubuntu-latest
    needs: build
    steps:
      - uses: r7kamura/rust-problem-matchers@v1.4.0

      - name: Inject slug/short variables
        uses: rlespinasse/github-slug-action@v4
        with:
          short-length: 7

      - name: Run tests with fake activation heights
        env:
          NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }}
        run: |
          docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }}
          docker run --tty -e NETWORK -e TEST_FAKE_ACTIVATION_HEIGHTS=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }}

  # Test that Zebra syncs and checkpoints a few thousand blocks from an empty state.
  test-empty-sync:
    name: Test checkpoint sync from empty state
    timeout-minutes: 60
    runs-on: ubuntu-latest
    needs: build
    steps:
      - uses: r7kamura/rust-problem-matchers@v1.4.0

      - name: Inject slug/short variables
        uses: rlespinasse/github-slug-action@v4
        with:
          short-length: 7

      - name: Run zebrad large sync tests
        env:
          NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }}
        run: |
          docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }}
          docker run --tty -e NETWORK -e TEST_ZEBRA_EMPTY_SYNC=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }}

  # Test launching lightwalletd with an empty lightwalletd and Zebra state.
  test-lightwalletd-integration:
    name: Test integration with lightwalletd
    timeout-minutes: 60
    runs-on: ubuntu-latest
    needs: build
    steps:
      - uses: r7kamura/rust-problem-matchers@v1.4.0

      - name: Inject slug/short variables
        uses: rlespinasse/github-slug-action@v4
        with:
          short-length: 7

      - name: Run tests with empty lightwalletd launch
        env:
          NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }}
        run: |
          docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }}
          docker run --tty -e NETWORK -e ZEBRA_TEST_LIGHTWALLETD=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }}

  # Test that Zebra works using the default config with the latest Zebra version.
  test-configuration-file:
    name: Test CI default Docker config file
    needs: build
    uses: ./.github/workflows/sub-test-zebra-config.yml
    with:
      test_id: 'default-conf'
      docker_image: ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }}
      grep_patterns: '-e "net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter"'
      test_variables: '-e NETWORK'
      network: 'Mainnet'
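
  # The grep_patterns above are grep '-e' arguments matched against the
  # container's output by the sub-test-zebra-config.yml workflow: the
  # default-conf test passes when zebrad logs a startup line mentioning the
  # Main network and early sync progress, e.g. a line containing (illustrative):
  #   net = Main ... estimated progress to chain tip ... BeforeOverwinter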

  # Test reconfiguring the Docker image for testnet.
  test-configuration-file-testnet:
    name: Test CI testnet Docker config file
    needs: build
    # Make sure Zebra can sync the genesis block on testnet
    uses: ./.github/workflows/sub-test-zebra-config.yml
    with:
      test_id: 'testnet-conf'
      docker_image: ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }}
      grep_patterns: '-e "net.*=.*Test.*estimated progress to chain tip.*Genesis" -e "net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter"'
      # TODO: improve the entrypoint to avoid using `ENTRYPOINT_FEATURES=""`
      test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="/etc/zebrad/zebrad.toml" -e ENTRYPOINT_FEATURES=""'
      network: 'Testnet'

  # Test that Zebra works using the $ZEBRA_CONF_PATH config
  test-zebra-conf-path:
    name: Test CI custom Docker config file
    needs: build
    uses: ./.github/workflows/sub-test-zebra-config.yml
    with:
      test_id: 'custom-conf'
      docker_image: ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }}
      grep_patterns: '-e "loaded zebrad config.*config_path.*=.*v1.0.0-rc.2.toml"'
      test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v1.0.0-rc.2.toml"'
      network: ${{ inputs.network || vars.ZCASH_NETWORK }}

  failure-issue:
    name: Open or update issues for main branch failures
    # When a new test is added to this workflow, add it to this list.
    #
    # This list is for reliable tests that are run on the `main` branch.
    # Testnet jobs are not in this list, because we expect testnet to fail occasionally.
    needs: [ test-all, test-fake-activation-heights, test-empty-sync, test-lightwalletd-integration, test-configuration-file, test-zebra-conf-path ]
    # Only open tickets for failed scheduled jobs, manual workflow runs, or `main` branch merges.
    # (PR statuses are already reported in the PR jobs list, and checked by Mergify.)
    # TODO: if a job times out, we want to create a ticket. Does failure() do that? Or do we need cancelled()?
    if: failure() && github.event.pull_request == null
    runs-on: ubuntu-latest
    steps:
      - uses: jayqi/failed-build-issue-action@v1
        with:
          title-template: "{{refname}} branch CI failed: {{eventName}} in {{workflow}}"
          # New failures open an issue with this label.
          # TODO: do we want a different label for each workflow, or each kind of workflow?
          label-name: S-ci-fail-auto-issue
          # If there is already an open issue with this label, any failures become comments on that issue.
          always-create-new-issue: false
          github-token: ${{ secrets.GITHUB_TOKEN }}
@ -0,0 +1,66 @@
name: Multi-OS Unit Tests

on:
  pull_request:
    paths-ignore:
      - '**/*.rs'
      - '**/*.txt'
      - '**/*.snap'
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      - '**/deny.toml'
      - '.cargo/config.toml'
      - '**/clippy.toml'
      - '.github/workflows/ci-unit-tests-os.yml'

jobs:
  test:
    name: Test ${{ matrix.rust }} on ${{ matrix.os }}${{ matrix.features }}
    # We're just doing this job for the name; the platform doesn't matter.
    # So we use the platform with the most concurrent instances.
    runs-on: ubuntu-latest
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest]
        rust: [stable, beta]
        features: [""]
        exclude:
          - os: macos-latest
            rust: beta

    steps:
      - run: 'echo "No build required"'

  install-from-lockfile-no-cache:
    name: Install zebrad from lockfile without cache on ubuntu-latest
    runs-on: ubuntu-latest

    steps:
      - run: 'echo "No build required"'

  check-cargo-lock:
    name: Check Cargo.lock is up to date
    runs-on: ubuntu-latest

    steps:
      - run: 'echo "No build required"'

  cargo-deny:
    name: Check deny.toml ${{ matrix.checks }} ${{ matrix.features }}
    runs-on: ubuntu-latest
    strategy:
      matrix:
        checks:
          - bans
          - sources
        features: ['', '--features default-release-binaries', '--all-features']

    steps:
      - run: 'echo "No build required"'

  unused-deps:
    name: Check for unused dependencies
    runs-on: ubuntu-latest

    steps:
      - run: 'echo "No build required"'
@ -0,0 +1,320 @@
# This workflow performs unit tests across different operating systems and Rust versions. It includes steps for:
# - Testing on Ubuntu and macOS with stable and beta Rust toolchains.
# - Installing Zebra from the lockfile without cache on Ubuntu.
# - Verifying that Cargo.lock is up-to-date with Cargo.toml changes.
# - Running cargo-deny checks for dependencies.
# - Checking for unused dependencies in the code.
name: Multi-OS Unit Tests

# Ensures that only one workflow task will run at a time. Previous builds, if
# already in progress, will get cancelled. Only the latest commit will be allowed
# to run, cancelling any workflows in between.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

on:
  workflow_dispatch:

  pull_request:
    paths:
      # code and tests
      - '**/*.rs'
      # hard-coded checkpoints and proptest regressions
      - '**/*.txt'
      # test data snapshots
      - '**/*.snap'
      # dependencies
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      - '**/deny.toml'
      # configuration files
      - '.cargo/config.toml'
      - '**/clippy.toml'
      # workflow definitions
      - '.github/workflows/ci-unit-tests-os.yml'

  # we build Rust caches on main,
  # so they can be shared by all branches:
  # https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache
  push:
    branches:
      - main
    paths:
      # production code and test code
      - '**/*.rs'
      # hard-coded checkpoints
      # TODO: skip proptest regressions?
      - '**/*.txt'
      # test data snapshots
      - '**/*.snap'
      # dependencies
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      - '**/deny.toml'
      # configuration files
      - '.cargo/config.toml'
      - '**/clippy.toml'
      # workflow definitions
      - '.github/workflows/ci-unit-tests-os.yml'

env:
  CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }}
  RUST_LOG: ${{ vars.RUST_LOG }}
  RUST_BACKTRACE: ${{ vars.RUST_BACKTRACE }}
  RUST_LIB_BACKTRACE: ${{ vars.RUST_LIB_BACKTRACE }}
  COLORBT_SHOW_HIDDEN: ${{ vars.COLORBT_SHOW_HIDDEN }}

jobs:
  ########################################
  ### Build and test Zebra on all OSes ###
  ########################################
  test:
    name: Test ${{ matrix.rust }} on ${{ matrix.os }}${{ matrix.features }}
    # The large timeout is to accommodate macOS and Windows builds
    # (typically 50-90 minutes).
    timeout-minutes: 120
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        # TODO: Windows was removed for now, see https://github.com/ZcashFoundation/zebra/issues/3801
        os: [ubuntu-latest, macos-latest]
        rust: [stable, beta]
        # TODO: When vars.EXPERIMENTAL_FEATURES has features in it, add it here.
        # Or work out a way to trim the space from the variable: GitHub doesn't allow empty variables.
        # Or use `default` for the empty feature set and EXPERIMENTAL_FEATURES, and update the branch protection rules.
        #features: ${{ fromJSON(format('["", "{0}"]', vars.EXPERIMENTAL_FEATURES)) }}
        features: [""]
        exclude:
          # We're excluding macOS beta for the following reasons:
          # - the concurrent macOS runner limit is much lower than the Linux limit
          # - macOS is slower than Linux, and shouldn't have a build or test difference with Linux
          # - macOS is a second-tier Zebra support platform
          - os: macos-latest
            rust: beta

    steps:
      - uses: actions/checkout@v4.1.2
        with:
          persist-credentials: false
      - uses: r7kamura/rust-problem-matchers@v1.4.0

      - name: Install the latest version of Protoc
        uses: arduino/setup-protoc@v3.0.0
        with:
          # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed
          version: '23.x'
          repo-token: ${{ secrets.GITHUB_TOKEN }}

      # Setup Rust with ${{ matrix.rust }} toolchain and minimal profile
      - name: Setup Rust
        run: |
          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=${{ matrix.rust }} --profile=minimal

      - uses: Swatinem/rust-cache@v2.7.3
        # TODO: change Rust cache target directory on Windows,
        # or remove this workaround once the build is more efficient (#3005).
        #with:
        #  workspaces: ". -> C:\\zebra-target"
        with:
          # Split the experimental features cache from the regular cache, to avoid linker errors.
          # (These might be "disk full" errors, or they might be dependency resolution issues.)
          key: ${{ matrix.features }}

      - name: Change target output directory on Windows
        # Windows doesn't have enough space on the D: drive, so we redirect the build output to the
        # larger C: drive.
        # TODO: Remove this workaround once the build is more efficient (#3005).
        if: matrix.os == 'windows-latest'
        run: |
          mkdir "C:\\zebra-target"
          echo "CARGO_TARGET_DIR=C:\\zebra-target" | Out-File -FilePath "$env:GITHUB_ENV" -Encoding utf8 -Append

      - name: cargo fetch
        run: |
          cargo fetch

      - name: Install LLVM on Windows
        if: matrix.os == 'windows-latest'
        run: |
          choco install llvm -y
          echo "C:\Program Files\LLVM\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
          echo "LIBCLANG_PATH=C:\Program Files\LLVM\bin" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append

      - name: Skip network tests on Ubuntu and Windows
        # Ubuntu runners don't have reliable network or DNS during test steps.
        # Windows runners have an unreliable network.
        shell: bash
        if: matrix.os != 'macos-latest'
        run: echo "ZEBRA_SKIP_NETWORK_TESTS=1" >> $GITHUB_ENV

      - name: Minimise proptest cases on macOS and Windows
        # We set cases to 1, because some tests already run 1 case by default.
        # We keep maximum shrink iterations at the default value, because it only happens on failure.
        #
        # Windows compilation and tests are slower than other platforms.
        # macOS runners do extra network tests, so they take longer.
        shell: bash
        if: matrix.os != 'ubuntu-latest'
        run: |
          echo "PROPTEST_CASES=1" >> $GITHUB_ENV
          echo "PROPTEST_MAX_SHRINK_ITERS=1024" >> $GITHUB_ENV

      # Run unit and basic acceptance tests, only showing command output if the test fails.
      #
      # If some tests hang, add "-- --nocapture" for just that test, or for all the tests.
      - name: Run tests${{ matrix.features }}
        run: |
          cargo test --features "${{ matrix.features }}" --release --verbose --workspace

      # Explicitly run any tests that are usually #[ignored]

      - name: Run zebrad large sync tests${{ matrix.features }}
        # Skip the entire step on Ubuntu and Windows, because the test would be skipped anyway due to ZEBRA_SKIP_NETWORK_TESTS
        if: matrix.os == 'macos-latest'
        run: |
          cargo test --features "${{ matrix.features }}" --release --verbose --package zebrad --test acceptance -- --nocapture --include-ignored sync_large_checkpoints_

  # Install Zebra with lockfile dependencies, with no caching and default features
  install-from-lockfile-no-cache:
    name: Install zebrad from lockfile without cache on ubuntu-latest
    timeout-minutes: 60
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4.1.2
        with:
          persist-credentials: false
      - uses: r7kamura/rust-problem-matchers@v1.4.0

      # Setup Rust with stable toolchain and minimal profile
      - name: Setup Rust
        run: |
          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal

      - name: Install zebrad
        run: |
          cargo install --locked --path ./zebrad/ zebrad

  # Check that Cargo.lock includes any Cargo.toml changes.
  # This check makes sure the `cargo-deny` crate dependency checks are accurate.
  check-cargo-lock:
    name: Check Cargo.lock is up to date
    timeout-minutes: 60
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4.1.2
        with:
          persist-credentials: false
      - uses: r7kamura/rust-problem-matchers@v1.4.0

      - name: Install the latest version of Protoc
        uses: arduino/setup-protoc@v3.0.0
        with:
          version: '23.x'
          repo-token: ${{ secrets.GITHUB_TOKEN }}

      # Setup Rust with stable toolchain and minimal profile
      - name: Setup Rust
        run: |
          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal

      - uses: Swatinem/rust-cache@v2.7.3
        with:
          shared-key: "clippy-cargo-lock"

      - name: Check Cargo.lock is up to date
        run: |
          cargo check --locked --all-features --all-targets
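
      # `--locked` makes the command fail instead of silently updating
      # Cargo.lock, so a stale lockfile produces an error similar to
      # (exact wording depends on the cargo version):
      #   error: the lock file ... needs to be updated but --locked was passed to prevent this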

  cargo-deny:
    name: Check deny.toml ${{ matrix.checks }} ${{ matrix.features }}
    runs-on: ubuntu-latest
    strategy:
      matrix:
        checks:
          - bans
          - sources
        # We don't need to check `--no-default-features` here, because (except in very rare cases):
        # - disabling features isn't going to add duplicate dependencies
        # - disabling features isn't going to add more crate sources
        features: ['', '--features default-release-binaries', '--all-features']
      # Always run the --all-features job, to get accurate "skip tree root was not found" warnings
      fail-fast: false

    # Prevent sudden announcement of a new advisory from failing ci:
    continue-on-error: ${{ matrix.checks == 'advisories' }}

    steps:
      - uses: actions/checkout@v4.1.2
        with:
          persist-credentials: false
      - uses: r7kamura/rust-problem-matchers@v1.4.0

      - name: Check ${{ matrix.checks }} with features ${{ matrix.features }}
        uses: EmbarkStudios/cargo-deny-action@v1
        with:
          # --all-features spuriously activates openssl, but we want to ban that dependency in
          # all of zebrad's production features for security reasons. But the --all-features job is
          # the only job that gives accurate "skip tree root was not found" warnings.
          # In other jobs, we expect some of these warnings, due to disabled features.
          command: check ${{ matrix.checks }} ${{ matrix.features == '--all-features' && '--allow banned' || '--allow unmatched-skip-root' }}
          arguments: --workspace ${{ matrix.features }}
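
      # Sketch of how the conditional above expands (with checks=bans):
      #   --all-features job:  cargo deny check bans --allow banned
      #   other feature jobs:  cargo deny check bans --allow unmatched-skip-root
      # The GitHub expression `a && b || c` acts like a ternary here, because
      # `b` is a non-empty string.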
unused-deps:
|
||||
name: Check for unused dependencies
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout git repository
|
||||
uses: actions/checkout@v4.1.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: r7kamura/rust-problem-matchers@v1.4.0
|
||||
|
||||
# Setup Rust with stable toolchain and minimal profile
|
||||
- name: Setup Rust
|
||||
run: |
|
||||
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal
|
||||
|
||||
- name: Install cargo-machete
|
||||
uses: baptiste0928/cargo-install@v3.1.0
|
||||
with:
|
||||
crate: cargo-machete
|
||||
|
||||
- name: Check unused dependencies
|
||||
# Exclude macro and transitive dependencies by filtering them out of the output,
|
||||
# then if there are any more unused dependencies, fail the job.
|
||||
run: |
|
||||
echo "-- full cargo machete output, including ignored dependencies --"
|
||||
cargo machete --skip-target-dir || true
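          # (cargo machete exits nonzero when it finds unused dependencies, so the
          # `|| true` above keeps this informational pass from failing the job early.)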
          echo "-- unused dependencies are below this line, full output is above --"
          if cargo machete --skip-target-dir 2>/dev/null | \
             grep --extended-regexp -e '^\\t' | \
             grep -v -e gumdrop -e humantime-serde -e tinyvec -e zebra-utils; then
            echo "New unused dependencies were found, please remove them!"
            exit 1
          else
            echo "No unused dependencies found."
          fi

  failure-issue:
    name: Open or update issues for OS integration failures
    # When a new job is added to this workflow, add it to this list.
    needs: [ test, install-from-lockfile-no-cache, check-cargo-lock, cargo-deny, unused-deps ]
    # Only open tickets for failed or cancelled jobs that are not coming from PRs.
    # (PR statuses are already reported in the PR jobs list, and checked by Mergify.)
    if: (failure() && github.event.pull_request == null) || (cancelled() && github.event.pull_request == null)
    runs-on: ubuntu-latest
    steps:
      - uses: jayqi/failed-build-issue-action@v1
        with:
          title-template: "{{refname}} branch CI failed: {{eventName}} in {{workflow}}"
          # New failures open an issue with this label.
          label-name: S-ci-fail-os-integration-auto-issue
          # If there is already an open issue with this label, any failures become comments on that issue.
          always-create-new-issue: false
          github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -0,0 +1,30 @@
# Workflow patches for skipping Google Cloud docs updates on PRs from external repositories.
name: Docs

# Run on PRs from external repositories, let them pass, and then Mergify will check them.
# GitHub doesn't support filtering workflows by source branch names, so we have to do it for each
# job.
on:
  pull_request:

# IMPORTANT
#
# The job names in `docs-deploy-firebase.yml`, `docs-deploy-firebase.patch.yml` and
# `docs-deploy-firebase.patch-external.yml` must be kept in sync.
jobs:
  build-docs-book:
    name: Build and Deploy Zebra Book Docs
    # Only run on PRs from external repositories.
    if: ${{ startsWith(github.event_name, 'pull') && github.event.pull_request.head.repo.fork }}
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'

  build-docs-internal:
    name: Build and Deploy Zebra Internal Docs
    # This dependency allows all these jobs to depend on a single condition, making it easier to
    # change.
    needs: build-docs-book
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "Skipping job on fork"'
@@ -0,0 +1,39 @@
# Workflow patches for skipping Google Cloud docs updates when docs, Rust code, or dependencies
# aren't modified in a PR.
name: Docs

# Run on PRs with unmodified docs, code, and dependency files.
on:
  pull_request:
    paths-ignore:
      # doc source files
      - 'book/**'
      - '**/firebase.json'
      - '**/.firebaserc'
      - 'katex-header.html'
      # rustdoc source files
      - '**/*.rs'
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      # configuration files
      - '.cargo/config.toml'
      - '**/clippy.toml'
      # workflow definitions
      - '.github/workflows/docs-deploy-firebase.yml'

# IMPORTANT
#
# The job names in `docs-deploy-firebase.yml`, `docs-deploy-firebase.patch.yml` and
# `docs-deploy-firebase.patch-external.yml` must be kept in sync.
jobs:
  build-docs-book:
    name: Build and Deploy Zebra Book Docs
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'

  build-docs-internal:
    name: Build and Deploy Zebra Internal Docs
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'
@@ -0,0 +1,186 @@
# Google Cloud docs updates that run when docs, Rust code, or dependencies are modified,
# but only on PRs from the ZcashFoundation/zebra repository. (External PRs are deployed by mergify.)

# - Builds and deploys Zebra Book Docs using mdBook, setting up necessary tools and deploying to Firebase.
# - Compiles and deploys external documentation, setting up Rust with the beta toolchain and default profile, building the docs, and deploying them to Firebase.
# - Assembles and deploys internal documentation with similar steps, including private items in the documentation, and deploys to Firebase.
name: Docs

# Ensures that only one workflow task will run at a time. Previous deployments, if
# already in process, won't get cancelled. Instead, we let the first one complete,
# then queue the latest pending workflow, cancelling any workflows in between.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

on:
  workflow_dispatch:

  push:
    # Skip main branch updates where docs, Rust code, and dependencies aren't modified.
    branches:
      - main
    paths:
      # doc source files
      - 'book/**'
      - '**/firebase.json'
      - '**/.firebaserc'
      - 'katex-header.html'
      # rustdoc source files
      - '**/*.rs'
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      # configuration files
      - '.cargo/config.toml'
      - '**/clippy.toml'
      # workflow definitions
      - '.github/workflows/docs-deploy-firebase.yml'

  pull_request:
    # Skip PRs where docs, Rust code, and dependencies aren't modified.
    paths:
      # doc source files
      - 'book/**'
      - '**/firebase.json'
      - '**/.firebaserc'
      - 'katex-header.html'
      # rustdoc source files
      - '**/*.rs'
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      # configuration files
      - '.cargo/config.toml'
      - '**/clippy.toml'
      # workflow definitions
      - '.github/workflows/docs-deploy-firebase.yml'

env:
  RUST_LOG: ${{ vars.RUST_LOG }}
  RUST_BACKTRACE: ${{ vars.RUST_BACKTRACE }}
  RUST_LIB_BACKTRACE: ${{ vars.RUST_LIB_BACKTRACE }}
  COLORBT_SHOW_HIDDEN: ${{ vars.COLORBT_SHOW_HIDDEN }}
  FIREBASE_CHANNEL: ${{ github.event_name == 'pull_request' && 'preview' || 'live' }}
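  # (GitHub Actions expressions have no ternary operator; this `cond && 'a' || 'b'`
  # pattern works as one here because 'preview' is always truthy.)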
  # cargo doc doesn't support '-- -D warnings', so we have to add it here
  # https://github.com/rust-lang/cargo/issues/8424#issuecomment-774662296
  #
  # The -A and -W settings must be the same as the `rustdocflags` in:
  # https://github.com/ZcashFoundation/zebra/blob/main/.cargo/config.toml#L87
  RUSTDOCFLAGS: --html-in-header katex-header.html -D warnings -A rustdoc::private_intra_doc_links

# IMPORTANT
#
# The job names in `docs-deploy-firebase.yml`, `docs-deploy-firebase.patch.yml` and
# `docs-deploy-firebase.patch-external.yml` must be kept in sync.
jobs:
  build-docs-book:
    name: Build and Deploy Zebra Book Docs
    # Skip PRs from external repositories, let them pass, and then Mergify will check them
    if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }}
    timeout-minutes: 5
    runs-on: ubuntu-latest
    permissions:
      checks: write
      contents: 'read'
      id-token: 'write'
      pull-requests: write
    steps:
      - name: Checkout the source code
        uses: actions/checkout@v4.1.2
        with:
          persist-credentials: false

      - uses: r7kamura/rust-problem-matchers@v1.4.0

      - name: Setup mdBook
        uses: jontze/action-mdbook@v3.0.0
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          mdbook-version: '~0.4'
          use-linkcheck: true
          use-mermaid: true

      - name: Build Zebra book
        run: |
          mdbook build book --dest-dir "$(pwd)"/target/book

      # Setup gcloud CLI
      - name: Authenticate to Google Cloud
        id: auth
        uses: google-github-actions/auth@v2.1.2
        with:
          retries: '3'
          workload_identity_provider: '${{ vars.GCP_WIF }}'
          service_account: '${{ vars.GCP_FIREBASE_SA }}'

      # TODO: remove this step after issue https://github.com/FirebaseExtended/action-hosting-deploy/issues/174 is fixed
      - name: Add $GCP_FIREBASE_SA_PATH to env
        run: |
          # shellcheck disable=SC2002
          echo "GCP_FIREBASE_SA_PATH=$(cat ${{ steps.auth.outputs.credentials_file_path }} | tr -d '\n')" >> "$GITHUB_ENV"

      - name: Deploy Zebra book to firebase
        uses: FirebaseExtended/action-hosting-deploy@v0.7.1
        with:
          firebaseServiceAccount: ${{ env.GCP_FIREBASE_SA_PATH }}
          channelId: ${{ env.FIREBASE_CHANNEL }}
          projectId: ${{ vars.GCP_FIREBASE_PROJECT }}
          target: docs-book

  build-docs-internal:
    name: Build and Deploy Zebra Internal Docs
    if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }}
    timeout-minutes: 45
    runs-on: ubuntu-latest
    permissions:
      checks: write
      contents: 'read'
      id-token: 'write'
      pull-requests: write
    steps:
      - name: Checkout the source code
        uses: actions/checkout@v4.1.2
        with:
          persist-credentials: false

      - uses: r7kamura/rust-problem-matchers@v1.4.0

      - name: Install last version of Protoc
        uses: arduino/setup-protoc@v3.0.0
        with:
          version: '23.x'
          repo-token: ${{ secrets.GITHUB_TOKEN }}

      # Setup Rust with beta toolchain and default profile (to include rust-docs)
      - name: Setup Rust
        run: |
          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=beta --profile=default

      - uses: Swatinem/rust-cache@v2.7.3

      - name: Build internal docs
        run: |
          cargo doc --no-deps --workspace --all-features --document-private-items --target-dir "$(pwd)"/target/internal

      # Setup gcloud CLI
      - name: Authenticate to Google Cloud
        id: auth
        uses: google-github-actions/auth@v2.1.2
        with:
          retries: '3'
          workload_identity_provider: '${{ vars.GCP_WIF }}'
          service_account: '${{ vars.GCP_FIREBASE_SA }}'

      # TODO: remove this step after issue https://github.com/FirebaseExtended/action-hosting-deploy/issues/174 is fixed
      - name: Add $GCP_FIREBASE_SA_PATH to env
        run: |
          # shellcheck disable=SC2002
          echo "GCP_FIREBASE_SA_PATH=$(cat ${{ steps.auth.outputs.credentials_file_path }} | tr -d '\n')" >> "$GITHUB_ENV"

      - name: Deploy internal docs to firebase
        uses: FirebaseExtended/action-hosting-deploy@v0.7.1
        with:
          firebaseServiceAccount: ${{ env.GCP_FIREBASE_SA_PATH }}
          channelId: ${{ env.FIREBASE_CHANNEL }}
          target: docs-internal
          projectId: ${{ vars.GCP_FIREBASE_PROJECT }}
@@ -0,0 +1,30 @@
name: Update Docker Hub Description

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: false

on:
  workflow_dispatch:
  push:
    branches:
      - main
    paths:
      - README.md
      - .github/workflows/dockerhub-description.yml

jobs:
  dockerHubDescription:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4.1.2
        with:
          persist-credentials: false

      - name: Docker Hub Description
        uses: peter-evans/dockerhub-description@v4.0.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_PASSWORD }}
          repository: zfnd/zebra
          short-description: ${{ github.event.repository.description }}
@@ -0,0 +1,107 @@
# This workflow is designed for manually deploying zcashd nodes to Google Cloud Platform (GCP) based on user inputs.
# - Allows selection of network type (Mainnet or Testnet) and instance group size.
# - Converts network name to lowercase to comply with GCP labeling requirements.
# - Authenticates with Google Cloud using provided credentials.
# - Creates a GCP instance template from a container image of zcashd.
# - Checks if the specified instance group already exists.
# - Depending on the existence check, either creates a new managed instance group or updates the existing one with the new template.
name: Zcashd Manual Deploy

on:
  workflow_dispatch:
    inputs:
      network:
        default: 'Mainnet'
        description: 'Network to deploy: Mainnet or Testnet'
        required: true
      size:
        default: '10'
        description: 'GCP Managed Instance Group size'
        required: true

jobs:
  deploy:
    name: Deploy zcashd nodes
    runs-on: ubuntu-latest
    timeout-minutes: 30
    permissions:
      contents: 'read'
      id-token: 'write'

    steps:
      - uses: actions/checkout@v4.1.2
        with:
          persist-credentials: false

      - name: Inject slug/short variables
        uses: rlespinasse/github-slug-action@v4
        with:
          short-length: 7

      # Makes the Zcash network name lowercase.
      #
      # Labels in GCP are required to be in lowercase, but the blockchain network
      # uses sentence case, so we need to downcase ${{ inputs.network }}.
      #
      # Passes ${{ inputs.network }} to subsequent steps using $NETWORK env variable.
      - name: Downcase network name for labels
        run: |
          NETWORK_CAPS="${{ inputs.network }}"
          echo "NETWORK=${NETWORK_CAPS,,}" >> "$GITHUB_ENV"
      # Setup gcloud CLI
      - name: Authenticate to Google Cloud
        id: auth
        uses: google-github-actions/auth@v2.1.2
        with:
          retries: '3'
          workload_identity_provider: '${{ vars.GCP_WIF }}'
          service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}'

      - name: Set up Cloud SDK
        uses: google-github-actions/setup-gcloud@v2.1.0

      # Create instance template from container image
      - name: Create instance template
        run: |
          gcloud compute instance-templates create-with-container zcashd-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
          --boot-disk-size 10GB \
          --boot-disk-type=pd-ssd \
          --image-project=cos-cloud \
          --image-family=cos-stable \
          --container-stdin \
          --container-tty \
          --container-image electriccoinco/zcashd \
          --container-env ZCASHD_NETWORK="${{ inputs.network }}" \
          --machine-type ${{ vars.GCP_SMALL_MACHINE }} \
          --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \
          --service-account ${{ vars.GCP_DEPLOYMENTS_SA }} \
          --scopes cloud-platform \
          --labels=app=zcashd,environment=prod,network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }} \
          --tags zcashd

      # Check if our destination instance group exists already
      - name: Check if instance group exists
        id: does-group-exist
        continue-on-error: true
        run: |
          gcloud compute instance-groups list | grep "zcashd-${{ env.GITHUB_REF_SLUG_URL }}-${{ inputs.network }}" | grep "${{ vars.GCP_REGION }}"

      # Deploy new managed instance group using the new instance template
      - name: Create managed instance group
        if: steps.does-group-exist.outcome == 'failure'
        run: |
          gcloud compute instance-groups managed create \
          "zcashd-${{ env.GITHUB_REF_SLUG_URL }}-${{ inputs.network }}" \
          --template "zcashd-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \
          --region "${{ vars.GCP_REGION }}" \
          --size "${{ github.event.inputs.size }}"

      # Rolls out update to existing group using the new instance template
      - name: Update managed instance group
        if: steps.does-group-exist.outcome == 'success'
        run: |
          gcloud compute instance-groups managed rolling-action start-update \
          "zcashd-${{ env.GITHUB_REF_SLUG_URL }}-${{ inputs.network }}" \
          --version template="zcashd-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \
          --region "${{ vars.GCP_REGION }}"
@@ -0,0 +1,68 @@
# This workflow triggers a build of Docker binaries when a release is published;
# it uses the existing `sub-build-docker-image.yml` workflow.
#
# We use a separate action as we might want to trigger this under
# different circumstances than a Continuous Deployment, for example.
#
# This workflow is triggered if:
# - A release is published
# - A pre-release is changed to a release
name: Release binaries

on:
  release:
    types:
      - released

jobs:
  # Each time this workflow is executed, a build will be triggered to create a new image
  # with the corresponding tags using information from git

  # The image will be named `zebra:<semver>-experimental`
  build-experimental:
    name: Build Experimental Features Release Docker
    uses: ./.github/workflows/sub-build-docker-image.yml
    with:
      dockerfile_path: ./docker/Dockerfile
      dockerfile_target: runtime
      image_name: zebra
      tag_suffix: -experimental
      features: ${{ format('{0} {1}', vars.RUST_PROD_FEATURES, vars.RUST_EXPERIMENTAL_FEATURES) }}
      rust_log: ${{ vars.RUST_LOG }}
    # This step needs access to Docker Hub secrets to run successfully
    secrets: inherit

  # The image will be named `zebra:<semver>`
  # It should be built last, so tags with the same name point to the production build, not the experimental build.
  build:
    name: Build Release Docker
    # Run this build last, regardless of whether experimental worked
    needs: build-experimental
    if: always()
    uses: ./.github/workflows/sub-build-docker-image.yml
    with:
      dockerfile_path: ./docker/Dockerfile
      dockerfile_target: runtime
      image_name: zebra
      latest_tag: true
      features: ${{ vars.RUST_PROD_FEATURES }}
      rust_log: ${{ vars.RUST_LOG }}
    # This step needs access to Docker Hub secrets to run successfully
    secrets: inherit

  failure-issue:
    name: Open or update issues for release binaries failures
    # When a new job is added to this workflow, add it to this list.
    needs: [ build, build-experimental ]
    # Open tickets for any failed build in this workflow.
    if: failure() || cancelled()
    runs-on: ubuntu-latest
    steps:
      - uses: jayqi/failed-build-issue-action@v1
        with:
          title-template: "{{refname}} branch CI failed: {{eventName}} in {{workflow}}"
          # New failures open an issue with this label.
          label-name: S-ci-fail-binaries-auto-issue
          # If there is already an open issue with this label, any failures become comments on that issue.
          always-create-new-issue: false
          github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -0,0 +1,28 @@
name: Release crates

on:
  # Only patch the Release PR test job
  pull_request:
    paths-ignore:
      # code and tests
      - '**/*.rs'
      # hard-coded checkpoints (and proptest regressions, which are not actually needed)
      - '**/*.txt'
      # dependencies
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      # configuration files
      - '.cargo/config.toml'
      - '**/clippy.toml'
      # READMEs, which are shown on the crate page
      - '**/README.md'
      # workflow definitions
      - '.github/workflows/release-crates.io.yml'

jobs:
  check-release:
    name: Check crate release dry run
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No check required"'
@@ -0,0 +1,126 @@
# This workflow checks that Zebra's crates.io release script works.
#
# We use a separate action, because the changed files are different to a Continuous Deployment
# or Docker release.
#
# This workflow is triggered when:
# - A PR that changes Rust files, a README, or this workflow is opened or updated
# - A change is pushed to the main branch
#
# TODO:
# If we decide to automate crates.io releases, we can also publish crates using this workflow, when:
# - A release is published
# - A pre-release is changed to a release

name: Release crates

# Ensures that only one workflow task will run at a time. Previous releases, if
# already in process, won't get cancelled. Instead, we let the first release complete,
# then queue the latest pending workflow, cancelling any workflows in between.
#
# Since the different event types do very different things (test vs release),
# we can run different event types concurrently.
#
# For pull requests, we only run the tests from this workflow, and don't do any releases.
# So an in-progress pull request gets cancelled, just like other tests.
concurrency:
  group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.ref }}
  cancel-in-progress: ${{ github.event_name == 'pull_request' }}
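# (For a PR, this group evaluates to something like
# "Release crates-pull_request-refs/pull/1234/merge", so a new push to a PR only
# cancels earlier runs of that same PR.)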
on:
  # disabled for now
  # release:
  #   types:
  #     - released

  # Only runs the release tests, doesn't release any crates.
  #
  # We test all changes on the main branch, just in case the PR paths are too strict.
  push:
    branches:
      - main

  pull_request:
    paths:
      # code and tests
      - '**/*.rs'
      # hard-coded checkpoints (and proptest regressions, which are not actually needed)
      - '**/*.txt'
      # dependencies
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      # configuration files
      - '.cargo/config.toml'
      - '**/clippy.toml'
      # READMEs, which are shown on the crate page
      - '**/README.md'
      # workflow definitions
      - '.github/workflows/release-crates.io.yml'

jobs:
  # Test that Zebra can be released to crates.io using `cargo`.
  # This checks that Zebra's dependencies and release configs are correct.
  check-release:
    name: Check crate release dry run
    timeout-minutes: 15
    runs-on: ubuntu-latest
    steps:
      - uses: r7kamura/rust-problem-matchers@v1.4.0

      - name: Checkout git repository
        uses: actions/checkout@v4.1.2
        with:
          persist-credentials: false

      - name: Inject slug/short variables
        uses: rlespinasse/github-slug-action@v4
        with:
          short-length: 7

      # Setup Rust with stable toolchain and minimal profile
      - name: Setup Rust
        run: |
          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain=stable --profile=minimal

      - name: Install cargo-release
        uses: baptiste0928/cargo-install@v3.1.0
        with:
          crate: cargo-release

      # Make sure Zebra can be released!
      #
      # These steps should be kept up to date with the release checklist.
      #
      - name: Crate release dry run
        run: |
          ./.github/workflows/scripts/release-crates-dry-run.sh

  # TODO: actually do the release here
  #release-crates:
  #  name: Release Zebra Crates
  #  needs: [ check-release ]
  #  runs-on: ubuntu-latest
  #  timeout-minutes: 30
  #  if: ${{ !cancelled() && !failure() && github.event_name == 'release' }}
  #  steps:
  #    ...

  failure-issue:
    name: Open or update issues for release crates failures
    # When a new job is added to this workflow, add it to this list.
    needs: [ check-release ]
    # Only open tickets for failed or cancelled jobs that are not coming from PRs.
    # (PR statuses are already reported in the PR jobs list, and checked by Mergify.)
    if: (failure() && github.event.pull_request == null) || (cancelled() && github.event.pull_request == null)
    runs-on: ubuntu-latest
    steps:
      - uses: jayqi/failed-build-issue-action@v1
        with:
          title-template: "{{refname}} branch CI failed: {{eventName}} in {{workflow}}"
          # New failures open an issue with this label.
          label-name: S-ci-fail-release-crates-auto-issue
          # If there is already an open issue with this label, any failures become comments on that issue.
          always-create-new-issue: false
          github-token: ${{ secrets.GITHUB_TOKEN }}
@@ -0,0 +1,47 @@
# This workflow automates the creation and updating of draft releases. It compiles PR titles into the draft release notes.
# https://github.com/ZcashFoundation/zebra/releases
#
# - Updates the draft release upon each merge into 'main'.
# - Utilizes the release-drafter GitHub Action to accumulate PR titles since the last release into a draft release note.
# - Suitable permissions are set for creating releases and handling pull requests.
#
# Workflow is based on:
# https://github.com/marketplace/actions/release-drafter#usage
name: Release Drafter

on:
  # Automatically update the draft release every time a PR merges to `main`
  push:
    branches:
      - main
  # pull_request event is required only for autolabeler
  pull_request:
    # Only following types are handled by the action, but one can default to all as well
    #types: [opened, reopened, synchronize]
  # pull_request_target event is required for autolabeler to support PRs from forks
  pull_request_target:
    #types: [opened, reopened, synchronize]
  # Manually update the draft release without waiting for a PR to merge
  workflow_dispatch:

permissions:
  contents: read

jobs:
  update_release_draft:
    permissions:
      # write permission is required to create a github release
      contents: write
      # write permission is required for autolabeler
      # otherwise, read permission is required at least
      pull-requests: write
    runs-on: ubuntu-latest
    steps:
      # Drafts your next Release notes
      - uses: release-drafter/release-drafter@v6
        with:
          config-name: release-drafter.yml
          commitish: main
          #disable-autolabeler: true
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -0,0 +1,45 @@
#!/usr/bin/env bash

# Function to handle image deletion logic
delete_images() {
    local image_type="$1"
    local filter="$2"
    local kept_images=0

    echo "Processing ${image_type} images"
    images=$(gcloud compute images list --sort-by=~creationTimestamp --filter="${filter} AND creationTimestamp < ${DELETE_BEFORE_DATE}" --format='value(NAME)')

    for image in ${images}; do
        if [[ "${kept_images}" -lt "${KEEP_LATEST_IMAGE_COUNT}" ]]; then
            ((kept_images++))
            echo "Keeping image ${kept_images} named ${image}"
        else
            echo "Deleting image: ${image}"
            gcloud compute images delete "${image}" || echo "Failed to delete image: ${image}"
        fi
    done
}
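# Note: `--sort-by=~creationTimestamp` lists images newest-first, so the loop
# above keeps the most recent ${KEEP_LATEST_IMAGE_COUNT} matching images and
# deletes the older ones.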

# Check if necessary variables are set
if ! [[ "${DELETE_AGE_DAYS}" =~ ^[0-9]+$ && "${KEEP_LATEST_IMAGE_COUNT}" =~ ^[0-9]+$ ]]; then
    echo "ERROR: One or more required variables are not set or not numeric"
    exit 1
fi

# Set pipefail
set -o pipefail

# Calculate the date before which images should be deleted
DELETE_BEFORE_DATE=$(date --date="${DELETE_AGE_DAYS} days ago" '+%Y%m%d')

# Mainnet and Testnet zebrad checkpoint
delete_images "Mainnet zebrad checkpoint" "name~^zebrad-cache-.*-mainnet-checkpoint" # As of April 2023, these disk names look like: zebrad-cache-6556-merge-a2ca4de-v25-mainnet-tip(-u)?-140654
delete_images "Testnet zebrad checkpoint" "name~^zebrad-cache-.*-testnet-checkpoint"

# Mainnet and Testnet zebrad tip
delete_images "Mainnet zebrad tip" "name~^zebrad-cache-.*-mainnet-tip"
delete_images "Testnet zebrad tip" "name~^zebrad-cache-.*-testnet-tip"

# Mainnet and Testnet lightwalletd tip
delete_images "Mainnet lightwalletd tip" "name~^lwd-cache-.*-mainnet-tip"
delete_images "Testnet lightwalletd tip" "name~^lwd-cache-.*-testnet-tip"
@@ -0,0 +1,49 @@
#!/usr/bin/env bash

# Check if DELETE_AGE_DAYS is set and is a number
if ! [[ "${DELETE_AGE_DAYS}" =~ ^[0-9]+$ ]]; then
    echo "ERROR: DELETE_AGE_DAYS is not set or not a number"
    exit 1
fi

# Set pipefail to catch errors in pipelines
set -o pipefail

# Calculate the date before which disks should be deleted
DELETE_BEFORE_DATE=$(date --date="${DELETE_AGE_DAYS} days ago" '+%Y%m%d')

# Fetch disks created by PR jobs, and other jobs that use a commit hash
if ! COMMIT_DISKS=$(gcloud compute disks list --sort-by=creationTimestamp --filter="name~-[0-9a-f]{7,}$ AND creationTimestamp < ${DELETE_BEFORE_DATE}" --format='value(NAME,LOCATION,LOCATION_SCOPE)' | sed 's/\(.*\)\t\(.*\)\t\(.*\)/\1 --\3=\2/'); then
    echo "Error fetching COMMIT_DISKS."
    exit 1
fi

# Delete commit disks if any are found
IFS=$'\n'
for DISK_AND_LOCATION in ${COMMIT_DISKS}; do
    IFS=$' '
    echo "Deleting disk: ${DISK_AND_LOCATION}"
    if ! gcloud compute disks delete --verbosity=info "${DISK_AND_LOCATION}"; then
        echo "Failed to delete disk: ${DISK_AND_LOCATION}"
    fi
    IFS=$'\n'
done
IFS=$' \t\n' # Reset IFS to its default value

# Fetch disks created by managed instance groups, and other jobs that start with "zebrad-"
if ! ZEBRAD_DISKS=$(gcloud compute disks list --sort-by=creationTimestamp --filter="name~^zebrad- AND creationTimestamp < ${DELETE_BEFORE_DATE}" --format='value(NAME,LOCATION,LOCATION_SCOPE)' | sed 's/\(.*\)\t\(.*\)\t\(.*\)/\1 --\3=\2/'); then
    echo "Error fetching ZEBRAD_DISKS."
    exit 1
fi

# Delete zebrad disks if any are found
IFS=$'\n'
for DISK_AND_LOCATION in ${ZEBRAD_DISKS}; do
    IFS=$' '
    echo "Deleting disk: ${DISK_AND_LOCATION}"
    if ! gcloud compute disks delete --verbosity=info "${DISK_AND_LOCATION}"; then
        echo "Failed to delete disk: ${DISK_AND_LOCATION}"
    fi
    IFS=$'\n'
done
IFS=$' \t\n' # Reset IFS to its default value
@@ -0,0 +1,42 @@
#!/usr/bin/env bash

# Check if DELETE_INSTANCE_DAYS is set and is a number
if ! [[ "${DELETE_INSTANCE_DAYS}" =~ ^[0-9]+$ ]]; then
    echo "ERROR: DELETE_INSTANCE_DAYS is not set or not a number"
    exit 1
fi

# Set pipefail to catch errors in pipelines
set -o pipefail

# Calculate the date before which instances should be deleted
DELETE_BEFORE_DATE=$(date --date="${DELETE_INSTANCE_DAYS} days ago" '+%Y%m%d')

# Check if gcloud command is available
if ! command -v gcloud &> /dev/null; then
    echo "ERROR: gcloud command not found"
    exit 1
fi

# Fetch the list of instances to delete
if ! INSTANCES=$(gcloud compute instances list --sort-by=creationTimestamp --filter="name~-[0-9a-f]{7,}$ AND creationTimestamp < ${DELETE_BEFORE_DATE}" --format='value(NAME,ZONE)' | sed 's/\(.*\)\t\(.*\)/\1 --zone=\2/'); then
    echo "Error fetching instances."
    exit 1
fi

# Delete instances if any are found
if [[ -n "${INSTANCES}" ]]; then
    IFS=$'\n'
    for INSTANCE_AND_ZONE in ${INSTANCES}; do
        IFS=$' '
        echo "Deleting instance: ${INSTANCE_AND_ZONE}"
        gcloud compute instances delete --verbosity=info "${INSTANCE_AND_ZONE}" --delete-disks=all || {
            echo "Failed to delete instance: ${INSTANCE_AND_ZONE}"
            continue
        }
        IFS=$'\n'
    done
    IFS=$' \t\n' # Reset IFS to its default value
else
    echo "No instances to delete."
fi
@@ -0,0 +1,33 @@
#!/usr/bin/env bash

# Check if DELETE_AGE_DAYS is set and is a number
if ! [[ "${DELETE_AGE_DAYS}" =~ ^[0-9]+$ ]]; then
    echo "ERROR: DELETE_AGE_DAYS is not set or not a number"
    exit 1
fi

# Set pipefail to catch errors in pipelines
set -o pipefail

# Calculate the date before which templates should be deleted
DELETE_BEFORE_DATE=$(date --date="${DELETE_AGE_DAYS} days ago" '+%Y%m%d')

# Check if gcloud command is available
if ! command -v gcloud &> /dev/null; then
    echo "ERROR: gcloud command not found"
    exit 1
fi

# Fetch the list of instance templates to delete
if ! TEMPLATES=$(gcloud compute instance-templates list --sort-by=creationTimestamp --filter="name~-[0-9a-f]{7,}$ AND creationTimestamp < ${DELETE_BEFORE_DATE}" --format='value(NAME)'); then
    echo "Error fetching instance templates."
    exit 1
fi

# Delete templates if any are found
for TEMPLATE in ${TEMPLATES}; do
    echo "Deleting template: ${TEMPLATE}"
    if ! gcloud compute instance-templates delete "${TEMPLATE}"; then
        echo "Failed to delete template: ${TEMPLATE}"
    fi
done
@@ -0,0 +1,42 @@
#!/usr/bin/env bash

# Description:
# Check if there are cached state disks available for subsequent jobs to use.
#
# This lookup uses the state version from constants.rs.
# It accepts disks generated by any branch, including draft and unmerged PRs.
#
# If the disk exists, sets the corresponding output to "true":
# - lwd_tip_disk
# - zebra_tip_disk
# - zebra_checkpoint_disk

set -euxo pipefail

LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "${GITHUB_WORKSPACE}/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1)
echo "STATE_VERSION: ${LOCAL_STATE_VERSION}"

# Function to find a disk image and output its name
find_disk_image() {
    local base_name="${1}"
    local disk_type="${2}"
    local disk_pattern="${base_name}-cache"
    local output_var="${base_name}_${disk_type}_disk"
    local disk_image

    disk_image=$(gcloud compute images list --filter="status=READY AND name~${disk_pattern}-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${disk_type}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1)

    if [[ -z "${disk_image}" ]]; then
        echo "No ${disk_type^^} disk found for ${base_name^^} on network: ${NETWORK}"
        echo "${output_var}=false" >> "${GITHUB_OUTPUT}"
    else
        echo "Disk: ${disk_image}"
        echo "${output_var}=true" >> "${GITHUB_OUTPUT}"
    fi
}
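# In a calling workflow, these outputs can then gate later jobs, e.g. (a sketch,
# assuming a job id of `get-available-disks` that runs this script):
#   if: ${{ needs.get-available-disks.outputs.lwd_tip_disk == 'true' }}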

# Find and output LWD and Zebra disks
find_disk_image "lwd" "tip"
find_disk_image "zebrad" "tip"
find_disk_image "zebrad" "checkpoint"
@@ -0,0 +1,72 @@
#!/usr/bin/env bash

# Description:
# This script finds a cached Google Cloud Compute image based on specific criteria.
# It prioritizes images from the current commit, falls back to the main branch,
# and finally checks other branches if needed. The selected image is used for
# setting up the environment in a CI/CD pipeline.

set -eo pipefail

# Function to find and report a cached disk image
find_cached_disk_image() {
    local search_pattern="${1}"
    local git_source="${2}"
    local disk_name

    disk_name=$(gcloud compute images list --filter="status=READY AND name~${search_pattern}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1)

    # Use >&2 to redirect to stderr and avoid sending wrong assignments to stdout
    if [[ -n "${disk_name}" ]]; then
        echo "Found ${git_source} Disk: ${disk_name}" >&2
        disk_description=$(gcloud compute images describe "${disk_name}" --format="value(DESCRIPTION)")
        echo "Description: ${disk_description}" >&2
        echo "${disk_name}" # This is the actual return value when a disk is found
    else
        echo "No ${git_source} disk found." >&2
    fi
}

# Extract local state version
echo "Extracting local state version..."
LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "${GITHUB_WORKSPACE}/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1)
echo "STATE_VERSION: ${LOCAL_STATE_VERSION}"

# Define DISK_PREFIX based on the requiring state directory
if [[ "${NEEDS_LWD_STATE}" == "true" ]]; then
    DISK_PREFIX="${LWD_STATE_DIR}"
else
    DISK_PREFIX="${ZEBRA_STATE_DIR:-${DISK_PREFIX}}"
fi
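# (The ${ZEBRA_STATE_DIR:-${DISK_PREFIX}} expansion above falls back to the
# caller-supplied DISK_PREFIX env variable when ZEBRA_STATE_DIR is empty or unset.)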

# Find the most suitable cached disk image
echo "Finding the most suitable cached disk image..."
if [[ -z "${CACHED_DISK_NAME}" ]]; then
    # Try to find a cached disk image from the current commit
    COMMIT_DISK_PREFIX="${DISK_PREFIX}-.+-${GITHUB_SHA_SHORT}-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}"
    CACHED_DISK_NAME=$(find_cached_disk_image "${COMMIT_DISK_PREFIX}" "commit")
    # If no cached disk image is found, try to find one from the main branch
    if [[ "${PREFER_MAIN_CACHED_STATE}" == "true" ]]; then
        MAIN_DISK_PREFIX="${DISK_PREFIX}-main-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}"
        CACHED_DISK_NAME=$(find_cached_disk_image "${MAIN_DISK_PREFIX}" "main branch")
    # Else, try to find one from any branch
    else
        ANY_DISK_PREFIX="${DISK_PREFIX}-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}"
        CACHED_DISK_NAME=$(find_cached_disk_image "${ANY_DISK_PREFIX}" "any branch")
    fi
fi

# Handle case where no suitable disk image is found
if [[ -z "${CACHED_DISK_NAME}" ]]; then
    echo "No suitable cached state disk available."
    echo "Expected pattern: ${COMMIT_DISK_PREFIX}"
    echo "Cached state test jobs must depend on the cached state rebuild job."
    exit 1
fi

echo "Selected Disk: ${CACHED_DISK_NAME}"

# Exporting variables for subsequent steps
echo "Exporting variables for subsequent steps..."
export CACHED_DISK_NAME="${CACHED_DISK_NAME}"
export LOCAL_STATE_VERSION="${LOCAL_STATE_VERSION}"
@@ -0,0 +1,22 @@
#!/usr/bin/env bash
# Increase the Google Cloud instance sshd connection limit
#
# This script appends 'MaxStartups 500' to /etc/ssh/sshd_config, allowing up to 500
# unauthenticated connections to Google Cloud instances.
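# (For reference, sshd's stock default is "MaxStartups 10:30:100", which starts
# randomly dropping new unauthenticated connections once 10 are pending.)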
ps auxwww | grep sshd
echo
sudo grep MaxStartups /etc/ssh/sshd_config
echo 'Original config:'
sudo cat /etc/ssh/sshd_config
echo
echo 'Modifying config:'
echo 'MaxStartups 500' | sudo tee --append /etc/ssh/sshd_config \
    || \
    (echo "updating instance sshd config failed: failing test"; exit 1)
sudo grep MaxStartups /etc/ssh/sshd_config
echo 'Modified config:'
sudo cat /etc/ssh/sshd_config
echo
sudo systemctl reload sshd.service
echo
ps auxwww | grep sshd
@@ -0,0 +1,40 @@
#!/usr/bin/env bash
set -ex

# Check if necessary tools are installed
if ! command -v git &> /dev/null || ! command -v cargo &> /dev/null; then
    echo "ERROR: Required tools (git, cargo) are not installed."
    exit 1
fi

git config --global user.email "release-tests-no-reply@zfnd.org"
git config --global user.name "Automated Release Test"

# Ensure cargo-release is installed
if ! cargo release --version &> /dev/null; then
    echo "ERROR: cargo release must be installed."
    exit 1
fi

# Release process
# We use the same commands as the [release drafter](https://github.com/ZcashFoundation/zebra/blob/main/.github/PULL_REQUEST_TEMPLATE/release-checklist.md#update-crate-versions)
# with an extra `--no-confirm` argument for non-interactive testing.
# Update everything except for alpha crates and zebrad:
cargo release version --verbose --execute --no-confirm --allow-branch '*' --workspace --exclude zebrad --exclude zebra-scan --exclude zebra-grpc beta

# Due to a bug in cargo-release, we need to pass exact versions for alpha crates:
cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebra-scan 0.1.0-alpha.6
cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebra-grpc 0.1.0-alpha.4

# Update zebrad:
cargo release version --verbose --execute --no-confirm --allow-branch '*' --package zebrad patch
# Continue with the release process:
cargo release replace --verbose --execute --no-confirm --allow-branch '*' --package zebrad
cargo release commit --verbose --execute --no-confirm --allow-branch '*'

# Dry run to check the release
# Workaround for unpublished dependency version errors: https://github.com/crate-ci/cargo-release/issues/691
# TODO: check all crates after fixing these errors
cargo release publish --verbose --dry-run --allow-branch '*' --workspace --exclude zebra-consensus --exclude zebra-utils --exclude zebrad --exclude zebra-scan

echo "Release process completed."
@@ -0,0 +1,187 @@
# This workflow automates the building and pushing of Docker images based on user-defined inputs. It includes:
# - Accepting various inputs like image name, Dockerfile path, target, and additional Rust-related parameters.
# - Authenticating with Google Cloud and logging into Google Artifact Registry and DockerHub.
# - Using Docker Buildx for improved build performance and caching.
# - Building the Docker image and pushing it to Google Artifact Registry, and potentially DockerHub, depending on the release type.
# - Managing caching strategies to optimize build times across different branches.
name: Build docker image

on:
  workflow_call:
    inputs:
      image_name:
        required: true
        type: string
      dockerfile_path:
        required: true
        type: string
      dockerfile_target:
        required: true
        type: string
      short_sha:
        required: false
        type: string
      rust_backtrace:
        required: false
        type: string
      rust_lib_backtrace:
        required: false
        type: string
      # defaults to: vars.RUST_LOG
      rust_log:
        required: false
        type: string
      # defaults to: vars.RUST_PROD_FEATURES
      features:
        required: false
        type: string
      # defaults to: vars.RUST_TEST_FEATURES (and entrypoint.sh adds vars.RUST_PROD_FEATURES)
      test_features:
        required: false
        type: string
      latest_tag:
        required: false
        type: boolean
        default: false
      tag_suffix:
        required: false
        type: string
      no_cache:
        description: 'Disable the Docker cache for this build'
        required: false
        type: boolean
        default: false

    outputs:
      image_digest:
        description: 'The image digest to be used on a caller workflow'
        value: ${{ jobs.build.outputs.image_digest }}

env:
  FEATURES: ${{ inputs.features || vars.RUST_PROD_FEATURES }}
  TEST_FEATURES: ${{ inputs.test_features || vars.RUST_TEST_FEATURES }}
  RUST_LOG: ${{ inputs.rust_log || vars.RUST_LOG }}
  CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }}

jobs:
  build:
    name: Build images
    timeout-minutes: 210
    runs-on: ubuntu-latest-xl
    outputs:
      image_digest: ${{ steps.docker_build.outputs.digest }}
      image_name: ${{ fromJSON(steps.docker_build.outputs.metadata)['image.name'] }}
    permissions:
      contents: 'read'
      id-token: 'write'
    steps:
      - uses: actions/checkout@v4.1.2
        with:
          persist-credentials: false
      - uses: r7kamura/rust-problem-matchers@v1.4.0

      - name: Inject slug/short variables
        uses: rlespinasse/github-slug-action@v4
        with:
          short-length: 7

      # Automatic tag management and OCI Image Format Specification for labels
      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v5.5.1
        with:
          # list of Docker images to use as base name for tags
          images: |
            us-docker.pkg.dev/${{ vars.GCP_PROJECT }}/zebra/${{ inputs.image_name }}
            zfnd/${{ inputs.image_name }},enable=${{ github.event_name == 'release' && !github.event.release.prerelease }}
          # appends inputs.tag_suffix to image tags/names
          flavor: |
            suffix=${{ inputs.tag_suffix }}
            latest=${{ inputs.latest_tag }}
          # generate Docker tags based on the following events/attributes
          tags: |
            # These DockerHub release tags support the following use cases:
            # - `latest`: always use the latest Zebra release when you pull or update
            # - `v1.x.y` or `1.x.y`: always use the exact version, don't automatically upgrade
            #
            # `semver` adds a "latest" tag if `inputs.latest_tag` is `true`.
            type=semver,pattern={{version}}
            type=ref,event=tag
            # DockerHub release and CI tags.
            # This tag makes sure tests are using exactly the right image, even when multiple PRs run at the same time.
            type=sha,event=push
            # These CI-only tags support CI on PRs, the main branch, and scheduled full syncs.
            # These tags do not appear on DockerHub, because DockerHub images are only published on the release event.
            type=ref,event=pr
            type=ref,event=branch
            type=edge,enable={{is_default_branch}}
            type=schedule

      # Setup Docker Buildx to allow use of docker cache layers from GH
      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v3

      - name: Authenticate to Google Cloud
        id: auth
        uses: google-github-actions/auth@v2.1.2
        with:
          retries: '3'
          workload_identity_provider: '${{ vars.GCP_WIF }}'
          service_account: '${{ vars.GCP_ARTIFACTS_SA }}'
          token_format: 'access_token'
          # Google's default lifetime for an access token is 1 hour (3600s), but
          # some builds take longer than that, so we increase the lifetime to
          # 3 hours (10800s).
          access_token_lifetime: 10800s

      - name: Login to Google Artifact Registry
        uses: docker/login-action@v3.1.0
        with:
          registry: us-docker.pkg.dev
          username: oauth2accesstoken
          password: ${{ steps.auth.outputs.access_token }}

      - name: Login to DockerHub
        # We only publish images to DockerHub if a release is not a pre-release
        # Ref: https://github.com/orgs/community/discussions/26281#discussioncomment-3251177
        if: ${{ github.event_name == 'release' && !github.event.release.prerelease }}
        uses: docker/login-action@v3.1.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      # Build and push image to Google Artifact Registry, and possibly DockerHub
      - name: Build & push
        id: docker_build
        uses: docker/build-push-action@v5.3.0
        with:
          target: ${{ inputs.dockerfile_target }}
          context: .
          file: ${{ inputs.dockerfile_path }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: |
            SHORT_SHA=${{ env.GITHUB_SHA_SHORT }}
            RUST_LOG=${{ env.RUST_LOG }}
            FEATURES=${{ env.FEATURES }}
            TEST_FEATURES=${{ env.TEST_FEATURES }}
          push: true
          # Don't read from the cache if the caller disabled it.
          # https://docs.docker.com/engine/reference/commandline/buildx_build/#options
          no-cache: ${{ inputs.no_cache }}
          # To improve build speeds, for each branch we push an additional image to the registry,
          # to be used as the caching layer, using the `max` caching mode.
          #
          # We use multiple cache sources to confirm a cache hit, starting from a per-branch cache.
          # If there's no hit, we continue with a `main` branch cache, which helps us avoid
          # rebuilding cargo-chef, most dependencies, and possibly some Zebra crates.
          #
          # The caches are tried in top-down order, the first available cache is used:
          # https://github.com/moby/moby/pull/26839#issuecomment-277383550
          cache-from: |
            type=registry,ref=us-docker.pkg.dev/${{ vars.GCP_PROJECT }}/zebra-caching/${{ inputs.image_name }}${{ inputs.tag_suffix }}:${{ env.GITHUB_REF_SLUG_URL }}-cache
            type=registry,ref=us-docker.pkg.dev/${{ vars.GCP_PROJECT }}/zebra-caching/${{ inputs.image_name }}${{ inputs.tag_suffix }}:main-cache
          cache-to: |
            type=registry,ref=us-docker.pkg.dev/${{ vars.GCP_PROJECT }}/zebra-caching/${{ inputs.image_name }}${{ inputs.tag_suffix }}:${{ env.GITHUB_REF_SLUG_URL }}-cache,mode=max
@@ -0,0 +1,748 @@
name: Deploy Tests to GCP

on:
  workflow_call:
    inputs:
      # Status and logging
      test_id:
        required: true
        type: string
        description: 'Unique identifier for the test'
      test_description:
        required: true
        type: string
        description: 'Explains what the test does'
      height_grep_text:
        required: false
        type: string
        description: 'Regular expression to find the tip height in test logs, and add it to newly created cached state image metadata'

      # Test selection and parameters
      test_variables:
        required: true
        type: string
        description: 'Environmental variables used to select and configure the test'
      network:
        required: false
        type: string
        default: Mainnet
        description: 'Zcash network to test against'
      is_long_test:
        required: false
        type: boolean
        default: false
        description: 'Does this test need multiple run jobs? (Does it run longer than 6 hours?)'

      # Cached state
      #
      # TODO: find a better name
      root_state_path:
        required: false
        type: string
        default: '/zebrad-cache'
        description: 'Cached state base directory path'
      # TODO: find a better name
      zebra_state_dir:
        required: false
        type: string
        default: ''
        description: 'Zebra cached state directory and input image prefix to search in GCP'
      # TODO: find a better name
      lwd_state_dir:
        required: false
        type: string
        default: ''
        description: 'Lightwalletd cached state directory and input image prefix to search in GCP'
      disk_prefix:
        required: false
        type: string
        default: 'zebrad-cache'
        description: 'Image name prefix, and `zebra_state_dir` name for newly created cached states'
      disk_suffix:
        required: false
        type: string
        description: 'Image name suffix'
      needs_zebra_state:
        required: true
        type: boolean
        description: 'Does the test use Zebra cached state?'
      needs_lwd_state:
        required: false
        type: boolean
        description: 'Does the test use Lightwalletd and Zebra cached state?'
      # main branch states can be outdated and slower, but they can also be more reliable
      prefer_main_cached_state:
        required: false
        type: boolean
        default: false
        description: 'Does the test prefer to use a main branch cached state?'
      saves_to_disk:
        required: true
        type: boolean
        description: 'Can this test create new or updated cached state disks?'
      force_save_to_disk:
        required: false
        type: boolean
        default: false
        description: 'Force this test to create a new or updated cached state disk'
      app_name:
        required: false
        type: string
        default: 'zebra'
        description: 'Application name, used to work out when a job is an update job'

env:
  # How many previous log lines we show at the start of each new log job.
  # Increase this number if some log lines are skipped between jobs
  #
  # We want to show all the logs since the last job finished,
  # but we don't know how long it will be between jobs.
  # 200 lines is about 6-15 minutes of sync logs, or one panic log.
  EXTRA_LOG_LINES: 200
  # How many blocks to wait before creating an updated cached state image.
  # 1 day is approximately 1152 blocks.
  CACHED_STATE_UPDATE_LIMIT: 576
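  # (At Zcash's 75-second block target, 1152 blocks x 75 s = 86400 s = 1 day,
  # so 576 blocks is roughly 12 hours.)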
|
||||
|
||||
jobs:
|
||||
# Show all the test logs, then follow the logs of the test we just launched, until it finishes.
|
||||
# Then check the result of the test.
|
||||
#
|
||||
# If `inputs.is_long_test` is `true`, the timeout is 5 days, otherwise it's 3 hours.
|
||||
test-result:
|
||||
name: Run ${{ inputs.test_id }} test
|
||||
runs-on: zfnd-runners
|
||||
timeout-minutes: ${{ inputs.is_long_test && 7200 || 180 }}
|
||||
outputs:
|
||||
cached_disk_name: ${{ steps.get-disk-name.outputs.cached_disk_name }}
|
||||
permissions:
|
||||
contents: 'read'
|
||||
id-token: 'write'
|
||||
steps:
|
||||
- uses: actions/checkout@v4.1.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
fetch-depth: '2'
|
||||
- uses: r7kamura/rust-problem-matchers@v1.4.0
|
||||
|
||||
- name: Inject slug/short variables
|
||||
uses: rlespinasse/github-slug-action@v4
|
||||
with:
|
||||
short-length: 7
|
||||
|
||||
- name: Downcase network name for disks and labels
|
||||
run: |
|
||||
NETWORK_CAPS="${{ inputs.network }}"
|
||||
echo "NETWORK=${NETWORK_CAPS,,}" >> "$GITHUB_ENV"
|
||||
|
||||
# Install our SSH secret
|
||||
- name: Install private SSH key
|
||||
uses: shimataro/ssh-key-action@v2.7.0
|
||||
with:
|
||||
key: ${{ secrets.GCP_SSH_PRIVATE_KEY }}
|
||||
name: google_compute_engine
|
||||
known_hosts: unnecessary
|
||||
|
||||
- name: Generate public SSH key
|
||||
run: |
|
||||
sudo apt-get update && sudo apt-get -qq install -y --no-install-recommends openssh-client
|
||||
ssh-keygen -y -f ~/.ssh/google_compute_engine > ~/.ssh/google_compute_engine.pub
|
||||
|
||||
# Setup gcloud CLI
|
||||
- name: Authenticate to Google Cloud
|
||||
id: auth
|
||||
uses: google-github-actions/auth@v2.1.2
|
||||
with:
|
||||
retries: '3'
|
||||
workload_identity_provider: '${{ vars.GCP_WIF }}'
|
||||
service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}'
|
||||
|
||||
- name: Set up Cloud SDK
|
||||
uses: google-github-actions/setup-gcloud@v2.1.0
|
||||
|
||||
# Find a cached state disk for this job, matching all of:
|
||||
# - disk cached state (lwd_state_dir/zebra_state_dir or disk_prefix) - zebrad-cache or lwd-cache
|
||||
# - state version (from the source code) - v{N}
|
||||
# - network (network) - mainnet or testnet
|
||||
# - disk target height kind (disk_suffix) - checkpoint or tip
|
||||
#
|
||||
# If the test needs a lightwalletd state (needs_lwd_state) set the variable DISK_PREFIX accordingly
|
||||
# - To ${{ inputs.lwd_state_dir }}" if needed
|
||||
# - To ${{ inputs.zebra_state_dir || inputs.disk_prefix }} if not
|
||||
#
|
||||
# If there are multiple disks:
|
||||
# - prefer images generated from the same commit, then
|
||||
# - if prefer_main_cached_state is true, prefer images from the `main` branch, then
|
||||
# - use any images from any other branch or commit.
|
||||
# Within each of these categories:
|
||||
# - prefer newer images to older images
|
||||
#
|
||||
# Passes the disk name to subsequent steps using $CACHED_DISK_NAME env variable
|
||||
# Passes the state version to subsequent steps using $STATE_VERSION env variable
|
||||
#
|
||||
# TODO: move this script into a file, and call it from sub-find-cached-disks.yml as well.
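# As a rough sketch only (the real selection logic lives in gcp-get-cached-disks.sh,
# and LOCAL_STATE_VERSION is computed by that script), the newest matching image can
# be looked up with a filter like:
#   gcloud compute images list \
#     --filter="status=READY AND name~${DISK_PREFIX}-.+-${GITHUB_SHA_SHORT}-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}" \
#     --sort-by=~creationTimestamp --limit=1 --format="value(NAME)"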
|
||||
- name: Find ${{ inputs.test_id }} cached state disk
|
||||
id: get-disk-name
|
||||
if: ${{ inputs.needs_zebra_state || inputs.needs_lwd_state }}
|
||||
env:
|
||||
GITHUB_SHA_SHORT: ${{ env.GITHUB_SHA_SHORT }}
|
||||
NEEDS_LWD_STATE: ${{ inputs.needs_lwd_state }}
|
||||
LWD_STATE_DIR: ${{ inputs.lwd_state_dir }}
|
||||
ZEBRA_STATE_DIR: ${{ inputs.zebra_state_dir }}
|
||||
DISK_PREFIX: ${{ inputs.disk_prefix }}
|
||||
NETWORK: ${{ env.NETWORK }} # use lowercase version from env, not input
|
||||
DISK_SUFFIX: ${{ inputs.disk_suffix }}
|
||||
PREFER_MAIN_CACHED_STATE: ${{ inputs.prefer_main_cached_state }}
|
||||
run: |
|
||||
source ./.github/workflows/scripts/gcp-get-cached-disks.sh
|
||||
echo "STATE_VERSION=${LOCAL_STATE_VERSION}" >> "${GITHUB_ENV}"
|
||||
echo "CACHED_DISK_NAME=${CACHED_DISK_NAME}" >> "${GITHUB_ENV}"
|
||||
echo "cached_disk_name=${CACHED_DISK_NAME}" >> "${GITHUB_OUTPUT}"
|
||||
|
||||
# Create a Compute Engine virtual machine and attach a cached state disk using the
|
||||
# $CACHED_DISK_NAME variable as the source image to populate the disk's cached state
|
||||
# if the test needs it.
|
||||
- name: Create ${{ inputs.test_id }} GCP compute instance
|
||||
id: create-instance
|
||||
shell: /usr/bin/bash -x {0}
|
||||
run: |
|
||||
NAME="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}"
|
||||
DISK_PARAMS="size=400GB,type=pd-ssd,name=${NAME},device-name=${NAME}"
|
||||
if [ -n "${{ env.CACHED_DISK_NAME }}" ]; then
|
||||
DISK_PARAMS+=",image=${{ env.CACHED_DISK_NAME }}"
|
||||
fi
|
||||
gcloud compute instances create-with-container "${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \
|
||||
--boot-disk-size 50GB \
|
||||
--boot-disk-type pd-ssd \
|
||||
--image-project=cos-cloud \
|
||||
--image-family=cos-stable \
|
||||
--create-disk="${DISK_PARAMS}" \
|
||||
--container-image=gcr.io/google-containers/busybox \
|
||||
--machine-type ${{ vars.GCP_LARGE_MACHINE }} \
|
||||
--network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \
|
||||
--scopes cloud-platform \
|
||||
--metadata=google-monitoring-enabled=TRUE,google-logging-enabled=TRUE \
|
||||
--metadata-from-file=startup-script=.github/workflows/scripts/gcp-vm-startup-script.sh \
|
||||
--labels=app=${{ inputs.app_name }},environment=test,network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }},test=${{ inputs.test_id }} \
|
||||
--tags ${{ inputs.app_name }} \
|
||||
--zone ${{ vars.GCP_ZONE }}
|
||||
|
||||
# Format the mounted disk if the test doesn't use a cached state.
|
||||
- name: Format ${{ inputs.test_id }} volume
|
||||
if: ${{ !inputs.needs_zebra_state && !inputs.needs_lwd_state }}
|
||||
shell: /usr/bin/bash -ex {0}
|
||||
run: |
|
||||
gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
|
||||
--zone ${{ vars.GCP_ZONE }} \
|
||||
--ssh-flag="-o ServerAliveInterval=5" \
|
||||
--ssh-flag="-o ConnectionAttempts=20" \
|
||||
--ssh-flag="-o ConnectTimeout=5" \
|
||||
--command=' \
|
||||
set -ex;
|
||||
# Extract the correct disk name based on the device-name
|
||||
DISK_NAME=$(ls -l /dev/disk/by-id | grep -oE "google-${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} -> ../../[^ ]+" | grep -oE "/[^/]+$" | cut -c 2-);
|
||||
sudo mkfs.ext4 -v /dev/$DISK_NAME \
|
||||
'
|
||||
|
||||
# Launch the test with the previously created disk or cached state.
|
||||
#
|
||||
# This step uses a $MOUNT_FLAGS variable to mount the disk to the docker container.
|
||||
# If the test needs Lightwalletd state, we add the Lightwalletd state mount to the $MOUNT_FLAGS variable.
|
||||
#
|
||||
# SSH into the newly created VM, and create a Docker container to run the incoming test
# from ${{ inputs.test_id }}, then mount the Docker volume created in the previous step.
|
||||
#
|
||||
# In this step we're using the same disk for simplicity, as mounting multiple disks to the
|
||||
# VM and to the container might require more steps in this workflow, and additional
|
||||
# considerations.
|
||||
#
|
||||
# The disk mounted in the VM is located at /dev/$DISK_NAME. We mount the root `/` of this disk to the docker
# container, and it might have two different paths (if lightwalletd state is needed):
|
||||
# - /var/cache/zebrad-cache -> ${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} -> $ZEBRA_CACHED_STATE_DIR
|
||||
# - /var/cache/lwd-cache -> ${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }} -> $LIGHTWALLETD_DATA_DIR
|
||||
#
|
||||
# Currently we do this by mounting the same disk at both paths.
|
||||
#
|
||||
# This doesn't cause any path conflicts, because Zebra and lightwalletd create different
|
||||
# subdirectories for their data. (But Zebra, lightwalletd, and the test harness must not
|
||||
# delete the whole cache directory.)
|
||||
#
|
||||
# These paths must match the variables used by the tests in Rust, which are also set in
|
||||
# `ci-unit-tests-docker.yml` to be able to run these tests.
|
||||
#
|
||||
# Although we're mounting the disk root to both directories, Zebra and Lightwalletd
|
||||
# will only respect the values from $ZEBRA_CACHED_STATE_DIR and $LIGHTWALLETD_DATA_DIR,
|
||||
# the inputs like ${{ inputs.zebra_state_dir }} and ${{ inputs.lwd_state_dir }}
|
||||
# are only used to match those variables' paths.
|
||||
- name: Launch ${{ inputs.test_id }} test
|
||||
id: launch-test
|
||||
shell: /usr/bin/bash -x {0}
|
||||
run: |
|
||||
gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
|
||||
--zone ${{ vars.GCP_ZONE }} \
|
||||
--ssh-flag="-o ServerAliveInterval=5" \
|
||||
--ssh-flag="-o ConnectionAttempts=20" \
|
||||
--ssh-flag="-o ConnectTimeout=5" \
|
||||
--command=' \
|
||||
|
||||
# Extract the correct disk name based on the device-name
|
||||
DISK_NAME=$(ls -l /dev/disk/by-id | grep -oE "google-${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} -> ../../[^ ]+" | grep -oE "/[^/]+$" | cut -c 2-)
|
||||
|
||||
MOUNT_FLAGS="--mount type=volume,volume-driver=local,volume-opt=device=/dev/$DISK_NAME,volume-opt=type=ext4,dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }}"
|
||||
|
||||
# Check if we need to mount for Lightwalletd state
|
||||
# lightwalletd-full-sync reads Zebra and writes lwd, so it is handled specially.
|
||||
if [[ "${{ inputs.needs_lwd_state }}" == "true" || "${{ inputs.test_id }}" == "lwd-full-sync" ]]; then
|
||||
MOUNT_FLAGS="$MOUNT_FLAGS --mount type=volume,volume-driver=local,volume-opt=device=/dev/$DISK_NAME,volume-opt=type=ext4,dst=${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }}"
|
||||
fi
|
||||
|
||||
sudo docker run \
|
||||
--name ${{ inputs.test_id }} \
|
||||
--tty \
|
||||
--detach \
|
||||
${{ inputs.test_variables }} \
|
||||
${MOUNT_FLAGS} \
|
||||
${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \
|
||||
'
|
||||
|
||||
# Show debug logs if previous job failed
|
||||
- name: Show debug logs if previous job failed
|
||||
if: ${{ failure() }}
|
||||
shell: /usr/bin/bash -x {0}
|
||||
run: |
|
||||
gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
|
||||
--zone ${{ vars.GCP_ZONE }} \
|
||||
--ssh-flag="-o ServerAliveInterval=5" \
|
||||
--ssh-flag="-o ConnectionAttempts=20" \
|
||||
--ssh-flag="-o ConnectTimeout=5" \
|
||||
--command=' \
|
||||
lsblk;
|
||||
sudo lsof /dev/$DISK_NAME;
|
||||
sudo dmesg;
|
||||
sudo journalctl -b \
|
||||
'
|
||||
|
||||
# Show all the logs since the container launched,
|
||||
# following until we see zebrad startup messages.
|
||||
#
|
||||
# This check limits the number of log lines, so tests running on the wrong network don't
# run until the job times out. If Zebra does a complete recompile, there are a few hundred log
# lines before the startup logs, so that's the limit we use here.
|
||||
#
|
||||
# The log pipeline ignores the exit status of `docker logs`.
|
||||
# It also ignores the expected 'broken pipe' error from `tee`,
|
||||
# which happens when `grep` finds a matching output and moves on to the next step.
|
||||
#
|
||||
# Errors in the tests are caught by the final test status job.
|
||||
- name: Check startup logs for ${{ inputs.test_id }}
|
||||
shell: /usr/bin/bash -x {0}
|
||||
run: |
|
||||
gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
|
||||
--zone ${{ vars.GCP_ZONE }} \
|
||||
--ssh-flag="-o ServerAliveInterval=5" \
|
||||
--ssh-flag="-o ConnectionAttempts=20" \
|
||||
--ssh-flag="-o ConnectTimeout=5" \
|
||||
--command=' \
|
||||
sudo docker logs \
|
||||
--tail all \
|
||||
--follow \
|
||||
${{ inputs.test_id }} | \
|
||||
head -700 | \
|
||||
tee --output-error=exit-nopipe /dev/stderr | \
|
||||
grep --max-count=1 --extended-regexp --color=always \
|
||||
"Zcash network: ${{ inputs.network }}"; \
|
||||
'
|
||||
|
||||
# Check that the container executed at least 1 Rust test harness test, and that all tests passed.
|
||||
# Then wait for the container to finish, and exit with the test's exit status.
|
||||
# Also shows all the test logs.
|
||||
#
|
||||
# If the container has already finished, `docker wait` should return its status.
|
||||
# But sometimes this doesn't work, so we use `docker inspect` as a fallback.
|
||||
#
|
||||
# `docker wait` prints the container exit status as a string, but we need to exit the `ssh` command
|
||||
# with that status.
|
||||
# (`docker wait` can also wait for multiple containers, but we only ever wait for a single container.)
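# As an illustration (numbers are hypothetical), the grep below matches standard Rust
# test harness output such as:
#   test result: ok. 47 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 52.31s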
|
||||
- name: Result of ${{ inputs.test_id }} test
|
||||
shell: /usr/bin/bash -x {0}
|
||||
run: |
|
||||
gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
|
||||
--zone ${{ vars.GCP_ZONE }} \
|
||||
--ssh-flag="-o ServerAliveInterval=5" \
|
||||
--ssh-flag="-o ConnectionAttempts=20" \
|
||||
--ssh-flag="-o ConnectTimeout=5" \
|
||||
--command=' \
|
||||
sudo docker logs \
|
||||
--tail all \
|
||||
--follow \
|
||||
${{ inputs.test_id }} | \
|
||||
tee --output-error=exit-nopipe /dev/stderr | \
|
||||
grep --max-count=1 --extended-regexp --color=always \
|
||||
"test result: .*ok.* [1-9][0-9]* passed.*finished in";
|
||||
LOGS_EXIT_STATUS=$?;
|
||||
|
||||
EXIT_STATUS=$(sudo docker wait ${{ inputs.test_id }} || echo "Error retrieving exit status");
|
||||
echo "sudo docker exit status: $EXIT_STATUS";
|
||||
|
||||
# If grep found the pattern, exit with the Docker container exit status
|
||||
if [ $LOGS_EXIT_STATUS -eq 0 ]; then
|
||||
exit $EXIT_STATUS;
|
||||
fi
|
||||
|
||||
# Handle other potential errors here
|
||||
echo "An error occurred while processing the logs.";
|
||||
exit 1; \
|
||||
'
|
||||
|
||||
# create a state image from the instance's state disk, if requested by the caller
|
||||
create-state-image:
|
||||
name: Create ${{ inputs.test_id }} cached state image
|
||||
runs-on: ubuntu-latest
|
||||
needs: [ test-result ]
|
||||
# We run exactly one of without-cached-state or with-cached-state, and we always skip the other one.
|
||||
# Normally, if a job is skipped, all the jobs that depend on it are also skipped.
|
||||
# So we need to override the default success() check to make this job run.
|
||||
if: ${{ !cancelled() && !failure() && (inputs.saves_to_disk || inputs.force_save_to_disk) }}
|
||||
permissions:
|
||||
contents: 'read'
|
||||
id-token: 'write'
|
||||
steps:
|
||||
- uses: actions/checkout@v4.1.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
fetch-depth: '2'
|
||||
- uses: r7kamura/rust-problem-matchers@v1.4.0
|
||||
|
||||
- name: Inject slug/short variables
|
||||
uses: rlespinasse/github-slug-action@v4
|
||||
with:
|
||||
short-length: 7
|
||||
|
||||
# Performs formatting on disk name components.
|
||||
#
|
||||
# Disk images in GCP are required to be in lowercase, but the blockchain network
|
||||
# uses sentence case, so we need to downcase ${{ inputs.network }}.
|
||||
#
|
||||
# Disk image names in GCP are limited to 63 characters, so we need to limit
|
||||
# branch names to 12 characters.
|
||||
#
|
||||
# Passes ${{ inputs.network }} to subsequent steps using $NETWORK env variable.
|
||||
# Passes ${{ env.GITHUB_REF_SLUG_URL }} to subsequent steps using $SHORT_GITHUB_REF env variable.
|
||||
- name: Format network name and branch name for disks
|
||||
run: |
|
||||
NETWORK_CAPS="${{ inputs.network }}"
|
||||
echo "NETWORK=${NETWORK_CAPS,,}" >> "$GITHUB_ENV"
|
||||
LONG_GITHUB_REF="${{ env.GITHUB_REF_SLUG_URL }}"
|
||||
echo "SHORT_GITHUB_REF=${LONG_GITHUB_REF:0:12}" >> "$GITHUB_ENV"
|
||||
|
||||
# Install our SSH secret
|
||||
- name: Install private SSH key
|
||||
uses: shimataro/ssh-key-action@v2.7.0
|
||||
with:
|
||||
key: ${{ secrets.GCP_SSH_PRIVATE_KEY }}
|
||||
name: google_compute_engine
|
||||
known_hosts: unnecessary
|
||||
|
||||
- name: Generate public SSH key
|
||||
run: |
|
||||
sudo apt-get update && sudo apt-get -qq install -y --no-install-recommends openssh-client
|
||||
ssh-keygen -y -f ~/.ssh/google_compute_engine > ~/.ssh/google_compute_engine.pub
|
||||
|
||||
# Setup gcloud CLI
|
||||
- name: Authenticate to Google Cloud
|
||||
id: auth
|
||||
uses: google-github-actions/auth@v2.1.2
|
||||
with:
|
||||
workload_identity_provider: '${{ vars.GCP_WIF }}'
|
||||
service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}'
|
||||
|
||||
- name: Set up Cloud SDK
|
||||
uses: google-github-actions/setup-gcloud@v2.1.0
|
||||
|
||||
# Get the state version from the local constants.rs file to be used in the image creation,
|
||||
# as the state version is part of the disk image name.
|
||||
#
|
||||
# Passes the state version to subsequent steps using $STATE_VERSION env variable
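# As an illustration (the exact declaration may differ), the grep below extracts `25`
# from a constants.rs line like:
#   pub const DATABASE_FORMAT_VERSION: u32 = 25;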
|
||||
- name: Get state version from constants.rs
|
||||
run: |
|
||||
LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" $GITHUB_WORKSPACE/zebra-state/src/constants.rs | grep -oE "[0-9]+" | tail -n1)
|
||||
echo "STATE_VERSION: $LOCAL_STATE_VERSION"
|
||||
|
||||
echo "STATE_VERSION=$LOCAL_STATE_VERSION" >> "$GITHUB_ENV"
|
||||
|
||||
# Sets the $UPDATE_SUFFIX env var to "-u" if updating a previous cached state,
|
||||
# and the empty string otherwise.
|
||||
#
|
||||
# Also sets a unique date and time suffix $TIME_SUFFIX.
|
||||
- name: Set update and time suffixes
|
||||
run: |
|
||||
UPDATE_SUFFIX=""
|
||||
|
||||
if [[ "${{ inputs.needs_zebra_state }}" == "true" ]] && [[ "${{ inputs.app_name }}" == "zebrad" ]]; then
|
||||
UPDATE_SUFFIX="-u"
|
||||
fi
|
||||
|
||||
# TODO: find better logic for the lwd-full-sync case
|
||||
if [[ "${{ inputs.needs_lwd_state }}" == "true" ]] && [[ "${{ inputs.app_name }}" == "lightwalletd" ]] && [[ "${{ inputs.test_id }}" != 'lwd-full-sync' ]]; then
|
||||
UPDATE_SUFFIX="-u"
|
||||
fi
|
||||
|
||||
# We're going to delete old images after a few days, so we only need the time here
|
||||
TIME_SUFFIX=$(date '+%H%M%S' --utc)
|
||||
|
||||
echo "UPDATE_SUFFIX=$UPDATE_SUFFIX" >> "$GITHUB_ENV"
|
||||
echo "TIME_SUFFIX=$TIME_SUFFIX" >> "$GITHUB_ENV"
|
||||
|
||||
# Get the full initial and running database versions from the test logs.
|
||||
# These versions are used as part of the disk description and labels.
|
||||
#
|
||||
# If these versions are missing from the logs, the job fails.
|
||||
#
|
||||
# Typically, the database versions are around line 20 in the logs.
|
||||
# But we check the first 1000 log lines, just in case the test harness recompiles all the
|
||||
# dependencies before running the test. (This can happen if the cache is invalid.)
|
||||
#
|
||||
# Passes the versions to subsequent steps using the $INITIAL_DISK_DB_VERSION,
|
||||
# $RUNNING_DB_VERSION, and $DB_VERSION_SUMMARY env variables.
|
||||
- name: Get database versions from logs
|
||||
shell: /usr/bin/bash -x {0}
|
||||
run: |
|
||||
INITIAL_DISK_DB_VERSION=""
|
||||
RUNNING_DB_VERSION=""
|
||||
DB_VERSION_SUMMARY=""
|
||||
|
||||
DOCKER_LOGS=$( \
|
||||
gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
|
||||
--zone ${{ vars.GCP_ZONE }} \
|
||||
--ssh-flag="-o ServerAliveInterval=5" \
|
||||
--ssh-flag="-o ConnectionAttempts=20" \
|
||||
--ssh-flag="-o ConnectTimeout=5" \
|
||||
--command=' \
|
||||
sudo docker logs ${{ inputs.test_id }} | head -1000 \
|
||||
')
|
||||
|
||||
# either a semantic version or "creating new database"
|
||||
INITIAL_DISK_DB_VERSION=$( \
|
||||
echo "$DOCKER_LOGS" | \
|
||||
grep --extended-regexp --only-matching 'initial disk state version: [0-9a-z\.]+' | \
|
||||
grep --extended-regexp --only-matching '[0-9a-z\.]+' | \
|
||||
tail -1 || \
|
||||
[[ $? == 1 ]] \
|
||||
)
|
||||
|
||||
if [[ -z "$INITIAL_DISK_DB_VERSION" ]]; then
|
||||
echo "Checked logs:"
|
||||
echo ""
|
||||
echo "$DOCKER_LOGS"
|
||||
echo ""
|
||||
echo "Missing initial disk database version in logs: $INITIAL_DISK_DB_VERSION"
|
||||
# Fail the tests, because Zebra didn't log the initial disk database version,
|
||||
# or the regex in this step is wrong.
|
||||
false
|
||||
fi
|
||||
|
||||
if [[ "$INITIAL_DISK_DB_VERSION" = "creating.new.database" ]]; then
|
||||
INITIAL_DISK_DB_VERSION="new"
|
||||
else
|
||||
INITIAL_DISK_DB_VERSION="v${INITIAL_DISK_DB_VERSION//./-}"
|
||||
fi
|
||||
|
||||
echo "Found initial disk database version in logs: $INITIAL_DISK_DB_VERSION"
|
||||
echo "INITIAL_DISK_DB_VERSION=$INITIAL_DISK_DB_VERSION" >> "$GITHUB_ENV"
|
||||
|
||||
RUNNING_DB_VERSION=$( \
|
||||
echo "$DOCKER_LOGS" | \
|
||||
grep --extended-regexp --only-matching 'running state version: [0-9\.]+' | \
|
||||
grep --extended-regexp --only-matching '[0-9\.]+' | \
|
||||
tail -1 || \
|
||||
[[ $? == 1 ]] \
|
||||
)
|
||||
|
||||
if [[ -z "$RUNNING_DB_VERSION" ]]; then
|
||||
echo "Checked logs:"
|
||||
echo ""
|
||||
echo "$DOCKER_LOGS"
|
||||
echo ""
|
||||
echo "Missing running database version in logs: $RUNNING_DB_VERSION"
|
||||
# Fail the tests, because Zebra didn't log the running database version,
|
||||
# or the regex in this step is wrong.
|
||||
false
|
||||
fi
|
||||
|
||||
RUNNING_DB_VERSION="v${RUNNING_DB_VERSION//./-}"
|
||||
echo "Found running database version in logs: $RUNNING_DB_VERSION"
|
||||
echo "RUNNING_DB_VERSION=$RUNNING_DB_VERSION" >> "$GITHUB_ENV"
|
||||
|
||||
if [[ "$INITIAL_DISK_DB_VERSION" = "$RUNNING_DB_VERSION" ]]; then
|
||||
DB_VERSION_SUMMARY="$RUNNING_DB_VERSION"
|
||||
elif [[ "$INITIAL_DISK_DB_VERSION" = "new" ]]; then
|
||||
DB_VERSION_SUMMARY="$RUNNING_DB_VERSION in new database"
|
||||
else
|
||||
DB_VERSION_SUMMARY="$INITIAL_DISK_DB_VERSION changing to $RUNNING_DB_VERSION"
|
||||
fi
|
||||
|
||||
echo "Summarised database versions from logs: $DB_VERSION_SUMMARY"
|
||||
echo "DB_VERSION_SUMMARY=$DB_VERSION_SUMMARY" >> "$GITHUB_ENV"
|
||||
|
||||
# Get the sync height from the test logs, which is later used as part of the
|
||||
# disk description and labels.
|
||||
#
|
||||
# The regex used to grep the sync height is provided by ${{ inputs.height_grep_text }},
# which allows the height regex to be changed dynamically for different situations
# or for the log output of different tests.
|
||||
#
|
||||
# If the sync height is missing from the logs, the job fails.
|
||||
#
|
||||
# Passes the sync height to subsequent steps using the $SYNC_HEIGHT env variable.
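# As a hypothetical example, with height_grep_text set to 'sync_height = ', a log line
# 'sync_height = 2434087' would set SYNC_HEIGHT=2434087.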
|
||||
- name: Get sync height from logs
|
||||
shell: /usr/bin/bash -x {0}
|
||||
run: |
|
||||
SYNC_HEIGHT=""
|
||||
|
||||
DOCKER_LOGS=$( \
|
||||
gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
|
||||
--zone ${{ vars.GCP_ZONE }} \
|
||||
--ssh-flag="-o ServerAliveInterval=5" \
|
||||
--ssh-flag="-o ConnectionAttempts=20" \
|
||||
--ssh-flag="-o ConnectTimeout=5" \
|
||||
--command=' \
|
||||
sudo docker logs ${{ inputs.test_id }} --tail 200 \
|
||||
')
|
||||
|
||||
SYNC_HEIGHT=$( \
|
||||
echo "$DOCKER_LOGS" | \
|
||||
grep --extended-regexp --only-matching '${{ inputs.height_grep_text }}[0-9]+' | \
|
||||
grep --extended-regexp --only-matching '[0-9]+' | \
|
||||
tail -1 || \
|
||||
[[ $? == 1 ]] \
|
||||
)
|
||||
|
||||
if [[ -z "$SYNC_HEIGHT" ]]; then
|
||||
echo "Checked logs:"
|
||||
echo ""
|
||||
echo "$DOCKER_LOGS"
|
||||
echo ""
|
||||
echo "Missing sync height in logs: $SYNC_HEIGHT"
|
||||
# Fail the tests, because Zebra and lightwalletd didn't log their sync heights,
|
||||
# or the CI workflow sync height regex is wrong.
|
||||
false
|
||||
fi
|
||||
|
||||
echo "Found sync height in logs: $SYNC_HEIGHT"
|
||||
echo "SYNC_HEIGHT=$SYNC_HEIGHT" >> "$GITHUB_ENV"
|
||||
|
||||
# Get the original cached state height from Google Cloud.
|
||||
#
|
||||
# If the height is missing from the image labels, it uses zero instead.
|
||||
#
|
||||
# TODO: fail the job if needs_zebra_state but the height is missing
|
||||
# we can make this change after all the old images have been deleted; this should happen around 15 September 2022
|
||||
# we'll also need to do a manual checkpoint rebuild before opening the PR for this change
|
||||
#
|
||||
# Passes the original height to subsequent steps using $ORIGINAL_HEIGHT env variable.
|
||||
- name: Get original cached state height from google cloud
|
||||
run: |
|
||||
ORIGINAL_HEIGHT="0"
|
||||
ORIGINAL_DISK_NAME="${{ format('{0}', needs.test-result.outputs.cached_disk_name) }}"
|
||||
|
||||
if [[ -n "$ORIGINAL_DISK_NAME" ]]; then
|
||||
ORIGINAL_HEIGHT=$(gcloud compute images list --filter="status=READY AND name=$ORIGINAL_DISK_NAME" --format="value(labels.height)")
|
||||
ORIGINAL_HEIGHT=${ORIGINAL_HEIGHT:-0}
|
||||
echo "$ORIGINAL_DISK_NAME height: $ORIGINAL_HEIGHT"
|
||||
else
|
||||
ORIGINAL_DISK_NAME="new-disk"
|
||||
echo "newly created disk, original height set to 0"
|
||||
fi
|
||||
|
||||
echo "ORIGINAL_HEIGHT=$ORIGINAL_HEIGHT" >> "$GITHUB_ENV"
|
||||
echo "ORIGINAL_DISK_NAME=$ORIGINAL_DISK_NAME" >> "$GITHUB_ENV"
|
||||
|
||||
# Create an image from the state disk, which will be used for any tests that start
|
||||
# after it is created. These tests can be in the same workflow, or in a different PR.
|
||||
#
|
||||
# Using the newest image makes future jobs faster, because it is closer to the chain tip.
|
||||
#
|
||||
# Skips creating updated images if the original image is less than $CACHED_STATE_UPDATE_LIMIT blocks behind the current tip.
|
||||
# Full sync images are always created.
|
||||
#
|
||||
# The image can contain:
|
||||
# - Zebra cached state, or
|
||||
# - Zebra + lightwalletd cached state.
|
||||
# Which cached state is being saved to the disk is defined by ${{ inputs.disk_prefix }}.
|
||||
#
|
||||
# Google Cloud doesn't have an atomic image replacement operation.
|
||||
# We don't want to delete and re-create the image, because that causes a ~5 minute
|
||||
# window where there might be no recent image. So we add an extra image with a unique name,
|
||||
# which gets selected because it has a later creation time.
|
||||
# This also simplifies the process of deleting old images,
|
||||
# because we don't have to worry about accidentally deleting all the images.
|
||||
#
|
||||
# The timestamp makes images from the same commit unique,
|
||||
# as long as they don't finish in the same second.
|
||||
# (This is unlikely, because each image created by a workflow has a different name.)
|
||||
#
|
||||
# The image name must also be 63 characters or less.
|
||||
#
|
||||
# Force the image creation (--force) as the disk is still attached even though it is not being
|
||||
# used by the container.
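# As an illustration (all values hypothetical), the created image name looks like:
#   zebrad-cache-main-abc1234-v25-mainnet-tip-u-153012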
|
||||
- name: Create image from state disk
|
||||
run: |
|
||||
MINIMUM_UPDATE_HEIGHT=$((ORIGINAL_HEIGHT+CACHED_STATE_UPDATE_LIMIT))
|
||||
if [[ -z "$UPDATE_SUFFIX" ]] || [[ "$SYNC_HEIGHT" -gt "$MINIMUM_UPDATE_HEIGHT" ]] || [[ "${{ inputs.force_save_to_disk }}" == "true" ]]; then
|
||||
gcloud compute images create \
|
||||
"${{ inputs.disk_prefix }}-${SHORT_GITHUB_REF}-${{ env.GITHUB_SHA_SHORT }}-v${{ env.STATE_VERSION }}-${NETWORK}-${{ inputs.disk_suffix }}${UPDATE_SUFFIX}-${TIME_SUFFIX}" \
|
||||
--force \
|
||||
--source-disk=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \
|
||||
--source-disk-zone=${{ vars.GCP_ZONE }} \
|
||||
--storage-location=us \
|
||||
--description="Created from commit ${{ env.GITHUB_SHA_SHORT }} with height ${{ env.SYNC_HEIGHT }} and database format ${{ env.DB_VERSION_SUMMARY }}" \
|
||||
--labels="height=${{ env.SYNC_HEIGHT }},purpose=${{ inputs.disk_prefix }},commit=${{ env.GITHUB_SHA_SHORT }},state-version=${{ env.STATE_VERSION }},state-running-version=${RUNNING_DB_VERSION},initial-state-disk-version=${INITIAL_DISK_DB_VERSION},network=${NETWORK},target-height-kind=${{ inputs.disk_suffix }},update-flag=${UPDATE_SUFFIX},force-save=${{ inputs.force_save_to_disk }},updated-from-height=${ORIGINAL_HEIGHT},updated-from-disk=${ORIGINAL_DISK_NAME},test-id=${{ inputs.test_id }},app-name=${{ inputs.app_name }}"
|
||||
else
|
||||
echo "Skipped cached state update because the new sync height $SYNC_HEIGHT was less than $CACHED_STATE_UPDATE_LIMIT blocks above the original height $ORIGINAL_HEIGHT of $ORIGINAL_DISK_NAME"
|
||||
fi
|
||||
|
||||
# delete the Google Cloud instance for this test
|
||||
delete-instance:
|
||||
name: Delete ${{ inputs.test_id }} instance
|
||||
runs-on: ubuntu-latest
|
||||
needs: [ create-state-image ]
|
||||
# If a disk generation step times out (after 6+ hours), the previous job (creating the image) will be skipped.
|
||||
# Even if the instance continues running, no image will be created, so it's better to delete it.
|
||||
if: always()
|
||||
continue-on-error: true
|
||||
permissions:
|
||||
contents: 'read'
|
||||
id-token: 'write'
|
||||
steps:
|
||||
- uses: actions/checkout@v4.1.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
fetch-depth: '2'
|
||||
- uses: r7kamura/rust-problem-matchers@v1.4.0
|
||||
|
||||
- name: Inject slug/short variables
|
||||
uses: rlespinasse/github-slug-action@v4
|
||||
with:
|
||||
short-length: 7
|
||||
|
||||
# Setup gcloud CLI
|
||||
- name: Authenticate to Google Cloud
|
||||
id: auth
|
||||
uses: google-github-actions/auth@v2.1.2
|
||||
with:
|
||||
workload_identity_provider: '${{ vars.GCP_WIF }}'
|
||||
service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}'
|
||||
|
||||
- name: Set up Cloud SDK
|
||||
uses: google-github-actions/setup-gcloud@v2.1.0
|
||||
|
||||
# Deletes the instance that was recently deployed for the current commit after all
# previous jobs have run, regardless of their outcome.
|
||||
- name: Delete test instance
|
||||
continue-on-error: true
|
||||
run: |
|
||||
INSTANCE=$(gcloud compute instances list --filter=${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} --format='value(NAME)')
|
||||
if [ -z "${INSTANCE}" ]; then
|
||||
echo "No instance to delete"
|
||||
else
|
||||
gcloud compute instances delete "${INSTANCE}" --zone "${{ vars.GCP_ZONE }}" --delete-disks all --quiet
|
||||
fi
|
||||
|
|
@ -0,0 +1,101 @@
|
|||
# Check if Cached State Disks Exist Workflow
|
||||
# This workflow is designed to check the availability of cached state disks in Google Cloud Platform (GCP) for different types of Zcash applications.
|
||||
# - Accepts network type as input to determine which disks to search for.
|
||||
# - Checks for the existence of three types of disks: lightwalletd tip, Zebra tip, and Zebra checkpoint.
|
||||
# - Uses Google Cloud SDK to query and identify available disks based on network and version.
|
||||
# - Outputs the availability of each disk type, which can be utilized in subsequent workflows.
|
||||
# The workflow streamlines the process of verifying disk availability, which is crucial for optimizing and speeding up integration tests and deployments.
|
||||
name: Check if cached state disks exist
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
network:
|
||||
description: 'The Zcash network used to look up the disks'
|
||||
required: true
|
||||
type: string
|
||||
outputs:
|
||||
lwd_tip_disk:
|
||||
description: 'true if there is a lightwalletd and Zebra cached state disk, synced near the chain tip'
|
||||
value: ${{ jobs.get-available-disks.outputs.lwd_tip_disk }}
|
||||
zebra_tip_disk:
|
||||
description: 'true if there is a Zebra cached state disk synced near the chain tip'
|
||||
value: ${{ jobs.get-available-disks.outputs.zebra_tip_disk }}
|
||||
zebra_checkpoint_disk:
|
||||
description: 'true if there is a Zebra cached state disk synced to the mandatory Zebra checkpoint'
|
||||
value: ${{ jobs.get-available-disks.outputs.zebra_checkpoint_disk }}
|
||||
|
||||
jobs:
|
||||
get-available-disks:
|
||||
name: Check if cached state disks exist
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
lwd_tip_disk: ${{ steps.get-available-disks.outputs.lwd_tip_disk }}
|
||||
zebra_tip_disk: ${{ steps.get-available-disks.outputs.zebra_tip_disk }}
|
||||
zebra_checkpoint_disk: ${{ steps.get-available-disks.outputs.zebra_checkpoint_disk }}
|
||||
permissions:
|
||||
contents: 'read'
|
||||
id-token: 'write'
|
||||
steps:
|
||||
- uses: actions/checkout@v4.1.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
fetch-depth: 0
|
||||
|
||||
# Setup gcloud CLI
|
||||
- name: Authenticate to Google Cloud
|
||||
id: auth
|
||||
uses: google-github-actions/auth@v2.1.2
|
||||
with:
|
||||
retries: '3'
|
||||
workload_identity_provider: '${{ vars.GCP_WIF }}'
|
||||
service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}'
|
||||
|
||||
- name: Set up Cloud SDK
|
||||
uses: google-github-actions/setup-gcloud@v2.1.0
|
||||
|
||||
# Disk images in GCP are required to be in lowercase, but the blockchain network
|
||||
# uses sentence case, so we need to downcase ${{ inputs.network }}
|
||||
#
|
||||
# Passes a lowercase Network name to subsequent steps using $NETWORK env variable
|
||||
- name: Downcase network name for disks
|
||||
run: |
|
||||
NETWORK_CAPS=${{ inputs.network }}
|
||||
echo "NETWORK=${NETWORK_CAPS,,}" >> $GITHUB_ENV
|
||||
|
||||
# Check if there are cached state disks available for subsequent jobs to use.
|
||||
- name: Check if cached state disks exist
|
||||
id: get-available-disks
|
||||
env:
|
||||
GITHUB_WORKSPACE: ${{ env.GITHUB_WORKSPACE }}
|
||||
NETWORK: ${{ env.NETWORK }} # use lowercase version from env, not input
|
||||
# TODO: Use the `gcp-get-available-disks.sh` script instead of the inline script,
# as the inline script is crashing; it might be related to the returned JSON values.
|
||||
run: |
|
||||
# ./.github/workflows/scripts/gcp-get-available-disks.sh
|
||||
LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "$GITHUB_WORKSPACE/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1)
|
||||
echo "STATE_VERSION: $LOCAL_STATE_VERSION"
|
||||
LWD_TIP_DISK=$(gcloud compute images list --filter="status=READY AND name~lwd-cache-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-tip" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1)
|
||||
if [[ -z "$LWD_TIP_DISK" ]]; then
|
||||
echo "No TIP disk found for lightwalletd on network: ${NETWORK}"
|
||||
echo "lwd_tip_disk=${{ toJSON(false) }}" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
echo "Disk: $LWD_TIP_DISK"
|
||||
echo "lwd_tip_disk=${{ toJSON(true) }}" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
ZEBRA_TIP_DISK=$(gcloud compute images list --filter="status=READY AND name~zebrad-cache-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-tip" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1)
|
||||
if [[ -z "$ZEBRA_TIP_DISK" ]]; then
|
||||
echo "No TIP disk found for Zebra on network: ${NETWORK}"
|
||||
echo "zebra_tip_disk=${{ toJSON(false) }}" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
echo "Disk: $ZEBRA_TIP_DISK"
|
||||
echo "zebra_tip_disk=${{ toJSON(true) }}" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
ZEBRA_CHECKPOINT_DISK=$(gcloud compute images list --filter="status=READY AND name~zebrad-cache-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-checkpoint" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1)
|
||||
if [[ -z "$ZEBRA_CHECKPOINT_DISK" ]]; then
|
||||
echo "No CHECKPOINT disk found for Zebra on network: ${NETWORK}"
|
||||
echo "zebra_checkpoint_disk=${{ toJSON(false) }}" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
echo "Disk: $ZEBRA_CHECKPOINT_DISK"
|
||||
echo "zebra_checkpoint_disk=${{ toJSON(true) }}" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
|
@ -0,0 +1,91 @@
|
|||
# This workflow is designed to test Zebra configuration files using Docker containers.
|
||||
# - Runs a specified Docker image with the provided test variables and network settings.
|
||||
# - Monitors and analyzes container logs for specific patterns to determine test success.
|
||||
# - Provides flexibility in testing various configurations and networks by dynamically adjusting input parameters.
|
||||
name: Test Zebra Config Files
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
# Status and logging
|
||||
test_id:
|
||||
required: true
|
||||
type: string
|
||||
description: 'Unique identifier for the test'
|
||||
grep_patterns:
|
||||
required: true
|
||||
type: string
|
||||
description: 'Patterns to grep for in the logs'
|
||||
|
||||
# Test selection and parameters
|
||||
docker_image:
|
||||
required: true
|
||||
type: string
|
||||
description: 'Docker image to test'
|
||||
test_variables:
|
||||
required: true
|
||||
type: string
|
||||
description: 'Environmental variables used to select and configure the test'
|
||||
network:
|
||||
required: false
|
||||
type: string
|
||||
default: Mainnet
|
||||
description: 'Zcash network to test against'
|
||||
|
||||
jobs:
|
||||
test-docker-config:
|
||||
name: Test ${{ inputs.test_id }} in Docker
|
||||
timeout-minutes: 30
|
||||
runs-on: ubuntu-latest-m
|
||||
steps:
|
||||
- uses: actions/checkout@v4.1.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Inject slug/short variables
|
||||
uses: rlespinasse/github-slug-action@v4
|
||||
with:
|
||||
short-length: 7
|
||||
|
||||
- uses: r7kamura/rust-problem-matchers@v1.4.0
|
||||
|
||||
- name: Run ${{ inputs.test_id }} test
|
||||
run: |
|
||||
docker pull ${{ inputs.docker_image }}
|
||||
docker run ${{ inputs.test_variables }} --detach --name ${{ inputs.test_id }} -t ${{ inputs.docker_image }} zebrad start
|
||||
# Use a subshell to handle the broken pipe error gracefully
|
||||
(
|
||||
trap "" PIPE;
|
||||
docker logs \
|
||||
--tail all \
|
||||
--follow \
|
||||
${{ inputs.test_id }} | \
|
||||
tee --output-error=exit /dev/stderr | \
|
||||
grep --max-count=1 --extended-regexp --color=always \
|
||||
${{ inputs.grep_patterns }}
|
||||
) && LOGS_EXIT_STATUS=0 || LOGS_EXIT_STATUS=$? # capture the grep status without failing the step
|
||||
|
||||
docker stop ${{ inputs.test_id }}
|
||||
|
||||
EXIT_STATUS=$(docker wait ${{ inputs.test_id }} || echo "Error retrieving exit status");
|
||||
echo "docker exit status: $EXIT_STATUS";
|
||||
|
||||
# If grep found the pattern, exit with the Docker container exit status
|
||||
if [ $LOGS_EXIT_STATUS -eq 0 ]; then
|
||||
# We can't diagnose or fix these errors, so we're just ignoring them for now.
|
||||
# They don't actually impact the test because they happen after it succeeds.
|
||||
# See ticket #7898 for details.
|
||||
if [ $EXIT_STATUS -eq 137 ] || [ $EXIT_STATUS -eq 139 ]; then
|
||||
echo "Warning: ignoring docker exit status $EXIT_STATUS";
|
||||
exit 0;
|
||||
else
|
||||
exit $EXIT_STATUS;
|
||||
fi
|
||||
fi
|
||||
|
||||
# Handle other potential errors here
|
||||
echo "An error occurred while processing the logs.";
|
||||
exit 1;
|
||||
env:
|
||||
NETWORK: '${{ inputs.network }}'
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
# Configuration for zbot - https://github.com/ZcashFoundation/zbot
|
||||
|
||||
issues:
|
||||
column_id: 6927590
|
||||
pull_requests:
|
||||
column_id: 6927591
|
||||
|
|
@ -0,0 +1,160 @@
|
|||
# Cargo files
|
||||
/coverage-target/
|
||||
# Legacy Zebra state (alpha versions only)
|
||||
.zebra-state/
|
||||
# Nix configs
|
||||
shell.nix
|
||||
# Docker compose env files
|
||||
*.env
|
||||
|
||||
# ---- Below here this is an autogenerated .gitignore using Toptal ----
|
||||
# Created by https://www.toptal.com/developers/gitignore/api/firebase,emacs,visualstudiocode,rust,windows,macos
|
||||
# Edit at https://www.toptal.com/developers/gitignore?templates=firebase,emacs,visualstudiocode,rust,windows,macos
|
||||
|
||||
### Emacs ###
|
||||
# -*- mode: gitignore; -*-
|
||||
*~
|
||||
\#*\#
|
||||
/.emacs.desktop
|
||||
/.emacs.desktop.lock
|
||||
*.elc
|
||||
auto-save-list
|
||||
tramp
|
||||
.\#*
|
||||
|
||||
# Org-mode
|
||||
.org-id-locations
|
||||
*_archive
|
||||
|
||||
# flymake-mode
|
||||
*_flymake.*
|
||||
|
||||
# eshell files
|
||||
/eshell/history
|
||||
/eshell/lastdir
|
||||
|
||||
# elpa packages
|
||||
/elpa/
|
||||
|
||||
# reftex files
|
||||
*.rel
|
||||
|
||||
# AUCTeX auto folder
|
||||
/auto/
|
||||
|
||||
# cask packages
|
||||
.cask/
|
||||
dist/
|
||||
|
||||
# Flycheck
|
||||
flycheck_*.el
|
||||
|
||||
# server auth directory
|
||||
/server/
|
||||
|
||||
# projectiles files
|
||||
.projectile
|
||||
|
||||
# directory configuration
|
||||
.dir-locals.el
|
||||
|
||||
# network security
|
||||
/network-security.data
|
||||
|
||||
|
||||
### Firebase ###
|
||||
.idea
|
||||
**/node_modules/*
|
||||
# We need to check in the .firebaserc file because it contains the target names
|
||||
# **/.firebaserc
|
||||
|
||||
### Firebase Patch ###
|
||||
.runtimeconfig.json
|
||||
.firebase/
|
||||
|
||||
### macOS ###
|
||||
# General
|
||||
.DS_Store
|
||||
.AppleDouble
|
||||
.LSOverride
|
||||
|
||||
# Icon must end with two \r
|
||||
Icon
|
||||
|
||||
|
||||
# Thumbnails
|
||||
._*
|
||||
|
||||
# Files that might appear in the root of a volume
|
||||
.DocumentRevisions-V100
|
||||
.fseventsd
|
||||
.Spotlight-V100
|
||||
.TemporaryItems
|
||||
.Trashes
|
||||
.VolumeIcon.icns
|
||||
.com.apple.timemachine.donotpresent
|
||||
|
||||
# Directories potentially created on remote AFP share
|
||||
.AppleDB
|
||||
.AppleDesktop
|
||||
Network Trash Folder
|
||||
Temporary Items
|
||||
.apdisk
|
||||
|
||||
### Rust ###
|
||||
# Generated by Cargo
|
||||
# will have compiled files and executables
|
||||
debug/
|
||||
target/
|
||||
|
||||
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
|
||||
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
|
||||
# Cargo.lock
|
||||
|
||||
# These are backup files generated by rustfmt
|
||||
**/*.rs.bk
|
||||
|
||||
# MSVC Windows builds of rustc generate these, which store debugging information
|
||||
*.pdb
|
||||
|
||||
### VisualStudioCode ###
|
||||
.vscode/*
|
||||
!.vscode/settings.json
|
||||
!.vscode/tasks.json
|
||||
!.vscode/launch.json
|
||||
!.vscode/extensions.json
|
||||
*.code-workspace
|
||||
|
||||
# Local History for Visual Studio Code
|
||||
.history/
|
||||
|
||||
### VisualStudioCode Patch ###
|
||||
# Ignore all local history of files
|
||||
.history
|
||||
.ionide
|
||||
|
||||
### Windows ###
|
||||
# Windows thumbnail cache files
|
||||
Thumbs.db
|
||||
Thumbs.db:encryptable
|
||||
ehthumbs.db
|
||||
ehthumbs_vista.db
|
||||
|
||||
# Dump file
|
||||
*.stackdump
|
||||
|
||||
# Folder config file
|
||||
[Dd]esktop.ini
|
||||
|
||||
# Recycle Bin used on file shares
|
||||
$RECYCLE.BIN/
|
||||
|
||||
# Windows Installer files
|
||||
*.cab
|
||||
*.msi
|
||||
*.msix
|
||||
*.msm
|
||||
*.msp
|
||||
|
||||
# Windows shortcuts
|
||||
*.lnk
|
||||
File diff suppressed because it is too large
|
|
@ -0,0 +1,39 @@
|
|||
# Code of Conduct
|
||||
|
||||
----------------------------------------------------------------------
|
||||
|
||||
## Conduct
|
||||
|
||||
- We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, nationality, or other similar characteristic.
|
||||
- Please avoid using overtly sexual aliases or other nicknames that might detract from a friendly, safe and welcoming environment for all.
|
||||
- Please be kind and courteous. There’s no need to be mean or rude.
|
||||
- Respect that people have differences of opinion and that every design or implementation choice carries a trade-off and numerous costs. There is seldom a right answer.
|
||||
- Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works.
|
||||
- We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behavior. We interpret the term “harassment” as including the definition in the Citizen Code of Conduct; if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don’t tolerate behavior that excludes people in socially marginalized groups.
|
||||
- Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact the Zcash Foundation [moderation team] immediately. Whether you’re a regular contributor or a newcomer, we care about making this community a safe place for you and we’ve got your back.
|
||||
- Likewise any spamming, trolling, flaming, baiting or other attention-stealing behavior is not welcome.
|
||||
|
||||
> [[EMAIL THE MODERATION TEAM][moderation team]]
|
||||
|
||||
## Moderation
|
||||
|
||||
These are the policies for upholding our community’s standards of conduct. If you feel that a thread needs moderation, please contact the Zcash Foundation [moderation team].
|
||||
|
||||
1. Remarks that violate the Zcash Foundation standards of conduct, including hateful, hurtful, oppressive, or exclusionary remarks, are not allowed. (Cursing is allowed, but never targeting another user, and never in a hateful manner.)
|
||||
2. Remarks that moderators find inappropriate, whether listed in the code of conduct or not, are also not allowed.
|
||||
3. Moderators will first respond to such remarks with a warning.
|
||||
4. If the warning is unheeded, the user will be “kicked,” i.e., kicked out of the communication channel to cool off.
|
||||
5. If the user comes back and continues to make trouble, they will be banned, i.e., indefinitely excluded.
|
||||
6. Moderators may choose at their discretion to un-ban the user if it was a first offense and they offer the offended party a genuine apology.
|
||||
7. If a moderator bans someone and you think it was unjustified, please take it up with that moderator, or with a different moderator, in private. Complaints about bans in-channel are not allowed.
|
||||
8. Moderators are held to a higher standard than other community members. If a moderator creates an inappropriate situation, they should expect less leeway than others.
|
||||
|
||||
In the community fostered by the Zcash Foundation we strive to go the extra step to look out for each other. Don’t just aim to be technically unimpeachable, try to be your best self. In particular, avoid flirting with offensive or sensitive issues, particularly if they’re off-topic; this all too often leads to unnecessary fights, hurt feelings, and damaged trust; worse, it can drive people away from the community entirely.
|
||||
|
||||
And if someone takes issue with something you said or did, resist the urge to be defensive. Just stop doing what it was they complained about and apologize. Even if you feel you were misinterpreted or unfairly accused, chances are good there was something you could’ve communicated better — remember that it’s your responsibility to make your fellow humans comfortable. Everyone wants to get along and we are all here first and foremost because we want to talk about cool technology. You will find that people will be eager to assume good intent and forgive as long as you earn their trust.
|
||||
|
||||
The enforcement policies listed above apply to all official Zcash Foundation venues, including Discord channels (https://discord.com/channels/676527656170160146/716086297210650634) and GitHub repositories under ZcashFoundation. For other projects adopting the Zcash Foundation Code of Conduct, please contact the maintainers of those projects for enforcement. If you wish to use this code of conduct for your own project, consider explicitly mentioning your moderation policy or making a copy with your own moderation policy so as to avoid confusion.
|
||||
|
||||
Based on the Rust Code of Conduct, adapted from the Node.js Policy on Trolling as well as the Contributor Covenant v1.3.0.
|
||||
|
||||
[moderation team]: mailto:moderation@zfnd.org
|
||||
|
|
@ -0,0 +1,50 @@
|
|||
# Contributing
|
||||
|
||||
* [Running and Debugging](#running-and-debugging)
|
||||
* [Bug Reports](#bug-reports)
|
||||
* [Pull Requests](#pull-requests)
|
||||
|
||||
## Running and Debugging
|
||||
[running-and-debugging]: #running-and-debugging
|
||||
|
||||
See the [user documentation](https://zebra.zfnd.org/user.html) for details on
|
||||
how to build, run, and instrument Zebra.
|
||||
|
||||
## Bug Reports
|
||||
[bug-reports]: #bug-reports
|
||||
|
||||
Please [create an issue](https://github.com/ZcashFoundation/zebra/issues/new?assignees=&labels=C-bug%2C+S-needs-triage&projects=&template=bug_report.yml&title=) on the Zebra issue tracker.
|
||||
|
||||
## Pull Requests
|
||||
[pull-requests]: #pull-requests
|
||||
|
||||
PRs are welcome for small and large changes, but please don't make large PRs
|
||||
without coordinating with us via the issue tracker or Discord. This helps
|
||||
increase development coordination and makes PRs easier to merge.
|
||||
|
||||
Check out the [help wanted][hw] or [good first issue][gfi] labels if you're
|
||||
looking for a place to get started!
|
||||
|
||||
Zebra follows the [conventional commits][conventional] standard for the commits
|
||||
merged to main. Since PRs are squashed before merging to main, the PR titles
|
||||
should follow the conventional commits standard so that the merged commits
|
||||
are conformant.
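
For example, a conforming PR title might look like
`fix(network): reject connections to misbehaving peers` (scope and description
are hypothetical).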
|
||||
|
||||
[hw]: https://github.com/ZcashFoundation/zebra/labels/E-help-wanted
|
||||
[gfi]: https://github.com/ZcashFoundation/zebra/labels/good%20first%20issue
|
||||
[conventional]: https://www.conventionalcommits.org/en/v1.0.0/#specification
|
||||
|
||||
## Coverage Reports
|
||||
[coverage-reports]: #coverage-reports
|
||||
|
||||
Zebra's CI currently generates coverage reports for every PR with Rust's new
source-based coverage feature. The coverage reports are generated by the
`coverage.yml` file.
|
||||
|
||||
These reports are then saved as HTML and zipped up into a GitHub Actions
artifact. These artifacts can be accessed on the `checks` tab of any PR, next
|
||||
to the "re-run jobs" button on the `Coverage (+nightly)` CI job's tab
|
||||
[example](https://github.com/ZcashFoundation/zebra/pull/1907/checks?check_run_id=2127676611).
|
||||
|
||||
To access a report, download and extract the zip artifact, then open the
top-level `index.html`.
|
||||
File diff suppressed because it is too large
|
|
@ -0,0 +1,91 @@
|
|||
[workspace]
|
||||
members = [
|
||||
"zebrad",
|
||||
"zebra-chain",
|
||||
"zebra-network",
|
||||
"zebra-state",
|
||||
"zebra-script",
|
||||
"zebra-consensus",
|
||||
"zebra-rpc",
|
||||
"zebra-node-services",
|
||||
"zebra-test",
|
||||
"zebra-utils",
|
||||
"zebra-scan",
|
||||
"zebra-grpc",
|
||||
"tower-batch-control",
|
||||
"tower-fallback",
|
||||
]
|
||||
|
||||
# Use the edition 2021 dependency resolver in the workspace, to match the crates
|
||||
resolver = "2"
|
||||
|
||||
# `cargo release` settings
|
||||
|
||||
[workspace.metadata.release]
|
||||
|
||||
# We always do releases from the main branch
|
||||
allow-branch = ["main"]
|
||||
|
||||
# Compilation settings
|
||||
|
||||
[profile.dev]
|
||||
panic = "abort"
|
||||
|
||||
# Speed up tests by optimizing performance-critical crates
|
||||
|
||||
# Cryptographic crates
|
||||
|
||||
[profile.dev.package.blake2b_simd]
|
||||
opt-level = 3
|
||||
|
||||
[profile.dev.package.ff]
|
||||
opt-level = 3
|
||||
|
||||
[profile.dev.package.group]
|
||||
opt-level = 3
|
||||
|
||||
[profile.dev.package.pasta_curves]
|
||||
opt-level = 3
|
||||
|
||||
[profile.dev.package.halo2_proofs]
|
||||
opt-level = 3
|
||||
|
||||
[profile.dev.package.halo2_gadgets]
|
||||
opt-level = 3
|
||||
|
||||
[profile.dev.package.bls12_381]
|
||||
opt-level = 3
|
||||
|
||||
[profile.dev.package.byteorder]
|
||||
opt-level = 3
|
||||
|
||||
[profile.dev.package.equihash]
|
||||
opt-level = 3
|
||||
|
||||
[profile.dev.package.zcash_proofs]
|
||||
opt-level = 3
|
||||
|
||||
[profile.dev.package.ring]
|
||||
opt-level = 3
|
||||
|
||||
[profile.dev.package.spin]
|
||||
opt-level = 3
|
||||
|
||||
[profile.dev.package.untrusted]
|
||||
opt-level = 3
|
||||
|
||||
|
||||
[profile.release]
|
||||
panic = "abort"
|
||||
|
||||
# Speed up release builds and sync tests using link-time optimization.
|
||||
# Some of Zebra's code is CPU-intensive, and needs extra optimizations for peak performance.
|
||||
#
|
||||
# TODO:
|
||||
# - add "-Clinker-plugin-lto" in .cargo/config.toml to speed up release builds
|
||||
# - add "-Clinker=clang -Clink-arg=-fuse-ld=lld" in .cargo/config.toml
|
||||
# - also use LTO on C/C++ code:
|
||||
# - use clang to compile all C/C++ code
|
||||
# - add "-flto=thin" to all C/C++ code builds
|
||||
# - see https://doc.rust-lang.org/rustc/linker-plugin-lto.html#cc-code-as-a-dependency-in-rust
|
||||
lto = "thin"
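
# A minimal sketch of the first two TODO items above (untested; assumes clang and lld
# are installed, and that these flags are merged into the existing rustflags list in
# .cargo/config.toml):
#
# [target.x86_64-unknown-linux-gnu]
# rustflags = ["-Clinker-plugin-lto", "-Clinker=clang", "-Clink-arg=-fuse-ld=lld"]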
|
||||
|
|
@ -0,0 +1,203 @@
|
|||
Copyright (c) 2019-2024 Zcash Foundation
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@ -0,0 +1,25 @@
Copyright (c) 2019-2024 Zcash Foundation

Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:

The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

README.md
@ -1,2 +1,227 @@
# Zebra



---

[](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-integration-tests-gcp.yml) [](https://github.com/ZcashFoundation/zebra/actions/workflows/ci-unit-tests-os.yml) [](https://github.com/ZcashFoundation/zebra/actions/workflows/cd-deploy-nodes-gcp.yml) [](https://codecov.io/gh/ZcashFoundation/zebra) [](https://github.com/ZcashFoundation/zebra/actions/workflows/docs-deploy-firebase.yml)


## Contents

- [About](#about)
- [Getting Started](#getting-started)
  - [Docker](#docker)
  - [Building Zebra](#building-zebra)
    - [Optional Configs & Features](#optional-configs--features)
- [Known Issues](#known-issues)
- [Future Work](#future-work)
- [Documentation](#documentation)
- [User support](#user-support)
- [Security](#security)
- [License](#license)

## About

[Zebra](https://zebra.zfnd.org/) is the Zcash Foundation's independent,
consensus-compatible implementation of a Zcash node.

Zebra's network stack is interoperable with `zcashd`, and Zebra implements all
the features required to reach Zcash network consensus, including the validation
of all the consensus rules for the NU5 network upgrade.
[Here](https://docs.rs/zebrad/latest/zebrad/index.html#zebra-advantages) are some
benefits of Zebra.

Zebra validates blocks and transactions, but needs extra software to generate
them:

- To generate transactions, [run Zebra with `lightwalletd`](https://zebra.zfnd.org/user/lightwalletd.html).
- To generate blocks, use a mining pool or miner with Zebra's mining JSON-RPCs.
  Currently, Zebra can only send mining rewards to a single fixed address.
  To distribute rewards, use mining software that creates its own distribution transactions,
  a light wallet, or the `zcashd` wallet.

Please [join us on Discord](https://discord.gg/na6QZNd) if you'd like to find
out more or get involved!

## Getting Started

You can run Zebra using our Docker image or you can build it manually. Please
see the [System Requirements](https://zebra.zfnd.org/user/requirements.html)
section in the Zebra book for system requirements.

### Docker

This command will run our latest release and sync it to the tip:

```sh
docker run zfnd/zebra:latest
```
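
If you use a custom config, you can mount it into the container. A minimal
sketch, assuming the image reads its config from `/etc/zebrad/zebrad.toml`
(check the Docker documentation below for the exact path your image version
expects):

```sh
# Sketch: mount a local zebrad.toml into the container; the container-side
# path is an assumption, not a guaranteed default.
docker run -v ~/.config/zebrad.toml:/etc/zebrad/zebrad.toml zfnd/zebra:latest
```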

For more information, read our [Docker documentation](https://zebra.zfnd.org/user/docker.html).

### Building Zebra

Building Zebra requires [Rust](https://www.rust-lang.org/tools/install),
[libclang](https://clang.llvm.org/doxygen/group__CINDEX.html), and a C++
compiler.

Zebra is tested with the latest `stable` Rust version. Earlier versions are not
supported or tested. Any Zebra release can start depending on new features in the
latest stable Rust.

Every few weeks, we release a [new Zebra version](https://github.com/ZcashFoundation/zebra/releases).

Below are quick summaries for installing the dependencies on your machine.

[//]: # "The empty line in the `summary` tag below is required for correct Markdown rendering."
<details><summary>

#### General instructions for installing dependencies
</summary>

1. Install [`cargo` and `rustc`](https://www.rust-lang.org/tools/install).

2. Install Zebra's build dependencies:

   - **libclang** is a library that might have different names depending on your
     package manager. Typical names are `libclang`, `libclang-dev`, `llvm`, or
     `llvm-dev`.
   - **clang** or another C++ compiler: `g++` (all platforms) or `Xcode` (macOS).
   - **[`protoc`](https://grpc.io/docs/protoc-installation/)**

> [!NOTE]
> Zebra uses the `--experimental_allow_proto3_optional` flag with `protoc`
> during compilation. This flag was introduced in [Protocol Buffers
> v3.12.0](https://github.com/protocolbuffers/protobuf/releases/tag/v3.12.0),
> released on May 16, 2020, so make sure you're not using a version of `protoc`
> older than 3.12.

</details>

[//]: # "The empty line in the `summary` tag below is required for correct Markdown rendering."
<details><summary>

#### Dependencies on Arch
</summary>

```sh
sudo pacman -S rust clang protobuf
```

Note that the package `clang` includes `libclang` as well as the C++ compiler.

</details>

Once the dependencies are in place, you can build and install Zebra:

```sh
cargo install --locked zebrad
```

You can start Zebra by running:

```sh
zebrad start
```

See the [Installing Zebra](https://zebra.zfnd.org/user/install.html) and [Running Zebra](https://zebra.zfnd.org/user/run.html)
sections in the book for more details.

#### Optional Configs & Features

##### Initializing Configuration File

```console
zebrad generate -o ~/.config/zebrad.toml
```

The above command places the generated `zebrad.toml` config file in the default preferences directory of Linux. For the default locations on other OSes, [see here](https://docs.rs/dirs/latest/dirs/fn.preference_dir.html).

##### Configuring Progress Bars

Configure `tracing.progress_bar` in your `zebrad.toml` to
[show key metrics in the terminal using progress bars](https://zfnd.org/experimental-zebra-progress-bars/).
When progress bars are active, Zebra automatically sends logs to a file.
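
As a sketch, the relevant `zebrad.toml` section looks like this (the log file
path is just an example; Zebra picks a default log file if it is omitted):

```toml
# Sketch of a zebrad.toml tracing section with progress bars enabled.
[tracing]
progress_bar = "summary"
log_file = "/tmp/zebrad.log"
```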

There is a known issue where [progress bar estimates become extremely large](https://github.com/console-rs/indicatif/issues/556).

In future releases, the `progress_bar = "summary"` config will show a few key metrics,
and the "detailed" config will show all available metrics. Please let us know which metrics are
important to you!

##### Configuring Mining

Zebra can be configured for mining by passing a `MINER_ADDRESS` and port mapping to Docker.
See the [mining support docs](https://zebra.zfnd.org/user/mining-docker.html) for more details.
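
For example, a rough sketch (the address below is a placeholder you must
replace with your own transparent address; 8232 is the Zcash mainnet RPC port):

```sh
# Sketch: run Zebra with mining enabled; replace the placeholder address
# with your own transparent address before use.
docker run -e MINER_ADDRESS="t1YourTransparentAddress" -p 8232:8232 zfnd/zebra:latest
```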

##### Custom Build Features

You can also build Zebra with additional [Cargo features](https://doc.rust-lang.org/cargo/reference/features.html#command-line-feature-options):

- `prometheus` for [Prometheus metrics](https://zebra.zfnd.org/user/metrics.html)
- `sentry` for [Sentry monitoring](https://zebra.zfnd.org/user/tracing.html#sentry-production-monitoring)
- `elasticsearch` for [experimental Elasticsearch support](https://zebra.zfnd.org/user/elasticsearch.html)
- `shielded-scan` for [experimental shielded scan support](https://zebra.zfnd.org/user/shielded-scan.html)

You can combine multiple features by listing them as parameters of the `--features` flag:

```sh
cargo install --features="<feature1> <feature2> ..." ...
```
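
For instance, a sketch that combines the install command above with one of the
listed features:

```sh
# Build and install zebrad with the Prometheus metrics feature enabled
cargo install --locked --features prometheus zebrad
```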

Our full list of experimental and developer features is in [the API documentation](https://docs.rs/zebrad/latest/zebrad/index.html#zebra-feature-flags).

Some debugging and monitoring features are disabled in release builds to increase
performance.

## Known Issues

There are a few bugs in Zebra that we're still working on fixing:

- [The `getpeerinfo` RPC shows current and recent outbound connections](https://github.com/ZcashFoundation/zebra/issues/7893), rather than current inbound and outbound connections.

- [Progress bar estimates can become extremely large](https://github.com/console-rs/indicatif/issues/556). We're waiting on a fix in the progress bar library.

- Zebra currently gossips and connects to [private IP addresses](https://en.wikipedia.org/wiki/IP_address#Private_addresses). We want to [disable private IPs but provide a config (#3117)](https://github.com/ZcashFoundation/zebra/issues/3117) in an upcoming release.

- Block download and verification sometimes time out during Zebra's initial sync [#5709](https://github.com/ZcashFoundation/zebra/issues/5709). The full sync still finishes reasonably quickly.

- No Windows support [#3801](https://github.com/ZcashFoundation/zebra/issues/3801). We used to test with Windows Server 2019, but not any more. `zcash_script` has recently been updated to compile with MSVC; we're now waiting on a `zcash_script` release and a dependency update. See the issue for details.

- Experimental Tor support is disabled until Zebra upgrades to the latest `arti-client`. This happened due to a Rust dependency conflict ([#5492](https://github.com/ZcashFoundation/zebra/issues/5492)) and is still an issue due to [another dependency conflict](https://github.com/ZcashFoundation/zebra/issues/8328#issuecomment-1969989648).

## Documentation

The Zcash Foundation maintains the following resources documenting Zebra:

- The Zebra Book:
  - [General Introduction](https://zebra.zfnd.org/index.html),
  - [User Documentation](https://zebra.zfnd.org/user.html),
  - [Developer Documentation](https://zebra.zfnd.org/dev.html).

- The [documentation of the public
  APIs](https://docs.rs/zebrad/latest/zebrad/#zebra-crates) for the latest
  releases of the individual Zebra crates.

- The [documentation of the internal APIs](https://doc-internal.zebra.zfnd.org)
  for the `main` branch of the whole Zebra monorepo.

## User support

For bug reports, please [open a bug report ticket in the Zebra repository](https://github.com/ZcashFoundation/zebra/issues/new?assignees=&labels=C-bug%2C+S-needs-triage&projects=&template=bug_report.yml&title=%5BUser+reported+bug%5D%3A+).

Alternatively, by chat: [join the Zcash Foundation Discord Server](https://discord.com/invite/aRgNRVwsM8) and find the #zebra-support channel.

## Security

Zebra has a [responsible disclosure policy](https://github.com/ZcashFoundation/zebra/blob/main/SECURITY.md), which we encourage security researchers to follow.

## License

Zebra is distributed under the terms of both the MIT license
and the Apache License (Version 2.0).

See [LICENSE-APACHE](LICENSE-APACHE) and [LICENSE-MIT](LICENSE-MIT).

Some Zebra crates are distributed under the [MIT license only](LICENSE-MIT),
because some of their code was originally from MIT-licensed projects.
See each crate's directory for details.

@ -0,0 +1,49 @@
This page is copyright Zcash Foundation, 2021. It is posted in order to conform to this standard: https://github.com/RD-Crypto-Spec/Responsible-Disclosure/tree/d47a5a3dafa5942c8849a93441745fdd186731e6

# Security Disclosures

## Disclosure Principles

The Zcash Foundation's security disclosure process aims to achieve the following goals:
- protecting Zcash users and the wider Zcash ecosystem
- respecting the work of security researchers
- improving the ongoing health of the Zcash ecosystem

Specifically, we will:
- assume good faith from researchers and ecosystem partners
- operate a no-fault process, focusing on the technical issues
- work with security researchers, regardless of how they choose to disclose issues

## Receiving Disclosures

The Zcash Foundation is committed to working with researchers who submit security vulnerability notifications to us, to resolve those issues on an appropriate timeline, and to perform a coordinated release, giving credit to the reporter if they would like.

Our best contact for security issues is security@zfnd.org.

## Sending Disclosures

In the case where we become aware of security issues affecting other projects that have never affected Zebra or Zcash, our intention is to inform those projects of the security issues on a best effort basis.

In the case where we fix a security issue in Zebra or Zcash that also affects the neighboring projects listed below, our intention is to engage in responsible disclosures with them as described in https://github.com/RD-Crypto-Spec/Responsible-Disclosure, subject to the deviations described in the section at the bottom of this document.

## Bilateral Responsible Disclosure Agreements

We have set up agreements with the following neighboring projects to share vulnerability information, subject to the deviations described in the next section.

Specifically, we have agreed to engage in responsible disclosures for security issues affecting Zebra or Zcash technology with the following contacts:

- The Electric Coin Company - security@z.cash via PGP

## Deviations from the Standard

### Monetary Base Protection

Zcash is a technology that provides strong privacy. Notes are encrypted to their destination, and the monetary base is protected by zero-knowledge proofs that are intended to be creatable only by the real holder of Zcash. If this fails, and a counterfeiting bug results, that bug might be exploited without any way for blockchain analyzers to identify the perpetrator or which data in the blockchain has been used to exploit the bug. Rollbacks before that point, such as have been executed in some other projects in such cases, are therefore impossible.

The standard describes reporters of vulnerabilities including full details of an issue, in order to reproduce it. This is necessary, for instance, in the case of an external researcher both demonstrating and proving that there really is a security issue, and that the security issue really has the impact that they say it has, allowing the development team to accurately prioritize and resolve the issue.

In the case of a counterfeiting bug, we might decide not to include those details with our reports to partners ahead of a coordinated release, so long as we are sure that they are vulnerable.

@ -0,0 +1 @@
book

@ -0,0 +1,17 @@
[book]
authors = ["Zcash Foundation <zebra@zfnd.org>"]
language = "en"
multilingual = false
src = "src"
title = "The Zebra Book"

[preprocessor]

[preprocessor.mermaid]
command = "mdbook-mermaid"

[output]

[output.html]
additional-js = ["mermaid.min.js", "mermaid-init.js"]
additional-css = ["theme/css/custom.css"]

@ -0,0 +1 @@
mermaid.initialize({startOnLoad:true});

File diff suppressed because one or more lines are too long

@ -0,0 +1 @@
{{#include ../../CONTRIBUTING.md}}

@ -0,0 +1 @@
{{#include ../../README.md}}

@ -0,0 +1,48 @@
# Summary

[Zebra](README.md)

- [User Documentation](user.md)
  - [System Requirements](user/requirements.md)
  - [Supported Platforms](user/supported-platforms.md)
    - [Platform Tier Policy](user/target-tier-policies.md)
  - [Installing Zebra](user/install.md)
  - [Running Zebra](user/run.md)
    - [Zebra with Docker](user/docker.md)
  - [Tracing Zebra](user/tracing.md)
  - [Zebra Metrics](user/metrics.md)
  - [Lightwalletd](user/lightwalletd.md)
  - [zk-SNARK Parameters](user/parameters.md)
  - [Mining](user/mining.md)
    - [Testnet Mining with s-nomp](user/mining-testnet-s-nomp.md)
    - [Mining with Zebra in Docker](user/mining-docker.md)
  - [Shielded Scanning](user/shielded-scan.md)
    - [Shielded Scanning gRPC Server](user/shielded-scan-grpc-server.md)
  - [Kibana blockchain explorer](user/elasticsearch.md)
  - [Forking the Zcash Testnet with Zebra](user/fork-zebra-testnet.md)
  - [OpenAPI specification](user/openapi.md)
  - [Troubleshooting](user/troubleshooting.md)
- [Developer Documentation](dev.md)
  - [Contribution Guide](CONTRIBUTING.md)
  - [Design Overview](dev/overview.md)
  - [Diagrams](dev/diagrams.md)
    - [Network Architecture](dev/diagrams/zebra-network.md)
  - [Upgrading the State Database](dev/state-db-upgrades.md)
  - [Zebra versioning and releases](dev/release-process.md)
  - [Continuous Integration](dev/continuous-integration.md)
  - [Continuous Delivery](dev/continuous-delivery.md)
  - [Generating Zebra Checkpoints](dev/zebra-checkpoints.md)
  - [Doing Mass Renames](dev/mass-renames.md)
  - [Updating the ECC dependencies](dev/ecc-updates.md)
  - [Zebra RFCs](dev/rfcs.md)
    - [Pipelinable Block Lookup](dev/rfcs/0001-pipelinable-block-lookup.md)
    - [Parallel Verification](dev/rfcs/0002-parallel-verification.md)
    - [Inventory Tracking](dev/rfcs/0003-inventory-tracking.md)
    - [Asynchronous Script Verification](dev/rfcs/0004-asynchronous-script-verification.md)
    - [State Updates](dev/rfcs/0005-state-updates.md)
    - [Contextual Difficulty Validation](dev/rfcs/0006-contextual-difficulty.md)
    - [Zebra Client](dev/rfcs/0009-zebra-client.md)
    - [V5 Transaction](dev/rfcs/0010-v5-transaction.md)
    - [Async Rust in Zebra](dev/rfcs/0011-async-rust-in-zebra.md)
    - [Value Pools](dev/rfcs/0012-value-pools.md)
- [API Reference](api.md)

@ -0,0 +1,10 @@
# API Reference

The Zcash Foundation maintains the following API documentation for Zebra:

- The [documentation of the public
  APIs](https://docs.rs/zebrad/latest/zebrad/#zebra-crates) for the latest
  releases of the individual Zebra crates.

- The [documentation of the internal APIs](https://doc-internal.zebra.zfnd.org)
  for the `main` branch of the whole Zebra monorepo.

@ -0,0 +1,11 @@
# Developer Documentation

This section contains the contribution guide and design documentation. It does
not contain:

- The [documentation of the public
  APIs](https://docs.rs/zebrad/latest/zebrad/#zebra-crates) for the latest
  releases of the individual Zebra crates.

- The [documentation of the internal APIs](https://doc-internal.zebra.zfnd.org)
  for the `main` branch of the whole Zebra monorepo.

@ -0,0 +1,15 @@
# Zebra audits

In addition to our normal [release process](https://github.com/ZcashFoundation/zebra/blob/main/book/src/dev/release-process.md), we do these steps to prepare for an audit:
1. [Tag a release candidate](https://github.com/ZcashFoundation/zebra/blob/main/book/src/dev/release-process.md#preview-releases) with the code to be audited
2. Declare a feature and fix freeze: non-essential changes must wait until after the audit, and new features must be behind a [Rust feature flag](https://doc.rust-lang.org/cargo/reference/features.html) (see the sketch after this list)
3. Prepare a list of dependencies that are [in scope, partially in scope, and out of scope](https://github.com/ZcashFoundation/zebra/issues/5214). Audits focus on:
   - production Rust code that the Zcash Foundation has written, or is responsible for
   - consensus-critical and security-critical code
   - code that hasn't already been audited
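
A minimal sketch of such a feature gate (the feature name is hypothetical):

```toml
# In the crate's Cargo.toml: declare an off-by-default feature
# (the name "new-consensus-check" is hypothetical).
[features]
new-consensus-check = []

# In the code, the new module is then gated with:
# #[cfg(feature = "new-consensus-check")]
```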

Some code might require specialist audits, for example, consensus-critical cryptographic code.

The audit tag and freeze allow us to create an audit branch and merge it back into the `main` branch easily. Audit branches are optional; we'll make a decision based on:
- whether the auditors want a separate branch to review recommended changes, and
- the complexity of the changes.

@ -0,0 +1,28 @@
# Zebra Continuous Delivery

Zebra has an extension of its continuous integration: it automatically deploys all
code changes to a testing and/or pre-production environment after each PR gets merged
into the `main` branch, and on each Zebra `release`.

## Triggers

The continuous delivery pipeline is triggered when:

* A PR is merged to `main` (technically, a `push` event)
* A new release is published on GitHub

## Deployments

On each trigger, Zebra is deployed using the branch or version references as part of
the deployment naming convention. Deployments are made using [Managed Instance Groups (MIGs)](https://cloud.google.com/compute/docs/instance-groups#managed_instance_groups)
from Google Cloud Platform, with 2 nodes in the us-central1 region.

**Note**: These *MIGs* are always replaced when PRs are merged to the `main` branch and
when a release is published. If a new major version is released, a new *MIG* is also
created, keeping the previous major version running until it's no longer needed.

A single instance can also be deployed on an on-demand basis, when a
long-lived instance with specific changes needs to be tested on Mainnet with
the same infrastructure used for CI & CD.

Further validations of the actual process can be done on our continuous delivery [workflow file](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/cd-deploy-nodes-gcp.yml).

@ -0,0 +1,280 @@
# Zebra Continuous Integration

## Overview

Zebra has extensive continuous integration tests for node syncing and `lightwalletd` integration.

On every PR change, Zebra runs [these Docker tests](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/ci-integration-tests-gcp.yml):
- Zebra update syncs from a cached state Google Cloud tip image
- lightwalletd full syncs from a cached state Google Cloud tip image
- lightwalletd update syncs from a cached state Google Cloud tip image
- lightwalletd integration with Zebra JSON-RPC and Light Wallet gRPC calls

When a PR is merged to the `main` branch, we also run a Zebra full sync test from genesis.
Some of our builds and tests are repeated on the `main` branch, due to:
- GitHub's cache sharing rules,
- our cached state sharing rules, or
- generating base coverage for PR coverage reports.

Currently, each Zebra and lightwalletd full and update sync updates the cached state images,
which are shared by all tests. Tests prefer the latest image generated from the same commit.
But if a state from the same commit is not available, tests will use the latest image from
any branch and commit, as long as the state version is the same.

Zebra also does [a smaller set of tests](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/ci-unit-tests-os.yml) on tier 2 platforms using GitHub Actions runners.

## Automated Merges

We use [Mergify](https://dashboard.mergify.com/github/ZcashFoundation/repo/zebra/queues) to automatically merge most pull requests.
To merge, a PR has to pass all required `main` branch protection checks, and be approved by a Zebra developer.

We try to use Mergify as much as we can, so all PRs get consistent checks.

Some PRs don't use Mergify:
- Mergify config updates
- Admin merges, which happen when there are multiple failures on the `main` branch
- Manual merges (these are allowed by our branch protection rules, but we almost always use Mergify)

Merging with failing CI is usually disabled by our branch protection rules.
See the `Admin: Manually Merging PRs` section below for manual merge instructions.

We use workflow conditions to skip some checks on PRs, Mergify, or the `main` branch.
For example, some workflow changes skip Rust code checks. When a workflow can skip a check, we need to create [a patch workflow](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/collaborating-on-repositories-with-code-quality-features/troubleshooting-required-status-checks#handling-skipped-but-required-checks)
with an empty job with the same name. This is a [known Actions issue](https://github.com/orgs/community/discussions/13690#discussioncomment-6653382).
This lets the branch protection rules pass when the job is skipped. In Zebra, we name these workflows with the extension `.patch.yml`.
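
A minimal sketch of such a patch workflow (the workflow, job, check, and path
names are hypothetical; the real names must exactly match the required check
in the workflow being patched):

```yaml
# .github/workflows/ci-example.patch.yml (hypothetical name)
# Runs only when the real workflow is skipped, and reports success
# under the same check name, so branch protection can still pass.
name: Example CI

on:
  pull_request:
    paths-ignore:
      # The inverse of the real workflow's `paths` filter (assumption)
      - '**/*.rs'

jobs:
  # Must use the same job/check name as the real workflow
  test:
    name: Example required check
    runs-on: ubuntu-latest
    steps:
      - run: 'echo "No build required"'
```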

### Branch Protection Rules

Branch protection rules should be added for every failure that should stop a PR merging, break a release, or cause problems for Zebra users.
We also add branch protection rules for developer or devops features that we need to keep working, like coverage.

But the following jobs don't need branch protection rules:
* Testnet jobs: testnet is unreliable.
* Optional linting jobs: some lint jobs are required, but some jobs like spelling and actions are optional.
* Jobs that rarely run: for example, cached state rebuild jobs.
* Setup jobs whose failure will also fail another, later job that always runs, for example: Google Cloud setup jobs.

We have branch protection rules for build jobs, but we could remove them if we want.

When a new job is added in a PR, use the `#devops` Slack channel to ask a GitHub admin to add a branch protection rule after it merges.
Adding a new Zebra crate automatically adds a new job to build that crate by itself in [ci-build-crates.yml](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/ci-build-crates.yml),
so new crate PRs also need to add a branch protection rule.
#### Admin: Changing Branch Protection Rules

[Zebra repository admins](https://github.com/orgs/ZcashFoundation/teams/zebra-admins) and
[Zcash Foundation organisation owners](https://github.com/orgs/ZcashFoundation/people?query=role%3Aowner)
can add or delete branch protection rules in the Zebra repository.

To change branch protection rules:

Any developer:

0. Open a PR containing the new job, so its name is available to autocomplete.
1. If the job doesn't run on all PRs, add a patch job with the name of the job.
   If the job calls a reusable workflow, the name is `Caller job / Reusable step`.
   (The name of the job inside the reusable workflow is ignored.)

Admin:

2. Go to the [branch protection rule settings](https://github.com/ZcashFoundation/zebra/settings/branches)
3. Click on `Edit` for the `main` branch
4. Scroll down to the `Require status checks to pass before merging` section.
   (This section must always be enabled. If it is disabled, all the rules get deleted.)

To add jobs:

5. Start typing the name of the job or step in the search box
6. Select the name of the job or step to add it

To remove jobs:

7. Go to `Status checks that are required.`
8. Find the job name, and click the cross on the right to remove it

And finally:

9. Click `Save changes`, using your security key if needed

If you accidentally delete a lot of rules, and you can't remember what they were, ask a
ZF organisation owner to send you a copy of the rules from the [audit log](https://github.com/organizations/ZcashFoundation/settings/audit-log).

Organisation owners can also monitor rule changes and other security settings using this log.
#### Admin: Manually Merging PRs

Admins can allow merges with failing CI, to fix CI when multiple issues are causing failures.

Admin:
1. Follow steps 2 and 3 above to open the `main` branch protection rule settings
2. Scroll down to `Do not allow bypassing the above settings`
3. Uncheck it
4. Click `Save changes`
5. Do the manual merge, and put an explanation on the PR
6. Re-open the branch protection rule settings, and re-enable `Do not allow bypassing the above settings`

### Pull Requests from Forked Repositories

GitHub doesn't allow PRs from forked repositories to have access to our repository secret keys, even after we approve their CI.
This means that Google Cloud CI fails on these PRs.

Until we [fix this CI bug](https://github.com/ZcashFoundation/zebra/issues/4529), we can merge external PRs by:
1. Reviewing the code to make sure it won't give our secret keys to anyone
2. Pushing a copy of the branch to the Zebra repository
3. Opening a PR using that branch
4. Closing the original PR with a note that it will be merged (closing duplicate PRs is required by Mergify)
5. Asking another Zebra developer to approve the new PR
## Manual Testing Using Google Cloud

Some Zebra developers have access to the Zcash Foundation's Google Cloud instance, which also runs our automatic CI.

Please shut down large instances when they are not being used.

### Automated Deletion

The [Delete GCP Resources](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/delete-gcp-resources.yml)
workflow automatically deletes test instances, instance templates, disks, and images older than a few days.

If you want to keep instances, instance templates, disks, or images in Google Cloud, name them so they don't match the automated names:
- deleted instances, instance templates, and disks end in a commit hash, so use a name that doesn't end in `-[0-9a-f]{7,}`
- deleted disks and images start with `zebrad-` or `lwd-`, so use a name starting with anything else

Our production Google Cloud project doesn't have automated deletion.

## Troubleshooting

To improve CI performance, some Docker tests are stateful.

Tests can depend on:
- built Zebra and `lightwalletd` Docker images
- cached state images in Google Cloud
- jobs that launch Google Cloud instances for each test
- multiple jobs that follow the logs from Google Cloud (to work around the 6-hour GitHub Actions limit)
- a final "Run" job that checks the exit status of the Rust acceptance test
- the current height and user-submitted transactions on the blockchain, which change every minute

To support this test state, some Docker tests depend on other tests finishing first.
This means that the entire workflow must be re-run when a single test fails.
### Finding Errors

0. Check if the same failure is happening on the `main` branch or multiple PRs.
   If it is, open a ticket and tell the Zebra team lead.

1. Look for the earliest job that failed, and find the earliest failure.

   For example, this failure doesn't tell us what actually went wrong:
   > Error: The template is not valid. ZcashFoundation/zebra/.github/workflows/sub-build-docker-image.yml@8bbc5b21c97fafc83b70fbe7f3b5e9d0ffa19593 (Line: 52, Col: 19): Error reading JToken from JsonReader. Path '', line 0, position 0.

   https://github.com/ZcashFoundation/zebra/runs/8181760421?check_suite_focus=true#step:41:4

   But the specific failure is a few steps earlier:
   > #24 2117.3 error[E0308]: mismatched types
   > ...

   https://github.com/ZcashFoundation/zebra/runs/8181760421?check_suite_focus=true#step:8:2112

2. The earliest failure can also be in another job or pull request:

   a. Check the whole workflow run (use the "Summary" button on the top left of the job details, and zoom in).

   b. If Mergify failed with "The pull request embarked with main cannot be merged", look at the PR "Conversation" tab, and find the latest Mergify PR that tried to merge this PR. Then start again from step 1.

3. If that doesn't help, try looking for the latest failure. In Rust tests, the "failure:" notice contains the failed test names.
### Fixing CI Sync Timeouts

CI sync jobs near the tip will take different amounts of time as:
- the blockchain grows, and
- Zebra's checkpoints are updated.

To fix a CI sync timeout, follow these steps until the timeouts are fixed:
1. Check for recent PRs that could have caused a performance decrease
2. [Update Zebra's checkpoints](https://github.com/ZcashFoundation/zebra/blob/main/zebra-utils/README.md#zebra-checkpoints)
3. If a Rust test fails with "command did not log any matches for the given regex, within the ... timeout":

   a. If it's the full sync test, [increase the full sync timeout](https://github.com/ZcashFoundation/zebra/pull/5129/files)

   b. If it's an update sync test, [increase the update sync timeouts](https://github.com/ZcashFoundation/zebra/commit/9fb87425b76ba3747985ea2f22043ff0276a03bd#diff-92f93c26e696014d82c3dc1dbf385c669aa61aa292f44848f52167ab747cb6f6R51)
### Fixing Duplicate Dependencies in `Check deny.toml bans`

Zebra's CI checks for duplicate crate dependencies: multiple dependencies on different versions of the same crate.
If a developer or dependabot adds a duplicate dependency, the `Check deny.toml bans` CI job will fail.

You can view Zebra's entire dependency tree using `cargo tree`. It can also show the active features on each dependency.
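
For example (both flags are standard `cargo tree` options):

```sh
# List crates that appear at multiple versions, and what pulls them in
cargo tree --duplicates
# Also show which features are active on each dependency edge
cargo tree --duplicates --edges features
```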

To fix duplicate dependencies, follow these steps until the duplicate dependencies are fixed:

1. Check for updates to the crates mentioned in the `Check deny.toml bans` logs, and try doing them in the same PR.
   For an example, see [PR #5009](https://github.com/ZcashFoundation/zebra/pull/5009#issuecomment-1232488943).

   a. Check for open dependabot PRs, and

   b. Manually check for updates to those crates on https://crates.io .

2. If there are still duplicate dependencies, try removing those dependencies by disabling crate features:

   a. Check for features that Zebra activates in its `Cargo.toml` files, and try turning them off, then

   b. Try adding `default-features = false` to Zebra's dependencies (see [PR #4082](https://github.com/ZcashFoundation/zebra/pull/4082/files)).

3. If there are still duplicate dependencies, add or update `skip-tree` in [`deny.toml`](https://github.com/ZcashFoundation/zebra/blob/main/deny.toml) (a sketch is shown after this list):

   a. Prefer exceptions for dependencies that are closer to Zebra in the dependency tree (sometimes this resolves other duplicates as well),

   b. Add or update exceptions for the earlier version of duplicate dependencies, not the later version, and

   c. Add a comment about why the dependency exception is needed: what was the direct Zebra dependency that caused it?

   d. For an example, see [PR #4890](https://github.com/ZcashFoundation/zebra/pull/4890/files).

4. Repeat step 3 until the dependency warnings are fixed. Adding a single `skip-tree` exception can resolve multiple warnings.
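
A sketch of a `skip-tree` exception (the crate name and version below are
placeholders, not real Zebra entries):

```toml
# deny.toml sketch: crate name and version are placeholders.
[bans]
skip-tree = [
    # Exception for the *earlier* version of the duplicate.
    # Reason: a direct Zebra dependency still pulls in this old version.
    { name = "old-duplicate-crate", version = "=0.1.0" },
]
```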

#### Fixing "unmatched skip root" warnings in `Check deny.toml bans`

1. Run `cargo deny --all-features check bans`, or look at the output of the latest "Check deny.toml bans --all-features" job on the `main` branch

2. If there are any "skip tree root was not found in the dependency graph" warnings, delete those versions from `deny.toml`

### Fixing Disk Full Errors

If the Docker cached state disks are full, increase the disk sizes in:
- [deploy-gcp-tests.yml](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/deploy-gcp-tests.yml)
- [cd-deploy-nodes-gcp.yml](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/cd-deploy-nodes-gcp.yml)

If the GitHub Actions disks are full, follow these steps until the errors are fixed:

0. Check if the error is also happening on the `main` branch. If it is, skip the next step.
1. Update your branch to the latest `main` branch, so it builds with all the latest dependencies in the `main` branch cache.
2. Clear the GitHub Actions code cache for the failing branch. Code caches are named after the compiler version.
3. Clear the GitHub Actions code caches for all the branches and the `main` branch.

These errors often happen after a new compiler version is released, because the caches can end up with files from both compiler versions.

You can find a list of caches using:
```sh
gh api -H "Accept: application/vnd.github+json" repos/ZcashFoundation/Zebra/actions/caches
```

And delete a cache by `id` using:
```sh
gh api --method DELETE -H "Accept: application/vnd.github+json" /repos/ZcashFoundation/Zebra/actions/caches/<id>
```

These commands are from the [GitHub Actions Cache API reference](https://docs.github.com/en/rest/actions/cache).

### Retrying After Temporary Errors

Some errors happen due to network connection issues, high load, or other rare situations.

If it looks like a failure might be temporary, try re-running all the jobs on the PR using one of these methods:
1. `@mergifyio update`
2. `@dependabot recreate` (for dependabot PRs only)
3. Click on the failed job, and select "Re-run all jobs". If the workflow hasn't finished, you might need to cancel it and wait for it to finish.

Here are some of the rare and temporary errors that should be retried:
- Docker: "buildx failed with ... cannot reuse body, request must be retried"
- Failure in the `local_listener_fixed_port_localhost_addr_v4` Rust test; mention [ticket #4999](https://github.com/ZcashFoundation/zebra/issues/4999) on the PR
- any network connection or download failures

We track some rare errors using tickets, so we know if they are becoming more common and we need to fix them.

@ -0,0 +1,121 @@
# Zebra Crates

The Zebra project publishes around 20 crates to the Rust [crates.io website](https://crates.io).
Zcash Foundation crates are controlled by the [`ZcashFoundation/owners`](https://github.com/orgs/ZcashFoundation/teams/owners) GitHub team.

The latest list of Zebra and FROST crates is [available on crates.io](https://crates.io/teams/github:zcashfoundation:owners).

The Zebra repository can be used to publish the crates in this list that match these patterns:
- starts with `zebra` (including `zebrad` and the `zebra` placeholder)
- starts with `tower`

We also depend on these separate ZF crates:
- `zcash_script`
- `ed25519-zebra`

And these crates shared with ECC:
- `reddsa`
- `redjubjub`

## Logging in to crates.io

To publish a crate or change owners, you'll need to [log in to crates.io](https://doc.rust-lang.org/cargo/reference/publishing.html#before-your-first-publish) using `cargo login`.

When you create a token, give it an expiry date, and limit its permissions to the task you're doing. For example, if you're doing a release, create a token for releasing crates.

Tokens that allow changing owners should have the shortest expiry possible.

[Revoke the token](https://crates.io/me) after you're finished using it.

Here is an example login command:
```sh
$ cargo login
please paste the token found on https://crates.io/me below
...
Login token for `crates.io` saved
```

## Publishing New Crates

We publish a new placeholder crate as soon as we have a good idea for a crate name.

Before publishing, clone Zebra and use the `main` branch to create the placeholder crate. You need `cargo release` installed on your system, and you need to be logged in to crates.io with `cargo login`.

Next, execute the following commands to publish a new placeholder and set the owners:
```sh
cargo new new-crate-name
cd new-crate-name
cargo release publish --verbose --package new-crate-name --execute
cargo owner --add oxarbitrage
cargo owner --add teor2345
cargo owner --add github:zcashfoundation:owners
```

## Changing Crate Ownership

crates.io has two kinds of owners: group owners and individual owners. All owners can publish and yank crates.
But [only individual owners can change crate owners](https://doc.rust-lang.org/cargo/reference/publishing.html#cargo-owner).

Zcash Foundation crates should have:
- at least 2 individual owners, who are typically engineers on the relevant project
- a group owner that contains everyone who can publish the crate

When an individual owner leaves the foundation, they should be [replaced with another individual owner](https://doc.rust-lang.org/cargo/reference/publishing.html#cargo-owner).

New crate owners should go to [crates.io/me](https://crates.io/me) to accept the invitation; they will then appear in the list of owners.

Here are some example commands for changing owners:

To change owners of deleted/placeholder Zebra crates:
```sh
$ mkdir placeholders
$ cd placeholders
$ for crate in tower-batch-cpu zebra zebra-cli zebra-client; do cargo new $crate; pushd $crate; cargo owner --add oxarbitrage; cargo owner --remove dconnolly; popd; done
Created binary (application) `zebra-cli` package
~/zebra-cli ~
Updating crates.io index
Owner user oxarbitrage has been invited to be an owner of crate zebra-cli
Updating crates.io index
Owner removing ["dconnolly"] from crate zebra-cli
~
Created binary (application) `zebra-client` package
~/zebra-client ~
Updating crates.io index
Owner user oxarbitrage has been invited to be an owner of crate zebra-client
Updating crates.io index
Owner removing ["dconnolly"] from crate zebra-client
~
...
```

To change owners of `zcash_script`:
```sh
$ git clone https://github.com/ZcashFoundation/zcash_script
$ cd zcash_script
$ cargo owner --add oxarbitrage
Updating crates.io index
Owner user oxarbitrage has been invited to be an owner of crate zcash_script
$ cargo owner --remove dconnolly
Updating crates.io index
Owner removing ["dconnolly"] from crate zcash_script
```

To change owners of current Zebra crates:
```sh
$ git clone https://github.com/ZcashFoundation/zebra
$ cd zebra
$ for crate in tower-* zebra*; do pushd $crate; cargo owner --add oxarbitrage; cargo owner --remove dconnolly; popd; done
~/zebra/tower-batch-control ~/zebra
Updating crates.io index
Owner user oxarbitrage already has a pending invitation to be an owner of crate tower-batch-control
Updating crates.io index
Owner removing ["dconnolly"] from crate tower-batch-control
~/zebra
~/zebra/tower-fallback ~/zebra
Updating crates.io index
Owner user oxarbitrage has been invited to be an owner of crate tower-fallback
Updating crates.io index
Owner removing ["dconnolly"] from crate tower-fallback
~/zebra
...
```

@ -0,0 +1 @@
# Diagrams

@ -0,0 +1,174 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
 "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<!-- Generated by graphviz version 2.40.1 (20161225.0304)
 -->
<!-- Title: services Pages: 1 -->
<svg width="569pt" height="404pt"
 viewBox="0.00 0.00 568.65 404.00" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g id="graph0" class="graph" transform="scale(1 1) rotate(0) translate(4 400)">
<title>services</title>
<!-- transaction_verifier -->
<a id="node1" class="node" target="_blank" href="https://doc-internal.zebra.zfnd.org/zebra_consensus/transaction/index.html">
<title>transaction_verifier</title>
<ellipse fill="transparent" stroke="#000000" cx="342.6515" cy="-90" rx="86.7914" ry="18"/>
<text text-anchor="middle" x="342.6515" y="-85.8" font-family="'Opens sans', sans-serif" font-size="14.00" fill="#000000">transaction_verifier</text>
</a>
<!-- state -->
<a id="node2" class="node" target="_blank" href="https://doc-internal.zebra.zfnd.org/zebra_state/service/index.html">
<title>state</title>
<ellipse fill="transparent" stroke="#000000" cx="284.6515" cy="-18" rx="28.9676" ry="18"/>
<text text-anchor="middle" x="284.6515" y="-13.8" font-family="'Opens sans', sans-serif" font-size="14.00" fill="#000000">state</text>
</a>
<!-- transaction_verifier->state -->
<g id="edge1" class="edge">
<title>transaction_verifier->state</title>
<path fill="none" stroke="#000000" d="M328.3144,-72.2022C320.9763,-63.0928 311.9383,-51.8733 303.9925,-42.0096"/>
<polygon fill="#000000" stroke="#000000" points="306.6352,-39.7109 297.6362,-34.119 301.1839,-44.1022 306.6352,-39.7109"/>
</g>
<!-- mempool -->
<a id="node3" class="node" target="_blank" href="https://doc-internal.zebra.zfnd.org/zebrad/components/mempool/index.html">
<title>mempool</title>
<ellipse fill="transparent" stroke="#000000" cx="413.6515" cy="-234" rx="47.5332" ry="18"/>
<text text-anchor="middle" x="413.6515" y="-229.8" font-family="'Opens sans', sans-serif" font-size="14.00" fill="#000000">mempool</text>
</a>
<!-- mempool->transaction_verifier -->
<g id="edge5" class="edge">
<title>mempool->transaction_verifier</title>
<path fill="none" stroke="#000000" d="M404.8029,-216.0535C392.5792,-191.2618 370.4207,-146.3207 356.1305,-117.3378"/>
<polygon fill="#000000" stroke="#000000" points="359.1731,-115.594 351.6117,-108.1727 352.8948,-118.6896 359.1731,-115.594"/>
</g>
<!-- mempool->state -->
<g id="edge2" class="edge">
<title>mempool->state</title>
<path fill="none" stroke="#000000" d="M418.4301,-215.8051C424.5542,-191.4586 434.8355,-146.8405 438.6515,-108 440.216,-92.0767 447.906,-85.052 438.6515,-72 424.904,-52.6112 363.0269,-35.4418 322.0132,-25.9109"/>
<polygon fill="#000000" stroke="#000000" points="322.6042,-22.456 312.0775,-23.6556 321.0547,-29.2824 322.6042,-22.456"/>
</g>
<!-- peer_set -->
<a id="node9" class="node" target="_blank" href="https://doc-internal.zebra.zfnd.org/zebra_network/peer_set/set/index.html">
<title>peer_set</title>
<ellipse fill="transparent" stroke="#000000" cx="489.6515" cy="-162" rx="43.4163" ry="18"/>
<text text-anchor="middle" x="489.6515" y="-157.8" font-family="'Opens sans', sans-serif" font-size="14.00" fill="#000000">peer_set</text>
</a>
<!-- mempool->peer_set -->
<g id="edge13" class="edge">
<title>mempool->peer_set</title>
<path fill="none" stroke="#000000" d="M431.6624,-216.937C441.4819,-207.6344 453.7908,-195.9733 464.5392,-185.7906"/>
<polygon fill="#000000" stroke="#000000" points="467.1796,-188.1105 472.032,-178.6921 462.3654,-183.0288 467.1796,-188.1105"/>
</g>
<!-- inbound -->
<a id="node4" class="node" target="_blank" href="https://doc-internal.zebra.zfnd.org/zebrad/components/inbound/index.html">
<title>inbound</title>
<ellipse fill="transparent" stroke="#000000" cx="229.6515" cy="-306" rx="42.8829" ry="18"/>
<text text-anchor="middle" x="229.6515" y="-301.8" font-family="'Opens sans', sans-serif" font-size="14.00" fill="#000000">inbound</text>
</a>
<!-- inbound->state -->
<g id="edge3" class="edge">
<title>inbound->state</title>
<path fill="none" stroke="#000000" d="M200.246,-292.729C126.5724,-257.7262 -55.0317,-159.5675 16.6515,-72 45.118,-37.2256 178.1169,-24.3498 245.4215,-20.003"/>
<polygon fill="#000000" stroke="#000000" points="245.8664,-23.4823 255.6333,-19.3765 245.4378,-16.4955 245.8664,-23.4823"/>
</g>
<!-- inbound->mempool -->
<g id="edge7" class="edge">
<title>inbound->mempool</title>
<path fill="none" stroke="#000000" d="M261.2822,-293.6228C291.5824,-281.7662 337.5245,-263.7888 371.0388,-250.6745"/>
<polygon fill="#000000" stroke="#000000" points="372.5364,-253.847 380.5734,-246.9436 369.9855,-247.3283 372.5364,-253.847"/>
</g>
<!-- block_verifier_router -->
<a id="node6" class="node" target="_blank" href="https://doc-internal.zebra.zfnd.org/zebra_consensus/chain/index.html">
<title>block_verifier_router</title>
<ellipse fill="transparent" stroke="#000000" cx="244.6515" cy="-234" rx="65.3859" ry="18"/>
<text text-anchor="middle" x="244.6515" y="-229.8" font-family="'Opens sans', sans-serif" font-size="14.00" fill="#000000">block_verifier_router</text>
</a>
<!-- inbound->block_verifier_router -->
<g id="edge9" class="edge">
<title>inbound->block_verifier_router</title>
<path fill="none" stroke="#000000" d="M233.4366,-287.8314C235.0409,-280.131 236.9485,-270.9743 238.7314,-262.4166"/>
<polygon fill="#000000" stroke="#000000" points="242.2022,-262.9169 240.8154,-252.4133 235.3494,-261.4892 242.2022,-262.9169"/>
</g>
<!-- rpc_server -->
<a id="node5" class="node" target="_blank" href="https://doc-internal.zebra.zfnd.org/zebra_rpc/server/index.html">
<title>rpc_server</title>
<ellipse fill="transparent" stroke="#000000" cx="364.6515" cy="-378" rx="51.565" ry="18"/>
<text text-anchor="middle" x="364.6515" y="-373.8" font-family="'Opens sans', sans-serif" font-size="14.00" fill="#000000">rpc_server</text>
</a>
<!-- rpc_server->state -->
<g id="edge4" class="edge">
<title>rpc_server->state</title>
<path fill="none" stroke="#000000" d="M406.6984,-367.4128C464.2897,-350.0944 560.6515,-309.7926 560.6515,-234 560.6515,-234 560.6515,-234 560.6515,-162 560.6515,-56.3112 399.6037,-28.0637 323.5492,-20.6165"/>
<polygon fill="#000000" stroke="#000000" points="323.6823,-17.1144 313.4073,-19.6979 323.0507,-24.0858 323.6823,-17.1144"/>
</g>
<!-- rpc_server->mempool -->
<g id="edge8" class="edge">
<title>rpc_server->mempool</title>
<path fill="none" stroke="#000000" d="M383.846,-360.9895C393.4567,-351.221 404.1854,-338.1106 409.6515,-324 417.2551,-304.3715 417.9695,-280.5065 416.9367,-262.2845"/>
<polygon fill="#000000" stroke="#000000" points="420.424,-261.9839 416.1656,-252.2825 413.4447,-262.522 420.424,-261.9839"/>
</g>
<!-- rpc_server->block_verifier_router -->
<g id="edge11" class="edge">
<title>rpc_server->block_verifier_router</title>
<path fill="none" stroke="#000000" stroke-dasharray="1,5" d="M350.1767,-360.6302C329.2082,-335.4681 290.2442,-288.7112 265.9807,-259.595"/>
<polygon fill="#000000" stroke="#000000" points="268.6447,-257.3247 259.5541,-251.8831 263.2672,-261.806 268.6447,-257.3247"/>
</g>
<!-- checkpoint_verifier -->
<a id="node7" class="node" target="_blank" href="https://doc-internal.zebra.zfnd.org/zebra_consensus/checkpoint/index.html">
<title>checkpoint_verifier</title>
<ellipse fill="transparent" stroke="#000000" cx="112.6515" cy="-90" rx="86.7972" ry="18"/>
<text text-anchor="middle" x="112.6515" y="-85.8" font-family="'Opens sans', sans-serif" font-size="14.00" fill="#000000">checkpoint_verifier</text>
</a>
<!-- block_verifier_router->checkpoint_verifier -->
<g id="edge6" class="edge">
<title>block_verifier_router->checkpoint_verifier</title>
<path fill="none" stroke="#000000" d="M216.638,-217.5178C201.6091,-207.8136 183.4054,-194.5969 169.6515,-180 151.8569,-161.1147 136.447,-135.8982 126.1523,-116.962"/>
<polygon fill="#000000" stroke="#000000" points="129.1594,-115.1615 121.3857,-107.9628 122.9735,-118.438 129.1594,-115.1615"/>
</g>
<!-- block_verifier -->
<a id="node10" class="node" target="_blank" href="https://doc-internal.zebra.zfnd.org/zebra_consensus/block/index.html">
<title>block_verifier</title>
<ellipse fill="transparent" stroke="#000000" cx="244.6515" cy="-162" rx="65.9697" ry="18"/>
<text text-anchor="middle" x="244.6515" y="-157.8" font-family="'Opens sans', sans-serif" font-size="14.00" fill="#000000">block_verifier</text>
</a>
<!-- block_verifier_router->block_verifier -->
<g id="edge17" class="edge">
<title>block_verifier_router->block_verifier</title>
<path fill="none" stroke="#000000" d="M244.6515,-215.8314C244.6515,-208.131 244.6515,-198.9743 244.6515,-190.4166"/>
|
||||
<polygon fill="#000000" stroke="#000000" points="248.1516,-190.4132 244.6515,-180.4133 241.1516,-190.4133 248.1516,-190.4132"/>
|
||||
</g>
|
||||
<!-- checkpoint_verifier->state -->
|
||||
<g id="edge15" class="edge">
|
||||
<title>checkpoint_verifier->state</title>
|
||||
<path fill="none" stroke="#000000" d="M151.2517,-73.8418C181.2868,-61.2689 222.5394,-44.0004 251.1856,-32.009"/>
|
||||
<polygon fill="#000000" stroke="#000000" points="252.8804,-35.0939 260.7533,-28.0039 250.1774,-28.6368 252.8804,-35.0939"/>
|
||||
</g>
|
||||
<!-- syncer -->
|
||||
<a id="node8" class="node" target="_blank" href="https://doc-internal.zebra.zfnd.org/zebrad/components/sync/index.html">
|
||||
<title>syncer</title>
|
||||
<ellipse fill="transparent" stroke="#000000" cx="364.6515" cy="-306" rx="36.4761" ry="18"/>
|
||||
<text text-anchor="middle" x="364.6515" y="-301.8" font-family="'Opens sans', sans-serif" font-size="14.00" fill="#000000">syncer</text>
|
||||
</a>
|
||||
<!-- syncer->block_verifier_router -->
|
||||
<g id="edge10" class="edge">
|
||||
<title>syncer->block_verifier_router</title>
|
||||
<path fill="none" stroke="#000000" d="M341.5143,-292.1177C324.2684,-281.7701 300.3887,-267.4423 280.6551,-255.6022"/>
|
||||
<polygon fill="#000000" stroke="#000000" points="282.2946,-252.5042 271.9189,-250.3604 278.6931,-258.5067 282.2946,-252.5042"/>
|
||||
</g>
|
||||
<!-- syncer->peer_set -->
|
||||
<g id="edge12" class="edge">
|
||||
<title>syncer->peer_set</title>
|
||||
<path fill="none" stroke="#000000" d="M396.8815,-297.7541C420.8491,-289.9376 452.4365,-275.7706 470.6515,-252 484.1116,-234.4347 488.5668,-209.5147 489.822,-190.3492"/>
|
||||
<polygon fill="#000000" stroke="#000000" points="493.3293,-190.2352 490.2313,-180.1034 486.3349,-189.9557 493.3293,-190.2352"/>
|
||||
</g>
|
||||
<!-- block_verifier->transaction_verifier -->
|
||||
<g id="edge16" class="edge">
|
||||
<title>block_verifier->transaction_verifier</title>
|
||||
<path fill="none" stroke="#000000" d="M267.8761,-144.937C280.6256,-135.57 296.6297,-123.8119 310.5571,-113.5796"/>
|
||||
<polygon fill="#000000" stroke="#000000" points="312.9263,-116.1821 318.9128,-107.4407 308.7817,-110.5409 312.9263,-116.1821"/>
|
||||
</g>
|
||||
<!-- block_verifier->state -->
|
||||
<g id="edge14" class="edge">
|
||||
<title>block_verifier->state</title>
|
||||
<path fill="none" stroke="#000000" d="M242.0259,-143.8919C240.0264,-125.3377 238.7909,-95.9031 246.6515,-72 250.2338,-61.1067 256.8185,-50.4816 263.5123,-41.5783"/>
|
||||
<polygon fill="#000000" stroke="#000000" points="266.3645,-43.6155 269.8691,-33.6171 260.8943,-39.2477 266.3645,-43.6155"/>
|
||||
</g>
|
||||
</g>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 11 KiB |
|
|
@ -0,0 +1,79 @@
|
|||
```
┌───────────┐     ┌───────────┐     ┌───────────┐     ┌───────────┐
│PeerServer │     │PeerServer │     │PeerServer │     │PeerServer │
│ ┌───────┐ │     │ ┌───────┐ │     │ ┌───────┐ │     │ ┌───────┐ │
│ │┌─────┐│ │     │ │┌─────┐│ │     │ │┌─────┐│ │     │ │┌─────┐│ │
│ ││ Tcp ││ │     │ ││ Tcp ││ │     │ ││ Tcp ││ │     │ ││ Tcp ││ │
│ │└─────┘│ │     │ │└─────┘│ │     │ │└─────┘│ │     │ │└─────┘│ │
│ │Framed │ │     │ │Framed │ │     │ │Framed │ │     │ │Framed │ │
│ │Stream │ │     │ │Stream │ │     │ │Stream │ │     │ │Stream │ │
│ └───────┘─┼─┐   │ └───────┘─┼─┐   │ └───────┘─┼─┐   │ └───────┘─┼─┐
┏▶│     ┃     │ │ ┏▶│     ┃     │ │ ┏▶│     ┃     │ │ ┏▶│     ┃     │ │
┃ │     ┃     │ │ ┃ │     ┃     │ │ ┃ │     ┃     │ │ ┃ │     ┃     │ │
┃ │     ▼     │ │ ┃ │     ▼     │ │ ┃ │     ▼     │ │ ┃ │     ▼     │ │
┃ │ ┌───────┐ │ │ ┃ │ ┌───────┐ │ │ ┃ │ ┌───────┐ │ │ ┃ │ ┌───────┐ │ │
┃ │ │ Tower │ │ │ ┃ │ │ Tower │ │ │ ┃ │ │ Tower │ │ │ ┃ │ │ Tower │ │ │
┃ │ │Buffer │ │ │ ┃ │ │Buffer │ │ │ ┃ │ │Buffer │ │ │ ┃ │ │Buffer │ │ │
┃ │ └───────┘ │ │ ┃ │ └───────┘ │ │ ┃ │ └───────┘ │ │ ┃ │ └───────┘ │ │
┃ │     ┃     │ │ ┃ │     ┃     │ │ ┃ │     ┃     │ │ ┃ │     ┃     │ │
┃ └─────╋─────┘ │ ┃ └─────╋─────┘ │ ┃ └─────╋─────┘ │ ┃ └─────╋─────┘ │
┃       ┃   └─╋───────╋───────┴─╋───────╋───────┴─╋───────╋───────┴───────┐
┃       ┃     ┃       ┃         ┃       ┃         ┃       ┃               │
┃       ┃     ┃       ┃         ┃       ┃         ┃       ┃               │
┃       ┗━━━━━━━━━╋━━━━━━━┻━━━━━━━━━╋━━━━━━━┻━━━━━━━━━╋━━━━━━━┻━━━━━━━━━┓ │
┗━━━━━━━┓         ┗━━━━━━━┓         ┗━━━━━━━┓         ┗━━━━━━━┓         ┃ │
┌──────╋─────────────────╋─────────────────╋─────────────────╋──────┐  ┃ │
│      ┃                 ┃                 ┃                 ┃      │  ┃ │
│┌───────────┐     ┌───────────┐     ┌───────────┐     ┌───────────┐│  ┃ │
││PeerClient │     │PeerClient │     │PeerClient │     │PeerClient ││  ┃ │
│└───────────┘     └───────────┘     └───────────┘     └───────────┘│  ┃ │
│                                                                   │  ┃ │
│┌──────┐    ┌──────────────┐                                       │  ┃ │
││ load │    │peer discovery│                               PeerSet│  ┃ │
││signal│ ┏━▶│   receiver   │        req: Request, rsp: Response│  ┃ │
│└──────┘ ┃  └──────────────┘        routes all outgoing requests│  ┃ │
│         ┃          ┃                  adds peers via discovery│  ┃ │
└────╋──────╋───────────────────────────────────────────────────────┘  ┃ │
     ┃      ┃                    ▲                                      ┃ │
     ┃      ┣━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┓                           ┃ │
     ┃      ┃  ┏━━━━━━━━━━━╋━━━━━━━━━━━━━╋━━━━━━━━━━━━━┫               ┃ │
     ▼      ┃  ┃           ┃             ┃             ┃               ┃ │
┌────────────────╋───┐┌────────────┐┌─────────────┐    ┃               ┃ │
│Crawler         ┃   ││  Listener  ││Initial Peers│    ┃               ┃ │
│      ┌──────┐││            ││             │    ┃               ┃ │
│      │Tower │││            ││             │    ┃               ┃ │
│      │Buffer│││listens for ││ connects on │    ┃               ┃ │
│      └──────┘││  incoming  ││  launch to  │    ┃               ┃ │
│uses peerset to    ││connections,││ seed peers  │    ┃               ┃ │
│crawl network,     ││   sends    ││specified in │    ┃               ┃ │
│maintains candidate││ handshakes ││ config file │    ┃               ┃ │
│peer set, connects ││  to peer   ││  to build   │    ┃               ┃ │
│to new peers on load││ discovery  ││initial peer │    ┃               ┃ │
│signal or timer    ││  receiver  ││     set     │    ┃               ┃ │
└────────────────────┘└────────────┘└─────────────┘    ┃               ┃ │
│ zebra-network internals                              ┃               ┃ │
─ ─ ─ ─ ─ ─ ─│─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─┃─ ─ ─ ─ ─ ─ ╋ ─ ─ ┼
│ exposed api                                          ┃               ┃ │
│             ┌────────────────────────┐               ┃               ┃ │
│             │Arc<Mutex<AddressBook>> │               ┃               ┃ │
│             │last-seen timestamps for│               ┃               ┃ │
└─────────────│ each peer, obtained by │◀─────╋────────────╋─────┘
              │ hooking into incoming  │               ┃
              │    message streams     │               ┃
              └────────────────────────┘               ┃               ▼
                                        ┌────────────────┐┌───────────────┐
                                        │Outbound Service││Inbound Service│
                                        │ req: Request,  ││ req: Request, │
                                        │ rsp: Response  ││ rsp: Response │
                                        │                ││               │
                                        │  Tower Buffer  ││  routes all   │
                                        └────────────────┘│   incoming    │
                                                          │requests, uses │
                                                          │   load-shed   │
                                                          │ middleware to │
                                                          │ remove peers  │
                                                          │ when internal │
                                                          │ services are  │
                                                          │  overloaded   │
                                                          └───────────────┘
```
Binary file not shown.
|
|
@ -0,0 +1,70 @@
|
|||
# Updating the ECC dependencies

Zebra relies on numerous Electric Coin Company ([ECC](https://electriccoin.co/)) dependencies, and updating them can be a complex task. This guide will help you navigate the process.

The main dependency that drives the process is [zcash](https://github.com/zcash/zcash) itself. This is because [zcash_script](https://github.com/ZcashFoundation/zcash_script) links to specific files from it (`zcash_script.cpp` and everything it depends on). Due to the architecture of zcash, this requires linking to many seemingly unrelated Rust crates, such as orchard and halo2.

## Steps for upgrading

Let's dive into the details of each step required to perform an upgrade:

### Before starting

- Zebra developers often dismiss ECC dependency upgrade suggestions from dependabot. For instance, see [this closed PR](https://github.com/ZcashFoundation/zebra/pull/7745) in favor of the [5.7.0 zcashd upgrade PR](https://github.com/ZcashFoundation/zebra/pull/7784), which followed this guide.

- Determine the version of `zcashd` to use. This version will determine which versions of other crates to use. Typically, this should be a [tag](https://github.com/zcash/zcash/tags), but in some cases, it might be a reference to a branch (e.g., nu5-consensus) for testing unreleased developments.

- Upgrading the `zcash_script` crate can be challenging, depending on changes in the latest `zcashd` release. Follow the instructions in the project's [README](https://github.com/ZcashFoundation/zcash_script/blob/master/README.md) for guidance.

- Upgrade and release `zcash_script` before upgrading other ECC dependencies in Zebra.

### Upgrade versions

- Use the `cargo upgrade` command to upgrade all the ECC dependency versions in Zebra. For example, in [this PR](https://github.com/ZcashFoundation/zebra/pull/7784), the following command was used:

```
cargo upgrade --incompatible -p bridgetree -p incrementalmerkletree -p orchard -p zcash_primitives -p zcash_proofs -p zcash_address -p zcash_encoding -p zcash_note_encryption -p zcash_script
```

Notes:

- Pass all the crate names to be updated to the command.

- Use `crate-name@version` to upgrade to a specific version of that crate, instead of just the highest version (see the example after this list).

- You need to have [cargo upgrade](https://crates.io/crates/cargo-upgrades) and [cargo edit](https://crates.io/crates/cargo-edit) installed for this command to work.
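
For instance, a hypothetical invocation that pins one crate to a specific version while letting the others go to their highest versions (the version number here is illustrative, not a recommendation):

```
cargo upgrade --incompatible -p orchard@0.6.0 -p zcash_primitives -p zcash_proofs
```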

### Version consistency check

- Ensure that the crate versions in the `Cargo.toml` of the zcashd release, the `Cargo.toml` of `zcash_script`, and the `Cargo.toml` files of the Zebra crates are all the same. Version consistency is crucial.
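
One quick way to see which version of a crate the workspace actually resolves, and which crates pull it in, is `cargo tree` (the crate name here is just an example):

```
cargo tree --invert orchard
```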

### Build/Test Zebra & fix issues

- Build Zebra and make sure it compiles.

```
cargo build
```

- Test Zebra and make sure all test code compiles and all tests pass:

```
cargo test
```

- When upgrading, it's common for things to break, such as deprecated or removed functionality. Address these issues by referring to the broken dependency's changelog, which often provides explanations and workarounds.

- If you encounter issues that you can't resolve, consider reaching out to ECC team members who worked on the upgrade, as they may have more context.

### Check `deny.toml`

- Review Zebra's `deny.toml` file for potential duplicates that can be removed due to the upgrade. You may also need to add new entries to `deny.toml`.
- You can identify issues with the dependencies using the `cargo deny check bans` command; you need to have [cargo deny](https://crates.io/crates/cargo-deny) installed. See the example after this list.
- Push your changes and let the CI identify any additional problems.
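
A minimal sketch of that check, assuming `cargo-deny` is not installed yet; `cargo tree --duplicates` is another quick way to spot crates that resolve to more than one version:

```
cargo install cargo-deny
cargo deny check bans
cargo tree --duplicates
```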

### Push the Pull Request (PR)

- Push the pull request with all the changes and ensure that the full CI process passes.
- Seek approval for the PR.
- Merge to the `main` branch.
|
@ -0,0 +1,116 @@
|
|||
# Doing Mass Renames in Zebra Code

Sometimes we want to rename a Rust type or function, or change a log message.

But our types and functions are also used in our documentation,
so the compiler can sometimes miss when their names are changed.

Our log messages are also used in our integration tests,
so changing them can lead to unexpected test failures or hangs.

## Universal Renames with `sed`

You can use `sed` to rename all the instances of a name in Zebra's code, documentation, and tests:
```sh
git ls-tree --full-tree -r --name-only HEAD | \
xargs sed -i -e 's/OldName/NewName/g' -e 's/OtherOldName/OtherNewName/g'
```

Or excluding specific paths:
```sh
git ls-tree --full-tree -r --name-only HEAD | \
grep -v -e 'path-to-skip' -e 'other-path-to-skip' | \
xargs sed -i -e 's/OldName/NewName/g' -e 's/OtherOldName/OtherNewName/g'
```

`sed` also supports regular expressions to replace a pattern with another pattern.
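
For example, a sketch of a pattern-based rename using a capture group (the names here are illustrative):
```sh
# Rename old_foo_name, old_bar_name, etc. to new_foo_name, new_bar_name:
git ls-tree --full-tree -r --name-only HEAD | \
xargs sed -i -E 's/old_([a-z]+)_name/new_\1_name/g'
```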

Here's how to make a PR with these replacements:
1. Run the `sed` commands
2. Run `cargo fmt --all` after doing all the replacements
3. Put the commands in the commit message and pull request, so the reviewer can check them

Here's how to review that PR:
1. Check out two copies of the repository, one with the PR, and one without:
```sh
cd zebra
git fetch --all
# clear the checkout so we can use main elsewhere
git checkout main^
# Use the base branch or commit for the PR, which is usually main
git worktree add ../zebra-sed main
git worktree add ../zebra-pr origin/pr-branch-name
```

2. Run the scripts on the repository without the PR:
```sh
cd ../zebra-sed
# run the scripts in the PR or commit message
git ls-tree --full-tree -r --name-only HEAD | \
grep -v -e 'path-to-skip' -e 'other-path-to-skip' | \
xargs sed -i -e 's/OldName/NewName/g' -e 's/OtherOldName/OtherNewName/g'
cargo fmt --all
```

3. Automatically check that they match:
```sh
cd ..
# --no-index lets git compare two directories that aren't both checked out
git diff --no-index zebra-sed zebra-pr
```

If there are no differences, then the PR can be approved.

If there are differences, then post them as a review in the PR,
and ask the author to re-run the script on the latest `main`.

## Interactive Renames with `fastmod`

You can use `fastmod` to rename some instances, but skip others:
```sh
fastmod --hidden --fixed-strings "OldName" "NewName" [paths to change]
```

Using the `--hidden` flag does renames in `.github` workflows, issue templates, and other configs.

`fastmod` also supports regular expressions to replace a pattern with another pattern.
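
For instance, a sketch of a regex rename that keeps a captured suffix (the names and path are illustrative):
```sh
fastmod --hidden 'OldName(Builder|Error)' 'NewName${1}' zebra-chain/
```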

Here's how to make a PR with these replacements:
1. Run the `fastmod` commands, choosing which instances to replace
2. Run `cargo fmt --all` after doing all the replacements
3. Put the commands in the commit message and pull request, so the reviewer can check them
4. If there are a lot of renames:
   - use `sed` on any directories or files that are always renamed, and put them in the first PR,
   - do a cleanup using `fastmod` in the next PR.

Here's how to review that PR:
1. Manually review each replacement (there's no shortcut)

## Using `rustdoc` links to detect name changes

When you're referencing a type or function in a doc comment,
use a `rustdoc` link to refer to it.

This makes the documentation easier to navigate,
and our `rustdoc` lint will detect any typos or name changes.

```rust
//! This is what `rustdoc` links look like:
//! - [`u32`] type or trait
//! - [`drop()`] function
//! - [`Clone::clone()`] method
//! - [`Option::None`] enum variant
//! - [`Option::Some(_)`](Option::Some) enum variant with data
//! - [`HashMap`](std::collections::HashMap) fully-qualified path
//! - [`BTreeSet<String>`](std::collections::BTreeSet) fully-qualified path with generics
```

If a type isn't imported in the module or Rust prelude,
then it needs a fully-qualified path in the docs, or an unused import:
```rust
// For rustdoc
#[allow(unused_imports)]
use std::collections::LinkedList;

/// Link to [`LinkedList`].
struct Type;
```
|
|
@ -0,0 +1,377 @@
|
|||
# Design Overview

This document sketches the design for Zebra.

## Desiderata

The following are general desiderata for Zebra:

* [George's list..]

* As much as reasonably possible, it and its dependencies should be
  implemented in Rust. While it may not make sense to require this in
  every case (for instance, it probably doesn't make sense to rewrite
  libsecp256k1 in Rust, instead of using the same upstream library as
  Bitcoin), we should generally aim for it.

* As much as reasonably possible, Zebra should minimize trust in
  required dependencies. Note that "minimize number of dependencies"
  is usually a proxy for this desideratum, but is not exactly the same:
  for instance, a collection of crates like the tokio crates are all
  developed together and have one trust boundary.

* Zebra should be well-factored internally into a collection of
  component libraries which can be used by other applications to
  perform Zcash-related tasks. Implementation details of each
  component should not leak into all other components.

* Zebra should checkpoint on Canopy activation and drop all
  Sprout-related functionality not required post-Canopy.

## Non-Goals

* Zebra keeps a copy of the chain state, so it isn't intended for
  lightweight applications like light wallets. Those applications
  should use a light client protocol.

## Notable Blog Posts

- [A New Network Stack For Zcash](https://www.zfnd.org/blog/a-new-network-stack-for-zcash)
- [Composable Futures-based Batch Verification](https://www.zfnd.org/blog/futures-batch-verification)
- [Decoding Bitcoin Messages with Tokio Codecs](https://www.zfnd.org/blog/decoding-bitcoin-messages-with-tokio-codecs)

## Service Dependencies

Note: dotted lines are for the "getblocktemplate-rpcs" feature.

<div id="service-dep-diagram">
{{#include diagrams/service-dependencies.svg}}
</div>

<!--
Service dependencies diagram source:

digraph services {
    transaction_verifier -> state
    mempool -> state
    inbound -> state
    rpc_server -> state
    mempool -> transaction_verifier
    block_verifier_router -> checkpoint_verifier
    inbound -> mempool
    rpc_server -> mempool
    inbound -> block_verifier_router
    syncer -> block_verifier_router
    rpc_server -> block_verifier_router [style=dotted]
    syncer -> peer_set
    mempool -> peer_set
    block_verifier -> state
    checkpoint_verifier -> state
    block_verifier -> transaction_verifier
    block_verifier_router -> block_verifier
    rpc_server -> inbound [style=invis] // for layout of the diagram
}

Render here: https://dreampuf.github.io/GraphvizOnline
-->

## Architecture

Unlike `zcashd`, which originated as a Bitcoin Core fork and inherited its
monolithic architecture, Zebra has a modular, library-first design, with the
intent that each component can be independently reused outside of the `zebrad`
full node. For instance, the `zebra-network` crate containing the network stack
can also be used to implement anonymous transaction relay, network crawlers, or
other functionality, without requiring a full node.

At a high level, the full node functionality required by `zebrad` is factored
into several components:

- [`zebra-chain`](https://docs.rs/zebra_chain), providing
  definitions of core data structures for Zcash, such as blocks, transactions,
  addresses, etc., and related functionality. It also contains the
  implementation of the consensus-critical serialization formats used in Zcash.
  The data structures in `zebra-chain` are defined to enforce
  [*structural validity*](https://zebra.zfnd.org/dev/rfcs/0002-parallel-verification.html#verification-stages)
  by making invalid states unrepresentable. For instance, the
  `Transaction` enum has variants for each transaction version, and it's
  impossible to construct a transaction with, e.g., spend or output
  descriptions but no binding signature, or, e.g., a version 2 (Sprout)
  transaction with Sapling proofs (see the sketch after this list).
  Currently, `zebra-chain` is oriented towards verifying transactions,
  but will be extended to support creating them in the future.

- [`zebra-network`](https://docs.rs/zebra_network),
  providing an asynchronous, multithreaded implementation of the Zcash network
  protocol inherited from Bitcoin. In contrast to `zcashd`, each peer
  connection has a separate state machine, and the crate translates the
  external network protocol into a stateless, request/response-oriented
  protocol for internal use. The crate provides two interfaces:
  - an auto-managed connection pool that load-balances local node requests
    over available peers, and sends peer requests to a local inbound service,
    and
  - a `connect_isolated` method that produces a peer connection completely
    isolated from all other node state. This can be used, for instance, to
    safely relay data over Tor, without revealing distinguishing information.

- [`zebra-script`](https://docs.rs/zebra_script) provides
  script validation. Currently, this is implemented by linking to the C++
  script verification code from `zcashd`, but in the future we may implement a
  pure-Rust script implementation.

- [`zebra-consensus`](https://docs.rs/zebra_consensus)
  performs [*semantic validation*](https://zebra.zfnd.org/dev/rfcs/0002-parallel-verification.html#verification-stages)
  of blocks and transactions: all consensus
  rules that can be checked independently of the chain state, such as
  verification of signatures, proofs, and scripts. Internally, the library
  uses [`tower-batch-control`](https://docs.rs/tower_batch_control) to
  perform automatic, transparent batch processing of contemporaneous
  verification requests.

- [`zebra-state`](https://docs.rs/zebra_state) is
  responsible for storing, updating, and querying the chain state. The state
  service is responsible for [*contextual verification*](https://zebra.zfnd.org/dev/rfcs/0002-parallel-verification.html#verification-stages):
  all consensus rules
  that check whether a new block is a valid extension of an existing chain,
  such as updating the nullifier set or checking that transaction inputs remain
  unspent.

- [`zebrad`](https://docs.rs/zebrad) contains the full
  node, which connects these components together and implements logic to handle
  inbound requests from peers and the chain sync process.
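
To make the *structural validity* idea in the `zebra-chain` bullet concrete, here is a minimal sketch of the pattern, using illustrative stand-in types rather than `zebra-chain`'s actual definitions:

```rust
// Illustrative stand-ins for the real zebra-chain types:
struct Input;
struct Output;
struct Spend;
struct ShieldedOutput;
struct BindingSignature;

/// Sapling shielded data is either entirely present (including its
/// binding signature) or entirely absent: there is no way to build
/// spends or outputs without a binding signature.
struct SaplingShieldedData {
    spends: Vec<Spend>,
    outputs: Vec<ShieldedOutput>,
    binding_sig: BindingSignature,
}

/// One variant per transaction version, so a "v2 transaction with
/// Sapling proofs" simply cannot be constructed.
enum Transaction {
    V2 {
        inputs: Vec<Input>,
        outputs: Vec<Output>,
    },
    V4 {
        inputs: Vec<Input>,
        outputs: Vec<Output>,
        sapling_shielded_data: Option<SaplingShieldedData>,
    },
}
```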

All of these components can be reused as independent libraries, and all
communication between stateful components is handled by an
[internal asynchronous RPC abstraction](https://docs.rs/tower/)
("microservices in one process").

### `zebra-chain`

#### Internal Dependencies

None: these are the core data structure definitions.

#### Responsible for

- definitions of commonly used data structures, e.g.,
  - `Block`,
  - `Transaction`,
  - `Address`,
  - `KeyPair`...
- parsing bytes into these data structures

- definitions of core traits, e.g.,
  - `ZcashSerialize` and `ZcashDeserialize`, which perform
    consensus-critical serialization logic.

#### Exported types

- [...]

### `zebra-network`

#### Internal Dependencies

- `zebra-chain`

#### Responsible for

- definition of a well-structured, internal request/response protocol
- an abstraction for "this node" and "the network" using the
  internal protocol
- dynamic, backpressure-driven peer set management
- per-peer state machine that translates the internal protocol to the
  Bitcoin/Zcash protocol
- tokio codec for Bitcoin/Zcash message encoding.

#### Exported types

- `Request`, an enum representing all possible requests in the internal protocol;
- `Response`, an enum representing all possible responses in the internal protocol;
- `AddressBook`, a data structure for storing peer addresses;
- `Config`, a configuration object for all networking-related parameters;
- `init<S: Service>(Config, S) -> (impl Service,
  Arc<Mutex<AddressBook>>)`, the main entry-point.

The `init` entrypoint constructs a dynamically-sized pool of peers
sending inbound requests to the provided `S: tower::Service`
representing "this node", and returns a `Service` that can be used to
send requests to "the network", together with an `AddressBook` updated
with liveness information from the peer pool. The `AddressBook` can
be used to respond to inbound requests for peers.

All peerset management (finding new peers, creating new outbound
connections, etc) is completely encapsulated, as is responsibility for
routing outbound requests to appropriate peers.

### `zebra-state`

#### Internal Dependencies

- `zebra-chain` for data structure definitions.

#### Responsible for

- block storage API
  - operates on parsed block structs
  - these structs can be converted from and into raw bytes
  - primarily aimed at network replication, not at processing
  - can be used to rebuild the database below
- maintaining a database of tx, address, etc data
  - this database can be blown away and rebuilt from the blocks, which
    are otherwise unused.
  - threadsafe, typed lookup API that completely encapsulates the
    database logic
  - handles stuff like "transactions are reference counted by outputs"
    etc.
- providing `tower::Service` interfaces for all of the above to
  support backpressure.

#### Exported types

- `Request`, an enum representing all possible requests in the internal protocol;
  - blocks can be accessed via their chain height or hash
  - confirmed transactions can be accessed via their block, or directly via their hash
- `Response`, an enum representing all possible responses in the internal protocol;
- `init() -> impl Service`, the main entry-point.

The `init` entrypoint returns a `Service` that can be used to
send requests for the chain state.

All state management (adding blocks, getting blocks by index or hash) is completely
encapsulated.

### `zebra-script`

#### Internal Dependencies

- ??? depends on how it's implemented internally

#### Responsible for

- the minimal Bitcoin script implementation required for Zcash
- script parsing
- context-free script validation

#### Notes

This can wrap an existing script implementation at the beginning.

If this existed in a "good" way, we could use it to implement tooling
for Zcash script inspection, debugging, etc.

#### Questions

- How does this interact with NU4 script changes?

#### Exported types

- [...]

### `zebra-consensus`

#### Internal Dependencies

- `zebra-chain` for data structures and parsing.
- `zebra-state` to read and update the state database.
- `zebra-script` for script parsing and validation.

#### Responsible for

- consensus-specific parameters (network magics, genesis block, PoW
  parameters, etc) that determine the network consensus
- consensus logic to decide which block is the current block
- block and transaction verification
  - context-free validation, e.g., signature, proof verification, etc.
  - context-dependent validation, e.g., determining whether a
    transaction is accepted in a particular chain state context.
  - verifying mempool (unconfirmed) transactions
- block checkpoints
  - mandatory checkpoints (genesis block, Canopy activation)
  - optional regular checkpoints (every Nth block)
- modifying the chain state
  - adding new blocks to `ZebraState`, including chain reorganisation
  - adding new transactions to `ZebraMempoolState`
- storing the transaction mempool state
  - mempool transactions can be accessed via their hash
- providing `tower::Service` interfaces for all of the above to
  support backpressure and batch validation.

#### Exported types

- `block::init() -> impl Service`, the main entry-point for block
  verification.
- `ZebraMempoolState`
  - all state management (adding transactions, getting transactions
    by hash) is completely encapsulated.
- `mempool::init() -> impl Service`, the main entry-point for
  mempool transaction verification.

The `init` entrypoints return `Service`s that can be used to
verify blocks or transactions, and add them to the relevant state.

### `zebra-rpc`

#### Internal Dependencies

- `zebra-chain` for data structure definitions
- `zebra-node-services` for shared request type definitions
- `zebra-utils` for developer and power user tools

#### Responsible for

- the RPC interface

#### Exported types

- [...]

### `zebra-client`

#### Internal Dependencies

- `zebra-chain` for structure definitions
- `zebra-state` for transaction queries and client/wallet state storage
- `zebra-script` possibly? for constructing transactions

#### Responsible for

- implementation of some event a user might trigger
  - would be used to implement a full wallet
  - creating transactions, monitoring shielded wallet state, etc.

#### Notes

Communication between the client code and the rest of the node should be done
via a tower service interface. Since the `Service` trait can abstract from a
function call to RPC, this means that it will be possible for us to isolate
all client code to a subprocess.

#### Exported types

- [...]

### `zebrad`

Abscissa-based application which loads configs, all application components,
and connects them to each other.

#### Responsible for

- actually running the server
- connecting functionality in dependencies

#### Internal Dependencies

- `zebra-chain`
- `zebra-network`
- `zebra-state`
- `zebra-consensus`
- `zebra-client`
- `zebra-rpc`

### Unassigned functionality

Responsibility for this functionality needs to be assigned to one of
the modules above (subject to discussion):

- [ ... add to this list ... ]
|
|
@ -0,0 +1,23 @@
|
|||
# Randomised Property Testing in Zebra

Zebra uses the [proptest](https://docs.rs/proptest/) crate for randomised property testing.

Most types in `zebra-chain` have an `Arbitrary` implementation, which generates randomised test cases.

We try to derive `Arbitrary` impls whenever possible, so that they automatically update when we make structural changes.
To derive, add the following attribute to the struct or enum:
```rust
#[cfg_attr(any(test, feature = "proptest-impl"), derive(Arbitrary))]
struct Example(u32);
```

When we want to use those `Arbitrary` impls in proptests in other crates, we use the `proptest-impl` feature as a dev dependency (see the sketch after this list):
1. in `zebra-chain`: make the `Arbitrary` impl depend on `#[cfg(any(test, feature = "proptest-impl"))]`
2. in the other crate: add zebra-chain as a dev dependency: `zebra-chain = { path = "../zebra-chain", features = ["proptest-impl"] }`
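
A minimal sketch of such a proptest, reusing the `Example` type from the derive snippet above; the property itself is illustrative:

```rust
use proptest::prelude::*;

proptest! {
    /// Check a property over randomised `Example` values,
    /// generated by the derived `Arbitrary` impl.
    #[test]
    fn example_value_roundtrips(example in any::<Example>()) {
        let bytes = example.0.to_le_bytes();
        prop_assert_eq!(u32::from_le_bytes(bytes), example.0);
    }
}
```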

If we need to add another dependency as part of the `proptest-impl` feature:
1. Add the crate name to the list of crates in the `proptest-impl` `features` entry
2. Add the crate as an optional `dependencies` entry
3. Add the crate as a required `dev-dependencies` entry

For an example of these changes, see [PR 2070](https://github.com/ZcashFoundation/zebra/pull/2070/files).
|
|
@ -0,0 +1,131 @@
|
|||
# Zebra versioning and releases

This document contains the practices that we follow to provide you with a leading-edge application, balanced with stability.
We strive to ensure that future changes are always introduced in a predictable way.
We want everyone who depends on Zebra to know when and how new features are added, and to be well-prepared when obsolete ones are removed.

Before reading, you should understand [Semantic Versioning](https://semver.org/spec/v2.0.0.html) and how [trunk-based development](https://www.atlassian.com/continuous-delivery/continuous-integration/trunk-based-development) works.

<a id="versioning"></a>

## Zebra versioning

Zebra version numbers show the impact of the changes in a release. They are composed of three parts: `major.minor.patch`.
For example, version `3.1.11` indicates major version 3, minor version 1, and patch level 11.

The version number is incremented based on the level of change included in the release.

<div class="alert pre-release">

**NOTE**: <br />
Zebra is in a `pre-release` state: it is unstable and might not satisfy the intended compatibility requirements denoted by its associated normal version.
The pre-release version is denoted by appending a hyphen and a series of dot-separated identifiers immediately following the patch version.

</div>

| Level of change | Details |
|:--- |:--- |
| Major release | Contains significant new features, and commonly corresponds to a network upgrade; some technical assistance may be needed during the update. When updating to a major release, you may need to follow the specific upgrade instructions provided in the release notes. |
| Minor release | Contains smaller new features. Minor releases should be fully backward-compatible. No technical assistance is expected during update. If you want to use the new features in a minor release, you might need to follow the instructions in the release notes. |
| Patch release | Low-risk, bug-fix release. No technical assistance is expected during update. |

<a id="supported-releases"></a>

### Supported Releases

Every Zebra version released by the Zcash Foundation is supported up to a specific height. Currently we support each version for about **16 weeks**, but this can change from release to release.

When the Zcash chain reaches this end of support height, `zebrad` will shut down and the binary will refuse to start.

Our process is similar to `zcashd`: https://zcash.github.io/zcash/user/release-support.html

Older Zebra versions that only support previous network upgrades will never be supported, because they are operating on an unsupported Zcash chain fork.

<a id="updating"></a>

### Supported update paths

You can update to any version of Zebra, provided that the following criteria are met:

* The version you want to update *to* is supported.
* The version you want to update *from* is within one major version of the version you want to upgrade to.

See [Keeping Up-to-Date](guide/updating "Updating your projects") for more information about updating your Zebra projects to the most recent version.

<a id="previews"></a>

### Preview releases

We let you preview what's coming by providing Release Candidate (`rc`) pre-releases for some major releases:

| Pre-release type | Details |
|:--- |:--- |
| Beta | The release that is under active development and testing. The beta release is indicated by a release tag appended with the `-beta` identifier, such as `8.1.0-beta.0`. |
| Release candidate | A release for final testing of new features. A release candidate is indicated by a release tag appended with the `-rc` identifier, such as version `8.1.0-rc.0`. |

### Distribution tags

Zebra's tagging relates directly to versions published on Docker. We will reference these [Docker Hub distribution tags](https://hub.docker.com/r/zfnd/zebra/tags) throughout:

| Tag | Description |
|:--- |:--- |
| latest | The most recent stable version. |
| beta | The most recent pre-release version of Zebra for testing. May not always exist. |
| rc | The most recent release candidate of Zebra, meant to become a stable version. May not always exist. |

### Feature Flags

To keep the `main` branch in a releasable state, experimental features must be gated behind a [Rust feature flag](https://doc.rust-lang.org/cargo/reference/features.html).
Breaking changes should also be gated behind a feature flag, unless the team decides they are urgent.
(For example, security fixes which also break backwards compatibility.)
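
A minimal sketch of such a gate; the feature and function names here are illustrative, not real Zebra flags:

```rust
// Assumes Cargo.toml declares the feature, off by default:
//
//     [features]
//     my-experimental-feature = []
//
// This code is only compiled when Zebra is built with
// `cargo build --features my-experimental-feature`:
#[cfg(feature = "my-experimental-feature")]
pub fn experimental_behaviour() {
    // ...
}
```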

<a id="frequency"></a>

## Release frequency

We work toward a regular schedule of releases, so that you can plan and coordinate your updates with the continuing evolution of Zebra.

<div class="alert is-helpful">

Dates are offered as general guidance and are subject to change.

</div>

In general, expect the following release cycle:

* A major release for each network upgrade, or whenever there are breaking changes to Zebra (API changes, severe bugs, or other kinds of upgrades)
* Minor releases for significant new Zebra features or severe bug fixes
* A patch release every few weeks

This cadence of releases gives eager developers access to new features as soon as they are fully developed and pass through our code review and integration testing processes, while maintaining the stability and reliability of the platform for production users that prefer to receive features after they have been validated by Zcash and other developers that use the pre-release builds.

<a id="deprecation"></a>

## Deprecation practices

Sometimes "breaking changes", such as the removal of support for RPCs, APIs, and features, are necessary to:

* add new Zebra features,
* improve Zebra performance or reliability,
* stay current with changing dependencies, or
* implement changes in the blockchain itself.

To make these transitions as straightforward as possible, we make these commitments to you:

* We work hard to minimize the number of breaking changes and to provide migration tools, when possible
* We follow the deprecation policy described here, so you have time to update your applications to the latest Zebra binaries, RPCs and APIs
* If a feature has critical security or reliability issues, and we need to remove it as soon as possible, we will explain why at the top of the release notes

To help ensure that you have sufficient time and a clear path to update, this is our deprecation policy:

| Deprecation stages | Details |
|:--- |:--- |
| Announcement | We announce deprecated RPCs and features in the [change log](https://github.com/ZcashFoundation/zebra/blob/main/CHANGELOG.md "Zebra change log"). When we announce a deprecation, we also announce a recommended update path. |
| Deprecation period | When an RPC or a feature is deprecated, it is still present until the next major release. A deprecation can be announced in any release, but the removal of a deprecated RPC or feature happens only in a major release. Until a deprecated RPC or feature is removed, it is maintained according to the Tier 1 support policy, meaning that only critical and security issues are fixed. |
| Rust APIs | The Rust APIs of the Zebra crates are currently unstable and unsupported. Use the `zebrad` commands or JSON-RPCs to interact with Zebra. |

<a id="process"></a>

## Release candidate & release process

Our release checklist is available as a [Release Checklist Template](https://github.com/ZcashFoundation/zebra/blob/main/.github/PULL_REQUEST_TEMPLATE/release-checklist.md), which defines each step our team needs to follow to create a new pre-release or release, and to build and push the binaries to the official channels.
|
|
@ -0,0 +1,5 @@
|
|||
# Zebra RFCs

We are experimenting with using a process similar to
[the Rust RFC process](https://github.com/rust-lang/rfcs/)
to document design decisions for Zebra.
|
|
@ -0,0 +1,137 @@
|
|||
- Feature Name: (fill me in with a unique ident, `my_awesome_feature`)
- Start Date: (fill me in with today's date, YYYY-MM-DD)
- Design PR: [ZcashFoundation/zebra#0000](https://github.com/ZcashFoundation/zebra/pull/0000)
- Zebra Issue: [ZcashFoundation/zebra#0000](https://github.com/ZcashFoundation/zebra/issues/0000)

# Summary
[summary]: #summary

One paragraph explanation of the feature.

# Motivation
[motivation]: #motivation

Why are we doing this? What use cases does it support? What is the expected outcome?

# Definitions
[definitions]: #definitions

Lay out explicit definitions of any terms that are newly introduced or which cause confusion during the RFC design process.

# Guide-level explanation
[guide-level-explanation]: #guide-level-explanation

Explain the proposal as if it was already included in the project and you were teaching it to another Zebra programmer. That generally means:

- Introducing new named concepts.
- Explaining the feature largely in terms of examples.
- Explaining how Zebra users should *think* about the feature, and how it should impact the way they use Zebra. It should explain the impact as concretely as possible.
- If applicable, provide sample error messages or test strategies.

For implementation-oriented RFCs (e.g. for Zebra internals), this section should focus on how Zebra contributors should think about the change, and give examples of its concrete impact.

For policy RFCs, this section should provide an example-driven introduction to the policy, and explain its impact in concrete terms.

# Reference-level explanation
[reference-level-explanation]: #reference-level-explanation

This is the technical portion of the RFC. Explain the design in sufficient detail that:

- Its interaction with other features is clear.
- It is reasonably clear how the feature would be implemented, tested, monitored, and maintained.
- Corner cases are dissected by example.

The section should return to the examples given in the previous section, and explain more fully how the detailed proposal makes those examples work.

## Specifications
[specifications]: #specifications

If this design is based on Zcash consensus rules, quote them, and link to the Zcash spec or ZIP:
https://zips.z.cash/protocol/nu5.pdf#contents
https://zips.z.cash/#nu5-zips

If this design changes network behaviour, quote and link to the Bitcoin network reference or wiki:
https://developer.bitcoin.org/reference/p2p_networking.html
https://en.bitcoin.it/wiki/Protocol_documentation

## Module Structure
[module-structure]: #module-structure

Describe the crate and modules that will implement the feature.

## Test Plan
[test-plan]: #test-plan

Explain how the feature will be tested, including:
* tests for consensus-critical functionality
* existing test vectors, if available
* Zcash blockchain block test vectors (specify the network upgrade, feature, or block height and network)
* property testing or fuzzing

The tests should cover:
* positive cases: make sure the feature accepts valid inputs
  * using block test vectors for each network upgrade provides some coverage of valid inputs
* negative cases: make sure the feature rejects invalid inputs
  * make sure there is a test case for each error condition in the code
  * if there are lots of potential errors, prioritise:
    * consensus-critical errors
    * security-critical errors, and
    * likely errors
* edge cases: make sure that boundary conditions are correctly handled

# Drawbacks
[drawbacks]: #drawbacks

Why should we *not* do this?

# Rationale and alternatives
[rationale-and-alternatives]: #rationale-and-alternatives

- What makes this design a good design?
- Is this design a good basis for later designs or implementations?
- What other designs have been considered and what is the rationale for not choosing them?
- What is the impact of not doing this?

# Prior art
[prior-art]: #prior-art

Discuss prior art, both the good and the bad, in relation to this proposal.
A few examples of what this can include are:

- For community proposals: Is this done by some other community and what were their experiences with it?
- For other teams: What lessons can we learn from what other communities have done here?
- Papers: Are there any published papers or great posts that discuss this? If you have some relevant papers to refer to, this can serve as a more detailed theoretical background.

This section is intended to encourage you as an author to think about the lessons from other projects, to provide readers of your RFC with a fuller picture.
If there is no prior art, that is fine - your ideas are interesting to us whether they are brand new or if they are an adaptation from other projects.

Note that while precedent set by other projects is some motivation, it does not on its own motivate an RFC.
Please also take into consideration that Zebra sometimes intentionally diverges from common Zcash features and designs.

# Unresolved questions
[unresolved-questions]: #unresolved-questions

- What parts of the design do you expect to resolve through the RFC process before this gets merged?
- What parts of the design do you expect to resolve through the implementation of this feature before stabilization?
- What related issues do you consider out of scope for this RFC that could be addressed in the future independently of the solution that comes out of this RFC?

# Future possibilities
[future-possibilities]: #future-possibilities

Think about what the natural extension and evolution of your proposal would
be and how it would affect Zebra and Zcash as a whole. Try to use this
section as a tool to more fully consider all possible
interactions with the project and cryptocurrency ecosystem in your proposal.
Also consider how this all fits into the roadmap for the project
and of the relevant sub-team.

This is also a good place to "dump ideas", if they are out of scope for the
RFC you are writing but otherwise related.

If you have tried and cannot think of any future possibilities,
you may simply state that you cannot think of anything.

Note that having something written down in the future-possibilities section
is not a reason to accept the current or a future RFC; such notes should be
in the section on motivation or rationale in this or subsequent RFCs.
The section merely provides additional information.
|
|
@ -0,0 +1,78 @@
|
|||
- Feature Name: Pipelinable Block Syncing and Lookup
|
||||
- Start Date: 2020-07-02
|
||||
- Design PR: [ZcashFoundation/zebra#583](https://github.com/ZcashFoundation/zebra/pull/583)
|
||||
- Zebra Issue: [ZcashFoundation/zebra#504](https://github.com/ZcashFoundation/zebra/issues/504)
|
||||
|
||||
# Summary
|
||||
[summary]: #summary
|
||||
|
||||
The Bitcoin network protocol used by Zcash allows nodes to download blocks from other peers. This RFC describes how we find and download this data asynchronously.
|
||||
|
||||
# Motivation
|
||||
[motivation]: #motivation
|
||||
|
||||
To sync the chain, we need to find out which blocks to download and then download them. Downloaded blocks can then be fed into the verification system and (assuming they verify correctly) into the state system. In `zcashd`, blocks are processed one at a time. In Zebra, however, we want to be able to pipeline block download and verification operations, using futures to explicitly specify logical dependencies between sub-tasks, which we execute concurrently and potentially out-of-order on a threadpool. This means that the procedure we use to determine which blocks to download must look somewhat different than `zcashd`.
|
||||
|
||||
## Block fetching in Bitcoin
|
||||
|
||||
Zcash inherits its network protocol from Bitcoin. Bitcoin block fetching works roughly as follows. A node can request block information from peers using either a `getblocks` or `getheaders` message. Both of these messages contain a *block locator object* consisting of a sequence of block hashes. The block hashes are ordered from highest to lowest, and represent checkpoints along the path from the node's current tip back to genesis. The remote peer computes the intersection between its chain and the node's chain by scanning through the block locator for the first hash in its chain. Then, it sends (up to) 500 subsequent block hashes in an `inv` message (in the case of `getblocks`) or (up to) 2000 block headers in a `headers` message (in the case of `getheaders`). Note: `zcashd` reduces the `getheaders` count to 160, because Zcash headers are much larger than Bitcoin headers, as noted below.
|
||||
|
||||
The `headers` message sent after `getheaders` contains the actual block headers, while the `inv` message sent after `getblocks` contains only hashes, which have to be fetched with a `getdata` message. In Bitcoin, the block headers are small relative to the size of the full block, but this is not always the case for Zcash, where the block headers are much larger due to the use of Equihash and many blocks have only a few transactions. Also, `getblocks` allows parallelizing block downloads, while `getheaders` doesn't. For these reasons and because we know we need full blocks anyways, we should probably use `getblocks`.
|
||||
|
||||
The `getblocks` Bitcoin message corresponds to our `zebra_network::Request::FindBlocksByHash`, and the `getdata` message is generated by `zebra_network::Request::Blocks`.
|
||||
|
||||
## Pipelining block verification

As mentioned above, our goal is to be able to pipeline block download and verification. This means that the process for block lookup should ideally attempt to fetch and begin verification of future blocks without blocking on complete verification of all earlier blocks. To do this, we split the chain state into the *verified* block chain (held by the state component) and the *prospective* block chain (held only by the syncer), and use the following algorithm to pursue prospective chain tips.

#### ObtainTips

1. Query the current state to construct the sequence of hashes

```
[tip, tip-1, tip-2, ..., tip-9, tip-20, tip-40, tip-80, tip-160]
```

The precise construction is unimportant, but this should have a Bitcoin-style dense-first, then-sparse hash structure (a minimal sketch follows this list).

The initial state should contain the genesis block for the relevant network, so at first the sequence of hashes will only contain the genesis block:

```
[genesis]
```

The network will respond with a list of hashes, starting at the child of the genesis block.

2. Make a `FindBlocksByHash` request to the network `F` times, where `F` is a fanout parameter, to get `resp1, ..., respF`.

3. For each response, starting from the beginning of the list, prune any block hashes already included in the state, stopping at the first unknown hash to get `resp1', ..., respF'`. (These lists may be empty.)

4. Combine the last elements of each list into a set; this is the set of prospective tips.

5. Combine all elements of each list into a set, and queue download and verification of those blocks.

6. If there are any prospective tips, call ExtendTips, which returns a new set of prospective tips. Continue calling ExtendTips with this new set, until there are no more prospective tips.

7. Restart after some delay, say 15 seconds.

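The hash sequence from step 1 can be sketched as follows, working over heights rather than hashes; the function name and signature are illustrative assumptions, not Zebra's actual code:

```rust
// Dense-first, then-sparse locator heights: the tip and its 9 immediate
// ancestors, then doubling gaps back to tip-160 (clamped at genesis).
fn block_locator_heights(tip_height: u32) -> Vec<u32> {
    let mut heights: Vec<u32> = (0..=9)
        .filter(|offset| *offset <= tip_height)
        .map(|offset| tip_height - offset)
        .collect();
    let mut offset = 20;
    while offset <= tip_height.min(160) {
        heights.push(tip_height - offset);
        offset *= 2;
    }
    heights
}
```
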
#### ExtendTips

1. Remove all prospective tips from the set of prospective tips, then iterate through them. For each removed tip:

2. Create a `FindBlocksByHash` request consisting of just the prospective tip. Send this request to the network `F` times.

3. For each response, check whether the first hash in the response is a genesis block (for either the main or test network). If so, discard the response. It indicates that the remote peer does not have any blocks following the prospective tip. (Or that the remote peer is on the wrong network.)

4. Combine the last elements of the remaining responses into a set, and add this set to the set of prospective tips.

5. Combine all elements of the remaining responses into a set, and queue download and verification of those blocks.

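Taken together, ObtainTips and ExtendTips form a simple outer loop. A minimal sketch, with stubbed-out discovery steps standing in for the network requests described above:

```rust
use std::collections::HashSet;

// Hypothetical hash type standing in for `block::Hash`.
type BlockHash = [u8; 32];

// Stubs: real implementations would send `FindBlocksByHash` requests with
// fanout `F`, prune known hashes, and queue downloads and verification.
fn obtain_tips() -> HashSet<BlockHash> { HashSet::new() }
fn extend_tips(_tips: &HashSet<BlockHash>) -> HashSet<BlockHash> { HashSet::new() }

fn sync_once() {
    // ObtainTips produces the initial set of prospective tips...
    let mut tips = obtain_tips();
    // ...and ExtendTips is called repeatedly until no prospective tips remain.
    while !tips.is_empty() {
        tips = extend_tips(&tips);
    }
    // The caller then restarts after some delay (15 seconds in step 7).
}
```
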
### DoS resistance

Because this strategy aggressively downloads any available blocks, it could be vulnerable to a DoS attack, where a malicious peer feeds us bogus chain tips, causing us to waste network and CPU on blocks that will never be valid. However, because we separate block finding from block downloading, and because of the design of our network stack, this attack is probably not feasible. The primary reason is that `zebra_network` randomly load-balances outbound requests over all available peers.

Consider a malicious peer who responds to block discovery with a bogus list of hashes. We will eagerly attempt to download all of those bogus blocks, but our requests to do so will be randomly load-balanced to other peers, who are unlikely to know about the bogus blocks. When we try to extend a bogus tip, the extension request will also be randomly load-balanced, so it will likely be routed to a peer that doesn't know about it and can't extend it. And because we perform multiple block discovery queries, which will also be randomly load-balanced, we're unlikely to get stuck on a false chain tip.

### Fork-finding

When starting from a verified chain tip, the choice of block locator can find forks at least up to the reorg limit (99 blocks). When extending a prospective tip, forks are ignored, but this is fine: unless we are prefetching the longest chain, we won't be able to keep extending the tip prospectively.

### Retries and Fanout

We should consider the fanout parameter `F` and the retry policy for the different requests. I'm not sure whether we need to retry requests to discover new block hashes, since the fanout may already provide redundancy. For the block requests themselves, we should have a retry policy with a limited number of attempts, enough to insulate against network failures but not so many that we would retry a bogus block indefinitely. Maybe fanout 4 and 3 retries?

@ -0,0 +1,334 @@

# Parallel Verification

- Feature Name: parallel_verification
- Start Date: 2020-07-27
- Design PR: [ZcashFoundation/zebra#763](https://github.com/ZcashFoundation/zebra/pull/763)
- Zebra Issue: [ZcashFoundation/zebra#682](https://github.com/ZcashFoundation/zebra/issues/682)

# Summary
[summary]: #summary

Zebra verifies blocks in several stages, most of which can be executed in parallel.

We use several different design patterns to enable this parallelism:
* We download blocks and start verifying them in parallel,
* We batch signature and proof verification using verification services, and
* We defer data dependencies until just before the block is committed to the state (see the detailed design RFCs).

# Motivation
[motivation]: #motivation

Zcash (and Bitcoin) are designed to verify each block in sequence, starting from the genesis block. But during the initial sync, and when restarting with an older state, this process can be quite slow.

By deferring data dependencies, we can partially verify multiple blocks in parallel.

By parallelising block and transaction verification, we can use multithreading and batch verification for signatures, proofs, scripts, and hashes.

# Definitions
[definitions]: #definitions

Blockchain:
* **chain fork:** Zcash is implemented using a tree of blocks. Each block has a single previous block, and zero to many next blocks. A chain fork consists of a tip and all its previous blocks, back to the genesis block.
* **genesis:** The root of the tree of blocks is called the genesis block. It has no previous block.
* **tip:** A block which has no next block is called a tip. Each chain fork can be identified using its tip.

Data:
* **consensus rule:** A protocol rule which all nodes must apply consistently, so they can converge on the same chain fork.
* **context-free:** Consensus rules which do not have a data dependency on previous blocks.
* **data dependency:** Information contained in the previous block and its chain fork, which is required to verify the current block.
* **state:** The set of verified blocks. The state might also cache some dependent data, so that we can efficiently verify subsequent blocks.

Verification Stages:
<!-- The verification stages are listed in chronological order -->
* **structural verification:** Parsing raw bytes into the data structures defined by the protocol.
* **semantic verification:** Verifying the consensus rules on the data structures defined by the protocol.
* **contextual verification:** Verifying the current block, once its data dependencies have been satisfied by a verified previous block. This verification might also use the cached state corresponding to the previous block.

# Guide-level explanation
[guide-level-explanation]: #guide-level-explanation

In Zebra, we want to verify blocks in parallel. Some fields can be verified straight away, because they don't depend on the output of previous blocks. But other fields have **data dependencies**, which means that we need previous blocks before we can fully validate them.

If we delay checking some of these data dependencies, then we can do more of the verification in parallel.

## Example: BlockHeight
[block-height]: #block-height

Here's how Zebra can verify the different Block Height consensus rules in parallel:

**Structural Verification:**

1. Parse the Block into a BlockHeader and a list of transactions.

**Semantic Verification: No Data Dependencies:**

2. Check that the first input of the first transaction in the block is a coinbase input with a valid block height in its data field.

**Semantic Verification: Deferring a Data Dependency:**

3. Verify other consensus rules that depend on Block Height, assuming that the Block Height is correct. For example, many consensus rules depend on the current Network Upgrade, which is determined by the Block Height. We verify these consensus rules, assuming the Block Height and Network Upgrade are correct.

**Contextual Verification:**

4. Submit the block to the state for contextual verification. When it is ready to be committed (it may arrive before the previous block), check all deferred constraints, including the constraint that the block height of this block is one more than the block height of its parent block. If all constraints are satisfied, commit the block to the state. Otherwise, reject the block as invalid.

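A minimal Rust sketch of the deferral pattern in steps 2 and 4, using hypothetical types and function names (an illustration, not Zebra's actual API):

```rust
// Semantic verification (step 2): no data dependencies, runs immediately.
// The height is assumed to be already parsed from the coinbase input.
fn check_coinbase_height(coinbase_height: Option<u32>) -> Result<u32, &'static str> {
    coinbase_height.ok_or("the first input must encode a valid block height")
}

// Contextual verification (step 4): the deferred check, run only once the
// parent block has been verified and its height is known.
fn check_deferred_height(height: u32, parent_height: u32) -> Result<(), &'static str> {
    if height == parent_height + 1 {
        Ok(())
    } else {
        Err("block height must be one more than the parent block height")
    }
}
```
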
## Zebra Design
[zebra-design]: #zebra-design

### Design Patterns
[design-patterns]: #design-patterns

When designing changes to Zebra verification, use these design patterns:
* perform context-free verification (verification which has no data dependencies on previous blocks) as soon as possible,
* defer data dependencies as long as possible, then
* check the data dependencies.

### Minimise Deferred Data
[minimise-deferred-data]: #minimise-deferred-data

Keep the data dependencies and checks as simple as possible.

For example, Zebra could defer checking both the Block Height and Network Upgrade.

But since the Network Upgrade depends on the Block Height, we only need to defer the Block Height check. Then we can use all the fields that depend on the Block Height, as if it were correct. If the final Block Height check fails, we will reject the entire block, including all the verification we performed using the assumed Network Upgrade.

### Implementation Strategy
[implementation-strategy]: #implementation-strategy

When implementing these designs, perform as much verification as possible, await any dependencies, then perform the necessary checks.

# Reference-level explanation
[reference-level-explanation]: #reference-level-explanation

## Verification Stages
[verification-stages]: #verification-stages

In Zebra, verification occurs in the following stages:
* **Structural Verification:** Raw block data is parsed into a block header and transactions. Invalid data is not representable in these structures: deserialization (parsing) can fail, but serialization always succeeds.
* **Semantic Verification:** Parsed block fields are verified, based on their data dependencies:
  * Context-free fields have no data dependencies, so they can be verified as needed.
  * Fields with simple data dependencies defer that dependency as long as possible, so they can perform more verification in parallel. Then they await the required data, which is typically the previous block. (And potentially older blocks in its chain fork.)
  * Fields with complex data dependencies require their own parallel verification designs. These designs are out of scope for this RFC.
* **Contextual Verification:** After a block is verified, it is added to the state. The details of state updates, and their interaction with semantic verification, are out of scope for this RFC.

This RFC focuses on Semantic Verification, and the design patterns that enable blocks to be verified in parallel.

## Verification Interfaces
[verification-interfaces]: #verification-interfaces

Verification is implemented by the following traits and services:
* **Structural Verification:**
  * `zebra_chain::ZcashDeserialize`: A trait for parsing consensus-critical data structures from a byte buffer.
* **Semantic Verification:**
  * `ChainVerifier`: Provides a verifier service that accepts a `Block` request, performs verification on the block, and responds with a `block::Hash` on success.
  * Internally, the `ChainVerifier` selects between a `CheckpointVerifier` for blocks that are within the checkpoint range, and a `BlockVerifier` for recent blocks.
* **Contextual Verification:**
  * `zebra_state::init`: Provides the state update service, which accepts requests to add blocks to the state.

### Checkpoint Verification
[checkpoint-verification]: #checkpoint-verification

The `CheckpointVerifier` performs rapid verification of blocks, based on a set of hard-coded checkpoints. Each checkpoint hash can be used to verify all the previous blocks, back to the genesis block. So Zebra can skip almost all verification for blocks in the checkpoint range.

The `CheckpointVerifier` uses an internal queue to store pending blocks. Checkpoint verification is cheap, so it is implemented using non-async functions within the `CheckpointVerifier` service.

Here is how the `CheckpointVerifier` implements each verification stage:

* **Structural Verification:**
  * *As Above:* the `CheckpointVerifier` accepts parsed `Block` structs.
* **Semantic Verification:**
  * `check_height`: makes sure the block height is within the unverified checkpoint range, and adds the block to its internal queue.
  * `target_checkpoint_height`: Checks for a continuous range of blocks from the previous checkpoint to a subsequent checkpoint. If the chain is incomplete, returns a future, and waits for more blocks. If the chain is complete, assumes that the `previous_block_hash` fields of these blocks form an unbroken chain from checkpoint to checkpoint, and starts processing the checkpoint range. (This constraint is an implicit part of the `CheckpointVerifier` design.)
  * `process_checkpoint_range`: makes sure that the blocks in the checkpoint range have an unbroken chain of previous block hashes.
* **Contextual Verification:**
  * *As Above:* the `CheckpointVerifier` returns success to the `ChainVerifier`, which sends verified `Block`s to the state service.

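The unbroken-chain check at the heart of `process_checkpoint_range` can be sketched as follows; the tuple representation of headers and the function name are assumptions for illustration, not Zebra's actual code:

```rust
/// Each element is a `(hash, previous_hash)` pair, ordered from the previous
/// checkpoint up to the target checkpoint.
fn chain_is_unbroken(headers: &[([u8; 32], [u8; 32])], checkpoint_hash: [u8; 32]) -> bool {
    // Every block must reference the hash of the block before it...
    let linked = headers.windows(2).all(|pair| pair[1].1 == pair[0].0);
    // ...and the range must end at the hard-coded checkpoint hash.
    let ends_at_checkpoint = headers.last().map_or(false, |last| last.0 == checkpoint_hash);
    linked && ends_at_checkpoint
}
```
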
### Block Verification
[block-verification]: #block-verification

The `BlockVerifier` performs detailed verification of recent blocks, in parallel.

Here is how the `BlockVerifier` implements each verification stage:

* **Structural Verification:**
  * *As Above:* the `BlockVerifier` accepts parsed `Block` structs.
* **Semantic Verification:**
  * *As Above:* verifies each field in the block. Defers any data dependencies as long as possible, awaits those data dependencies, then performs data dependent checks.
  * Note: Since futures are executed concurrently, we can use the same function to:
    * perform context-free verification,
    * perform verification with deferred data dependencies,
    * await data dependencies, and
    * check data dependencies.

    To maximise concurrency, we should write verification functions in this specific order, so the awaits are as late as possible.
* **Contextual Verification:**
  * *As Above:* the `BlockVerifier` returns success to the `ChainVerifier`, which sends verified `Block`s to the state service.

## Zcash Protocol Design
[zcash-protocol]: #zcash-protocol

When designing a change to the Zcash protocol, minimise the data dependencies between blocks.

Try to create designs that:
* Eliminate data dependencies,
* Make the changes depend on a version field in the block header or transaction,
* Make the changes depend on the current Network Upgrade, or
* Make the changes depend on a field in the current block, with an additional consensus rule to check that field against previous blocks.

When making decisions about these design tradeoffs, consider:
* how the data dependency could be deferred, and
* the CPU cost of the verification - if it is trivial, then it does not matter if the verification is parallelised.

# Drawbacks
[drawbacks]: #drawbacks

This design is a bit complicated, but we think it's necessary to achieve our goals.

# Rationale and alternatives
[rationale-and-alternatives]: #rationale-and-alternatives

- What makes this design a good design?
  - It enables a significant amount of parallelism
  - It is simpler than some other alternatives
  - It uses existing Rust language facilities, mainly Futures and await/async
- Is this design a good basis for later designs or implementations?
  - We have built a UTXO design on this design
  - We believe we can build "recent blocks" and "chain summary" designs on this design
  - Each specific detailed design will need to consider how the relevant data dependencies are persisted
- What other designs have been considered and what is the rationale for not choosing them?
  - Serial verification
    - Effectively single-threaded
  - Awaiting data dependencies as soon as they are needed
    - Less parallelism
  - Providing direct access to the state
    - Might cause data races, might be prevented by Rust's ownership rules
    - Higher risk of bugs
- What is the impact of not doing this?
  - Verification is slow, we can't batch or parallelise some parts of the verification

# Prior art
[prior-art]: #prior-art

**TODO: expand this section**
- zcashd
  - serial block verification
  - Zebra implements the same consensus rules, but a different design
- tower

# Unresolved questions
[unresolved-questions]: #unresolved-questions

- [ ] Is this design good enough to use as a framework for future RFCs?
- [ ] Does this design require any changes to the current implementation?
  - [ ] Implement block height consensus rule (check previous block hash and height)
  - [ ] Check that the `BlockVerifier` performs checks in the following order:
    - verification, deferring dependencies as needed,
    - await dependencies,
    - check deferred data dependencies

Out of Scope:
- What is the most efficient design for parallel verification?
  - (Optimisations are out of scope.)

- How is each specific field verified?
- How do we verify fields with complex data dependencies?
- How does verification change with different network upgrades?

- How do multiple chains work, in detail?
- How do state updates work, in detail?

- Moving the verifiers into the state service

# Future possibilities
[future-possibilities]: #future-possibilities

- Separate RFCs for other data dependencies
  - Recent blocks
  - Overall chain summaries (for example, total work)
  - Reorganisation limit: multiple chains to single chain transition
- Optimisations for parallel verification

@ -0,0 +1,252 @@

- Feature Name: `inventory_tracking`
- Start Date: 2020-08-25
- Design PR: [ZcashFoundation/zebra#952](https://github.com/ZcashFoundation/zebra/pull/952)
- Zebra Issue: [ZcashFoundation/zebra#960](https://github.com/ZcashFoundation/zebra/issues/960)

# Summary
[summary]: #summary

The Bitcoin network protocol used by Zcash allows nodes to advertise data (inventory items) for download by other peers. This RFC describes how we track and use this information.

# Motivation
[motivation]: #motivation

In order to participate in the network, we need to be able to fetch new data that our peers notify us about. Because our network stack abstracts away individual peer connections, and load-balances over available peers, we need a way to direct requests for new inventory only to peers that advertised to us that they have it.

# Definitions
[definitions]: #definitions

- Inventory item: either a block or transaction.
- Inventory hash: the hash of an inventory item, represented by the [`InventoryHash`](https://doc-internal.zebra.zfnd.org/zebra_network/protocol/external/inv/enum.InventoryHash.html) type.
- Inventory advertisement: a notification from another peer that they have some inventory item.
- Inventory request: a request to another peer for an inventory item.

# Guide-level explanation
[guide-level-explanation]: #guide-level-explanation

The Bitcoin network protocol used by Zcash provides a mechanism for nodes to gossip blockchain data to each other. This mechanism is used to distribute (mined) blocks and (unmined) transactions through the network. Nodes can advertise data available in their inventory by sending an `inv` message containing the hashes and types of those data items. After receiving an `inv` message advertising data, a node can determine whether to download it.

This poses a challenge for our network stack, which goes to some effort to abstract away details of individual peers and encapsulate all peer connections behind a single request/response interface representing "the network". Currently, the peer set tracks readiness of all live peers, reports readiness if at least one peer is ready, and routes requests across ready peers randomly using the ["power of two choices"][p2c] algorithm.

However, while this works well for data that is already distributed across the network (e.g., existing blocks), it will not work well for fetching data *during* distribution across the network. If a peer informs us of some new data, and we attempt to download it from a random, unrelated peer, we will likely fail. Instead, we track recent inventory advertisements, and make a best-effort attempt to route requests to peers who advertised that inventory.

[p2c]: https://www.eecs.harvard.edu/~michaelm/postscripts/mythesis.pdf

# Reference-level explanation
[reference-level-explanation]: #reference-level-explanation

The inventory tracking system has several components:

1. A registration hook that monitors incoming messages for inventory advertisements;
2. An inventory registry that tracks inventory presence by peer;
3. Routing logic that uses the inventory registry to appropriately route requests.

The first two components have fairly straightforward design decisions, but the third has considerably less obvious choices and tradeoffs.

## Inventory Monitoring

Zebra uses Tokio's codec mechanism to transform a byte-oriented I/O interface into a `Stream` and `Sink` for incoming and outgoing messages. These are passed to the peer connection state machine, which is written generically over any `Stream` and `Sink`. This construction makes it easy to "tap" the sequence of incoming messages using `.then` and `.with` stream and sink combinators.

We already do this to record Prometheus metrics on message rates as well as to report message timestamps used for liveness checks and last-seen address book metadata. The message timestamp mechanism is a good example to copy. The handshake logic instruments the incoming message stream with a closure that captures a sender handle for an [mpsc] channel with a large buffer (currently 100 timestamp entries). The receiver handle is owned by a separate task that shares an `Arc<Mutex<AddressBook>>` with other parts of the application. This task waits for new timestamp entries, acquires a lock on the address book, and updates the address book. This ensures that timestamp updates are queued asynchronously, without lock contention.

Unlike the address book, we don't need to share the inventory data with other parts of the application, so it can be owned exclusively by the peer set. This means that no lock is necessary, and the peer set can process advertisements in its `poll_ready` implementation. This method may be called infrequently, which could cause the channel to fill. However, because inventory advertisements are time-limited, in the sense that they're only useful before some item is fully distributed across the network, it's safe to handle excess entries by dropping them. This behavior is provided by a [broadcast]/mpmc channel, which can be used in place of an mpsc channel.

[mpsc]: https://docs.rs/tokio/0.2.22/tokio/sync/mpsc/index.html
[broadcast]: https://docs.rs/tokio/0.2.22/tokio/sync/broadcast/index.html

An inventory advertisement is an `(InventoryHash, SocketAddr)` pair. The stream hook should check whether an incoming message is an `inv` message with only a small number (e.g., 1) of inventory entries. If so, it should extract the hash for each item and send it through the channel. Otherwise, it should ignore the message contents. Why? Because `inv` messages are also sent in response to queries, such as when we request subsequent block hashes, and in that case we want to assume that the inventory is generally available rather than restricting downloads to a single peer. However, items are usually gossiped individually (or potentially in small chunks; `zcashd` has an internal `inv` buffer subject to race conditions), so choosing a small bound such as 1 is likely to work as a heuristic for when we should assume that advertised inventory is not yet generally available.

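A hedged sketch of such a stream hook is below; the `Message` enum, `InventoryHash` alias, and function name are illustrative stand-ins for the real `zebra_network` types:

```rust
use std::net::SocketAddr;

use futures::stream::{Stream, StreamExt};
use tokio::sync::broadcast;

type InventoryHash = [u8; 32];

enum Message {
    Inv(Vec<InventoryHash>),
    // ... other protocol messages ...
}

/// Taps `inbound`, forwarding single-item `inv` advertisements to `tx`.
/// Larger `inv` messages are assumed to be query responses and are ignored.
fn tap_inventory(
    inbound: impl Stream<Item = Message>,
    peer_addr: SocketAddr,
    tx: broadcast::Sender<(InventoryHash, SocketAddr)>,
) -> impl Stream<Item = Message> {
    inbound.map(move |msg| {
        if let Message::Inv(items) = &msg {
            if items.len() == 1 {
                // A broadcast channel drops the oldest entries when full,
                // which is safe because advertisements are time-limited.
                let _ = tx.send((items[0], peer_addr));
            }
        }
        msg
    })
}
```
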
## Inventory Registry

The peer set's `poll_ready` implementation should extract all available `(InventoryHash, SocketAddr)` pairs from the channel, and log a warning event if the receiver is lagging. The channel should be configured with a generous buffer size (such as 100) so that this is unlikely to happen in normal circumstances. These pairs should be fed into an `InventoryRegistry` structure along these lines:

```rust
struct InventoryRegistry {
    current: HashMap<InventoryHash, HashSet<SocketAddr>>,
    prev: HashMap<InventoryHash, HashSet<SocketAddr>>,
}

impl InventoryRegistry {
    pub fn register(&mut self, item: InventoryHash, addr: SocketAddr) {
        // Record the advertisement in the current generation.
        self.current.entry(item).or_default().insert(addr);
    }

    pub fn rotate(&mut self) {
        // Age the current generation out to `prev`, dropping the old `prev`.
        self.prev = std::mem::take(&mut self.current);
    }

    pub fn peers(&self, item: &InventoryHash) -> impl Iterator<Item = &SocketAddr> {
        // Search both generations, so advertisements live for two rotations.
        self.prev.get(item).into_iter().chain(self.current.get(item)).flatten()
    }
}
```

This API allows pruning the inventory registry using `rotate`, which implements generational pruning of registry entries. The peer set should maintain a `tokio::time::Interval` with some interval parameter, and check in `poll_ready` whether the interval stream has any items, calling `rotate` for each one:

```rust
while let Poll::Ready(Some(_)) = Pin::new(&mut timer).poll_next(cx) {
    registry.rotate();
}
```

By rotating for each available item in the interval stream, rather than just once, we ensure that if the peer set's `poll_ready` is not called for a long time, `rotate` will be called enough times to correctly flush old entries.

Inventory advertisements live in the registry for twice the length of the timer, so it should be chosen to be half of the desired lifetime for inventory advertisements. Setting the timer to 75 seconds, the block interval, seems like a reasonable choice.

## Routing Logic

At this point, the peer set has information on recent inventory advertisements. However, the `Service` trait only allows `poll_ready` to report readiness based on the service's data and the type of the request, not the content of the request. This means that we must report readiness without knowing whether the request should be routed to a specific peer, and we must handle the case where `call` gets a request for an item only available at an unready peer.

This RFC suggests the following routing logic. First, check whether the request fetches data by hash. If so, and `peers()` yields any addresses for that hash, iterate over those addresses and route the request to the first ready peer, if there is one. In all other cases, fall back to p2c routing. Alternatives are suggested and discussed below.

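A minimal sketch of this rule, using simple stand-in types; `is_ready` and the `p2c_choice` fallback are placeholders for the peer set's existing readiness tracking and p2c selection:

```rust
use std::collections::{HashMap, HashSet};
use std::net::SocketAddr;

type InventoryHash = [u8; 32];

/// Route a by-hash request (`item == Some(hash)`) to a ready advertiser if
/// possible; otherwise fall back to the power-of-two-choices selection.
fn route(
    item: Option<InventoryHash>,
    advertisers: &HashMap<InventoryHash, HashSet<SocketAddr>>,
    is_ready: impl Fn(&SocketAddr) -> bool,
    p2c_choice: SocketAddr,
) -> SocketAddr {
    if let Some(hash) = item {
        if let Some(addrs) = advertisers.get(&hash) {
            if let Some(addr) = addrs.iter().find(|addr| is_ready(addr)) {
                return *addr;
            }
        }
    }
    p2c_choice
}
```
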
# Rationale and alternatives
[rationale-and-alternatives]: #rationale-and-alternatives

The rationale is described above. The alternative choices are primarily around the routing logic.

The `Service` trait does not allow applying backpressure based on the *content* of a request, only based on the service's internal data (via the `&mut self` parameter of `Service::poll_ready`) and on the type of the request (which determines which `impl Service` is used). This means that it is impossible for us to apply backpressure until a service that can process a specific inventory request is ready, because until we get the request, we can't determine which peers might be required to process it.

One way to try to ensure that the peer set would be ready to process a specific inventory request would be to pre-emptively "reserve" a peer as soon as it advertises an inventory item. But this doesn't actually work to ensure readiness, because a peer could advertise two inventory items, and only be able to service one request at a time. It also potentially locks the peer set, since if there are only a few peers and they all advertise inventory, the service can't process any other requests. So this approach does not work.

Another alternative would be to do some kind of buffering of inventory requests that cannot immediately be processed by a peer that advertised that inventory. There are two basic sub-approaches here.

In the first case, we could maintain an unbounded queue of yet-to-be processed inventory requests in the peer set, and every time `poll_ready` is called, we check whether a service that could serve those inventory requests became ready, and start processing the request if we can. This would provide the lowest latency, because we can dispatch the request to the first available peer. For instance, if peer A advertises inventory I, the peer set gets an inventory request for I, peer A is busy so the request is queued, and peer B advertises inventory I, we could dispatch the queued request to B rather than waiting for A.

However, it's not clear exactly how we'd implement this, because this mechanism is driven by calls to `poll_ready`, and those might not happen. So we'd need some separate task to drive the buffered requests to completion, but that task cannot do so via `poll_ready`, since that method requires owning the service, and the peer set will be owned by a `Buffer` worker.

In the second case, we could select an unready peer that advertised the requested inventory, clone it, and move the cloned peer into a task that would wait for that peer to become ready and then make the request. This is conceptually much cleaner than the above mechanism, but it has the downside that we don't dispatch the request to the first ready peer. In the example above, if we cloned peer A and dispatched the request to it, we'd have to wait for A to become ready, even if the second peer B advertised the same inventory just after we dispatched the request to A. However, this is not presently possible anyway, because the `peer::Client`s that handle requests are not clonable. They could be made clonable (they send messages to the connection state machine over an mpsc channel), but we cannot make this change without altering our liveness mechanism, which uses bounds on the time-since-last-message to determine whether a peer connection is live and to prevent immediate reconnections to recently disconnected peers.

A final alternative would be to fail inventory requests that we cannot route to a peer which advertised that inventory. This moves the failure forward in time, but preemptively fails some cases where the request might succeed -- for instance, if the peer has inventory but just didn't tell us, or received the inventory between when we dispatch the request and when it receives our message. It seems preferable to try and fail than to not try at all.

In practice, we're likely to care about the gossip protocol and inventory fetching once we've already synced close to the chain tip. In this setting, we're likely to already have peer connections, and we're unlikely to be saturating our peer set with requests (as we do during initial block sync). This suggests that the common case is one where we have many idle peers, and that therefore we are unlikely to have dispatched any recent requests to the peer that advertised inventory. So our common case should be one where all of this analysis is irrelevant.

@ -0,0 +1,424 @@

- Start Date: 2020-08-10
- Design PR: [ZcashFoundation/zebra#868](https://github.com/ZcashFoundation/zebra/pull/868)
- Zebra Issue: [ZcashFoundation/zebra#964](https://github.com/ZcashFoundation/zebra/issues/964)

# Summary
[summary]: #summary

This RFC describes an architecture for asynchronous script verification and its interaction with the state layer. This architecture imposes constraints on the ordering of operations in the state layer.

# Motivation
[motivation]: #motivation

As in the rest of Zebra, we want to express our work as a collection of work-items with explicit dependencies, then execute these items concurrently and in parallel on a thread pool.

# Definitions
[definitions]: #definitions

- *UTXO*: unspent transparent transaction output. Transparent transaction outputs are modeled in `zebra-chain` by the [`transparent::Output`][transout] structure.
- outpoint: a reference to an unspent transparent transaction output, including a transaction hash and output index. Outpoints are modeled in `zebra-chain` by the [`transparent::OutPoint`][outpoint] structure.
- transparent input: a previous transparent output consumed by a later transaction (the one it is an input to). Modeled in `zebra-chain` by the [`transparent::Input::PrevOut`][transin] enum variant.
- coinbase transaction: the first transaction in each block, which creates new coins.
- lock script: the script that defines the conditions under which some UTXO can be spent. Stored in the [`transparent::Output::lock_script`][lock_script] field.
- unlock script: a script satisfying the conditions of the lock script, allowing a UTXO to be spent. Stored in the [`transparent::Input::PrevOut::unlock_script`][unlock_script] field.

[transout]: https://doc-internal.zebra.zfnd.org/zebra_chain/transparent/struct.Output.html
[outpoint]: https://doc-internal.zebra.zfnd.org/zebra_chain/transparent/struct.OutPoint.html
[lock_script]: https://doc-internal.zebra.zfnd.org/zebra_chain/transparent/struct.Output.html#structfield.lock_script
[transin]: https://doc-internal.zebra.zfnd.org/zebra_chain/transparent/enum.Input.html
[unlock_script]: https://doc-internal.zebra.zfnd.org/zebra_chain/transparent/enum.Input.html#variant.PrevOut.field.unlock_script

# Guide-level explanation
[guide-level-explanation]: #guide-level-explanation

Zcash's transparent address system is inherited from Bitcoin. Transactions spend unspent transparent transaction outputs (UTXOs) from previous transactions. These UTXOs are encumbered by *locking scripts* that define the conditions under which they can be spent, e.g., requiring a signature from a certain key. Transactions wishing to spend UTXOs supply an *unlocking script* that should satisfy the conditions of the locking script for each input they wish to spend.

This means that script verification requires access to data about previous UTXOs, in order to determine the conditions under which those UTXOs can be spent. In Zebra, we aim to run operations asynchronously and out-of-order to the greatest extent possible. For instance, we may begin verification of a block before all of its ancestors have been verified or even downloaded. So we need to design a mechanism that allows script verification to declare its data dependencies and execute as soon as all required data is available.

It's not necessary for this mechanism to ensure that the transaction outputs remain unspent, only to give enough information to perform script verification. Checking that all transaction inputs are actually unspent is done later, at the point that its containing block is committed to the chain.

At a high level, this adds a new request/response pair to the state service:

- `Request::AwaitSpendableUtxo { output: OutPoint, ..conditions }` requests a spendable `transparent::Output`, looked up using `OutPoint`.
- `Response::SpendableUtxo(Utxo)` supplies the requested `transparent::Output` as part of a new `Utxo` type, if the output is spendable based on `conditions`.

Note that this request is named differently from the other requests, `AwaitSpendableUtxo` rather than `GetUtxo` or similar. This is because the request has rather different behavior:
- the request does not complete until the state service learns about a UTXO matching the request, which could be never. For instance, if the transaction output was already spent, the service is not required to return a response.
- the request does not complete until the output is spendable, based on the `conditions` in the request.

The state service does not cancel long-running UTXO requests. Instead, the caller is responsible for deciding when a request is unlikely to complete. (For example, using a timeout layer.)

This allows a script verifier to asynchronously obtain information about previous transaction outputs and start verifying scripts as soon as the data is available. For instance, if we begin parallel download and verification of 500 blocks, we should be able to begin script verification of all scripts referencing outputs from existing blocks in parallel, and begin verification of scripts referencing outputs from new blocks as soon as they are committed to the chain.

Because spending outputs from older blocks is more common than spending outputs from recent blocks, this should allow a significant amount of parallelism.

# Reference-level explanation
[reference-level-explanation]: #reference-level-explanation

## Data structures
[data-structures]: #data-structures

We add the following request and response to the state protocol:
```rust
// A new variant of the state service's `Request` enum:
enum Request {
    // ...
    AwaitSpendableUtxo {
        outpoint: OutPoint,
        spend_height: Height,
        spend_restriction: SpendRestriction,
    },
}

/// Consensus rule:
/// "A transaction with one or more transparent inputs from coinbase transactions
/// MUST have no transparent outputs (i.e. tx_out_count MUST be 0)."
enum SpendRestriction {
    /// The UTXO is spent in a transaction with transparent outputs
    SomeTransparentOutputs,
    /// The UTXO is spent in a transaction with all shielded outputs
    AllShieldedOutputs,
}
```

As described above, the request name is intended to indicate the request's behavior. The request does not resolve until:
- the state layer learns of a UTXO described by the request, and
- the output is spendable at `spend_height` with `spend_restriction`.

The new `Utxo` type adds a coinbase flag and height to `transparent::Output`s that we look up in the state, or get from newly committed blocks:
```rust
// A new variant of the state service's `Response` enum:
enum Response {
    // ...
    SpendableUtxo(Utxo),
}

pub struct Utxo {
    /// The output itself.
    pub output: transparent::Output,

    /// The height at which the output was created.
    pub height: block::Height,

    /// Whether the output originated in a coinbase transaction.
    pub from_coinbase: bool,
}
```

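For illustration, a consumer of this API might look roughly like the following. This is a hedged sketch that assumes the `Request`/`Response`/`Utxo` sketches above, a `tower::Service` handle, and `tower::ServiceExt::oneshot`; Zebra's real call sites differ:

```rust
use tower::{BoxError, Service, ServiceExt};

async fn await_spendable_utxo<S>(
    state: S,
    outpoint: OutPoint,
    spend_height: Height,
    spend_restriction: SpendRestriction,
) -> Result<Utxo, BoxError>
where
    S: Service<Request, Response = Response, Error = BoxError>,
{
    // `oneshot` waits for the service to be ready, then calls it once.
    let rsp = state
        .oneshot(Request::AwaitSpendableUtxo { outpoint, spend_height, spend_restriction })
        .await?;
    match rsp {
        Response::SpendableUtxo(utxo) => Ok(utxo),
        _ => Err("unexpected response to AwaitSpendableUtxo".into()),
    }
}
```
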
## Transparent coinbase consensus rules
[transparent-coinbase-consensus-rules]: #transparent-coinbase-consensus-rules

Specifically, if the UTXO is a transparent coinbase output, the service is not required to return a response if:
- `spend_height` is less than `MIN_TRANSPARENT_COINBASE_MATURITY` (100) blocks after the `Utxo.height`, or
- `spend_restriction` is `SomeTransparentOutputs`.

This implements the following consensus rules:

> A transaction MUST NOT spend a transparent output of a coinbase transaction from a block less than 100 blocks prior to the spend.
>
> Note that transparent outputs of coinbase transactions include Founders’ Reward outputs and transparent funding stream outputs.

> A transaction with one or more transparent inputs from coinbase transactions MUST have no transparent outputs (i.e. tx_out_count MUST be 0).
>
> Inputs from coinbase transactions include Founders’ Reward outputs and funding stream outputs.

https://zips.z.cash/protocol/protocol.pdf#txnencodingandconsensus

## Parallel coinbase checks
[parallel-coinbase-checks]: #parallel-coinbase-checks

We can perform these coinbase checks asynchronously, in the presence of multiple chain forks, as long as the following conditions both hold:

1. We don't mistakenly accept or reject spends to the transparent pool.

2. We don't mistakenly accept or reject mature spends.

### Parallel coinbase justification
[parallel-coinbase-justification]: #parallel-coinbase-justification

There are two parts to a spend restriction:
- the `from_coinbase` flag, and
- if the `from_coinbase` flag is true, the coinbase `height`.

If a particular transaction hash `h` always has the same `from_coinbase` value, and `h` exists in multiple chains, then regardless of which `Utxo` arrives first, the outputs of `h` always get the same `from_coinbase` value during validation. So spends can not be mistakenly accepted or rejected due to a different coinbase flag.

Similarly, if a particular coinbase transaction hash `h` always has the same `height` value, and `h` exists in multiple chains, then regardless of which `Utxo` arrives first, the outputs of `h` always get the same `height` value during validation. So coinbase spends can not be mistakenly accepted or rejected due to a different `height` value. (The heights of non-coinbase outputs are irrelevant, because they are never checked.)

These conditions hold as long as the following multi-chain properties are satisfied:
- `from_coinbase`: across all chains, the set of coinbase transaction hashes is disjoint from the set of non-coinbase transaction hashes, and
- coinbase `height`: across all chains, duplicate coinbase transaction hashes can only occur at exactly the same height.

### Parallel coinbase consensus rules
[parallel-coinbase-consensus]: #parallel-coinbase-consensus

These multi-chain properties can be derived from the following consensus rules:

Transaction versions 1-4:

> [Pre-Sapling] If effectiveVersion = 1 or nJoinSplit = 0, then both tx_in_count and tx_out_count MUST be nonzero.
> ...
> [Sapling onward] If effectiveVersion < 5, then at least one of tx_in_count, nSpendsSapling, and nJoinSplit MUST be nonzero.

> A coinbase transaction for a block at block height greater than 0 MUST have a script that, as its first item, encodes the block height *height* as follows.
>
> For height in the range {1 .. 16}, the encoding is a single byte of value 0x50 + height.
>
> Otherwise, let heightBytes be the signed little-endian representation of height, using the minimum nonzero number of bytes such that the most significant byte is < 0x80. The length of heightBytes MUST be in the range {1 .. 8}. Then the encoding is the length of heightBytes encoded as one byte, followed by heightBytes itself.

https://zips.z.cash/protocol/protocol.pdf#txnencodingandconsensus

> The transaction ID of a version 4 or earlier transaction is the SHA-256d hash of the transaction encoding in the pre-v5 format described above.

https://zips.z.cash/protocol/protocol.pdf#txnidentifiers

Transaction version 5:

> [NU5 onward] If effectiveVersion ≥ 5, then this condition must hold: tx_in_count > 0 or nSpendsSapling > 0 or (nActionsOrchard > 0 and enableSpendsOrchard = 1).
> ...
> [NU5 onward] The nExpiryHeight field of a coinbase transaction MUST be equal to its block height.

https://zips.z.cash/protocol/protocol.pdf#txnencodingandconsensus

> non-malleable transaction identifiers ... commit to all transaction data except for attestations to transaction validity
> ...
> A new transaction digest algorithm is defined that constructs the identifier for a transaction from a tree of hashes
> ...
> A BLAKE2b-256 hash of the following values:
> ...
> T.1e: expiry_height (4-byte little-endian block height)

https://zips.z.cash/zip-0244#t-1-header-digest

Since:
- coinbase transaction hashes commit to the block `Height`,
- non-coinbase transaction hashes commit to their inputs, and
- double-spends are not allowed;

Therefore:
- coinbase transaction hashes are unique for distinct heights in any chain,
- coinbase transaction hashes are unique in a single chain, and
- non-coinbase transaction hashes are unique in a single chain, because they recursively commit to unique inputs.

So the required parallel verification conditions are satisfied.

## Script verification
[script-verification]: #script-verification

To verify scripts, a script verifier requests the relevant UTXOs from the state service and waits for all of them to resolve, or fails verification with a timeout error. Currently, we outsource script verification to `zcash_consensus`, which does FFI into the same C++ code as `zcashd` uses. **We need to ensure this code is thread-safe**.

## Database implementation
[database-implementation]: #database-implementation

Implementing the state request correctly requires considering two sets of behaviors:

1. behaviors related to the state's external API (a `Buffer`ed `tower::Service`);
2. behaviors related to the state's internal implementation (using `rocksdb`).

Making this distinction helps us to ensure we don't accidentally leak "internal" behaviors into "external" behaviors, which would violate encapsulation and make it more difficult to replace `rocksdb`.

In the first category, our state is presented to the rest of the application as a `Buffer`ed `tower::Service`. The `Buffer` wrapper allows shared access to a service using an actor model, moving the service to be shared into a worker task and passing messages to it over a multi-producer single-consumer (mpsc) channel. The worker task receives messages and makes `Service::call`s. The `Service::call` method returns a `Future`, and the service is allowed to decide how much work it wants to do synchronously (in `call`) and how much work it wants to do asynchronously (in the `Future` it returns).

This means that our external API ensures that the state service sees a linearized sequence of state requests, although the exact ordering is unpredictable when there are multiple senders making requests.

Because the state service has exclusive access to the rocksdb database, and the state service sees a linearized sequence of state requests, we have an easy way to opt in to asynchronous database access. We can perform rocksdb operations synchronously in the `Service::call`, waiting for them to complete, and be sure that all future requests will see the resulting rocksdb state. Or, we can perform rocksdb operations asynchronously in the future returned by `Service::call`.

If we perform all *writes* synchronously and allow reads to be either synchronous or asynchronous, we ensure that writes cannot race each other. Asynchronous reads are guaranteed to read at least the state present at the time the request was processed, or a later state.

## Lookup states
[lookup-states]: #lookup-states

Now, returning to the UTXO lookup problem, we can map out the possible states with this restriction in mind. This description assumes that UTXO storage is split into disjoint sets, one in-memory (e.g., blocks after the reorg limit) and the other in rocksdb (e.g., blocks before the reorg limit). The details of this storage are not important for this design, only that the two sets are disjoint.

When the state service processes a `Request::AwaitSpendableUtxo` referencing some UTXO `u`, there are three disjoint possibilities:

1. `u` is already contained in an in-memory block storage;
2. `u` is already contained in the rocksdb UTXO set;
3. `u` is not yet known to the state service.

In case 3, we need to queue `u` and scan all *future* blocks to see whether they contain `u`. However, if we have a mechanism to queue `u`, we can perform check 2 asynchronously, because restricting to synchronous writes means that any async read will return the current or later state. If `u` was in the rocksdb UTXO set when the request was processed, the only way that an async read would not return `u` is if the UTXO were spent, in which case the service is not required to return a response.

## Lookup implementation
[lookup-implementation]: #lookup-implementation

This behavior can be encapsulated into a `PendingUtxos` structure described below.

```rust
// sketch
#[derive(Default, Debug)]
struct PendingUtxos(HashMap<OutPoint, oneshot::Sender<Utxo>>);

impl PendingUtxos {
    // Adds the outpoint and returns the (wrapped) rx end of a oneshot channel.
    // Checks the spend height and restriction before sending the utxo response.
    // The return value can be converted to `Service::Future`.
    pub fn queue(
        &mut self,
        outpoint: OutPoint,
        spend_height: Height,
        spend_restriction: SpendRestriction,
    ) -> impl Future<Output = Result<Response, ...>>;

    // If the outpoint is a hashmap key, removes the entry and sends the output
    // on the channel.
    pub fn respond(&mut self, outpoint: OutPoint, output: transparent::Output);

    // Checks the list of pending UTXO requests against the supplied `utxos`.
    pub fn check_against(&mut self, utxos: &HashMap<transparent::OutPoint, Utxo>);

    // Scans the hashmap and removes any entries with closed senders.
    pub fn prune(&mut self);
}
```

The state service should maintain an `Arc<Mutex<PendingUtxos>>`, used as follows:

1. In `Service::call(Request::AwaitSpendableUtxo { outpoint: u, .. })`, the service should:
   - call `PendingUtxos::queue(u)` to get a future `f` to return to the caller;
   - spawn a task that does a rocksdb lookup for `u`, calling `PendingUtxos::respond(u, output)` if present;
   - check the in-memory storage for `u`, calling `PendingUtxos::respond(u, output)` if present;
   - return `f` to the caller (it may already be ready).

   The common case is that `u` references an old spendable UTXO, so spawning the lookup task first means that we don't wait to check in-memory storage for `u` before starting the rocksdb lookup.

2. In `f`, the future returned by `PendingUtxos::queue(u)`, the service should check that the `Utxo` is spendable before returning it (see the sketch after this list):
   - if `Utxo.from_coinbase` is false, return the utxo;
   - if `Utxo.from_coinbase` is true, check that:
     - `spend_restriction` is `AllShieldedOutputs`, and
     - `spend_height` is greater than or equal to `MIN_TRANSPARENT_COINBASE_MATURITY` plus the `Utxo.height`,
   - if both checks pass, return the utxo.
   - if any check fails, drop the utxo, and let the request time out.

3. In `Service::call(Request::CommitBlock(block, ..))`, the service should:
   - [check for double-spends of each UTXO in the block](https://github.com/ZcashFoundation/zebra/issues/2231), and
   - do any other transactional checks before committing a block as normal.

   Because the `AwaitSpendableUtxo` request is informational, there's no need to do the transactional checks before matching against pending UTXO requests, and doing so upfront can run expensive verification earlier than needed.

4. In `Service::poll_ready()`, the service should call `PendingUtxos::prune()` at least *some* of the time. This is required because when a consumer uses a timeout layer, the cancelled requests should be flushed from the queue to avoid a resource leak. However, doing this on every call will result in us spending a bunch of time iterating over the hashmap.

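The spendability check in step 2 can be sketched as a plain function; the names and the standalone form are assumptions for illustration (in Zebra the constant and types live elsewhere):

```rust
const MIN_TRANSPARENT_COINBASE_MATURITY: u32 = 100;

/// Returns true if a UTXO created at `utxo_height` may be spent at
/// `spend_height` under the given restriction flags.
fn is_spendable(
    from_coinbase: bool,
    utxo_height: u32,
    spend_height: u32,
    all_shielded_outputs: bool,
) -> bool {
    if !from_coinbase {
        // Non-coinbase outputs have no extra restrictions here.
        return true;
    }
    // Coinbase outputs must be spent entirely to shielded outputs, and
    // only after the maturity interval has passed.
    all_shielded_outputs && spend_height >= utxo_height + MIN_TRANSPARENT_COINBASE_MATURITY
}
```
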
# Drawbacks
[drawbacks]: #drawbacks

One drawback of this design is that we may have to wait on a lock. However, the critical section basically amounts to a hash lookup and a channel send, so I don't think that we're likely to run into problems with long contended periods, and it's unlikely that we would get a deadlock.

# Rationale and alternatives
[rationale-and-alternatives]: #rationale-and-alternatives

High-level design rationale is inline with the design sketch. One low-level option would be to avoid encapsulating behavior in the `PendingUtxos` and just have an `Arc<Hashmap<..>>`, so that the lock only protects the hashmap lookup and not sending through the channel. But I think the current design is cleaner and the cost is probably not too large.

# Unresolved questions
[unresolved-questions]: #unresolved-questions

- We need to pick a timeout for UTXO lookup. This should be long enough to account for the fact that we may start verifying blocks before all of their ancestors are downloaded.

@ -0,0 +1,941 @@


# State Updates

- Feature Name: state_updates
- Start Date: 2020-08-14
- Design PR: https://github.com/ZcashFoundation/zebra/pull/902
- Zebra Issue: https://github.com/ZcashFoundation/zebra/issues/1049

# Summary
[summary]: #summary

Zebra manages chain state in the `zebra-state` crate, which allows state
queries via asynchronous RPC (in the form of a Tower service). The state
system is responsible for contextual verification in the sense of [RFC2],
checking that new blocks are consistent with the existing chain state before
committing them. This RFC describes how the state is represented internally,
and how state updates are performed.

[RFC2]: ./0002-parallel-verification.md

# Motivation
[motivation]: #motivation

We need to be able to access and modify the chain state, and we want to have
a description of how this happens and what guarantees are provided by the
state service.

# Definitions
[definitions]: #definitions

* **state data**: Any data the state service uses to represent chain state.

* **structural/semantic/contextual verification**: as defined in [RFC2].

* **block chain**: A sequence of valid blocks linked by inclusion of the
  previous block hash in the subsequent block. Chains are rooted at the
  genesis block and extend to a **tip**.

* **chain state**: The state of the ledger after application of a particular
  sequence of blocks (state transitions).

* **block work**: The approximate amount of work required for a miner to generate
  a block hash that passes the difficulty filter. The number of block header
  attempts and the mining time are proportional to the work value. Numerically
  higher work values represent longer processing times.

* **cumulative work**: The sum of the **block work** of all blocks in a chain, from
  genesis to the chain tip.

* **best chain**: The chain with the greatest **cumulative work**. This chain
  represents the consensus state of the Zcash network and transactions.

* **side chain**: A chain which is not contained in the best chain.
  Side chains are pruned at the reorg limit, when they are no longer
  connected to the finalized state.

* **chain reorganization**: Occurs when a new best chain is found and the
  previous best chain becomes a side chain.

* **reorg limit**: The longest reorganization accepted by `zcashd`, 100 blocks.

* **orphaned block**: A block which is no longer included in the best chain.

* **non-finalized state**: State data corresponding to blocks above the reorg
  limit. This data can change in the event of a chain reorg.

* **finalized state**: State data corresponding to blocks below the reorg
  limit. This data cannot change in the event of a chain reorg.

* **non-finalized tips**: The highest blocks in each non-finalized chain. These
  tips might be at different heights.

* **finalized tip**: The highest block in the finalized state. The tip of the best
  chain is usually 100 blocks (the reorg limit) above the finalized tip. But it can
  be lower during the initial sync, and after a chain reorganization, if the new
  best chain is at a lower height.

* **relevant chain**: The relevant chain for a block starts at the previous
  block, and extends back to genesis.

* **relevant tip**: The tip of the relevant chain.

# Guide-level explanation
[guide-level-explanation]: #guide-level-explanation

The `zebra-state` crate provides an implementation of the chain state storage
logic in a Zcash consensus node. Its main responsibility is to store chain
state, validating new blocks against the existing chain state in the process,
and to allow later querying of said chain state. `zebra-state` provides this
interface via a `tower::Service` based on the actor model, with a
request/response interface for passing messages back and forth between the
state service and the rest of the application.

The main entry point for the `zebra-state` crate is the `init` function. This
function takes a `zebra_state::Config` and constructs a new state service,
which it returns wrapped in a `tower::Buffer`. This service is then interacted
with via the `tower::Service` trait.

```rust
use tower::{Service, ServiceExt};

let state = zebra_state::on_disk::init(state_config, network);
let request = zebra_state::Request::BlockLocator;
let response = state.ready_and().await?.call(request).await?;

assert!(matches!(response, zebra_state::Response::BlockLocator(_)));
```

**Note**: The `tower::Service` API requires that `ready` is always called
exactly once before each `call`. It is up to users of the zebra state service
to uphold this contract.

The `tower::Buffer` wrapper is `Clone`able, allowing shared access to a common
state service. This allows different tasks to share access to the chain state.
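
For example, a cloned handle can be moved into another task; this sketch
assumes a `BoxError`-style error type and elides the response handling:

```rust
// Sketch: each task gets its own clone of the buffered state service.
let mut task_state = state.clone();
tokio::spawn(async move {
    let tip = task_state
        .ready_and()
        .await?
        .call(zebra_state::Request::Tip)
        .await?;
    // ... use the tip response ...
    Ok::<_, BoxError>(tip)
});
```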

The set of operations supported by `zebra-state` is encoded in its `Request`
enum. This enum has one variant for each supported operation.

```rust
pub enum Request {
    CommitBlock {
        block: Arc<Block>,
    },
    CommitFinalizedBlock {
        block: Arc<Block>,
    },
    Depth(Hash),
    Tip,
    BlockLocator,
    Transaction(Hash),
    Block(HashOrHeight),

    // .. some variants omitted
}
```

`zebra-state` breaks down its requests into two categories and provides
different guarantees for each category: requests that modify the state, and
requests that do not. Requests that update the state are guaranteed to run
sequentially and will never race against each other. Requests that read state
are done asynchronously and are guaranteed to read at least the state present
at the time the request was processed by the service, or a later state
present at the time the request future is executed. The state service avoids
race conditions between the read state and the written state by doing all
contextual verification internally.

# Reference-level explanation
[reference-level-explanation]: #reference-level-explanation

## State Components

Zcash (as implemented by `zcashd`) differs from Bitcoin in its treatment of
transaction finality. If a new best chain is detected that does not extend
the previous best chain, blocks at the end of the previous best chain become
orphaned (no longer included in the best chain). Their state updates are
therefore no longer included in the best chain's chain state. The process of
rolling back orphaned blocks and applying new blocks is called a chain
reorganization. Bitcoin allows chain reorganizations of arbitrary depth,
while `zcashd` limits chain reorganizations to 100 blocks. (In `zcashd`, the
new best chain must be a side-chain that forked within 100 blocks of the tip
of the current best chain.)

This difference means that in Bitcoin, chain state only has probabilistic
finality, while in Zcash, chain state is final once it is beyond the reorg
limit. To simplify our implementation, we split the representation of the
state data at the finality boundary provided by the reorg limit.

State data from blocks *above* the reorg limit (*non-finalized state*) is
stored in-memory and handles multiple chains. State data from blocks *below*
the reorg limit (*finalized state*) is stored persistently using `rocksdb` and
only tracks a single chain. This allows a simplification of our state
handling, because only finalized data is persistent and the logic for
finalized data handles fewer invariants.

One downside of this design is that restarting the node loses the last 100
blocks, but node restarts are relatively infrequent and a short re-sync is
cheap relative to the cost of additional implementation complexity.

Another downside of this design is that we do not achieve exactly the same
behavior as `zcashd` in the event of a 51% attack: `zcashd` limits *each* chain
reorganization to 100 blocks, but permits multiple reorgs, while Zebra limits
*all* chain reorgs to 100 blocks. In the event of a successful 51% attack on
Zcash, this could be resolved by wiping the rocksdb state and re-syncing the new
chain, but in this scenario there are worse problems.

## Service Interface
[service-interface]: #service-interface

The state is accessed asynchronously through a Tower service interface.
Determining what guarantees the state service can and should provide to the
rest of the application requires considering two sets of behaviors:

1. behaviors related to the state's external API (a `Buffer`ed `tower::Service`);
2. behaviors related to the state's internal implementation (using `rocksdb`).

Making this distinction helps us to ensure we don't accidentally leak
"internal" behaviors into "external" behaviors, which would violate
encapsulation and make it more difficult to replace `rocksdb`.

In the first category, our state is presented to the rest of the application
as a `Buffer`ed `tower::Service`. The `Buffer` wrapper allows shared access
to a service using an actor model, moving the service to be shared into a
worker task and passing messages to it over a multi-producer single-consumer
(mpsc) channel. The worker task receives messages and makes `Service::call`s.
The `Service::call` method returns a `Future`, and the service is allowed to
decide how much work it wants to do synchronously (in `call`) and how much
work it wants to do asynchronously (in the `Future` it returns).

This means that our external API ensures that the state service sees a
linearized sequence of state requests, although the exact ordering is
unpredictable when there are multiple senders making requests.

Because the state service has exclusive access to the rocksdb database, and the
state service sees a linearized sequence of state requests, we have an easy
way to opt in to asynchronous database access. We can perform rocksdb operations
synchronously in the `Service::call`, waiting for them to complete, and be
sure that all future requests will see the resulting rocksdb state. Or, we can
perform rocksdb operations asynchronously in the future returned by
`Service::call`.

If we perform all *writes* synchronously and allow reads to be either
synchronous or asynchronous, we ensure that writes cannot race each other.
Asynchronous reads are guaranteed to read at least the state present at the
time the request was processed, or a later state.

### Summary

- **rocksdb reads** may be done synchronously (in `call`) or asynchronously (in
  the `Future`), depending on the context;

- **rocksdb writes** must be done synchronously (in `call`).

## In-memory data structures
[in-memory]: #in-memory

At a high level, the in-memory data structures store a collection of chains,
each rooted at the highest finalized block. Each chain consists of a map from
heights to blocks. Chains are stored using an ordered map from cumulative work to
chains, so that the map ordering is the ordering of worst to best chains.

### The `Chain` type
[chain-type]: #chain-type

The `Chain` type represents a chain of blocks. Each block represents an
incremental state update, and the `Chain` type caches the cumulative state
update from its root to its tip.

The `Chain` type is used to represent the non-finalized portion of a complete
chain of blocks rooted at the genesis block. The parent block of the root of
a `Chain` is the tip of the finalized portion of the chain. As an exception, the
finalized portion of the chain is initially empty, until the genesis block has
been finalized.

The `Chain` type supports several operations to manipulate chains: `push`,
`pop_root`, and `fork`. `push` is the most fundamental operation and handles
contextual validation of chains as they are extended. `pop_root` is provided
for finalization, and is how we move blocks from the non-finalized portion of
the state to the finalized portion. `fork`, on the other hand, handles creating
new chains for `push` when new blocks arrive whose parent isn't a tip of an
existing chain.

**Note:** The `Chain` type's API is only designed to handle non-finalized
data. The genesis block and all pre-Canopy blocks are always considered to
be finalized blocks and should not be handled via the `Chain` type through
`CommitBlock`. They should instead be committed directly to the finalized
state with `CommitFinalizedBlock`. This is particularly important with the
genesis block, since the `Chain` will panic if used while the finalized state
is completely empty.

The `Chain` type is defined by the following struct and API:

```rust
#[derive(Debug, Clone)]
pub struct Chain {
    // The function `eq_internal_state` must be updated every time a field is added to [`Chain`].

    /// The configured network for this chain.
    network: Network,

    /// The contextually valid blocks which form this non-finalized partial chain, in height order.
    pub(crate) blocks: BTreeMap<block::Height, ContextuallyValidBlock>,

    /// An index of block heights for each block hash in `blocks`.
    pub height_by_hash: HashMap<block::Hash, block::Height>,

    /// An index of [`TransactionLocation`]s for each transaction hash in `blocks`.
    pub tx_loc_by_hash: HashMap<transaction::Hash, TransactionLocation>,

    /// The [`transparent::Utxo`]s created by `blocks`.
    ///
    /// Note that these UTXOs may not be unspent.
    /// Outputs can be spent by later transactions or blocks in the chain.
    //
    // TODO: replace OutPoint with OutputLocation?
    pub(crate) created_utxos: HashMap<transparent::OutPoint, transparent::OrderedUtxo>,
    /// The [`transparent::OutPoint`]s spent by `blocks`,
    /// including those created by earlier transactions or blocks in the chain.
    pub(crate) spent_utxos: HashSet<transparent::OutPoint>,

    /// The Sprout note commitment tree of the tip of this [`Chain`],
    /// including all finalized notes, and the non-finalized notes in this chain.
    pub(super) sprout_note_commitment_tree: sprout::tree::NoteCommitmentTree,
    /// The Sprout note commitment tree for each anchor.
    /// This is required for interstitial states.
    pub(crate) sprout_trees_by_anchor:
        HashMap<sprout::tree::Root, sprout::tree::NoteCommitmentTree>,
    /// The Sapling note commitment tree of the tip of this [`Chain`],
    /// including all finalized notes, and the non-finalized notes in this chain.
    pub(super) sapling_note_commitment_tree: sapling::tree::NoteCommitmentTree,
    /// The Sapling note commitment tree for each height.
    pub(crate) sapling_trees_by_height: BTreeMap<block::Height, sapling::tree::NoteCommitmentTree>,
    /// The Orchard note commitment tree of the tip of this [`Chain`],
    /// including all finalized notes, and the non-finalized notes in this chain.
    pub(super) orchard_note_commitment_tree: orchard::tree::NoteCommitmentTree,
    /// The Orchard note commitment tree for each height.
    pub(crate) orchard_trees_by_height: BTreeMap<block::Height, orchard::tree::NoteCommitmentTree>,
    /// The ZIP-221 history tree of the tip of this [`Chain`],
    /// including all finalized blocks, and the non-finalized `blocks` in this chain.
    pub(crate) history_tree: HistoryTree,

    /// The Sprout anchors created by `blocks`.
    pub(crate) sprout_anchors: MultiSet<sprout::tree::Root>,
    /// The Sprout anchors created by each block in `blocks`.
    pub(crate) sprout_anchors_by_height: BTreeMap<block::Height, sprout::tree::Root>,
    /// The Sapling anchors created by `blocks`.
    pub(crate) sapling_anchors: MultiSet<sapling::tree::Root>,
    /// The Sapling anchors created by each block in `blocks`.
    pub(crate) sapling_anchors_by_height: BTreeMap<block::Height, sapling::tree::Root>,
    /// The Orchard anchors created by `blocks`.
    pub(crate) orchard_anchors: MultiSet<orchard::tree::Root>,
    /// The Orchard anchors created by each block in `blocks`.
    pub(crate) orchard_anchors_by_height: BTreeMap<block::Height, orchard::tree::Root>,

    /// The Sprout nullifiers revealed by `blocks`.
    pub(super) sprout_nullifiers: HashSet<sprout::Nullifier>,
    /// The Sapling nullifiers revealed by `blocks`.
    pub(super) sapling_nullifiers: HashSet<sapling::Nullifier>,
    /// The Orchard nullifiers revealed by `blocks`.
    pub(super) orchard_nullifiers: HashSet<orchard::Nullifier>,

    /// Partial transparent address index data from `blocks`.
    pub(super) partial_transparent_transfers: HashMap<transparent::Address, TransparentTransfers>,

    /// The cumulative work represented by `blocks`.
    ///
    /// Since the best chain is determined by the largest cumulative work,
    /// the work represented by finalized blocks can be ignored,
    /// because they are common to all non-finalized chains.
    pub(super) partial_cumulative_work: PartialCumulativeWork,

    /// The chain value pool balances of the tip of this [`Chain`],
    /// including the block value pool changes from all finalized blocks,
    /// and the non-finalized blocks in this chain.
    ///
    /// When a new chain is created from the finalized tip,
    /// it is initialized with the finalized tip chain value pool balances.
    pub(crate) chain_value_pools: ValueBalance<NonNegative>,
}
```

#### `pub fn push(&mut self, block: Arc<Block>)`

Push a block into a chain as the new tip.

1. Update cumulative data members
   - Add the block's hash to `height_by_hash`
   - Add work to `self.partial_cumulative_work`
   - For each `transaction` in `block`
     - Add key: `transaction.hash` and value: `(height, tx_index)` to `tx_loc_by_hash`
     - Add created utxos to `self.created_utxos`
     - Add spent utxos to `self.spent_utxos`
     - Add nullifiers to the appropriate `self.<version>_nullifiers`

2. Add the block to `self.blocks`

#### `pub fn pop_root(&mut self) -> Arc<Block>`

Remove the lowest height block of the non-finalized portion of a chain.

1. Remove the lowest height block from `self.blocks`

2. Update cumulative data members
   - Remove the block's hash from `self.height_by_hash`
   - Subtract work from `self.partial_cumulative_work`
   - For each `transaction` in `block`
     - Remove `transaction.hash` from `tx_loc_by_hash`
     - Remove created utxos from `self.created_utxos`
     - Remove spent utxos from `self.spent_utxos`
     - Remove the nullifiers from the appropriate `self.<version>_nullifiers`

3. Return the block

#### `pub fn fork(&self, new_tip: block::Hash) -> Option<Self>`

Fork a chain at the block with the given hash, if it is part of this chain.

1. If `self` does not contain `new_tip`, return `None`

2. Clone `self` as `forked`

3. While the tip of `forked` is not equal to `new_tip`
   - call `forked.pop_tip()` and discard the old tip

4. Return `forked`
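
In code, this is roughly the following sketch (`tip_hash` is an assumed helper
that returns the hash of the highest block):

```rust
// Sketch: fork this chain so that `new_tip` becomes the tip of the clone.
pub fn fork(&self, new_tip: block::Hash) -> Option<Self> {
    // 1. The fork point must be part of this chain.
    if !self.height_by_hash.contains_key(&new_tip) {
        return None;
    }

    // 2-3. Clone the chain, then pop blocks until `new_tip` is the tip.
    let mut forked = self.clone();
    while forked.tip_hash() != new_tip {
        forked.pop_tip();
    }

    // 4. Return the forked chain.
    Some(forked)
}
```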

#### `fn pop_tip(&mut self)`

Remove the highest height block of the non-finalized portion of a chain.

1. Remove the highest height `block` from `self.blocks`

2. Update cumulative data members
   - Remove the corresponding hash from `self.height_by_hash`
   - Subtract work from `self.partial_cumulative_work`
   - For each `transaction` in `block`
     - Remove `transaction.hash` from `tx_loc_by_hash`
     - Remove created utxos from `self.created_utxos`
     - Remove spent utxos from `self.spent_utxos`
     - Remove the nullifiers from the appropriate `self.<version>_nullifiers`

#### `Ord`

The `Chain` type implements `Ord` for reorganizing chains. Chains are first
compared by their `partial_cumulative_work`. Ties are then broken by
comparing the `block::Hash`es of the tips of each chain. (This tie-breaker means
that all `Chain`s in the `NonFinalizedState` must have at least one block.)

**Note**: Unlike `zcashd`, Zebra does not use block arrival times as a
tie-breaker for the best tip. Since Zebra downloads blocks in parallel,
download times are not guaranteed to be unique. Using the `block::Hash`
provides a consistent tip order. (As a side-effect, the tip order is also
consistent after a node restart, and between nodes.)
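
A sketch of this ordering (`tip_hash` is an assumed helper; per the tie-breaker
rule, it may panic on an empty chain):

```rust
use std::cmp::Ordering;

// Sketch: order chains by cumulative work, breaking ties by tip hash.
impl Ord for Chain {
    fn cmp(&self, other: &Self) -> Ordering {
        self.partial_cumulative_work
            .cmp(&other.partial_cumulative_work)
            .then_with(|| self.tip_hash().cmp(&other.tip_hash()))
    }
}
```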

#### `Default`

The `Chain` type implements `Default` for constructing new chains whose
parent block is the tip of the finalized state. This implementation should be
handled by `#[derive(Default)]`.

1. Initialise cumulative data members
   - Construct an empty `self.blocks`, `height_by_hash`, `tx_loc_by_hash`,
     `self.created_utxos`, `self.spent_utxos`, `self.<version>_anchors`,
     `self.<version>_nullifiers`
   - Zero `self.partial_cumulative_work`

**Note:** The `ChainState` can be empty after a restart, because the
non-finalized state is empty.

### The `NonFinalizedState` type
[nonfinalizedstate-type]: #nonfinalizedstate-type

The `NonFinalizedState` type represents the set of all non-finalized state.
It consists of a set of non-finalized but verified chains, and a set of
unverified blocks which are waiting for the full context needed to verify
them to become available.

`NonFinalizedState` is defined by the following structure and API:

```rust
/// The state of the chains in memory, including queued blocks.
#[derive(Debug, Default)]
pub struct NonFinalizedState {
    /// Verified, non-finalized chains.
    chain_set: BTreeSet<Chain>,
    /// Blocks awaiting their parent blocks for contextual verification.
    contextual_queue: QueuedBlocks,
}
```

#### `pub fn finalize(&mut self) -> Arc<Block>`

Finalize the lowest height block in the non-finalized portion of the best
chain, and update all side chains to match.

1. Extract the best chain from `self.chain_set` into `best_chain`

2. Extract the rest of the chains into a `side_chains` temporary variable, so
   they can be mutated

3. Remove the lowest height block from the best chain with
   `let finalized_block = best_chain.pop_root();`

4. Add `best_chain` back to `self.chain_set` if `best_chain` is not empty

5. For each remaining `chain` in `side_chains`
   - remove the lowest height block from `chain`
   - If that block is equal to `finalized_block` and `chain` is not empty, add `chain` back to `self.chain_set`
   - Else, drop `chain`

6. Return `finalized_block`
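
A sketch of `finalize` over the ordered `chain_set` (`is_empty` is an assumed
`Chain` helper, and block equality is assumed to be structural):

```rust
// Sketch: finalize the root of the best chain, keeping only side chains
// that agree with the newly finalized block.
pub fn finalize(&mut self) -> Arc<Block> {
    // 1-2. The best chain is the largest element of the ordered set.
    let mut best_chain = self.chain_set.pop_last().expect("at least one chain");
    let side_chains = std::mem::take(&mut self.chain_set);

    // 3-4. Remove the finalized block, then re-insert the best chain.
    let finalized_block = best_chain.pop_root();
    if !best_chain.is_empty() {
        self.chain_set.insert(best_chain);
    }

    // 5. Side chains that don't start with the finalized block are dropped.
    for mut chain in side_chains {
        if chain.pop_root() == finalized_block && !chain.is_empty() {
            self.chain_set.insert(chain);
        }
    }

    // 6.
    finalized_block
}
```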

#### `fn commit_block(&mut self, block: Arc<Block>)`

Commit `block` to the non-finalized state.

1. If the block is a pre-Canopy block, or the Canopy activation block, panic.

2. If any chain's tip hash equals `block.header.previous_block_hash`, remove
   that chain from `self.chain_set` as `parent_chain`

3. Else, find the first chain that contains `block.parent` and fork it with
   `block.parent` as the new tip
   - `let parent_chain = self.chain_set.iter().find_map(|chain| chain.fork(block.parent));`

4. Else panic; this should be unreachable, because `commit_block` is only
   called when `block` is ready to be committed.

5. Push `block` into `parent_chain`

6. Insert `parent_chain` into `self.chain_set`

#### `pub(super) fn commit_new_chain(&mut self, block: Arc<Block>)`

Construct a new chain starting with `block`.

1. Construct a new empty chain

2. `push` `block` into that new chain

3. Insert the new chain into `self.chain_set`

### The `QueuedBlocks` type

The queued blocks type represents the non-finalized blocks that were committed
before their parent blocks were. It is responsible for tracking which blocks
are queued by their parent so they can be committed immediately after the
parent is committed. It also tracks blocks by their height so they can be
discarded if they ever end up below the reorg limit.

`QueuedBlocks` is defined by the following structure and API:

```rust
/// A queue of blocks, awaiting the arrival of parent blocks.
#[derive(Debug, Default)]
struct QueuedBlocks {
    /// Blocks awaiting their parent blocks for contextual verification.
    blocks: HashMap<block::Hash, QueuedBlock>,
    /// Hashes from `queued_blocks`, indexed by parent hash.
    by_parent: HashMap<block::Hash, Vec<block::Hash>>,
    /// Hashes from `queued_blocks`, indexed by block height.
    by_height: BTreeMap<block::Height, Vec<block::Hash>>,
}
```

#### `pub fn queue(&mut self, new: QueuedBlock)`

Add a block to the queue of blocks waiting for their requisite context to
become available.

1. Extract the `parent_hash`, `new_hash`, and `new_height` from `new.block`

2. Add `new` to `self.blocks` using `new_hash` as the key

3. Add `new_hash` to the set of hashes in
   `self.by_parent.entry(parent_hash).or_default()`

4. Add `new_hash` to the set of hashes in
   `self.by_height.entry(new_height).or_default()`

#### `pub fn dequeue_children(&mut self, parent: block::Hash) -> Vec<QueuedBlock>`

Dequeue the set of blocks waiting on `parent`.

1. Remove the set of hashes waiting on `parent` from `self.by_parent`

2. Remove and collect each block in that set of hashes from `self.blocks` as
   `queued_children`

3. For each `block` in `queued_children`, remove the associated `block.hash`
   from `self.by_height`

4. Return `queued_children`

#### `pub fn prune_by_height(&mut self, finalized_height: block::Height)`

Prune all queued blocks whose height is less than or equal to
`finalized_height`.

1. Split the `by_height` list at the finalized height, removing all heights
   at or below `finalized_height`

2. For each hash in the removed values of `by_height`
   - remove the corresponding block from `self.blocks`
   - remove the block's hash from the list of blocks waiting on
     `block.header.previous_block_hash` in `self.by_parent`
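
Step 1 maps naturally onto `BTreeMap::split_off`, as in this sketch (the field
access on `QueuedBlock` is an assumption):

```rust
// Sketch: prune queued blocks at or below the finalized height.
pub fn prune_by_height(&mut self, finalized_height: block::Height) {
    // 1. Only heights strictly above `finalized_height` stay queued.
    let keep = self.by_height.split_off(&block::Height(finalized_height.0 + 1));
    let prune = std::mem::replace(&mut self.by_height, keep);

    // 2. Drop the pruned blocks from the other indexes.
    for (_height, hashes) in prune {
        for hash in hashes {
            if let Some(queued) = self.blocks.remove(&hash) {
                let parent = queued.block.header.previous_block_hash;
                if let Some(waiting) = self.by_parent.get_mut(&parent) {
                    waiting.retain(|h| *h != hash);
                    if waiting.is_empty() {
                        self.by_parent.remove(&parent);
                    }
                }
            }
        }
    }
}
```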

### Summary

- `Chain` represents the non-finalized portion of a single chain

- `NonFinalizedState` represents the non-finalized portion of all chains

- `QueuedBlocks` represents all unverified blocks that are waiting for
  context to be available.

The state service uses the following entry points:

- `commit_block` when it receives new blocks.

- `finalize` to prevent chains in `NonFinalizedState` from growing beyond the reorg limit.

- [`FinalizedState.queue_and_commit_finalized_blocks`](#committing-finalized-blocks) on the blocks returned by `finalize`, to commit those finalized blocks to disk.

## Committing non-finalized blocks

New non-finalized blocks are committed as follows:

### `pub(super) fn queue_and_commit_non_finalized_blocks(&mut self, new: Arc<Block>) -> tokio::sync::oneshot::Receiver<block::Hash>`

1. If a duplicate block hash exists in a non-finalized chain, or the finalized chain,
   it has already been successfully verified:
   - create a new oneshot channel
   - immediately send `Err(DuplicateBlockHash)` and drop the sender
   - return the receiver

2. If a duplicate block hash exists in the queue:
   - Find the `QueuedBlock` for that existing duplicate block
   - create a new channel for the new request
   - replace the old sender in `queued_block` with the new sender
   - send `Err(DuplicateBlockHash)` through the old sender channel
   - continue to use the new receiver

3. Else, create a `QueuedBlock` for `block`:
   - Create a `tokio::sync::oneshot` channel
   - Use that channel to create a `QueuedBlock` for `block`
   - Add `block` to `self.queued_blocks`
   - continue to use the new receiver

4. If `block.header.previous_block_hash` is not present in the finalized or
   non-finalized state:
   - Return the receiver for the block's channel

5. Else, iteratively attempt to process queued blocks by their parent hash,
   starting with `block.header.previous_block_hash`

6. While there are recently committed parent hashes to process
   - Dequeue all blocks waiting on `parent` with `let queued_children =
     self.queued_blocks.dequeue_children(parent);`
   - For each queued `block`
     - **Run contextual validation** on `block`
       - contextual validation should check that the block height is
         equal to the previous block height plus 1. This check will
         reject blocks with invalid heights.
     - If the block fails contextual validation, send the result to the
       associated channel
     - Else, if the block's previous hash is the finalized tip, add it to the
       non-finalized state with `self.mem.commit_new_chain(block)`
     - Else, add the new block to an existing non-finalized chain or new fork
       with `self.mem.commit_block(block);`
     - Send `Ok(hash)` over the associated channel to indicate the block
       was successfully committed
     - Add `block.hash` to the set of recently committed parent hashes to
       process

7. While the length of the non-finalized portion of the best chain is greater
   than the reorg limit
   - Remove the lowest height block from the non-finalized state with
     `self.mem.finalize();`
   - Commit that block to the finalized state with
     `self.disk.commit_finalized_direct(finalized);`

8. Prune orphaned blocks from `self.queued_blocks` with
   `self.queued_blocks.prune_by_height(finalized_height);`

9. Return the receiver for the block's channel

## rocksdb data structures
[rocksdb]: #rocksdb

The current database format is documented in [Upgrading the State Database](../state-db-upgrades.md).

## Committing finalized blocks

If the parent block is not committed, add the block to an internal queue for
future processing. Otherwise, commit the block as described below, then
commit any queued children. (Although the checkpointer generates verified
blocks in order when it completes a checkpoint, the blocks are committed in the
response futures, so they may arrive out of order.)

Committing a block to the rocksdb state should be implemented as a wrapper around
a function also called by [`Request::CommitBlock`](#request-commit-block),
which should:

#### `pub(super) fn queue_and_commit_finalized_blocks(&mut self, queued_block: QueuedBlock)`

1. Obtain the highest entry of `hash_by_height` as `(old_height, old_tip)`.
   Check that `block`'s parent hash is `old_tip` and its height is
   `old_height + 1`, or panic. This check is performed as defense-in-depth to
   prevent database corruption, but it is the caller's responsibility (e.g. the
   zebra-state service's responsibility) to commit finalized blocks in order.

   The genesis block does not have a parent block. For genesis blocks,
   check that `block`'s parent hash is `null` (all zeroes) and its height is `0`.

2. Insert the block and transaction data into the relevant column families.

3. If the block is a genesis block, skip any transaction updates.

   (Due to a [bug in zcashd](https://github.com/ZcashFoundation/zebra/issues/559),
   genesis block anchors and transactions are ignored during validation.)

4. Update the block anchors, history tree, and chain value pools.

5. Iterate over the enumerated transactions in the block. For each transaction,
   update the relevant column families.

**Note**: The Sprout and Sapling anchors are the roots of the Sprout and
Sapling note commitment trees that have already been calculated for the last
transaction(s) in the block that have `JoinSplit`s in the Sprout case and/or
`Spend`/`Output` descriptions in the Sapling case. These should be passed as
fields in the `Commit*Block` requests.

Due to the coinbase maturity rules, the Sprout root is the empty root
for the first 100 blocks. (These rules are already implemented in contextual
validation and the anchor calculations.)

Hypothetically, if Sapling were activated from genesis, the specification would
require a Sapling anchor, but `zcashd` would ignore that anchor.

[`JoinSplit`]: https://doc-internal.zebra.zfnd.org/zebra_chain/sprout/struct.JoinSplit.html
[`Spend`]: https://doc-internal.zebra.zfnd.org/zebra_chain/sapling/spend/struct.Spend.html
[`Action`]: https://doc-internal.zebra.zfnd.org/zebra_chain/orchard/struct.Action.html

These updates can be performed in a batch, or without necessarily iterating
over all transactions, if the data is available by other means; they're
specified this way for clarity.

## Accessing previous blocks for contextual validation
[previous-block-context]: #previous-block-context

The state service performs contextual validation of blocks received via the
`CommitBlock` request. Since `CommitBlock` is synchronous, contextual validation
must also be performed synchronously.

The relevant chain for a block starts at its previous block, and follows the
chain of previous blocks back to the genesis block.

### Relevant chain iterator
[relevant-chain-iterator]: #relevant-chain-iterator

The relevant chain can be retrieved from the state service as follows:
* if the previous block is the finalized tip:
  * get recent blocks from the finalized state
* if the previous block is in the non-finalized state:
  * get recent blocks from the relevant chain, then
  * get recent blocks from the finalized state, if required

The relevant chain can start at any non-finalized block, or at the finalized tip.

### Relevant chain implementation
[relevant-chain-implementation]: #relevant-chain-implementation

The relevant chain is implemented as a `StateService` iterator, which returns
`Arc<Block>`s.

The chain iterator implements `ExactSizeIterator`, so Zebra can efficiently
assert that the relevant chain contains enough blocks to perform each contextual
validation check.

```rust
impl StateService {
    /// Return an iterator over the relevant chain of the block identified by
    /// `hash`.
    ///
    /// The block identified by `hash` is included in the chain of blocks yielded
    /// by the iterator.
    pub fn chain(&self, hash: block::Hash) -> Iter<'_> { ... }
}

impl Iterator for Iter<'_> {
    type Item = Arc<Block>;
    ...
}

impl ExactSizeIterator for Iter<'_> { ... }
impl FusedIterator for Iter<'_> {}
```
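
For example, the difficulty checks in the next RFC need the previous 28 blocks,
which a caller might gather like this (a sketch; the length check relies on the
`ExactSizeIterator` impl):

```rust
// Sketch: collect the difficulty adjustment context from the relevant chain.
let relevant_chain = state.chain(block.header.previous_block_hash);
assert!(
    relevant_chain.len() >= 28,
    "contextual validation requires at least 28 relevant blocks",
);

let context: Vec<Arc<Block>> = relevant_chain.take(28).collect();
```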

For further details, see [PR 1271].

[PR 1271]: https://github.com/ZcashFoundation/zebra/pull/1271

## Request / Response API
[request-response]: #request-response

The state API is provided by a pair of `Request`/`Response` enums. Each
`Request` variant corresponds to particular `Response` variants, and it's
fine (and encouraged) for caller code to unwrap the expected variants with
`unreachable!` on the unexpected variants. This is slightly inconvenient, but
it means that we have a unified state interface with unified backpressure.

This API includes both write and read calls. Spotting `Commit` requests in
code review should not be a problem, but in the future, if we need to
restrict access to write calls, we could implement a wrapper service that
rejects them, and export "read" and "write" frontends to the same inner service.

### `Request::CommitBlock`
[request-commit-block]: #request-commit-block

```rust
CommitBlock {
    block: Arc<Block>,
    sprout_anchor: sprout::tree::Root,
    sapling_anchor: sapling::tree::Root,
}
```

Performs contextual validation of the given block, committing it to the state
if successful. Returns `Response::Added(block::Hash)` with the hash of
the newly committed block, or an error.

### `Request::CommitFinalizedBlock`
[request-commit-finalized-block]: #request-commit-finalized-block

```rust
CommitFinalizedBlock {
    block: Arc<Block>,
    sprout_anchor: sprout::tree::Root,
    sapling_anchor: sapling::tree::Root,
}
```

Commits a finalized block to the rocksdb state, skipping contextual validation.
This is exposed for use in checkpointing, which produces in-order finalized
blocks. Returns `Response::Added(block::Hash)` with the hash of the
committed block if successful.

### `Request::Depth(block::Hash)`
[request-depth]: #request-depth

Computes the depth in the best chain of the block identified by the given
hash, returning

- `Response::Depth(Some(depth))` if the block is in the best chain;
- `Response::Depth(None)` otherwise.

Implemented by querying:

- (non-finalized) the `height_by_hash` map in the best chain, and
- (finalized) the `height_by_hash` tree

### `Request::Tip`
[request-tip]: #request-tip

Returns `Response::Tip(block::Hash)` with the current best chain tip.

Implemented by querying:

- (non-finalized) the highest height block in the best chain
- (finalized) the highest height block in the `hash_by_height` tree, if the
  non-finalized state is empty

### `Request::BlockLocator`
[request-block-locator]: #request-block-locator

Returns `Response::BlockLocator(Vec<block::Hash>)` with hashes starting from
the current chain tip and reaching backwards towards the genesis block. The
first hash is the best chain tip. The last hash is the tip of the finalized
portion of the state. If the finalized and non-finalized states are both
empty, the block locator is also empty.

This can be used by the sync component to request hashes of subsequent
blocks.

Implemented by querying:

- (non-finalized) the `hash_by_height` map in the best chain
- (finalized) the `hash_by_height` tree.

### `Request::Transaction(transaction::Hash)`
[request-transaction]: #request-transaction

Returns

- `Response::Transaction(Some(Transaction))` if the transaction identified by
  the given hash is contained in the state;

- `Response::Transaction(None)` if the transaction identified by the given
  hash is not contained in the state.

Implemented by querying:

- (non-finalized) the `tx_loc_by_hash` map of each chain (to get the location
  of the block that contains the transaction), starting with the best chain,
  and then that chain's `blocks` (to get the block containing the transaction
  data)
- (finalized) the `tx_loc_by_hash` tree (to get the block that contains the
  transaction) and then the `block_header_by_height` tree (to get the block
  containing the transaction data), if the transaction is not in any
  non-finalized chain

### `Request::Block(block::Hash)`
[request-block]: #request-block

Returns

- `Response::Block(Some(Arc<Block>))` if the block identified by the given
  hash is contained in the state;

- `Response::Block(None)` if the block identified by the given hash is not
  contained in the state.

Implemented by querying:

- (non-finalized) the `height_by_hash` map of each chain, starting with the
  best chain, and then that chain's `blocks` (to get the block data)
- (finalized) the `height_by_hash` tree (to get the block height) and then the
  `block_header_by_height` tree (to get the block data), if the block is not in
  any non-finalized chain

### `Request::AwaitSpendableUtxo { outpoint: OutPoint, spend_height: Height, spend_restriction: SpendRestriction }`

Returns

- `Response::SpendableUtxo(transparent::Output)`

Implemented by querying:

- (non-finalized) if any `Chain` contains `OutPoint` in its `created_utxos`,
  return the `Utxo` for `OutPoint`;
- (finalized) else if `OutPoint` is in `utxos_by_outpoint`,
  return the `Utxo` for `OutPoint`;
- else wait for `OutPoint` to be created as described in [RFC0004].

Then validating:

- check the transparent coinbase spend restrictions specified in [RFC0004];
- if the restrictions are satisfied, return the response;
- if the spend is invalid, drop the request (and the caller will time out).

[RFC0004]: https://zebra.zfnd.org/dev/rfcs/0004-asynchronous-script-verification.html

# Drawbacks
[drawbacks]: #drawbacks

- Restarts can cause `zebrad` to redownload up to the last one hundred blocks
  it verified in the best chain, and potentially some recent side-chain blocks.

- The service interface puts some extra responsibility on callers to ensure
  it is used correctly, and does not verify correct usage at compile
  time.

- The service API is verbose and requires manually unwrapping enums.

- We do not handle reorgs the same way `zcashd` does, and could in theory need
  to delete our entire on-disk state and resync the chain in some
  pathological reorg cases.
  - Testnet rollbacks are infrequent, but possible, due to bugs in testnet
    releases. Each testnet rollback will require additional state service code.

@@ -0,0 +1,784 @@

# Contextual Difficulty Validation

- Feature Name: contextual_difficulty_validation
- Start Date: 2020-11-02
- Design PR: [ZcashFoundation/zebra#0000](https://github.com/ZcashFoundation/zebra/pull/0000)
- Zebra Issue: [ZcashFoundation/zebra#1036](https://github.com/ZcashFoundation/zebra/issues/1036)

# Summary
[summary]: #summary

Zcash nodes use a Proof of Work algorithm to reach consensus on the best chain.
Valid blocks must reach a difficulty threshold, which is adjusted after every
block. The difficulty adjustment calculations depend on the difficulties and
times of recent blocks. So Zebra performs contextual validation [RFC2] of
difficulty adjustments as part of committing blocks to the state.

[RFC2]: ./0002-parallel-verification.md

# Motivation
[motivation]: #motivation

The Zcash block difficulty adjustment is one of the core Zcash consensus rules.
Zebra must implement this consensus rule to make sure that its cached chain
state is consistent with the consensus of Zcash nodes.

Difficulty adjustment is also a significant part of Zcash's security guarantees.
It ensures that the network continues to resist takeover attacks, even as the
number of Zcash miners grows.

Difficulty adjustment also ensures that blocks are regularly spaced, which
allows users to create and finalise transactions with short, consistent delays.
These predictable delays contribute to Zcash's usability.

# Definitions
[definitions]: #definitions

Difficulty:

* **hash difficulty**: An arbitrary ranking of blocks, based on their hashes.
  Defined as the hash of the block, interpreted as a big-endian 256-bit number.
  Numerically smaller difficulties are harder to generate.

* **difficulty threshold**: The easiest valid hash difficulty for a block.
  Numerically lower thresholds are harder to satisfy.

* **difficulty filter**: A block passes the difficulty filter if the hash
  difficulty is less than or equal to the difficulty threshold (based on the
  block's difficulty field).

* **block work**: The approximate amount of work required for a miner to generate
  a block hash that passes the difficulty filter. The number of block header
  attempts and the mining time are proportional to the work value. Numerically
  higher work values represent longer processing times.

* **averaging window**: The 17 most recent blocks in the relevant chain.

* **median block span**: The 11 most recent blocks from a chosen tip, typically
  the relevant tip.

* **target spacing**: 150 seconds per block before Blossom activation, 75 seconds
  per block from Blossom activation onwards.

* **adjusted difficulty**: After each block is mined, the difficulty threshold of
  the next block is adjusted, to keep the block gap close to the target spacing.

* **mean target difficulty**: The arithmetic mean of the difficulty thresholds
  of the blocks in the averaging window.

* **median timespan**: The average number of seconds taken to generate the blocks
  in the averaging window. Calculated using the difference of median block spans
  in and after the averaging window, then damped and bounded.

* **target timespan**: The target spacing for an averaging window's worth of
  blocks.

Consensus:

* **consensus rule**: A protocol rule which all nodes must apply consistently,
  so they can converge on the same chain fork.

* **structural/semantic/contextual verification**: as defined in [RFC2].

State:

* **block chain**: A sequence of valid blocks linked by inclusion of the
  previous block hash in the subsequent block. Chains are rooted at the
  genesis block and extend to a tip.

* **relevant chain**: The relevant chain for a block starts at the previous
  block, and extends back to genesis.

* **relevant tip**: The tip of the relevant chain.

* **non-finalized state**: State data corresponding to blocks above the reorg
  limit. This data can change in the event of a chain reorg.

* **finalized state**: State data corresponding to blocks below the reorg
  limit. This data cannot change in the event of a chain reorg.

* **non-finalized tips**: The highest blocks in each non-finalized chain. These
  tips might be at different heights.

* **finalized tip**: The highest block in the finalized state. The tip of the best
  chain is usually 100 blocks (the reorg limit) above the finalized tip. But it can
  be lower during the initial sync, and after a chain reorganization, if the new
  best chain is at a lower height.

# Guide-level explanation
[guide-level-explanation]: #guide-level-explanation

Zcash's difficulty consensus rules are similar to Bitcoin's.

Each block contains a **difficulty threshold** in its header. The hash of the
block header must be less than this **difficulty threshold**, when interpreted
as a 256-bit integer in big-endian byte order. This context-free semantic
verification check is performed by the `BlockVerifier`.

After each block, the difficulty threshold is adjusted so that the block gap is
close to the target spacing. On average, harder blocks take longer to mine, and
easier blocks take less time.

The **adjusted difficulty** for the next block is calculated using the difficulty
thresholds and times of recent blocks. Zcash uses the most recent 28 blocks in
the **relevant chain** in its difficulty adjustment calculations.

The difficulty adjustment calculations adjust the **mean target difficulty**,
based on the difference between the **median timespan** and the
**target timespan**. If the median timespan is less than the target timespan, the
next block is harder to mine.

The `StateService` calculates the adjusted difficulty using the context from the
**relevant chain**. The difficulty contextual verification check ensures that the
**difficulty threshold** of the next block is equal to the **adjusted difficulty**
for its relevant chain.

## State service interface changes
[state-service-interface]: #state-service-interface

Contextual validation accesses recent blocks. So we modify the internal state
service interface to provide an abstraction for accessing recent blocks.

### The relevant chain
[relevant-chain]: #relevant-chain

The relevant chain consists of the ancestors of a block, starting with its
parent block, and extending back to the genesis block.

In Zebra, recent blocks are part of the non-finalized state, which can contain
multiple chains. Past the reorganization limit, Zebra commits a single chain to
the finalized state.

The relevant chain can start at any block in the non-finalized state, or at the
finalized tip. See [RFC5] for details.

[RFC5]: ./0005-state-updates.md

## Contextual validation design
[contextual-validation-design]: #contextual-validation-design

Contextual validation is performed synchronously by the state service, as soon
as the state has:
* received the semantically valid next block (via `CommitBlock`), and
* committed the previous block.

The difficulty adjustment check calculates the correct adjusted difficulty
threshold value for a candidate block, and ensures that the block's
`difficulty_threshold` field is equal to that value.

This check is implemented as follows:

### Difficulty adjustment
[difficulty-adjustment]: #difficulty-adjustment

The block difficulty threshold is adjusted by scaling the mean target difficulty
by the median timespan.

On Testnet, if a long time has elapsed since the previous block, the difficulty
adjustment is modified to allow minimum-difficulty blocks.

#### Mean target difficulty
[mean-target-difficulty]: #mean-target-difficulty

The mean target difficulty is the arithmetic mean of the difficulty
thresholds of the `PoWAveragingWindow` (17) most recent blocks in the relevant
chain.

Zcash uses block difficulty thresholds in its difficulty adjustment calculations.
(Block hashes are not used for difficulty adjustment.)
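
A sketch of this mean over the `AdjustedDifficulty` context defined later in
this RFC (the summation, division, and zero `Default` value on
`ExpandedDifficulty` are assumed to be implemented):

```rust
// Sketch: arithmetic mean of the 17 most recent difficulty thresholds,
// which come first in the reverse-height-ordered context.
fn mean_target_difficulty(&self) -> ExpandedDifficulty {
    let sum = self.relevant_difficulty_thresholds[0..POW_AVERAGING_WINDOW]
        .iter()
        .map(|d| d.to_expanded().expect("difficulty thresholds are valid"))
        .fold(ExpandedDifficulty::default(), |acc, d| acc + d);

    sum / POW_AVERAGING_WINDOW
}
```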

#### Median timespan
[median-timespan]: #median-timespan

The median timespan is the average number of seconds taken to generate the 17
blocks in the averaging window.

It is calculated by taking the difference of the median times for:
* the relevant tip: the `PoWMedianBlockSpan` (11) most recent blocks, and
* the 11 blocks after the 17-block `PoWAveragingWindow`: that is, blocks 18-28
  behind the relevant tip.

The median timespan is damped by the `PoWDampingFactor`, and bounded by
`PoWMaxAdjustDown` and `PoWMaxAdjustUp`.
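
A sketch of the raw (undamped) timespan, assuming `relevant_times` is in
reverse height order as specified below, and a small assumed `median` helper:

```rust
// Sketch: difference of the median times at the two ends of the window.
fn median_timespan(&self) -> i64 {
    // Blocks 1-11 behind the relevant tip.
    let newer_median = median(&self.relevant_times[0..POW_MEDIAN_BLOCK_SPAN]);
    // Blocks 18-28 behind the relevant tip.
    let older_median = median(&self.relevant_times[POW_AVERAGING_WINDOW..]);

    // Damping by PoWDampingFactor and bounding by PoWMaxAdjustDown and
    // PoWMaxAdjustUp are applied to this raw value (not shown here).
    newer_median.timestamp() - older_median.timestamp()
}

/// Assumed helper: the median of a small slice of times.
fn median(times: &[DateTime<Utc>]) -> DateTime<Utc> {
    let mut times = times.to_vec();
    times.sort();
    times[times.len() / 2]
}
```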

#### Test network minimum difficulty blocks
[test-net-min-difficulty]: #test-net-min-difficulty

If there is a large gap after a Testnet block, the next block becomes a minimum
difficulty block. Testnet minimum difficulty blocks have their
`difficulty_threshold` set to the minimum difficulty for Testnet.

#### Block difficulty threshold
[block-difficulty-threshold]: #block-difficulty-threshold

The block difficulty threshold for the next block is calculated by scaling the
mean target difficulty by the ratio between the median timespan and the averaging
window timespan.

The result of this calculation is limited by `ToCompact(PoWLimit(network))`, a
per-network minimum block difficulty. This minimum difficulty is also used when
a Testnet block's time gap exceeds the minimum difficulty gap.
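
Putting the pieces together, the whole adjustment might be sketched as follows;
the method names, `target_difficulty_limit`, and `to_compact` are all
assumptions, and the mixed-type arithmetic stands in for the real 256-bit
operations:

```rust
// Sketch: scale the mean target difficulty by the damped timespan ratio,
// then limit the result by the per-network proof-of-work limit.
fn expected_difficulty_threshold(&self) -> CompactDifficulty {
    let mean_target = self.mean_target_difficulty();
    let damped_timespan = self.median_timespan_bounded();
    let averaging_window_timespan = POW_AVERAGING_WINDOW as i64 * self.target_spacing_seconds();

    // Divide before multiplying, matching zcashd's order of operations.
    let threshold = mean_target / averaging_window_timespan * damped_timespan;

    // `ToCompact(PoWLimit(network))` caps how easy the threshold can get.
    std::cmp::min(threshold, ExpandedDifficulty::target_difficulty_limit(self.network))
        .to_compact()
}
```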

# Reference-level explanation
[reference-level-explanation]: #reference-level-explanation

## Contextual validation
[contextual-validation]: #contextual-validation

Contextual validation is implemented in
`StateService::check_contextual_validity`, which calls a separate function for
each contextual validity check.

In Zebra, contextual validation starts after Canopy activation, so we can assume
that the relevant chain contains at least 28 blocks on Mainnet and Testnet. (And
panic if this assumption does not hold at runtime.)

## Fundamental data types
[fundamental-data-types]: #fundamental-data-types

Zebra is free to implement its difficulty calculations in any way that produces
equivalent results to `zcashd` and the Zcash specification.

### Difficulty

In Zcash block headers, difficulty thresholds are stored as a "compact" `nBits`
value, which uses a custom 32-bit floating-point encoding. Zebra calls this type
`CompactDifficulty`.

In Zcash, difficulty threshold calculations are performed using unsigned 256-bit
integers. Rust has no standard `u256` type, but there are a number of crates
available which implement the required operations on 256-bit integers. Zebra
abstracts over the chosen `u256` implementation using its `ExpandedDifficulty`
type.

### Time

In Zcash, time values are unsigned 32-bit integers. But the difficulty adjustment
calculations include time subtractions which could overflow an unsigned type, so
they are performed using signed 64-bit integers in `zcashd`.

Zebra parses the `header.time` field into a `DateTime<Utc>`. Conveniently, the
`chrono::DateTime<_>::timestamp()` function returns `i64` values. So Zebra can do
its signed time calculations using `i64` values internally.

Note: `i32` is an unsuitable type for signed time calculations. It is
theoretically possible for the time gap between blocks to be larger than
`i32::MAX`, because those times are provided by miners. Even if the median time
gap is that large, the bounds and minimum difficulty in Zcash's difficulty
adjustment algorithm will preserve a reasonable difficulty threshold. So Zebra
must support this edge case.
|
||||
|
||||
### Consensus-Critical Operations

The order of operations and overflow semantics for 256-bit integers can be
consensus-critical.

For example:
- dividing before multiplying discards lower-order bits, but
- multiplying before dividing can cause overflow.

Zebra's implementation should try to match zcashd's order of operations and
overflow handling as closely as possible.

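To illustrate the trade-off, here is a minimal sketch using `u128` as a
stand-in for a `u256` type. The specific values are illustrative only:

```rust
fn main() {
    let mean_target: u128 = u128::MAX / 4; // a large difficulty-like value
    let timespan: u128 = 3366; // a bounded timespan, in seconds
    let window_timespan: u128 = 2550; // the averaging window timespan

    // Dividing first can never overflow, but discards lower-order bits:
    let divide_first = (mean_target / window_timespan) * timespan;

    // Multiplying first keeps full precision, but overflows for large values:
    let multiply_first = mean_target.checked_mul(timespan);

    println!("divide first:   {divide_first}");
    println!("multiply first: {multiply_first:?}"); // None: the multiply overflowed
}
```

Both orders are "correct" arithmetic, but they produce different results, which
is why the implementation must match `zcashd` exactly.
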
## Difficulty adjustment check
[difficulty-adjustment-check]: #difficulty-adjustment-check

The difficulty adjustment check calculates the correct difficulty threshold
value for a candidate block, and ensures that the block's
`difficulty_threshold` field is equal to that value.

### Context data type
[context-data-type]: #context-data-type

The difficulty adjustment functions use a context consisting of the difficulties
and times from the previous 28 blocks in the relevant chain.

These functions also use the candidate block's `height` and `network`.

To make these functions more ergonomic, we create an `AdjustedDifficulty`
type, and implement the difficulty adjustment calculations as methods on that
type.

```rust
/// The averaging window for difficulty threshold arithmetic mean calculations.
///
/// `PoWAveragingWindow` in the Zcash specification.
pub const POW_AVERAGING_WINDOW: usize = 17;

/// The median block span for time median calculations.
///
/// `PoWMedianBlockSpan` in the Zcash specification.
pub const POW_MEDIAN_BLOCK_SPAN: usize = 11;

/// Contains the context needed to calculate the adjusted difficulty for a block.
struct AdjustedDifficulty {
    candidate_time: DateTime<Utc>,
    candidate_height: block::Height,
    network: Network,
    relevant_difficulty_thresholds: [CompactDifficulty; POW_AVERAGING_WINDOW + POW_MEDIAN_BLOCK_SPAN],
    relevant_times: [DateTime<Utc>; POW_AVERAGING_WINDOW + POW_MEDIAN_BLOCK_SPAN],
}
```

We implement some initialiser methods on `AdjustedDifficulty` for convenience.
We might want to validate downloaded headers in future, so we include a
`new_from_header` initialiser.

```rust
/// Initialise and return a new `AdjustedDifficulty` using a `candidate_block`,
/// `network`, and a `context`.
///
/// The `context` contains the previous
/// `PoWAveragingWindow + PoWMedianBlockSpan` (28) `difficulty_threshold`s and
/// `time`s from the relevant chain for `candidate_block`, in reverse height
/// order, starting with the previous block.
///
/// Note that the `time`s might not be in reverse chronological order, because
/// block times are supplied by miners.
///
/// Panics:
/// If the `context` contains fewer than 28 items.
pub fn new_from_block<C>(candidate_block: &Block,
                         network: Network,
                         context: C)
                         -> AdjustedDifficulty
where
    C: IntoIterator<Item = (CompactDifficulty, DateTime<Utc>)>,
{ ... }

/// Initialise and return a new `AdjustedDifficulty` using a
/// `candidate_header`, `previous_block_height`, `network`, and a `context`.
///
/// Designed for use when validating block headers, where the full block has not
/// been downloaded yet.
///
/// See `new_from_block` for detailed information about the `context`.
///
/// Panics:
/// If the context contains fewer than 28 items.
pub fn new_from_header<C>(candidate_header: &block::Header,
                          previous_block_height: block::Height,
                          network: Network,
                          context: C)
                          -> AdjustedDifficulty
where
    C: IntoIterator<Item = (CompactDifficulty, DateTime<Utc>)>,
{ ... }
```

#### Memory usage note

Copying `CompactDifficulty` values into the `AdjustedDifficulty` struct uses
less memory than borrowing those values. `CompactDifficulty` values are 32 bits,
but pointers are 64-bit on most modern machines. (And since they all come from
different blocks, we need a pointer to each individual value.)

Borrowing `DateTime<Utc>` values might use slightly less memory than copying
them - but that depends on the exact way that Rust stores associated types
derived from a generic argument.

In any case, the overall size of each `AdjustedDifficulty` is only a few
hundred bytes. If it turns up in profiles, we can look at borrowing the block
header data.

### Difficulty adjustment check implementation
[difficulty-adjustment-check-implementation]: #difficulty-adjustment-check-implementation

The difficulty adjustment check ensures that the
`candidate_difficulty_threshold` is equal to the `difficulty_threshold` value
calculated using `AdjustedDifficulty::adjusted_difficulty_threshold`.

We implement this function:
```rust
/// Validate the `difficulty_threshold` from a candidate block's header, based
/// on an `expected_difficulty` for that block.
///
/// Uses `expected_difficulty` to calculate the expected `ToCompact(Threshold())`
/// value, then compares that value to the `difficulty_threshold`. Returns
/// `Ok(())` if the values are equal.
pub fn difficulty_threshold_is_valid(difficulty_threshold: CompactDifficulty,
                                     expected_difficulty: AdjustedDifficulty)
                                     -> Result<(), ValidateContextError> { ... }
```

[Issue 1166]: https://github.com/ZcashFoundation/zebra/issues/1166

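As an illustration, a minimal sketch of the function body might look like the
following. The `InvalidDifficultyThreshold` error variant is a hypothetical
name; the expected threshold calculation does all the real work:

```rust
pub fn difficulty_threshold_is_valid(
    difficulty_threshold: CompactDifficulty,
    expected_difficulty: AdjustedDifficulty,
) -> Result<(), ValidateContextError> {
    // The candidate header is valid if it commits to exactly the
    // `ToCompact(Threshold())` value calculated from its context.
    if difficulty_threshold == expected_difficulty.expected_difficulty_threshold() {
        Ok(())
    } else {
        Err(ValidateContextError::InvalidDifficultyThreshold)
    }
}
```
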
### Mean target difficulty calculation
[mean-target-difficulty-calculation]: #mean-target-difficulty-calculation

The mean target difficulty is the arithmetic mean of the difficulty
thresholds of the `PoWAveragingWindow` (17) most recent blocks in the relevant
chain.

We implement this method on `AdjustedDifficulty`:
```rust
/// Calculate the arithmetic mean of the averaging window thresholds: the
/// expanded `difficulty_threshold`s from the previous `PoWAveragingWindow` (17)
/// blocks in the relevant chain.
///
/// Implements `MeanTarget` from the Zcash specification.
fn mean_target_difficulty(&self) -> ExpandedDifficulty { ... }
```

#### Implementation notes

Since the `PoWLimit`s are `2^251 − 1` for Testnet, and `2^243 − 1` for Mainnet,
the sum of these difficulty thresholds will be less than or equal to
`(2^251 − 1)*17 = 2^255 + 2^251 - 17`. Therefore, this calculation cannot
overflow a `u256` value. So the function is infallible.

In Zebra, contextual validation starts after Canopy activation, so we can assume
that the relevant chain contains at least 17 blocks. Therefore, the `PoWLimit`
case of `MeanTarget()` in the Zcash specification is unreachable.

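A minimal sketch of this method, assuming hypothetical API names: a
`to_expanded()` conversion on `CompactDifficulty`, and `From<u64>`, addition,
and scalar division on `ExpandedDifficulty`:

```rust
fn mean_target_difficulty(&self) -> ExpandedDifficulty {
    // The context is in reverse height order, so the 17 averaging window
    // blocks are the first 17 items.
    let averaging_window = &self.relevant_difficulty_thresholds[0..POW_AVERAGING_WINDOW];

    // The sum of 17 values that are each at most 2^251 − 1 fits in a u256,
    // so this addition can't overflow.
    let sum = averaging_window
        .iter()
        .map(|threshold| threshold.to_expanded().expect("structurally valid header"))
        .fold(ExpandedDifficulty::from(0_u64), |acc, expanded| acc + expanded);

    sum / POW_AVERAGING_WINDOW as u64
}
```
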
### Median timespan calculation
[median-timespan-calculation]: #median-timespan-calculation

The median timespan is the difference of the median times for:
* the relevant tip: the `PoWMedianBlockSpan` (11) most recent blocks, and
* the 11 blocks after the 17-block `PoWAveragingWindow`: that is, blocks 18-28
  behind the relevant tip.

(The median timespan is known as the `ActualTimespan` in the Zcash specification,
but this terminology is confusing, because it is a difference of medians, rather
than any "actual" elapsed time.)

Zebra implements the median timespan using the following methods on
`AdjustedDifficulty`:
```rust
/// Calculate the bounded median timespan. The median timespan is the
/// difference of medians of the timespan times, which are the `time`s from
/// the previous `PoWAveragingWindow + PoWMedianBlockSpan` (28) blocks in the
/// relevant chain.
///
/// Uses the candidate block's `height` and `network` to calculate the
/// `AveragingWindowTimespan` for that block.
///
/// The median timespan is damped by the `PoWDampingFactor`, and bounded by
/// `PoWMaxAdjustDown` and `PoWMaxAdjustUp`.
///
/// Implements `ActualTimespanBounded` from the Zcash specification.
///
/// Note: This calculation only uses `PoWMedianBlockSpan` (11) times at the
/// start and end of the timespan times. The timespan times `[11..=16]` are
/// ignored.
fn median_timespan_bounded(&self) -> Duration { ... }

/// Calculate the median timespan. The median timespan is the difference of
/// medians of the timespan times, which are the `time`s from the previous
/// `PoWAveragingWindow + PoWMedianBlockSpan` (28) blocks in the relevant chain.
///
/// Implements `ActualTimespan` from the Zcash specification.
///
/// See `median_timespan_bounded` for details.
fn median_timespan(&self) -> Duration { ... }

/// Calculate the median of the `median_block_span_times`: the `time`s from a
/// slice of `PoWMedianBlockSpan` (11) blocks in the relevant chain.
///
/// Implements `MedianTime` from the Zcash specification.
fn median_time(mut median_block_span_times: [DateTime<Utc>; POW_MEDIAN_BLOCK_SPAN])
               -> DateTime<Utc> { ... }
```

Zebra implements the `AveragingWindowTimespan` using the following methods on
`NetworkUpgrade`:
```rust
impl NetworkUpgrade {
    /// Returns the `AveragingWindowTimespan` for the network upgrade.
    pub fn averaging_window_timespan(&self) -> Duration { ... }

    /// Returns the `AveragingWindowTimespan` for `network` and `height`.
    pub fn averaging_window_timespan_for_height(network: Network,
                                                height: block::Height)
                                                -> Duration { ... }
}
```

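As a sketch, `averaging_window_timespan` is just the target spacing scaled by
the window size. This assumes a `target_spacing` method that returns the
`PoWTargetSpacing` for the network upgrade (150 seconds before Blossom, 75
seconds from Blossom onwards):

```rust
impl NetworkUpgrade {
    /// Returns the `AveragingWindowTimespan` for the network upgrade.
    pub fn averaging_window_timespan(&self) -> Duration {
        // `PoWAveragingWindow` blocks at the target spacing:
        // 17 * 150s = 2550s before Blossom, 17 * 75s = 1275s afterwards.
        self.target_spacing() * POW_AVERAGING_WINDOW as i32
    }
}
```
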
#### Implementation notes

In Zebra, contextual validation starts after Canopy activation, so we can assume
that the relevant chain contains at least 28 blocks. Therefore:
* `max(0, height − PoWMedianBlockSpan)` in the `MedianTime()` calculation
  simplifies to `height − PoWMedianBlockSpan`, and
* there is always an odd number of blocks in `MedianTime()`, so the median is
  always the exact middle of the sequence.

Therefore, the function is infallible.

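Because the median is always the exact middle of an 11-item sequence, a
minimal sketch of `median_time` is just a sort and an index:

```rust
fn median_time(mut median_block_span_times: [DateTime<Utc>; POW_MEDIAN_BLOCK_SPAN])
               -> DateTime<Utc> {
    // Block times are supplied by miners, so they might not already be in
    // chronological order.
    median_block_span_times.sort_unstable();

    // 11 items have indexes 0..=10, so index 5 is the exact middle.
    median_block_span_times[POW_MEDIAN_BLOCK_SPAN / 2]
}
```
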
### Test network minimum difficulty calculation
[test-net-min-difficulty-calculation]: #test-net-min-difficulty-calculation

A block is a Testnet minimum difficulty block if:
* the block is a Testnet block,
* the block's height is 299188 or greater, and
* the time gap from the previous block is greater than the Testnet minimum
  difficulty gap, which is 6 times the target spacing for the block's height.
  (The target spacing was halved from the Blossom network upgrade onwards.)

The difficulty adjustment is modified for Testnet minimum difficulty blocks as
follows:
* the difficulty threshold in the block header is set to the Testnet minimum
  difficulty threshold, `ToCompact(PoWLimit(network))`.

Since the new difficulty changes the block header, Testnet blocks can only
satisfy one of the alternate difficulty adjustment rules:
* if the time gap is less than or equal to the Testnet minimum difficulty gap:
  the difficulty threshold is calculated using the default difficulty adjustment
  rule,
* if the time gap is greater than the Testnet minimum difficulty gap:
  the difficulty threshold is the Testnet minimum difficulty threshold.

See [ZIP-208] for details.

Note: some older versions of ZIPs 205 and 208 incorrectly said that:
* the time gap threshold uses an "at least" check (it is strictly greater than),
* the minimum difficulty threshold value was `PoWLimit`
  (it is `ToCompact(PoWLimit)`),
* the `difficulty_threshold` (`nBits`) field is not modified in Testnet minimum
  difficulty blocks (the field is modified), and
* the Testnet minimum difficulty value is not used to calculate future difficulty
  adjustments (the modified value is used in future adjustments).

ZIPs 205 and 208 were fixed on 14 November 2020, see [ZIP PR 417] and
[ZIP commit 806076c] for details.

[ZIP-208]: https://zips.z.cash/zip-0208#minimum-difficulty-blocks-on-the-test-network
[ZIP PR 417]: https://github.com/zcash/zips/pull/417
[ZIP commit 806076c]: https://github.com/zcash/zips/commit/806076c48c9834fd9941b940a32310d737975a3a

#### Test network minimum difficulty implementation
[test-net-min-difficulty-implementation]: #test-net-min-difficulty-implementation

The Testnet minimum difficulty calculation uses the existing
`NetworkUpgrade::minimum_difficulty_spacing_for_height` function to calculate the
minimum difficulty gap.

We implement this method on `NetworkUpgrade`:
```rust
/// Returns true if the gap between `block_time` and `previous_block_time` is
/// greater than the Testnet minimum difficulty time gap. This time gap
/// depends on the `network` and `block_height`.
///
/// Returns false on Mainnet, when `block_height` is less than the minimum
/// difficulty start height, and when the time gap is too small.
///
/// `block_time` can be less than, equal to, or greater than
/// `previous_block_time`, because block times are provided by miners.
///
/// Implements the Testnet minimum difficulty adjustment from ZIPs 205 and 208.
///
/// Spec Note: Some parts of ZIPs 205 and 208 previously specified an incorrect
/// check for the time gap. This function implements the correct "greater than"
/// check.
pub fn is_testnet_min_difficulty_block(
    network: Network,
    block_height: block::Height,
    block_time: DateTime<Utc>,
    previous_block_time: DateTime<Utc>,
) -> bool { ... }
```

#### Implementation notes

In Zcash, the Testnet minimum difficulty rule starts at block 299188, and in
Zebra, contextual validation starts after Canopy activation. So we can assume
that there is always a previous block.

Therefore, this function is infallible.

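A minimal sketch of the function body, assuming that
`minimum_difficulty_spacing_for_height` returns `Some(gap)` with the full
6-times-target-spacing gap on Testnet at or above the start height, and `None`
otherwise:

```rust
pub fn is_testnet_min_difficulty_block(
    network: Network,
    block_height: block::Height,
    block_time: DateTime<Utc>,
    previous_block_time: DateTime<Utc>,
) -> bool {
    match NetworkUpgrade::minimum_difficulty_spacing_for_height(network, block_height) {
        // Mainnet, or a Testnet block below the minimum difficulty start height.
        None => false,
        // The check is strictly "greater than", matching the corrected ZIPs.
        // This subtraction can be negative, because block times come from miners.
        Some(min_difficulty_gap) => block_time - previous_block_time > min_difficulty_gap,
    }
}
```
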
### Block difficulty threshold calculation
[block-difficulty-threshold-calculation]: #block-difficulty-threshold-calculation

The block difficulty threshold for the next block is calculated by scaling the
mean target difficulty by the ratio between the median timespan and the averaging
window timespan.

The result of the scaled threshold calculation is limited by
`ToCompact(PoWLimit(network))`, a per-network minimum block difficulty. This
minimum difficulty is also used when a Testnet block's time gap exceeds the
minimum difficulty gap. We use the existing
`ExpandedDifficulty::target_difficulty_limit` function to calculate the value of
`ToCompact(PoWLimit(network))`.

In Zebra, contextual validation starts after Canopy activation, so the genesis
case of `Threshold()` in the Zcash specification is unreachable.

#### Block difficulty threshold implementation
[block-difficulty-threshold-implementation]: #block-difficulty-threshold-implementation

We implement these methods on `AdjustedDifficulty`:
```rust
/// Calculate the expected `difficulty_threshold` for a candidate block, based
/// on the `candidate_time`, `candidate_height`, `network`, and the
/// `difficulty_threshold`s and `time`s from the previous
/// `PoWAveragingWindow + PoWMedianBlockSpan` (28) blocks in the relevant chain.
///
/// Implements `ThresholdBits` from the Zcash specification, and the Testnet
/// minimum difficulty adjustment from ZIPs 205 and 208.
pub fn expected_difficulty_threshold(&self) -> CompactDifficulty { ... }

/// Calculate the `difficulty_threshold` for a candidate block, based on the
/// `candidate_height`, `network`, and the relevant `difficulty_threshold`s and
/// `time`s.
///
/// See `expected_difficulty_threshold` for details.
///
/// Implements `ThresholdBits` from the Zcash specification. (Which excludes the
/// Testnet minimum difficulty adjustment.)
fn threshold_bits(&self) -> CompactDifficulty { ... }
```

#### Implementation notes

Since:
* the `PoWLimit`s are `2^251 − 1` for Testnet, and `2^243 − 1` for Mainnet,
* the `ActualTimespanBounded` can be at most `MaxActualTimespan`, which is
  `floor(PoWAveragingWindow * PoWTargetSpacing * (1 + PoWMaxAdjustDown))` or
  `floor(17 * 150 * (1 + 32/100)) = 3366`,
* `AveragingWindowTimespan` is at most `17 * 150 = 2550`, and
* `MeanTarget` is at most `PoWLimit`,

the maximum scaled value inside the `Threshold()` calculation is:
* `floor(PoWLimit / 2550) * 3366`, which equals
* `floor((2^251 − 1) / 2550) * 3366`, which equals
* `(2^251 − 1) * 132/100`,
* which is less than `2^252`.

Therefore, this calculation cannot overflow a `u256` value. (And even if it did
overflow, it would be constrained to a valid value by the `PoWLimit` minimum.)

Note that the multiplication by `ActualTimespanBounded` must happen after the
division by `AveragingWindowTimespan`. Performing the multiplication first
could overflow.

If implemented in this way, the function is infallible.

`zcashd` truncates the `MeanTarget` after the mean calculation, and
after dividing by `AveragingWindowTimespan`. But as long as there is no overflow,
this is [equivalent to the single truncation of the final result] in the Zcash
specification. However, Zebra should follow the order of operations in `zcashd`,
and use repeated divisions, because that can't overflow. See the relevant
[comment in the zcashd source code].

[equivalent to the single truncation of the final result]: https://math.stackexchange.com/questions/147771/rewriting-repeated-integer-division-with-multiplication
[comment in the zcashd source code]: https://github.com/zcash/zcash/pull/4860/files

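Putting the pieces together, a minimal sketch of `threshold_bits` under the
assumptions above (scalar division and multiplication on `ExpandedDifficulty`,
and a hypothetical `to_compact()` conversion):

```rust
fn threshold_bits(&self) -> CompactDifficulty {
    let mean_target = self.mean_target_difficulty();
    let averaging_window_timespan = NetworkUpgrade::averaging_window_timespan_for_height(
        self.network,
        self.candidate_height,
    );

    // Divide before multiplying: repeated divisions can't overflow, and this
    // matches the zcashd order of operations and truncation behaviour.
    let scaled_target = mean_target / averaging_window_timespan.num_seconds() as u64
        * self.median_timespan_bounded().num_seconds() as u64;

    // Limit the result to the per-network minimum difficulty.
    // (`PoWLimit` is the largest valid threshold, so this is a `min`.)
    let limit = ExpandedDifficulty::target_difficulty_limit(self.network);
    scaled_target.min(limit).to_compact()
}
```
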
## Module Structure
[module-structure]: #module-structure

The structs and functions in this RFC are implemented in a new
`zebra_state::service::check::difficulty` module.

This module has two entry points:
* `AdjustedDifficulty::new_from_block`
* `difficulty_threshold_is_valid`

These entry points are both called from
`StateService::check_contextual_validity`.

## Test Plan
[test-plan]: #test-plan

Explain how the feature will be tested, including:
- [ ] tests for consensus-critical functionality
- [ ] existing test vectors, if available
- [ ] Zcash blockchain block test vectors (specify the network upgrade, feature, or block height and network)
- [ ] property testing or fuzzing

The tests should cover:
- [ ] positive cases: make sure the feature accepts valid inputs
  - using block test vectors for each network upgrade provides some coverage of valid inputs
- [ ] negative cases: make sure the feature rejects invalid inputs
  - make sure there is a test case for each error condition in the code
  - if there are lots of potential errors, prioritise:
    - consensus-critical errors
    - security-critical errors, and
    - likely errors
- [ ] edge cases: make sure that boundary conditions are correctly handled

# Drawbacks
[drawbacks]: #drawbacks

Why should we *not* do this?

## Alternate consensus parameters
[alternate-consensus-parameters]: #alternate-consensus-parameters

Any alternate consensus parameters or `regtest` mode would have to respect the constraints set by this design.

In particular:
* the `PoWLimit` must be less than or equal to
  `(2^256 - 1) / PoWAveragingWindow` (approximately `2^251`) to avoid overflow,
* the `PoWAveragingWindow` and `PoWMedianBlockSpan` are fixed by function argument types
  (at least until Rust gets stable const generics), and
* the design eliminates a significant number of edge cases by assuming that difficulty adjustments aren't
  validated for the first `PoWAveragingWindow + PoWMedianBlockSpan` (28) blocks in the chain.

# Rationale and alternatives
[rationale-and-alternatives]: #rationale-and-alternatives

## Is this design a good basis for later designs or implementations?
[good-basis]: #good-basis

The design includes specific methods for a future header-only validation design.

## What other designs have been considered and what is the rationale for not choosing them?
[alternate-designs]: #alternate-designs

A previous version of the RFC did not have the `AdjustedDifficulty` struct and
methods. That design was easy to misuse, because each function had a complicated
argument list.

## What is the impact of not doing this?
[no-action]: #no-action

Zebra could accept invalid, low-difficulty blocks from arbitrary miners. That
would be a security issue.

# Prior art
[prior-art]: #prior-art

* `zcashd`
* the Zcash specification
* Bitcoin

# Unresolved questions
[unresolved-questions]: #unresolved-questions

- What parts of the design do you expect to resolve through the implementation of this feature before stabilization?
  - Guide-level examples
  - Reference-level examples
  - Corner case examples
  - Testing

- What related issues do you consider out of scope for this RFC that could be addressed in the future independently of the solution that comes out of this RFC?
  - Monitoring and maintenance

# Future possibilities
[future-possibilities]: #future-possibilities

## Re-using the relevant chain API in other contextual checks
[relevant-chain-api-reuse]: #relevant-chain-api-reuse

The relevant chain iterator can be re-used to implement other contextual
validation checks.

For example, responding to peer requests for block locators, which means
implementing relevant chain hash queries as a `StateService` request.

## Header-only difficulty adjustment validation
[header-only-validation]: #header-only-validation

Implementing header-only difficulty adjustment validation as a `StateService` request.

## Caching difficulty calculations
[caching-calculations]: #caching-calculations

Difficulty calculations use `u256` arithmetic, which could be a bit expensive,
particularly if we get a flood of low-difficulty blocks. To reduce the impact
of this kind of DoS, we could cache the value returned by `threshold_bits` for
each block in the non-finalized state, and for the finalized tip. This value
could be used to quickly calculate the difficulties for any child blocks of
these blocks.

There's no need to persist this cache, or pre-fill it. (Minimum-difficulty
Testnet blocks don't call `threshold_bits`, and some side-chain blocks will
never have a next block.)

This caching is only worth implementing if these calculations show up in `zebrad`
profiles.

@ -0,0 +1,356 @@

- Feature Name: (`zebra-client`)
- Start Date: (2020-10-14)
- Design PR: [ZcashFoundation/zebra#1163](https://github.com/ZcashFoundation/zebra/pull/1163)
- Zebra Issue: [ZcashFoundation/zebra#0000](https://github.com/ZcashFoundation/zebra/issues/0000)

# Summary
[summary]: #summary

The `zebra-client` crate handles *client functionality*. Client functionality
is defined as all functionality related to a particular user's private data,
in contrast to the other full node functionality which handles public chain
state. This includes:

- note and key management;
- transaction generation;
- a client component for `zebrad` that handles block chain scanning, with
  appropriate side-channel protections;
- an RPC endpoint for `zebrad` that allows access to the client component;
- Rust library code that implements basic wallet functionality;
- a `zebra-cli` binary that wraps the wallet library and RPC queries in a command-line interface.

Client functionality is restricted to transparent and Sapling shielded
transactions; Sprout shielded transactions are not supported. (Users should
migrate to Sapling).

# Motivation
[motivation]: #motivation

We want to allow users to efficiently and securely send and receive funds via
Zebra. One challenge unique to Zcash is block chain scanning: because
shielded transactions reveal no metadata about the sender or receiver, users
must scan the block chain for relevant transactions using *viewing keys*.
This means that unlike a transparent blockchain with public transactions, a
full node must have online access to viewing keys to scan the chain. This
creates the risk of a privacy leak, because the node should not reveal which
viewing keys it has access to.

Block chain scanning requires a mechanism that allows users to manage and
store key material. This mechanism should also provide basic wallet
functionality, so that users can send and receive funds without requiring
third-party software.

To protect user privacy, this and all secret-dependent functionality should
be strongly isolated from the rest of the node implementation. Care should be
taken to protect against side channels that could reveal information about
viewing keys. To make this isolation easier, all secret-dependent
functionality is provided only by the `zebra-client` crate.

# Definitions
[definitions]: #definitions

- **client functionality**: all functionality related to a particular user's
  private data, in contrast to other full node functionality which handles
  public chain state.

- **block chain scanning**: the process of scanning the block chain for
  relevant transactions using a viewing key, as described in [§4.19][ps_scan]
  of the protocol specification.

- **viewing key**: Sapling shielded addresses support *viewing keys*, which
  represent the capability to decrypt transactions, as described in
  [§3.1][ps_keys] and [§4.2.2][ps_sapk] of the protocol specification.

- **task**: In this document, *task* refers specifically to a [Tokio
  task][tokio-task]. In brief, a task is a lightweight, non-blocking unit of
  execution (green thread), similar to a Goroutine or Erlang process. Tasks
  execute independently and are scheduled co-operatively using explicit yield
  points. Tasks are executed on the Tokio runtime, which can either be single-
  or multi-threaded.

[ps_scan]: https://zips.z.cash/protocol/protocol.pdf#saplingscan
[ps_keys]: https://zips.z.cash/protocol/protocol.pdf#addressesandkeys
[ps_sapk]: https://zips.z.cash/protocol/protocol.pdf#saplingkeycomponents
[tokio-task]: https://docs.rs/tokio/0.2.22/tokio/task/index.html

# Guide-level explanation
[guide-level-explanation]: #guide-level-explanation

There are two main parts of this functionality. The first is a `Client`
component running as part of `zebrad`, and the second is a `zebra-cli`
command-line tool.

The `Client` component is responsible for blockchain scanning. It maintains
its own distinct `sled` database, which stores the viewing keys it uses to
scan as well as the results of scanning. When a new block is added to the
chain state, the `Client` component is notified asynchronously using a
channel. For each Sapling shielded transaction in the block, the component
attempts to perform trial decryption of that transaction's notes using each
registered viewing key, as described in [§4.19][ps_scan]. If successful,
decrypted notes are saved to the database.

The [`PING`/`REJECT`][pingreject] attack demonstrates the importance of
decoupling execution of normal node operations from secret-dependent
operations. Zebra's network stack already makes it immune to those particular
attacks, because each peer connection is executed in a different task.
However, to eliminate this entire class of vulnerability, we execute the
`Client` component in its own task, decoupled from the rest of the node
functionality. In fact, each viewing key's scanning is performed
independently, as described in more detail below, with an analysis of
potential side-channels.

[pingreject]: https://eprint.iacr.org/2020/220.pdf

The second part is the `zebra-cli` command-line tool, which provides basic
wallet functionality. This tool manages spending keys and addresses, and
communicates with the `Client` component in `zebrad` to provide basic wallet
functionality. Specifically, `zebra-cli` uses a distinct RPC endpoint to load
viewing keys into `zebrad` and to query the results of block chain scanning.
`zebra-cli` can then use the results of those queries to generate transactions
and submit them to the network using `zebrad`.

This design upholds the principle of least authority by separating key
material required for spending funds from the key material required for block
chain scanning. This allows compartmentalization. For instance, a user could
in principle run `zebrad` on a cloud VPS with only their viewing keys and
store their spending keys on a laptop, or a user could run `zebrad` on a
local machine and store their spending keys in a hardware wallet. Both of
these use cases would require some additional tooling support, but are
possible with this design.

# Reference-level explanation
[reference-level-explanation]: #reference-level-explanation

## State notifications

We want a way to subscribe to updates from the state system via a channel. For
the purposes of this RFC, these changes are in-flight, but in the future, these
could be used for a push-based RPC mechanism.

Subscribers can subscribe to all state change notifications as they come in.

Currently the `zebra_state::init()` method returns a `BoxService` that allows you to
make requests to the chain state. Instead, we would return a `(BoxService,
StateNotifications)` tuple, where `StateNotifications` is a new structure initially
defined as:

```rust
#[non_exhaustive]
pub struct StateNotifications {
    pub new_blocks: tokio::sync::watch::Receiver<Arc<Block>>,
}
```

Instead of making repeated polling requests to a state service to look for any
new blocks, this channel will push new blocks to a consumer as they come in,
for the consumer to use or discard at their discretion. This will be used by
the client component described below. This will also be needed for gossiping
blocks to other peers, as they are validated.

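For illustration, a minimal sketch of a consumer, assuming `zebra_state::init`
returns the `(BoxService, StateNotifications)` tuple described above. The
`init` arguments and the `coinbase_height` accessor are assumptions here, and
the sketch uses the current `tokio::sync::watch` receiver API:

```rust
let (state_service, mut notifications) = zebra_state::init(config, network);

tokio::spawn(async move {
    // A watch channel only keeps the most recent value, so a slow consumer
    // sees the latest block rather than every intermediate block.
    while notifications.new_blocks.changed().await.is_ok() {
        let block: Arc<Block> = notifications.new_blocks.borrow().clone();
        tracing::debug!(height = ?block.coinbase_height(), "new chain tip block");
    }
});
```
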
## Online client component

This component maintains its own Sled tree. See RFC#0005 for more details on Sled.

We use the following Sled trees:

| Tree                   | Keys                 | Values         |
|------------------------|----------------------|----------------|
| `viewing_keys`         | `IncomingViewingKey` | `String`       |
| `height_by_key`        | `IncomingViewingKey` | `BE32(height)` |
| `received_set_by_key`  | `IncomingViewingKey` | ?              |
| `spend_set_by_key`     | `IncomingViewingKey` | ?              |
| `nullifier_map_by_key` | `IncomingViewingKey` | ?              |

See https://zips.z.cash/protocol/protocol.pdf#saplingscan

Zcash structures are encoded using `ZcashSerialize`/`ZcashDeserialize`.

This component runs inside zebrad. After incoming viewing keys are registered,
it holds onto them in order to do blockchain scanning. The component keeps track
of where it’s scanned to (TODO: per key?). It runs in its own separate task, so
that if it crashes, the rest of the node is unaffected, and it executes
independently (but in the same process) of normal node operation.

In the case of the client component that needs to do blockchain scanning and
trial decryption, every valid block with non-coinbase transactions will need to
be checked and its transactions trial-decrypted with registered incoming viewing
keys to see if any notes have been received by the key's owner and if any notes
have already been spent elsewhere.

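A high-level sketch of that scanning loop, where `trial_decrypt` and
`store_note` are hypothetical helpers standing in for the Sapling note
decryption and Sled writes described above:

```rust
// Runs in its own Tokio task, driven by the `new_blocks` watch channel.
while new_blocks.changed().await.is_ok() {
    let block = new_blocks.borrow().clone();

    // Following the text above, only non-coinbase transactions are scanned.
    for transaction in block.transactions.iter().filter(|tx| !tx.is_coinbase()) {
        for key in registered_viewing_keys.iter() {
            // Trial decryption fails for outputs that don't belong to `key`.
            for note in trial_decrypt(transaction, key) {
                store_note(key, note)?;
            }
        }
    }
}
```
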
## RPCs
A specific set of _privileged_ RPC endpoints:
- Allows registering of incoming viewing keys with zebrad in order to do
  blockchain scanning
- Allows querying of the results of that scanning, to get wallet balance, etc
- Not authenticated to start (see 'Future possibilities')
- Users can control access by controlling access to the privileged endpoint (i.e.
  via a firewall)

Support for sending transactions via _non-privileged_ RPC endpoints, or via Stolon:
- sendTransaction: once you author a transaction you can gossip it via any
  Zcash node, not just a specific instance of zebrad

## Wallet functionality
- Holds on to your spending keys so you can author transactions
- Uses RPC methods to query the online client component inside zebrad about
  wallet balances

## CLI binary
- zebra-cli talks to the subcomponent running in zebrad
  - (can use servo/bincode to communicate with zebrad)
  - via the privileged (and possibly the unprivileged) RPC endpoints
- can use [cap-std](https://blog.sunfishcode.online/introducing-cap-std/)
  to restrict filesystem and network access for zebra-client.
  See https://github.com/ZcashFoundation/zebra/issues/2340
- can use the [tui crate](https://crates.io/crates/tui) to render a terminal UI

## Task isolation in Tokio
- TODO: fill in
- cooperative multitasking is fine, IF you cooperate
- lots of tasks

<!-- This is the technical portion of the RFC. Explain the design in sufficient detail that: -->

<!-- - Its interaction with other features is clear. -->
<!-- - It is reasonably clear how the feature would be implemented, tested, monitored, and maintained. -->
<!-- - Corner cases are dissected by example. -->

<!-- The section should return to the examples given in the previous section, and explain more fully how the detailed proposal makes those examples work. -->

## Module Structure

<!-- Describe the crate and modules that will implement the feature.-->

zebra-client (currently an empty stub), zebra-cli (does not exist yet),
zebra-rpc? (exists as an empty stub; we may have zebra-cli communicate with
zebra-client inside zebrad via an RPC method and/or a private IPC layer)

## Test Plan

<!-- Explain how the feature will be tested, including: -->
<!-- * tests for consensus-critical functionality -->
<!-- * existing test vectors, if available -->
<!-- * Zcash blockchain block test vectors (specify the network upgrade, feature, or block height and network) -->
<!-- * property testing or fuzzing -->

<!-- The tests should cover: -->
<!-- * positive cases: make sure the feature accepts valid inputs -->
<!-- * using block test vectors for each network upgrade provides some coverage of valid inputs -->
<!-- * negative cases: make sure the feature rejects invalid inputs -->
<!-- * make sure there is a test case for each error condition in the code -->
<!-- * if there are lots of potential errors, prioritise: -->
<!-- * consensus-critical errors -->
<!-- * security-critical errors, and -->
<!-- * likely errors -->
<!-- * edge cases: make sure that boundary conditions are correctly handled -->

# Drawbacks
[drawbacks]: #drawbacks

<!-- Why should we *not* do this?-->

Supporting a wallet means assuming risk, and implementing wallet functionality
takes effort:

- need to responsibly handle secret key material;
- currently we only handle public data.

# Rationale and alternatives
[rationale-and-alternatives]: #rationale-and-alternatives

<!-- - What makes this design a good design? -->
<!-- - Is this design a good basis for later designs or implementations? -->
<!-- - What other designs have been considered and what is the rationale for not choosing them? -->

- why have a separate RPC endpoint?
  - extra endpoints are cheap
  - allows segmentation by capability
  - alternative is error-prone after-the-fact ACLs like Tor control port filters

- What is the impact of not doing this?
  - We can't send money with zebra alone.
  - rely on third-party wallet software to send funds with zebra
    - we need to provide basic functionality within zebra's trust boundary, rather than forcing users to additionally trust third-party software
    - there are great third-party wallets, we want to integrate with them, just don't want to rely on them

- What about the light client protocol?
  - does not address this use case, has different trust model (private lookup, no scanning)
  - we want our first client that interacts with zebrad to not have a long
    startup time, which a lightclient implementation would require
  - zebra-cli should be within the same trust and privacy boundary as the
    zebrad node it is interacting with
  - light client protocol as currently implemented requires stack assumptions
    such as protobufs and a hardcoded lightserver to talk to

- What about having one database per key?
  - easy to reliably delete or backup all data related to a single key
  - might use slightly more space/CPU
  - slightly harder to delete all the keys

# Unresolved questions
[unresolved-questions]: #unresolved-questions

<!-- - What parts of the design do you expect to resolve through the RFC process before this gets merged? -->
<!-- - What parts of the design do you expect to resolve through the implementation of this feature before stabilization? -->
<!-- - What related issues do you consider out of scope for this RFC that could be addressed in the future independently of the solution that comes out of this RFC? -->

- wait to fill this in until doing the detailed writeup.

# Future possibilities
[future-possibilities]: #future-possibilities

- [BlazeSync algorithm](https://forum.zcashcommunity.com/t/zecwallets-blazesync-sync-entire-chain-in-60s/39447)
  for fast syncing, like Zecwallet

- mandatory sweeps for legacy keys
  - blazingly fast wallet startup, to match `zebrad`'s blazingly fast sync
  - generate unified address from a new seed phrase (or one provided by the user)
  - user can just backup seed phrase rather than a set of private keys
  - handles arbitrary keys from `zcashd` and other wallets, even if they weren't generated from a seed phrase
  - ~handles Sprout funds without `zebra-client` having to support Sprout balances~
  - startup is incredibly fast
    - sweep takes a few minutes to be confirmed
    - scanning the entire chain could take hours
    - if we know when the seed phrase was created, we can skip millions of blocks during scanning
  - sweeps can also be initiated by the user for non-linkability / performance / refresh
  - sweeps should handle the "block reward recipient" case where there are a lot of small outputs
  - initial release could support mandatory sweeps, and future releases could support legacy keys

- split `Client` component into subprocess
  - this helps somewhat but the benefit is reduced by our preexisting memory safety, thanks to Rust
  - not meaningful without other isolation (need to restrict `zebrad` from accessing viewing keys on disk, etc)
  - could use [cap-std](https://blog.sunfishcode.online/introducing-cap-std/)
    to restrict filesystem and network access for zebra-client.
    See https://github.com/ZcashFoundation/zebra/issues/2340
  - instead of process isolation, maybe you actually want the Light Client
    Protocol, or something similar?

- hardware wallet integration for `zebra-cli`
  - having `zebra-cli` allows us to do this
  - much higher security ROI than subprocess
  - very cool future feature

- authenticate queries for a particular viewing key by proving knowledge of the
  viewing key (requires crypto). this could allow public access to the client
  endpoint

- Use Unified Addresses only, no legacy addrs.

<!-- Think about what the natural extension and evolution of your proposal would -->
<!-- be and how it would affect Zebra and Zcash as a whole. Try to use this -->
<!-- section as a tool to more fully consider all possible -->
<!-- interactions with the project and cryptocurrency ecosystem in your proposal. -->
<!-- Also consider how this all fits into the roadmap for the project -->
<!-- and of the relevant sub-team. -->

<!-- This is also a good place to "dump ideas", if they are out of scope for the -->
<!-- RFC you are writing but otherwise related. -->

<!-- If you have tried and cannot think of any future possibilities, -->
<!-- you may simply state that you cannot think of anything. -->

<!-- Note that having something written down in the future-possibilities section -->
<!-- is not a reason to accept the current or a future RFC; such notes should be -->
<!-- in the section on motivation or rationale in this or subsequent RFCs. -->
<!-- The section merely provides additional information. -->