fix(doc): Fix various doc warnings, part 2 (#4561)
* Fix the syntax of links in comments * Fix a mistake in the docs Co-authored-by: Alfredo Garcia <oxarbitrage@gmail.com> * Remove unnecessary angle brackets from a link * Revert the changes for links that serve as references * Revert "Revert the changes for links that serve as references" This reverts commit 8b091aa9fab453e7d3559a5d474e0879183b9bfb. * Remove `<` `>` from links that serve as references This reverts commit 046ef25620ae1a2140760ae7ea379deecb4b583c. * Don't use `<` `>` in normal comments * Don't use `<` `>` for normal comments * Revert changes for comments starting with `//` * Fix some warnings produced by `cargo doc` * Fix some rustdoc warnings * Fix some warnings * Refactor some changes * Fix some rustdoc warnings * Fix some rustdoc warnings * Resolve various TODOs Co-authored-by: teor <teor@riseup.net> Co-authored-by: Alfredo Garcia <oxarbitrage@gmail.com> Co-authored-by: teor <teor@riseup.net> Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
This commit is contained in:
parent
00f23e1d96
commit
2e50ccc8f3
|
|
@ -182,7 +182,7 @@ impl ZcashDeserialize for Spend<PerSpendAnchor> {
|
|||
///
|
||||
/// This rule is also implemented in
|
||||
/// `zebra_state::service::check::anchors` and
|
||||
/// [`crate::transaction::serialize`].
|
||||
/// `crate::transaction::serialize`.
|
||||
///
|
||||
/// The "anchor encoding for v4 transactions" is implemented here.
|
||||
fn zcash_deserialize<R: io::Read>(mut reader: R) -> Result<Self, SerializationError> {
|
||||
|
|
|
|||
|
|
@ -124,14 +124,17 @@ pub enum Response {
|
|||
Block {
|
||||
/// The witnessed transaction ID for this transaction.
|
||||
///
|
||||
/// [`Block`] responses can be uniquely identified by [`UnminedTxId::mined_id`],
|
||||
/// because the block's authorizing data root will be checked during contextual validation.
|
||||
/// [`Response::Block`] responses can be uniquely identified by
|
||||
/// [`UnminedTxId::mined_id`], because the block's authorizing data root
|
||||
/// will be checked during contextual validation.
|
||||
tx_id: UnminedTxId,
|
||||
|
||||
/// The miner fee for this transaction.
|
||||
///
|
||||
/// `None` for coinbase transactions.
|
||||
///
|
||||
/// Consensus rule:
|
||||
/// # Consensus
|
||||
///
|
||||
/// > The remaining value in the transparent transaction value pool
|
||||
/// > of a coinbase transaction is destroyed.
|
||||
///
|
||||
|
|
@ -151,8 +154,8 @@ pub enum Response {
|
|||
/// Mempool transactions always have a transaction fee,
|
||||
/// because coinbase transactions are rejected from the mempool.
|
||||
///
|
||||
/// [`Mempool`] responses are uniquely identified by the [`UnminedTxId`]
|
||||
/// variant for their transaction version.
|
||||
/// [`Response::Mempool`] responses are uniquely identified by the
|
||||
/// [`UnminedTxId`] variant for their transaction version.
|
||||
transaction: VerifiedUnminedTx,
|
||||
},
|
||||
}
|
||||
|
|
|
|||
|
|
@ -53,7 +53,7 @@ use zebra_chain::{
|
|||
///
|
||||
/// Since the inbound peer limit is higher than the outbound peer limit,
|
||||
/// Zebra can be connected to a majority of peers
|
||||
/// that it has *not* chosen from its [`AddressBook`].
|
||||
/// that it has *not* chosen from its [`crate::AddressBook`].
|
||||
///
|
||||
/// Inbound peer connections are initiated by the remote peer,
|
||||
/// so inbound peer selection is not controlled by the local node.
|
||||
|
|
@ -149,25 +149,28 @@ pub const MAX_RECENT_PEER_AGE: Duration32 = Duration32::from_days(3);
|
|||
/// Using a prime number makes sure that heartbeats don't synchronise with crawls.
|
||||
pub const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(59);
|
||||
|
||||
/// The minimum time between successive calls to [`CandidateSet::next()`][Self::next].
|
||||
/// The minimum time between successive calls to
|
||||
/// [`CandidateSet::next`][crate::peer_set::CandidateSet::next].
|
||||
///
|
||||
/// ## Security
|
||||
///
|
||||
/// Zebra resists distributed denial of service attacks by making sure that new peer connections
|
||||
/// are initiated at least `MIN_PEER_CONNECTION_INTERVAL` apart.
|
||||
/// are initiated at least [`MIN_PEER_CONNECTION_INTERVAL`] apart.
|
||||
pub const MIN_PEER_CONNECTION_INTERVAL: Duration = Duration::from_millis(25);
|
||||
|
||||
/// The minimum time between successive calls to [`CandidateSet::update()`][Self::update].
|
||||
/// The minimum time between successive calls to
|
||||
/// [`CandidateSet::update`][crate::peer_set::CandidateSet::update].
|
||||
///
|
||||
/// Using a prime number makes sure that peer address crawls don't synchronise with other crawls.
|
||||
///
|
||||
/// ## Security
|
||||
///
|
||||
/// Zebra resists distributed denial of service attacks by making sure that requests for more
|
||||
/// peer addresses are sent at least `MIN_PEER_GET_ADDR_INTERVAL` apart.
|
||||
/// peer addresses are sent at least [`MIN_PEER_GET_ADDR_INTERVAL`] apart.
|
||||
pub const MIN_PEER_GET_ADDR_INTERVAL: Duration = Duration::from_secs(31);
|
||||
|
||||
/// The combined timeout for all the requests in [`CandidateSet::update()`][Self::update].
|
||||
/// The combined timeout for all the requests in
|
||||
/// [`CandidateSet::update`][crate::peer_set::CandidateSet::update].
|
||||
///
|
||||
/// `zcashd` doesn't respond to most `getaddr` requests,
|
||||
/// so this timeout needs to be short.
|
||||
|
|
@ -329,8 +332,8 @@ mod tests {
|
|||
use super::*;
|
||||
|
||||
/// This assures that the `Duration` value we are computing for
|
||||
/// MIN_PEER_RECONNECTION_DELAY actually matches the other const values it
|
||||
/// relies on.
|
||||
/// [`MIN_PEER_RECONNECTION_DELAY`] actually matches the other const values
|
||||
/// it relies on.
|
||||
#[test]
|
||||
fn ensure_live_peer_duration_value_matches_others() {
|
||||
zebra_test::init();
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@ mod tests;
|
|||
/// Creates a Zcash peer connection using the provided data stream.
|
||||
/// This connection is completely isolated from all other node state.
|
||||
///
|
||||
/// The connection pool returned by [`init`](zebra_network::init)
|
||||
/// The connection pool returned by [`init`](crate::init)
|
||||
/// should be used for all requests that
|
||||
/// don't require isolated state or use of an existing TCP connection. However,
|
||||
/// this low-level API is useful for custom network crawlers or Tor connections.
|
||||
|
|
@ -44,7 +44,7 @@ mod tests;
|
|||
/// - `network`: the Zcash [`Network`] used for this connection.
|
||||
///
|
||||
/// - `data_stream`: an existing data stream. This can be a non-anonymised TCP connection,
|
||||
/// or a Tor client [`DataStream`].
|
||||
/// or a Tor client `arti_client::DataStream`.
|
||||
///
|
||||
/// - `user_agent`: a valid BIP14 user-agent, e.g., the empty string.
|
||||
pub fn connect_isolated<PeerTransport>(
|
||||
|
|
@ -124,7 +124,7 @@ where
|
|||
/// Transactions sent over this connection can be linked to the sending and receiving IP address
|
||||
/// by passive internet observers.
|
||||
///
|
||||
/// Prefer [`connect_isolated_run_tor`](tor::connect_isolated_run_tor) if available.
|
||||
/// Prefer [`connect_isolated_tor`](tor::connect_isolated_tor) if available.
|
||||
pub fn connect_isolated_tcp_direct(
|
||||
network: Network,
|
||||
addr: SocketAddr,
|
||||
|
|
|
|||
|
|
@ -16,11 +16,11 @@
|
|||
//! [`tower::Service`] representing "the network", which load-balances
|
||||
//! outbound [`Request`]s over available peers.
|
||||
//!
|
||||
//! Unlike the underlying legacy network protocol, Zebra's [`PeerSet`]
|
||||
//! [`tower::Service`] guarantees that each `Request` future will resolve to
|
||||
//! the correct `Response`, rather than an unrelated `Response` message.
|
||||
//! Unlike the underlying legacy network protocol, Zebra's `PeerSet`
|
||||
//! [`tower::Service`] guarantees that each `Request` future will resolve to the
|
||||
//! correct `Response`, rather than an unrelated `Response` message.
|
||||
//!
|
||||
//! Each peer connection is handled by a distinct [`Connection`] task.
|
||||
//! Each peer connection is handled by a distinct [`peer::Connection`] task.
|
||||
//! The Zcash network protocol is bidirectional, so Zebra interprets incoming
|
||||
//! Zcash messages as either:
|
||||
//! - [`Response`]s to previously sent outbound [`Request`]s, or
|
||||
|
|
@ -84,7 +84,7 @@
|
|||
//!
|
||||
//! ### Connection Pool
|
||||
//!
|
||||
//! [`PeerSet`] Network Service:
|
||||
//! `PeerSet` Network Service:
|
||||
//! * provides an interface for other services and tasks running within this node
|
||||
//! to make requests to remote peers ("the rest of the network")
|
||||
//! * accepts [`Request`]s from the local node
|
||||
|
|
@ -102,7 +102,8 @@
|
|||
//! Peer Inventory Service:
|
||||
//! * tracks gossiped `inv` advertisements for each peer
|
||||
//! * tracks missing inventory for each peer
|
||||
//! * used by the [`PeerSet`] to route block and transaction requests to peers that have the requested data
|
||||
//! * used by the `PeerSet` to route block and transaction requests
|
||||
//! to peers that have the requested data
|
||||
//!
|
||||
//! ### Peer Discovery
|
||||
//!
|
||||
|
|
|
|||
|
|
@ -10,21 +10,28 @@ use super::{MetaAddr, MetaAddrChange, PeerServices};
|
|||
|
||||
/// The largest number of random changes we want to apply to a [`MetaAddr`].
|
||||
///
|
||||
/// This should be at least twice the number of [`PeerAddrState`]s, so the tests
|
||||
/// can cover multiple transitions through every state.
|
||||
/// This should be at least twice the number of [`PeerAddrState`][1]s, so the
|
||||
/// tests can cover multiple transitions through every state.
|
||||
///
|
||||
/// [1]: super::PeerAddrState
|
||||
#[allow(dead_code)]
|
||||
pub const MAX_ADDR_CHANGE: usize = 15;
|
||||
|
||||
/// The largest number of random addresses we want to add to an [`AddressBook`].
|
||||
/// The largest number of random addresses we want to add to an [`AddressBook`][2].
|
||||
///
|
||||
/// This should be at least the number of [`PeerAddrState`]s, so the tests can
|
||||
/// cover interactions between addresses in different states.
|
||||
/// This should be at least the number of [`PeerAddrState`][1]s, so the tests
|
||||
/// can cover interactions between addresses in different states.
|
||||
///
|
||||
/// [1]: super::PeerAddrState
|
||||
/// [2]: crate::AddressBook
|
||||
#[allow(dead_code)]
|
||||
pub const MAX_META_ADDR: usize = 8;
|
||||
|
||||
impl MetaAddr {
|
||||
/// Create a strategy that generates [`MetaAddr`]s in the
|
||||
/// [`PeerAddrState::NeverAttemptedGossiped`] state.
|
||||
/// [`NeverAttemptedGossiped`][1] state.
|
||||
///
|
||||
/// [1]: super::PeerAddrState::NeverAttemptedGossiped
|
||||
pub fn gossiped_strategy() -> BoxedStrategy<Self> {
|
||||
(
|
||||
canonical_socket_addr_strategy(),
|
||||
|
|
@ -38,7 +45,9 @@ impl MetaAddr {
|
|||
}
|
||||
|
||||
/// Create a strategy that generates [`MetaAddr`]s in the
|
||||
/// [`PeerAddrState::NeverAttemptedAlternate`] state.
|
||||
/// [`NeverAttemptedAlternate`][1] state.
|
||||
///
|
||||
/// [1]: super::PeerAddrState::NeverAttemptedAlternate
|
||||
pub fn alternate_strategy() -> BoxedStrategy<Self> {
|
||||
(canonical_socket_addr_strategy(), any::<PeerServices>())
|
||||
.prop_map(|(socket_addr, untrusted_services)| {
|
||||
|
|
@ -84,9 +93,11 @@ impl MetaAddrChange {
|
|||
/// Create a strategy that generates [`MetaAddrChange`]s which are ready for
|
||||
/// outbound connections.
|
||||
///
|
||||
/// Currently, all generated changes are the [`NewAlternate`] variant.
|
||||
/// TODO: Generate all [`MetaAddrChange`] variants, and give them ready fields.
|
||||
/// (After PR #2276 merges.)
|
||||
/// Currently, all generated changes are the [`NewAlternate`][1] variant.
|
||||
/// TODO: Generate all [`MetaAddrChange`] variants, and give them ready
|
||||
/// fields. (After PR #2276 merges.)
|
||||
///
|
||||
/// [1]: super::NewAlternate
|
||||
pub fn ready_outbound_strategy() -> BoxedStrategy<Self> {
|
||||
canonical_socket_addr_strategy()
|
||||
.prop_filter_map("failed MetaAddr::is_valid_for_outbound", |addr| {
|
||||
|
|
|
|||
|
|
@ -76,8 +76,8 @@ pub(crate) struct ClientRequest {
|
|||
/// The actual network request for the peer.
|
||||
pub request: Request,
|
||||
|
||||
/// The response [`Message`] channel, included because `peer::Client::call` returns a
|
||||
/// future that may be moved around before it resolves.
|
||||
/// The response `Message` channel, included because `peer::Client::call`
|
||||
/// returns a future that may be moved around before it resolves.
|
||||
pub tx: oneshot::Sender<Result<Response, SharedPeerError>>,
|
||||
|
||||
/// Used to register missing inventory in responses on `tx`,
|
||||
|
|
|
|||
|
|
@ -94,28 +94,30 @@ pub enum PeerError {
|
|||
/// or peers can download and verify the missing data.
|
||||
///
|
||||
/// If the peer has some of the data, the request returns an [`Ok`] response,
|
||||
/// with any `notfound` data is marked as [`Missing`][m].
|
||||
/// with any `notfound` data marked as [`Missing`][1].
|
||||
///
|
||||
/// [m] crate::protocol::external::InventoryResponse::Missing
|
||||
/// [1]: crate::protocol::internal::InventoryResponse::Missing
|
||||
#[error("Remote peer could not find any of the items: {0:?}")]
|
||||
NotFoundResponse(Vec<InventoryHash>),
|
||||
|
||||
/// We requested data, but all our ready peers are marked as recently
|
||||
/// [`Missing`](InventoryResponse::Missing) that data in our local inventory registry.
|
||||
/// [`Missing`][1] that data in our local inventory registry.
|
||||
///
|
||||
/// This is a temporary error.
|
||||
///
|
||||
/// Peers with the inventory can finish their requests and become ready,
|
||||
/// or other peers can download and verify the missing data.
|
||||
/// Peers with the inventory can finish their requests and become ready, or
|
||||
/// other peers can download and verify the missing data.
|
||||
///
|
||||
/// # Correctness
|
||||
///
|
||||
/// This error is produced using Zebra's local inventory registry,
|
||||
/// without contacting any peers.
|
||||
/// This error is produced using Zebra's local inventory registry, without
|
||||
/// contacting any peers.
|
||||
///
|
||||
/// Client responses containing this error must not be used to update the inventory registry.
|
||||
/// This makes sure that we eventually expire our local cache of missing inventory,
|
||||
/// and send requests to peers again.
|
||||
/// Client responses containing this error must not be used to update the
|
||||
/// inventory registry. This makes sure that we eventually expire our local
|
||||
/// cache of missing inventory, and send requests to peers again.
|
||||
///
|
||||
/// [1]: crate::protocol::internal::InventoryResponse::Missing
|
||||
#[error("All ready peers are registered as recently missing these items: {0:?}")]
|
||||
NotFoundRegistry(Vec<InventoryHash>),
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
//! Initial [`Handshake`]s with Zebra peers over a [`PeerTransport`].
|
||||
//! Initial [`Handshake`]s with Zebra peers over a `PeerTransport`.
|
||||
|
||||
use std::{
|
||||
cmp::min,
|
||||
|
|
|
|||
|
|
@ -32,7 +32,7 @@ pub enum Request {
|
|||
/// Query matching transactions in the mempool,
|
||||
/// using a unique set of [`struct@Hash`]s. Pre-V5 transactions are matched
|
||||
/// directly; V5 transaction are matched just by the Hash, disregarding
|
||||
/// the [`AuthDigest`].
|
||||
/// the [`AuthDigest`](zebra_chain::transaction::AuthDigest).
|
||||
TransactionsByMinedId(HashSet<Hash>),
|
||||
|
||||
/// Query matching cached rejected transaction IDs in the mempool,
|
||||
|
|
@ -59,10 +59,10 @@ pub enum Request {
|
|||
///
|
||||
/// This request is required to avoid hangs in the mempool.
|
||||
///
|
||||
/// The queue checker task can't call `poll_ready` directly on the [`Mempool`] service,
|
||||
/// because the mempool service is wrapped in a `Buffer`.
|
||||
/// Calling [`Buffer::poll_ready`] reserves a buffer slot, which can cause hangs when
|
||||
/// too many slots are reserved but unused:
|
||||
/// The queue checker task can't call `poll_ready` directly on the mempool
|
||||
/// service, because the service is wrapped in a `Buffer`. Calling
|
||||
/// `Buffer::poll_ready` reserves a buffer slot, which can cause hangs
|
||||
/// when too many slots are reserved but unused:
|
||||
/// <https://docs.rs/tower/0.4.10/tower/buffer/struct.Buffer.html#a-note-on-choosing-a-bound>
|
||||
CheckForVerifiedTransactions,
|
||||
}
|
||||
|
|
|
|||
|
|
@ -100,8 +100,8 @@ impl PreparedBlock {
|
|||
}
|
||||
|
||||
impl ContextuallyValidBlock {
|
||||
/// Create a block that's ready for non-finalized `Chain` contextual validation,
|
||||
/// using a [`PreparedBlock`] and fake zero-valued spent UTXOs.
|
||||
/// Create a block that's ready for non-finalized `Chain` contextual
|
||||
/// validation, using a [`PreparedBlock`] and fake zero-valued spent UTXOs.
|
||||
///
|
||||
/// Only for use in tests.
|
||||
pub fn test_with_zero_spent_utxos(block: impl Into<PreparedBlock>) -> Self {
|
||||
|
|
|
|||
|
|
@ -18,7 +18,10 @@ use crate::Request;
|
|||
use crate::{service::read::AddressUtxos, TransactionLocation};
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
/// A response to a [`StateService`] [`Request`].
|
||||
/// A response to a [`StateService`][1] [`Request`][2].
|
||||
///
|
||||
/// [1]: crate::service::StateService
|
||||
/// [2]: crate::Request
|
||||
pub enum Response {
|
||||
/// Response to [`Request::CommitBlock`] indicating that a block was
|
||||
/// successfully committed to the state.
|
||||
|
|
@ -50,7 +53,8 @@ pub enum Response {
|
|||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
/// A response to a read-only [`ReadStateService`](crate::ReadStateService)'s
|
||||
/// A response to a read-only
|
||||
/// [`ReadStateService`](crate::service::ReadStateService)'s
|
||||
/// [`ReadRequest`](crate::ReadRequest).
|
||||
pub enum ReadResponse {
|
||||
/// Response to [`ReadRequest::Block`](crate::ReadRequest::Block) with the
|
||||
|
|
@ -72,13 +76,18 @@ pub enum ReadResponse {
|
|||
/// specified Orchard note commitment tree.
|
||||
OrchardTree(Option<Arc<orchard::tree::NoteCommitmentTree>>),
|
||||
|
||||
/// Response to [`ReadRequest::AddressBalance`] with the total balance of the addresses.
|
||||
/// Response to
|
||||
/// [`ReadRequest::AddressBalance`](crate::ReadRequest::AddressBalance) with
|
||||
/// the total balance of the addresses.
|
||||
AddressBalance(Amount<NonNegative>),
|
||||
|
||||
/// Response to [`ReadRequest::TransactionIdsByAddresses`] with the obtained transaction ids,
|
||||
/// in the order they appear in blocks.
|
||||
/// Response to
|
||||
/// [`ReadRequest::TransactionIdsByAddresses`](crate::ReadRequest::TransactionIdsByAddresses)
|
||||
/// with the obtained transaction ids, in the order they appear in blocks.
|
||||
AddressesTransactionIds(BTreeMap<TransactionLocation, transaction::Hash>),
|
||||
|
||||
/// Response to [`ReadRequest::UtxosByAddresses`] with found utxos and transaction data.
|
||||
/// Response to
|
||||
/// [`ReadRequest::UtxosByAddresses`](crate::ReadRequest::UtxosByAddresses)
|
||||
/// with found utxos and transaction data.
|
||||
Utxos(AddressUtxos),
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2,14 +2,18 @@
|
|||
//!
|
||||
//! Zebra provides cached state access via two main services:
|
||||
//! - [`StateService`]: a read-write service that waits for queued blocks.
|
||||
//! - [`ReadStateService`]: a read-only service that answers from the most recent committed block.
|
||||
//! - [`ReadStateService`]: a read-only service that answers from the most
|
||||
//! recent committed block.
|
||||
//!
|
||||
//! Most users should prefer [`ReadStateService`], unless they need to wait for
|
||||
//! verified blocks to be committed. (For example, the syncer and mempool tasks.)
|
||||
//! verified blocks to be committed. (For example, the syncer and mempool
|
||||
//! tasks.)
|
||||
//!
|
||||
//! Zebra also provides access to the best chain tip via:
|
||||
//! - [`LatestChainTip`]: a read-only channel that contains the latest committed tip.
|
||||
//! - [`ChainTipChange`]: a read-only channel that can asynchronously await chain tip changes.
|
||||
//! - [`LatestChainTip`]: a read-only channel that contains the latest committed
|
||||
//! tip.
|
||||
//! - [`ChainTipChange`]: a read-only channel that can asynchronously await
|
||||
//! chain tip changes.
|
||||
|
||||
use std::{
|
||||
convert,
|
||||
|
|
@ -95,7 +99,7 @@ pub type QueuedFinalized = (
|
|||
/// to delay the next ObtainTips until all queued blocks have been committed.
|
||||
///
|
||||
/// But most state users can ignore any queued blocks, and get faster read responses
|
||||
/// using the [`ReadOnlyStateService`].
|
||||
/// using the [`ReadStateService`].
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct StateService {
|
||||
/// The finalized chain state, including its on-disk database.
|
||||
|
|
@ -317,11 +321,14 @@ impl StateService {
|
|||
rsp_rx
|
||||
}
|
||||
|
||||
/// Update the [`LatestChainTip`], [`ChainTipChange`], and [`LatestChain`] channels
|
||||
/// with the latest non-finalized [`ChainTipBlock`] and [`Chain`].
|
||||
/// Update the [`LatestChainTip`], [`ChainTipChange`], and `best_chain_sender`
|
||||
/// channels with the latest non-finalized [`ChainTipBlock`] and
|
||||
/// [`Chain`][1].
|
||||
///
|
||||
/// Returns the latest non-finalized chain tip height,
|
||||
/// or `None` if the non-finalized state is empty.
|
||||
/// Returns the latest non-finalized chain tip height, or `None` if the
|
||||
/// non-finalized state is empty.
|
||||
///
|
||||
/// [1]: non_finalized_state::Chain
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
fn update_latest_chain_channels(&mut self) -> Option<block::Height> {
|
||||
let best_chain = self.mem.best_chain();
|
||||
|
|
@ -502,8 +509,8 @@ impl StateService {
|
|||
.or_else(|| self.disk.db().height(hash))
|
||||
}
|
||||
|
||||
/// Return the [`Utxo`] pointed to by `outpoint`, if it exists in any chain,
|
||||
/// or in any pending block.
|
||||
/// Return the [`transparent::Utxo`] pointed to by `outpoint`, if it exists
|
||||
/// in any chain, or in any pending block.
|
||||
///
|
||||
/// Some of the returned UTXOs may be invalid, because:
|
||||
/// - they are not in the best chain, or
|
||||
|
|
|
|||
|
|
@ -372,8 +372,8 @@ impl ChainTip for LatestChainTip {
|
|||
/// Awaits changes and resets of the state's best chain tip,
|
||||
/// returning the latest [`TipAction`] once the state is updated.
|
||||
///
|
||||
/// Each cloned instance separately tracks the last block data it provided.
|
||||
/// If the best chain fork has changed since the last [`tip_change`] on that instance,
|
||||
/// Each cloned instance separately tracks the last block data it provided. If
|
||||
/// the best chain fork has changed since the last tip change on that instance,
|
||||
/// it returns a [`Reset`].
|
||||
///
|
||||
/// The chain tip data is based on:
|
||||
|
|
@ -411,16 +411,19 @@ pub enum TipAction {
|
|||
/// The chain tip was reset to a block with `height` and `hash`.
|
||||
///
|
||||
/// Resets can happen for different reasons:
|
||||
/// * a newly created or cloned [`ChainTipChange`], which is behind the current tip,
|
||||
/// * extending the chain with a network upgrade activation block,
|
||||
/// * switching to a different best [`Chain`], also known as a rollback, and
|
||||
/// * receiving multiple blocks since the previous change.
|
||||
/// - a newly created or cloned [`ChainTipChange`], which is behind the
|
||||
/// current tip,
|
||||
/// - extending the chain with a network upgrade activation block,
|
||||
/// - switching to a different best [`Chain`][1], also known as a rollback, and
|
||||
/// - receiving multiple blocks since the previous change.
|
||||
///
|
||||
/// To keep the code and tests simple, Zebra performs the same reset actions,
|
||||
/// regardless of the reset reason.
|
||||
/// To keep the code and tests simple, Zebra performs the same reset
|
||||
/// actions, regardless of the reset reason.
|
||||
///
|
||||
/// `Reset`s do not have the transaction hashes from the tip block,
|
||||
/// because all transactions should be cleared by a reset.
|
||||
/// `Reset`s do not have the transaction hashes from the tip block, because
|
||||
/// all transactions should be cleared by a reset.
|
||||
///
|
||||
/// [1]: super::non_finalized_state::Chain
|
||||
Reset {
|
||||
/// The block height of the tip, after the chain reset.
|
||||
height: block::Height,
|
||||
|
|
@ -470,7 +473,7 @@ impl ChainTipChange {
|
|||
/// - `Some(`[`TipAction`]`)` if there has been a change since the last time the method was called.
|
||||
/// - `None` if there has been no change.
|
||||
///
|
||||
/// See [`wait_for_tip_change`] for details.
|
||||
/// See [`Self::wait_for_tip_change`] for details.
|
||||
#[instrument(
|
||||
skip(self),
|
||||
fields(
|
||||
|
|
|
|||
|
|
@ -107,13 +107,13 @@ impl AdjustedDifficulty {
|
|||
)
|
||||
}
|
||||
|
||||
/// Initialise and return a new `AdjustedDifficulty` using a
|
||||
/// Initialise and return a new [`AdjustedDifficulty`] using a
|
||||
/// `candidate_header`, `previous_block_height`, `network`, and a `context`.
|
||||
///
|
||||
/// Designed for use when validating block headers, where the full block has not
|
||||
/// been downloaded yet.
|
||||
///
|
||||
/// See [`new_from_block()`] for detailed information about the `context`.
|
||||
/// See [`Self::new_from_block`] for detailed information about the `context`.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
|
|
@ -194,7 +194,7 @@ impl AdjustedDifficulty {
|
|||
/// `candidate_height`, `network`, and the relevant `difficulty_threshold`s and
|
||||
/// `time`s.
|
||||
///
|
||||
/// See [`expected_difficulty_threshold()`] for details.
|
||||
/// See [`Self::expected_difficulty_threshold`] for details.
|
||||
///
|
||||
/// Implements `ThresholdBits` from the Zcash specification. (Which excludes the
|
||||
/// Testnet minimum difficulty adjustment.)
|
||||
|
|
@ -293,7 +293,7 @@ impl AdjustedDifficulty {
|
|||
///
|
||||
/// Implements `ActualTimespan` from the Zcash specification.
|
||||
///
|
||||
/// See [`median_timespan_bounded()`] for details.
|
||||
/// See [`Self::median_timespan_bounded`] for details.
|
||||
fn median_timespan(&self) -> Duration {
|
||||
let newer_median = self.median_time_past();
|
||||
|
||||
|
|
|
|||
|
|
@ -10,7 +10,8 @@ use crate::{
|
|||
};
|
||||
|
||||
/// Reject double-spends of nullifiers:
|
||||
/// - one from this [`PreparedBlock`], and the other already committed to the [`FinalizedState`].
|
||||
/// - one from this [`PreparedBlock`], and the other already committed to the
|
||||
/// [`FinalizedState`](super::super::FinalizedState).
|
||||
///
|
||||
/// (Duplicate non-finalized nullifiers are rejected during the chain update,
|
||||
/// see [`add_to_non_finalized_chain_unique`] for details.)
|
||||
|
|
@ -50,12 +51,12 @@ pub(crate) fn no_duplicates_in_finalized_chain(
|
|||
}
|
||||
|
||||
/// Reject double-spends of nullifiers:
|
||||
/// - both within the same [`JoinSplit`] (sprout only),
|
||||
/// - from different [`JoinSplit`]s, [`sapling::Spend`]s or [`Action`]s
|
||||
/// in this [`Transaction`]'s shielded data, or
|
||||
/// - both within the same `JoinSplit` (sprout only),
|
||||
/// - from different `JoinSplit`s, [`sapling::Spend`][2]s or
|
||||
/// [`orchard::Action`][3]s in this [`Transaction`][1]'s shielded data, or
|
||||
/// - one from this shielded data, and another from:
|
||||
/// - a previous transaction in this [`Block`], or
|
||||
/// - a previous block in this non-finalized [`Chain`].
|
||||
/// - a previous transaction in this [`Block`][4], or
|
||||
/// - a previous block in this non-finalized [`Chain`][5].
|
||||
///
|
||||
/// (Duplicate finalized nullifiers are rejected during service contextual validation,
|
||||
/// see [`no_duplicates_in_finalized_chain`] for details.)
|
||||
|
|
@ -74,6 +75,12 @@ pub(crate) fn no_duplicates_in_finalized_chain(
|
|||
/// different pools have nullifiers with same bit pattern, they won't be
|
||||
/// considered the same when determining uniqueness. This is enforced by the
|
||||
/// callers of this function.
|
||||
///
|
||||
/// [1]: zebra_chain::transaction::Transaction
|
||||
/// [2]: zebra_chain::sapling::Spend
|
||||
/// [3]: zebra_chain::orchard::Action
|
||||
/// [4]: zebra_chain::block::Block
|
||||
/// [5]: super::super::Chain
|
||||
#[tracing::instrument(skip(chain_nullifiers, shielded_data_nullifiers))]
|
||||
pub(crate) fn add_to_non_finalized_chain_unique<'block, NullifierT>(
|
||||
chain_nullifiers: &mut HashSet<NullifierT>,
|
||||
|
|
@ -94,8 +101,8 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Remove nullifiers that were previously added to this non-finalized [`Chain`]
|
||||
/// by this shielded data.
|
||||
/// Remove nullifiers that were previously added to this non-finalized
|
||||
/// [`Chain`][1] by this shielded data.
|
||||
///
|
||||
/// "A note can change from being unspent to spent as a node’s view
|
||||
/// of the best valid block chain is extended by new transactions.
|
||||
|
|
@ -114,8 +121,10 @@ where
|
|||
/// Panics if any nullifier is missing from the chain when we try to remove it.
|
||||
///
|
||||
/// Blocks with duplicate nullifiers are rejected by
|
||||
/// [`add_to_non_finalized_chain_unique`], so this shielded data should
|
||||
/// be the only shielded data that added this nullifier to this [`Chain`].
|
||||
/// [`add_to_non_finalized_chain_unique`], so this shielded data should be the
|
||||
/// only shielded data that added this nullifier to this [`Chain`][1].
|
||||
///
|
||||
/// [1]: super::super::Chain
|
||||
#[tracing::instrument(skip(chain_nullifiers, shielded_data_nullifiers))]
|
||||
pub(crate) fn remove_from_non_finalized_chain<'block, NullifierT>(
|
||||
chain_nullifiers: &mut HashSet<NullifierT>,
|
||||
|
|
|
|||
|
|
@ -3,7 +3,8 @@
|
|||
//! Zebra's database is implemented in 4 layers:
|
||||
//! - [`FinalizedState`]: queues, validates, and commits blocks, using...
|
||||
//! - [`ZebraDb`]: reads and writes [`zebra_chain`] types to the database, using...
|
||||
//! - [`DiskDb`]: reads and writes format-specific types to the database, using...
|
||||
//! - [`DiskDb`](disk_db::DiskDb): reads and writes format-specific types
|
||||
//! to the database, using...
|
||||
//! - [`disk_format`]: converts types to raw database bytes.
|
||||
//!
|
||||
//! These layers allow us to split [`zebra_chain`] types for efficient database storage.
|
||||
|
|
@ -136,7 +137,7 @@ impl FinalizedState {
|
|||
///
|
||||
/// Returns the highest finalized tip block committed from the queue,
|
||||
/// or `None` if no blocks were committed in this call.
|
||||
/// (Use [`tip_block`] to get the finalized tip, regardless of when it was committed.)
|
||||
/// (Use `tip_block` to get the finalized tip, regardless of when it was committed.)
|
||||
pub fn queue_and_commit_finalized(
|
||||
&mut self,
|
||||
queued: QueuedFinalized,
|
||||
|
|
@ -182,9 +183,9 @@ impl FinalizedState {
|
|||
/// Commit a finalized block to the state.
|
||||
///
|
||||
/// It's the caller's responsibility to ensure that blocks are committed in
|
||||
/// order. This function is called by [`queue`], which ensures order.
|
||||
/// It is intentionally not exposed as part of the public API of the
|
||||
/// [`FinalizedState`].
|
||||
/// order. This function is called by [`Self::queue_and_commit_finalized`],
|
||||
/// which ensures order. It is intentionally not exposed as part of the
|
||||
/// public API of the [`FinalizedState`].
|
||||
fn commit_finalized(&mut self, queued_block: QueuedFinalized) -> Result<FinalizedBlock, ()> {
|
||||
let (finalized, rsp_tx) = queued_block;
|
||||
let result = self.commit_finalized_direct(finalized.clone(), "CommitFinalized request");
|
||||
|
|
|
|||
|
|
@ -1,7 +1,8 @@
|
|||
//! Provides low-level access to RocksDB using some database-specific types.
|
||||
//!
|
||||
//! This module makes sure that:
|
||||
//! - all disk writes happen inside a RocksDB transaction ([`WriteBatch`]), and
|
||||
//! - all disk writes happen inside a RocksDB transaction
|
||||
//! ([`rocksdb::WriteBatch`]), and
|
||||
//! - format-specific invariants are maintained.
|
||||
//!
|
||||
//! # Correctness
|
||||
|
|
|
|||
|
|
@ -27,12 +27,15 @@ pub trait IntoDisk {
|
|||
|
||||
/// Converts the current type into serialized raw bytes.
|
||||
///
|
||||
/// Used to convert keys to bytes in [`ReadDisk`],
|
||||
/// and keys and values to bytes in [`WriteDisk`].
|
||||
/// Used to convert keys to bytes in [`ReadDisk`][1],
|
||||
/// and keys and values to bytes in [`WriteDisk`][2].
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// - if the input data doesn't serialize correctly
|
||||
///
|
||||
/// [1]: super::disk_db::ReadDisk
|
||||
/// [2]: super::disk_db::WriteDisk
|
||||
fn as_bytes(&self) -> Self::Bytes;
|
||||
}
|
||||
|
||||
|
|
@ -40,11 +43,13 @@ pub trait IntoDisk {
|
|||
pub trait FromDisk: Sized {
|
||||
/// Converts raw disk bytes back into the deserialized type.
|
||||
///
|
||||
/// Used to convert keys and values from bytes in [`ReadDisk`].
|
||||
/// Used to convert keys and values from bytes in [`ReadDisk`][1].
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// - if the input data doesn't deserialize correctly
|
||||
///
|
||||
/// [1]: super::disk_db::ReadDisk
|
||||
fn from_bytes(bytes: impl AsRef<[u8]>) -> Self;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -22,8 +22,8 @@ use serde::{Deserialize, Serialize};
|
|||
|
||||
/// The maximum value of an on-disk serialized [`Height`].
|
||||
///
|
||||
/// This allows us to store [`OutputLocation`]s in 8 bytes,
|
||||
/// which makes database searches more efficient.
|
||||
/// This allows us to store [`OutputLocation`](crate::OutputLocation)s in
|
||||
/// 8 bytes, which makes database searches more efficient.
|
||||
///
|
||||
/// # Consensus
|
||||
///
|
||||
|
|
|
|||
|
|
@ -128,7 +128,7 @@ impl OutputLocation {
|
|||
}
|
||||
}
|
||||
|
||||
/// Creates an output location from an [`Outpoint`],
|
||||
/// Creates an output location from an [`transparent::OutPoint`],
|
||||
/// and the [`TransactionLocation`] of its transaction.
|
||||
///
|
||||
/// The [`TransactionLocation`] is provided separately,
|
||||
|
|
@ -304,12 +304,16 @@ impl AddressUnspentOutput {
|
|||
}
|
||||
}
|
||||
|
||||
/// Create an [`AddressUnspentOutput`] which starts iteration for the supplied address.
|
||||
/// Used to look up the first output with [`ReadDisk::zs_next_key_value_from`].
|
||||
/// Create an [`AddressUnspentOutput`] which starts iteration for the
|
||||
/// supplied address. Used to look up the first output with
|
||||
/// [`ReadDisk::zs_next_key_value_from`][1].
|
||||
///
|
||||
/// The unspent output location is before all unspent output locations in the index.
|
||||
/// It is always invalid, due to the genesis consensus rules. But this is not an issue
|
||||
/// since [`ReadDisk::zs_next_key_value_from`] will fetch the next existing (valid) value.
|
||||
/// The unspent output location is before all unspent output locations in
|
||||
/// the index. It is always invalid, due to the genesis consensus rules. But
|
||||
/// this is not an issue since [`ReadDisk::zs_next_key_value_from`][1] will
|
||||
/// fetch the next existing (valid) value.
|
||||
///
|
||||
/// [1]: super::super::disk_db::ReadDisk::zs_next_key_value_from
|
||||
pub fn address_iterator_start(address_location: AddressLocation) -> AddressUnspentOutput {
|
||||
// Iterating from the lowest possible output location gets us the first output.
|
||||
let zero_output_location = OutputLocation::from_usize(Height(0), 0, 0);
|
||||
|
|
@ -320,11 +324,15 @@ impl AddressUnspentOutput {
|
|||
}
|
||||
}
|
||||
|
||||
/// Update the unspent output location to the next possible output for the supplied address.
|
||||
/// Used to look up the next output with [`ReadDisk::zs_next_key_value_from`].
|
||||
/// Update the unspent output location to the next possible output for the
|
||||
/// supplied address. Used to look up the next output with
|
||||
/// [`ReadDisk::zs_next_key_value_from`][1].
|
||||
///
|
||||
/// The updated unspent output location may be invalid, which is not an issue
|
||||
/// since [`ReadDisk::zs_next_key_value_from`] will fetch the next existing (valid) value.
|
||||
/// The updated unspent output location may be invalid, which is not an
|
||||
/// issue since [`ReadDisk::zs_next_key_value_from`][1] will fetch the next
|
||||
/// existing (valid) value.
|
||||
///
|
||||
/// [1]: super::super::disk_db::ReadDisk::zs_next_key_value_from
|
||||
pub fn address_iterator_next(&mut self) {
|
||||
// Iterating from the next possible output location gets us the next output,
|
||||
// even if it is in a later block or transaction.
|
||||
|
|
@ -394,14 +402,19 @@ impl AddressTransaction {
|
|||
}
|
||||
}
|
||||
|
||||
/// Create an [`AddressTransaction`] which starts iteration for the supplied address.
|
||||
/// Starts at the first UTXO, or at the `query_start` height, whichever is greater.
|
||||
/// Create an [`AddressTransaction`] which starts iteration for the supplied
|
||||
/// address. Starts at the first UTXO, or at the `query_start` height,
|
||||
/// whichever is greater.
|
||||
///
|
||||
/// Used to look up the first transaction with [`ReadDisk::zs_next_key_value_from`].
|
||||
/// Used to look up the first transaction with
|
||||
/// [`ReadDisk::zs_next_key_value_from`][1].
|
||||
///
|
||||
/// The transaction location might be invalid, if it is based on the `query_start` height.
|
||||
/// But this is not an issue, since [`ReadDisk::zs_next_key_value_from`]
|
||||
/// will fetch the next existing (valid) value.
|
||||
/// The transaction location might be invalid, if it is based on the
|
||||
/// `query_start` height. But this is not an issue, since
|
||||
/// [`ReadDisk::zs_next_key_value_from`][1] will fetch the next existing
|
||||
/// (valid) value.
|
||||
///
|
||||
/// [1]: super::super::disk_db::ReadDisk::zs_next_key_value_from
|
||||
pub fn address_iterator_start(
|
||||
address_location: AddressLocation,
|
||||
query_start: Height,
|
||||
|
|
@ -421,11 +434,15 @@ impl AddressTransaction {
|
|||
}
|
||||
}
|
||||
|
||||
/// Update the transaction location to the next possible transaction for the supplied address.
|
||||
/// Used to look up the next output with [`ReadDisk::zs_next_key_value_from`].
|
||||
/// Update the transaction location to the next possible transaction for the
|
||||
/// supplied address. Used to look up the next output with
|
||||
/// [`ReadDisk::zs_next_key_value_from`][1].
|
||||
///
|
||||
/// The updated transaction location may be invalid, which is not an issue
|
||||
/// since [`ReadDisk::zs_next_key_value_from`] will fetch the next existing (valid) value.
|
||||
/// since [`ReadDisk::zs_next_key_value_from`][1] will fetch the next
|
||||
/// existing (valid) value.
|
||||
///
|
||||
/// [1]: super::super::disk_db::ReadDisk::zs_next_key_value_from
|
||||
pub fn address_iterator_next(&mut self) {
|
||||
// Iterating from the next possible output location gets us the next output,
|
||||
// even if it is in a later block or transaction.
|
||||
|
|
|
|||
|
|
@ -72,7 +72,7 @@ impl ZebraDb {
|
|||
///
|
||||
/// # Logs an Error
|
||||
///
|
||||
/// If Zebra is storing block heights that are close to [`MAX_ON_DISK_BLOCK_HEIGHT`].
|
||||
/// If Zebra is storing block heights that are close to [`MAX_ON_DISK_HEIGHT`].
|
||||
fn check_max_on_disk_tip_height(&self) {
|
||||
if let Some((tip_height, tip_hash)) = self.tip() {
|
||||
if tip_height.0 > MAX_ON_DISK_HEIGHT.0 / 2 {
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
//! Provides high-level access to database:
|
||||
//! - unspent [`transparent::Outputs`]s (UTXOs), and
|
||||
//! - unspent [`transparent::Output`]s (UTXOs), and
|
||||
//! - transparent address indexes.
|
||||
//!
|
||||
//! This module makes sure that:
|
||||
|
|
|
|||
|
|
@ -37,7 +37,7 @@ pub mod index;
|
|||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Chain {
|
||||
// The function `eq_internal_state` must be updated every time a field is added to `Chain`.
|
||||
// The function `eq_internal_state` must be updated every time a field is added to [`Chain`].
|
||||
/// The configured network for this chain.
|
||||
network: Network,
|
||||
|
||||
|
|
@ -50,35 +50,35 @@ pub struct Chain {
|
|||
/// An index of [`TransactionLocation`]s for each transaction hash in `blocks`.
|
||||
pub tx_by_hash: HashMap<transaction::Hash, TransactionLocation>,
|
||||
|
||||
/// The [`Utxo`]s created by `blocks`.
|
||||
/// The [`transparent::Utxo`]s created by `blocks`.
|
||||
///
|
||||
/// Note that these UTXOs may not be unspent.
|
||||
/// Outputs can be spent by later transactions or blocks in the chain.
|
||||
//
|
||||
// TODO: replace OutPoint with OutputLocation?
|
||||
pub(crate) created_utxos: HashMap<transparent::OutPoint, transparent::OrderedUtxo>,
|
||||
/// The [`OutPoint`]s spent by `blocks`,
|
||||
/// The [`transparent::OutPoint`]s spent by `blocks`,
|
||||
/// including those created by earlier transactions or blocks in the chain.
|
||||
pub(crate) spent_utxos: HashSet<transparent::OutPoint>,
|
||||
|
||||
/// The Sprout note commitment tree of the tip of this `Chain`,
|
||||
/// The Sprout note commitment tree of the tip of this [`Chain`],
|
||||
/// including all finalized notes, and the non-finalized notes in this chain.
|
||||
pub(super) sprout_note_commitment_tree: sprout::tree::NoteCommitmentTree,
|
||||
/// The Sprout note commitment tree for each anchor.
|
||||
/// This is required for interstitial states.
|
||||
pub(crate) sprout_trees_by_anchor:
|
||||
HashMap<sprout::tree::Root, sprout::tree::NoteCommitmentTree>,
|
||||
/// The Sapling note commitment tree of the tip of this `Chain`,
|
||||
/// The Sapling note commitment tree of the tip of this [`Chain`],
|
||||
/// including all finalized notes, and the non-finalized notes in this chain.
|
||||
pub(super) sapling_note_commitment_tree: sapling::tree::NoteCommitmentTree,
|
||||
/// The Sapling note commitment tree for each height.
|
||||
pub(crate) sapling_trees_by_height: BTreeMap<block::Height, sapling::tree::NoteCommitmentTree>,
|
||||
/// The Orchard note commitment tree of the tip of this `Chain`,
|
||||
/// The Orchard note commitment tree of the tip of this [`Chain`],
|
||||
/// including all finalized notes, and the non-finalized notes in this chain.
|
||||
pub(super) orchard_note_commitment_tree: orchard::tree::NoteCommitmentTree,
|
||||
/// The Orchard note commitment tree for each height.
|
||||
pub(crate) orchard_trees_by_height: BTreeMap<block::Height, orchard::tree::NoteCommitmentTree>,
|
||||
/// The ZIP-221 history tree of the tip of this `Chain`,
|
||||
/// The ZIP-221 history tree of the tip of this [`Chain`],
|
||||
/// including all finalized blocks, and the non-finalized `blocks` in this chain.
|
||||
pub(crate) history_tree: HistoryTree,
|
||||
|
||||
|
|
@ -112,7 +112,7 @@ pub struct Chain {
|
|||
/// because they are common to all non-finalized chains.
|
||||
pub(super) partial_cumulative_work: PartialCumulativeWork,
|
||||
|
||||
/// The chain value pool balances of the tip of this `Chain`,
|
||||
/// The chain value pool balances of the tip of this [`Chain`],
|
||||
/// including the block value pool changes from all finalized blocks,
|
||||
/// and the non-finalized blocks in this chain.
|
||||
///
|
||||
|
|
@ -222,7 +222,7 @@ impl Chain {
|
|||
/// If the block is invalid, drops this chain, and returns an error.
|
||||
///
|
||||
/// Note: a [`ContextuallyValidBlock`] isn't actually contextually valid until
|
||||
/// [`update_chain_state_with`] returns success.
|
||||
/// [`Self::update_chain_tip_with`] returns success.
|
||||
#[instrument(level = "debug", skip(self, block), fields(block = %block.block))]
|
||||
pub fn push(mut self, block: ContextuallyValidBlock) -> Result<Chain, ValidateContextError> {
|
||||
// update cumulative data members
|
||||
|
|
@ -291,7 +291,7 @@ impl Chain {
|
|||
|
||||
// Rebuild the note commitment trees, starting from the finalized tip tree.
|
||||
// TODO: change to a more efficient approach by removing nodes
|
||||
// from the tree of the original chain (in `pop_tip()`).
|
||||
// from the tree of the original chain (in [`Self::pop_tip`]).
|
||||
// See https://github.com/ZcashFoundation/zebra/issues/2378
|
||||
for block in forked.blocks.values() {
|
||||
for transaction in block.block.transactions.iter() {
|
||||
|
|
@ -674,30 +674,31 @@ impl Chain {
|
|||
/// The revert position being performed on a chain.
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
|
||||
enum RevertPosition {
|
||||
/// The chain root is being reverted via [`pop_root`],
|
||||
/// when a block is finalized.
|
||||
/// The chain root is being reverted via [`Chain::pop_root`], when a block
|
||||
/// is finalized.
|
||||
Root,
|
||||
|
||||
/// The chain tip is being reverted via [`pop_tip`],
|
||||
/// The chain tip is being reverted via [`Chain::pop_tip`],
|
||||
/// when a chain is forked.
|
||||
Tip,
|
||||
}
|
||||
|
||||
/// Helper trait to organize inverse operations done on the `Chain` type.
|
||||
/// Helper trait to organize inverse operations done on the [`Chain`] type.
|
||||
///
|
||||
/// Used to overload update and revert methods, based on the type of the argument,
|
||||
/// and the position of the removed block in the chain.
|
||||
///
|
||||
/// This trait was motivated by the length of the `push`, `pop_root`, and `pop_tip` functions,
|
||||
/// and fear that it would be easy to introduce bugs when updating them,
|
||||
/// unless the code was reorganized to keep related operations adjacent to each other.
|
||||
/// This trait was motivated by the length of the `push`, [`Chain::pop_root`],
|
||||
/// and [`Chain::pop_tip`] functions, and fear that it would be easy to
|
||||
/// introduce bugs when updating them, unless the code was reorganized to keep
|
||||
/// related operations adjacent to each other.
|
||||
trait UpdateWith<T> {
|
||||
/// When `T` is added to the chain tip,
|
||||
/// update `Chain` cumulative data members to add data that are derived from `T`.
|
||||
/// update [`Chain`] cumulative data members to add data that are derived from `T`.
|
||||
fn update_chain_tip_with(&mut self, _: &T) -> Result<(), ValidateContextError>;
|
||||
|
||||
/// When `T` is removed from `position` in the chain,
|
||||
/// revert `Chain` cumulative data members to remove data that are derived from `T`.
|
||||
/// revert [`Chain`] cumulative data members to remove data that are derived from `T`.
|
||||
fn revert_chain_with(&mut self, _: &T, position: RevertPosition);
|
||||
}
|
||||
|
||||
|
|
@ -1268,7 +1269,7 @@ where
|
|||
if let Some(sapling_shielded_data) = sapling_shielded_data {
|
||||
// Note commitments are not removed from the tree here because we
|
||||
// don't support that operation yet. Instead, we recreate the tree
|
||||
// from the finalized tip in NonFinalizedState.
|
||||
// from the finalized tip in `NonFinalizedState`.
|
||||
|
||||
check::nullifier::remove_from_non_finalized_chain(
|
||||
&mut self.sapling_nullifiers,
|
||||
|
|
@ -1348,9 +1349,9 @@ impl UpdateWith<ValueBalance<NegativeAllowed>> for Chain {
|
|||
/// When forking from the tip, subtract the block's chain value pool change.
|
||||
///
|
||||
/// When finalizing the root, leave the chain value pool balances unchanged.
|
||||
/// [`chain_value_pools`] tracks the chain value pools for all finalized blocks,
|
||||
/// and the non-finalized blocks in this chain.
|
||||
/// So finalizing the root doesn't change the set of blocks it tracks.
|
||||
/// [`Self::chain_value_pools`] tracks the chain value pools for all
|
||||
/// finalized blocks, and the non-finalized blocks in this chain. So
|
||||
/// finalizing the root doesn't change the set of blocks it tracks.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
|
|
@ -1373,13 +1374,15 @@ impl UpdateWith<ValueBalance<NegativeAllowed>> for Chain {
|
|||
}
|
||||
|
||||
impl Ord for Chain {
|
||||
/// Chain order for the [`NonFinalizedState`]'s `chain_set`.
|
||||
/// Chain order for the [`NonFinalizedState`][1]'s `chain_set`.
|
||||
///
|
||||
/// Chains with higher cumulative Proof of Work are [`Ordering::Greater`],
|
||||
/// breaking ties using the tip block hash.
|
||||
///
|
||||
/// Despite the consensus rules, Zebra uses the tip block hash as a tie-breaker.
|
||||
/// Zebra blocks are downloaded in parallel, so download timestamps may not be unique.
|
||||
/// (And Zebra currently doesn't track download times, because [`Block`]s are immutable.)
|
||||
/// Despite the consensus rules, Zebra uses the tip block hash as a
|
||||
/// tie-breaker. Zebra blocks are downloaded in parallel, so download
|
||||
/// timestamps may not be unique. (And Zebra currently doesn't track
|
||||
/// download times, because [`Block`](block::Block)s are immutable.)
|
||||
///
|
||||
/// This departure from the consensus rules may delay network convergence,
|
||||
/// for as long as the greater hash belongs to the later mined block.
|
||||
|
|
@ -1414,10 +1417,13 @@ impl Ord for Chain {
|
|||
///
|
||||
/// If two chains compare equal.
|
||||
///
|
||||
/// This panic enforces the `NonFinalizedState.chain_set` unique chain invariant.
|
||||
/// This panic enforces the [`NonFinalizedState::chain_set`][2] unique chain invariant.
|
||||
///
|
||||
/// If the chain set contains duplicate chains, the non-finalized state might
|
||||
/// handle new blocks or block finalization incorrectly.
|
||||
///
|
||||
/// [1]: super::NonFinalizedState
|
||||
/// [2]: super::NonFinalizedState::chain_set
|
||||
fn cmp(&self, other: &Self) -> Ordering {
|
||||
if self.partial_cumulative_work != other.partial_cumulative_work {
|
||||
self.partial_cumulative_work
|
||||
|
|
@ -1454,14 +1460,16 @@ impl PartialOrd for Chain {
|
|||
}
|
||||
|
||||
impl PartialEq for Chain {
|
||||
/// Chain equality for the [`NonFinalizedState`]'s `chain_set`,
|
||||
/// using proof of work, then the tip block hash as a tie-breaker.
|
||||
/// Chain equality for [`NonFinalizedState::chain_set`][1], using proof of
|
||||
/// work, then the tip block hash as a tie-breaker.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// If two chains compare equal.
|
||||
///
|
||||
/// See [`Chain::cmp`] for details.
|
||||
///
|
||||
/// [1]: super::NonFinalizedState::chain_set
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.partial_cmp(other) == Some(Ordering::Equal)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -20,16 +20,14 @@ use super::{RevertPosition, UpdateWith};
|
|||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct TransparentTransfers {
|
||||
/// The partial chain balance for a transparent address.
|
||||
///
|
||||
/// TODO:
|
||||
/// - to avoid [`ReadStateService`] response inconsistencies when a block has just been finalized,
|
||||
/// revert UTXO receives and spends that are at a height less than or equal to the finalized tip.
|
||||
balance: Amount<NegativeAllowed>,
|
||||
|
||||
/// The partial list of transactions that spent or received UTXOs to a transparent address.
|
||||
///
|
||||
/// Since transactions can only be added to this set, it does not need special handling
|
||||
/// for [`ReadStateService`] response inconsistencies.
|
||||
/// Since transactions can only be added to this set, it does not need
|
||||
/// special handling for
|
||||
/// [`ReadStateService`](crate::service::ReadStateService) response
|
||||
/// inconsistencies.
|
||||
///
|
||||
/// The `getaddresstxids` RPC needs these transaction IDs to be sorted in chain order.
|
||||
tx_ids: MultiSet<transaction::Hash>,
|
||||
|
|
@ -39,11 +37,7 @@ pub struct TransparentTransfers {
|
|||
/// The `getaddressutxos` RPC doesn't need these transaction IDs to be sorted in chain order,
|
||||
/// but it might in future. So Zebra does it anyway.
|
||||
///
|
||||
/// TODO:
|
||||
/// - to avoid [`ReadStateService`] response inconsistencies when a block has just been finalized,
|
||||
/// combine the created UTXOs, combine the spent UTXOs, and then remove spent from created
|
||||
///
|
||||
/// Optional:
|
||||
/// Optional TODOs:
|
||||
/// - store `Utxo`s in the chain, and just store the created locations for this address
|
||||
/// - if we add an OutputLocation to UTXO, remove this OutputLocation,
|
||||
/// and use the inner OutputLocation to sort Utxos in chain order
|
||||
|
|
@ -210,16 +204,21 @@ impl TransparentTransfers {
|
|||
self.balance
|
||||
}
|
||||
|
||||
/// Returns the [`transaction::Hash`]es of the transactions that sent or received
|
||||
/// transparent transfers to this address, in this partial chain, filtered by `query_height_range`.
|
||||
/// Returns the [`transaction::Hash`]es of the transactions that sent or
|
||||
/// received transparent transfers to this address, in this partial chain,
|
||||
/// filtered by `query_height_range`.
|
||||
///
|
||||
/// The transactions are returned in chain order.
|
||||
///
|
||||
/// `chain_tx_by_hash` should be the `tx_by_hash` field from the [`Chain`] containing this index.
|
||||
/// `chain_tx_by_hash` should be the `tx_by_hash` field from the
|
||||
/// [`Chain`][1] containing this index.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// If `chain_tx_by_hash` is missing some transaction hashes from this index.
|
||||
/// If `chain_tx_by_hash` is missing some transaction hashes from this
|
||||
/// index.
|
||||
///
|
||||
/// [1]: super::super::Chain
|
||||
pub fn tx_ids(
|
||||
&self,
|
||||
chain_tx_by_hash: &HashMap<transaction::Hash, TransactionLocation>,
|
||||
|
|
@ -270,7 +269,7 @@ impl Default for TransparentTransfers {
|
|||
}
|
||||
}
|
||||
|
||||
/// Returns the transaction location for an [`OrderedUtxo`].
|
||||
/// Returns the transaction location for an [`transparent::OrderedUtxo`].
|
||||
pub fn transaction_location(ordered_utxo: &transparent::OrderedUtxo) -> TransactionLocation {
|
||||
TransactionLocation::from_usize(ordered_utxo.utxo.height, ordered_utxo.tx_index_in_block)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -35,8 +35,9 @@ impl PendingUtxos {
|
|||
}
|
||||
}
|
||||
|
||||
/// Notify all requests waiting for the [`Utxo`] pointed to by the given
|
||||
/// [`transparent::OutPoint`] that the [`Utxo`] has arrived.
|
||||
/// Notify all requests waiting for the [`transparent::Utxo`] pointed to by
|
||||
/// the given [`transparent::OutPoint`] that the [`transparent::Utxo`] has
|
||||
/// arrived.
|
||||
pub fn respond(&mut self, outpoint: &transparent::OutPoint, utxo: transparent::Utxo) {
|
||||
if let Some(sender) = self.0.remove(outpoint) {
|
||||
// Adding the outpoint as a field lets us cross-reference
|
||||
|
|
@ -46,7 +47,8 @@ impl PendingUtxos {
|
|||
}
|
||||
}
|
||||
|
||||
/// Check the list of pending UTXO requests against the supplied [`OrderedUtxo`] index.
|
||||
/// Check the list of pending UTXO requests against the supplied
|
||||
/// [`transparent::OrderedUtxo`] index.
|
||||
pub fn check_against_ordered(
|
||||
&mut self,
|
||||
ordered_utxos: &HashMap<transparent::OutPoint, transparent::OrderedUtxo>,
|
||||
|
|
@ -56,7 +58,7 @@ impl PendingUtxos {
|
|||
}
|
||||
}
|
||||
|
||||
/// Check the list of pending UTXO requests against the supplied [`Utxo`] index.
|
||||
/// Check the list of pending UTXO requests against the supplied [`transparent::Utxo`] index.
|
||||
pub fn check_against(&mut self, utxos: &HashMap<transparent::OutPoint, transparent::Utxo>) {
|
||||
for (outpoint, utxo) in utxos.iter() {
|
||||
self.respond(outpoint, utxo.clone())
|
||||
|
|
|
|||
|
|
@ -1,12 +1,14 @@
|
|||
//! Shared state reading code.
|
||||
//!
|
||||
//! Used by [`StateService`](crate::StateService) and
|
||||
//! [`ReadStateService`](crate::ReadStateService) to read from the best
|
||||
//! [`Chain`] in the
|
||||
//! [`NonFinalizedState`](crate::service::non_finalized_state::NonFinalizedState),
|
||||
//! and the database in the
|
||||
//! [`FinalizedState`](crate::service::finalized_state::FinalizedState).
|
||||
|
||||
//! Used by [`StateService`][1] and [`ReadStateService`][2] to read from the
|
||||
//! best [`Chain`][5] in the [`NonFinalizedState`][3], and the database in the
|
||||
//! [`FinalizedState`][4].
|
||||
//!
|
||||
//! [1]: super::StateService
|
||||
//! [2]: super::ReadStateService
|
||||
//! [3]: super::non_finalized_state::NonFinalizedState
|
||||
//! [4]: super::finalized_state::FinalizedState
|
||||
//! [5]: super::Chain
|
||||
use std::{
|
||||
collections::{BTreeMap, BTreeSet, HashSet},
|
||||
ops::RangeInclusive,
|
||||
|
|
|
|||
|
|
@ -46,9 +46,10 @@ where
|
|||
///
|
||||
/// # Performance
|
||||
///
|
||||
/// A single read lock is acquired to clone `T`, and then released after the clone.
|
||||
/// To make this clone efficient, large or expensive `T` can be wrapped in an [`Arc`].
|
||||
/// (Or individual fields can be wrapped in an `Arc`.)
|
||||
/// A single read lock is acquired to clone `T`, and then released after the
|
||||
/// clone. To make this clone efficient, large or expensive `T` can be
|
||||
/// wrapped in an [`std::sync::Arc`]. (Or individual fields can be wrapped
|
||||
/// in an [`std::sync::Arc`].)
|
||||
///
|
||||
/// # Correctness
|
||||
///
|
||||
|
|
|
|||
|
|
@ -41,7 +41,7 @@ pub fn test_cmd(command_path: &str, tempdir: &Path) -> Result<Command> {
|
|||
|
||||
// TODO: split these extensions into their own module
|
||||
|
||||
/// Wrappers for `Command` methods to integrate with [`zebra_test`].
|
||||
/// Wrappers for `Command` methods to integrate with [`zebra_test`](crate).
|
||||
pub trait CommandExt {
|
||||
/// wrapper for `status` fn on `Command` that constructs informative error
|
||||
/// reports
|
||||
|
|
@ -637,7 +637,7 @@ impl<T> TestChild<T> {
|
|||
/// if a line matches.
|
||||
///
|
||||
/// Kills the child on error, or after the configured timeout has elapsed.
|
||||
/// See `expect_line_matching` for details.
|
||||
/// See [`Self::expect_line_matching_regex_set`] for details.
|
||||
#[instrument(skip(self))]
|
||||
pub fn expect_stdout_line_matches<R>(&mut self, success_regex: R) -> Result<&mut Self>
|
||||
where
|
||||
|
|
@ -663,7 +663,7 @@ impl<T> TestChild<T> {
|
|||
/// if a line matches.
|
||||
///
|
||||
/// Kills the child on error, or after the configured timeout has elapsed.
|
||||
/// See `expect_line_matching` for details.
|
||||
/// See [`Self::expect_line_matching_regex_set`] for details.
|
||||
#[instrument(skip(self))]
|
||||
pub fn expect_stderr_line_matches<R>(&mut self, success_regex: R) -> Result<&mut Self>
|
||||
where
|
||||
|
|
@ -687,8 +687,8 @@ impl<T> TestChild<T> {
|
|||
|
||||
/// Checks each line in `lines` against a regex set, and returns Ok if a line matches.
|
||||
///
|
||||
/// [`TestChild::expect_line_matching`] wrapper for strings, [`Regex`]es,
|
||||
/// and [`RegexSet`]s.
|
||||
/// [`Self::expect_line_matching_regexes`] wrapper for strings,
|
||||
/// [`Regex`](regex::Regex)es, and [`RegexSet`]s.
|
||||
pub fn expect_line_matching_regex_set<L, R>(
|
||||
&mut self,
|
||||
lines: &mut L,
|
||||
|
|
@ -708,7 +708,7 @@ impl<T> TestChild<T> {
|
|||
|
||||
/// Checks each line in `lines` against a regex set, and returns Ok if a line matches.
|
||||
///
|
||||
/// [`TestChild::expect_line_matching`] wrapper for regular expression iterators.
|
||||
/// [`Self::expect_line_matching_regexes`] wrapper for regular expression iterators.
|
||||
pub fn expect_line_matching_regex_iter<L, I>(
|
||||
&mut self,
|
||||
lines: &mut L,
|
||||
|
|
|
|||
|
|
@ -2,7 +2,8 @@
|
|||
//! * addr (v1): [addr Bitcoin Reference](https://developer.bitcoin.org/reference/p2p_networking.html#addr)
|
||||
//! * addrv2: [ZIP-155](https://zips.z.cash/zip-0155#specification)
|
||||
//!
|
||||
//! These formats are deserialized into the [`zebra_network::Message::Addr`] variant.
|
||||
//! These formats are deserialized into the
|
||||
//! `zebra_network::protocol::external::Message::Addr` variant.
|
||||
|
||||
use hex::FromHex;
|
||||
use lazy_static::lazy_static;
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
//! Common [`zebra_test`] types, traits, and functions.
|
||||
//! Common [`zebra_test`](crate) types, traits, and functions.
|
||||
|
||||
pub use crate::command::{test_cmd, CommandExt, TestChild};
|
||||
pub use std::process::Stdio;
|
||||
|
|
|
|||
Loading…
Reference in New Issue