change(state): Make the types for finalized blocks consistent (#7923)

* Stop using `SemanticallyVerifiedBlockWithTrees`

* Apply suggestions from code review

Co-authored-by: teor <teor@riseup.net>

* Doc that default treestate corresponds to genesis

* Doc the diff between default values of tree roots

---------

Co-authored-by: teor <teor@riseup.net>
Marek 2023-11-08 23:06:16 +01:00 committed by GitHub
parent 86e468f3b0
commit 2fad3573fd
11 changed files with 129 additions and 78 deletions


@ -325,6 +325,10 @@ pub enum NoteCommitmentTreeError {
}
/// Orchard Incremental Note Commitment Tree
///
/// Note that the default value of the [`Root`] type is `[0, 0, 0, 0]`. However, this differs from
/// the root of the default tree, which is the hash of the root's child nodes. The default tree is
/// the empty tree, whose leaves are all empty.
#[derive(Debug, Serialize, Deserialize)]
#[serde(into = "LegacyNoteCommitmentTree")]
#[serde(from = "LegacyNoteCommitmentTree")]


@ -11,7 +11,9 @@ use crate::{
};
/// An argument wrapper struct for note commitment trees.
#[derive(Clone, Debug)]
///
/// The default instance represents the trees and subtrees that correspond to the genesis block.
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct NoteCommitmentTrees {
/// The sprout note commitment tree.
pub sprout: Arc<sprout::tree::NoteCommitmentTree>,


@ -305,6 +305,10 @@ pub enum NoteCommitmentTreeError {
}
/// Sapling Incremental Note Commitment Tree.
///
/// Note that the default value of the [`Root`] type is `[0, 0, 0, 0]`. However, this differs from
/// the root of the default tree, which is the hash of the root's child nodes. The default tree is
/// the empty tree, whose leaves are all empty.
#[derive(Debug, Serialize, Deserialize)]
#[serde(into = "LegacyNoteCommitmentTree")]
#[serde(from = "LegacyNoteCommitmentTree")]


@ -206,6 +206,10 @@ pub enum NoteCommitmentTreeError {
/// Internally this wraps [`bridgetree::Frontier`], so that we can maintain and increment
/// the full tree with only the minimal amount of non-empty nodes/leaves required.
///
/// Note that the default value of the [`Root`] type is `[0, 0, 0, 0]`. However, this differs from
/// the root of the default tree (the empty tree), which is the pair-wise hash of the tree's empty
/// leaves up to the tree's root level.
///
/// [Sprout Note Commitment Tree]: https://zips.z.cash/protocol/protocol.pdf#merkletree
/// [nullifier set]: https://zips.z.cash/protocol/protocol.pdf#nullifierset
#[derive(Debug, Serialize, Deserialize)]

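The distinction these new doc comments draw can be illustrated with a small standalone sketch: `Root::default()` is all zeros, while the root of the default (empty) tree is produced by hashing empty children pair-wise up to the root level. The sketch below is not Zebra's note commitment tree code; the hash function, tree depth, and names are stand-ins chosen only to keep it dependency-free and runnable (the real trees use SHA-256 compression, Pedersen, or Sinsemilla hashes depending on the pool).

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Stand-in node hash, chosen only to avoid external crates.
fn merkle_parent(left: u64, right: u64) -> u64 {
    let mut hasher = DefaultHasher::new();
    (left, right).hash(&mut hasher);
    hasher.finish()
}

// Root of an empty tree of the given depth: hash empty leaves pair-wise
// up to the root level, as the Sprout doc comment above describes.
fn empty_root(depth: u32) -> u64 {
    let mut node = 0u64; // an empty leaf
    for _ in 0..depth {
        node = merkle_parent(node, node);
    }
    node
}

fn main() {
    // The "default value of the Root type" from the doc comment: all zeros.
    let default_root = 0u64;
    // The root of the default (empty) tree: a real hash, not zeros.
    let empty_tree_root = empty_root(29); // depth is illustrative
    assert_ne!(default_root, empty_tree_root);
    println!("empty-tree root: {empty_tree_root:#x}");
}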

@ -224,6 +224,9 @@ pub struct ContextuallyVerifiedBlock {
}
/// Wraps note commitment trees and the history tree together.
///
/// The default instance represents the treestate that corresponds to the genesis block.
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct Treestate {
/// Note commitment trees.
pub note_commitment_trees: NoteCommitmentTrees,
@ -253,20 +256,6 @@ impl Treestate {
}
}
/// Contains a block ready to be committed together with its associated
/// treestate.
///
/// Zebra's non-finalized state passes this `struct` over to the finalized state
/// when committing a block. The associated treestate is passed so that the
/// finalized state does not have to retrieve the previous treestate from the
/// database and recompute a new one.
pub struct SemanticallyVerifiedBlockWithTrees {
/// A block ready to be committed.
pub verified: SemanticallyVerifiedBlock,
/// The treestate associated with the block.
pub treestate: Treestate,
}
/// Contains a block ready to be committed.
///
/// Zebra's state service passes this `enum` over to the finalized state
@ -281,6 +270,54 @@ pub enum FinalizableBlock {
},
}
/// Contains a block with all its associated data that the finalized state can commit to its
/// database.
///
/// Note that it's the constructor's responsibility to ensure that all data is valid and verified.
pub struct FinalizedBlock {
/// The block to commit to the state.
pub(super) block: Arc<Block>,
/// The hash of the block.
pub(super) hash: block::Hash,
/// The height of the block.
pub(super) height: block::Height,
/// New transparent outputs created in this block, indexed by
/// [`OutPoint`](transparent::OutPoint).
pub(super) new_outputs: HashMap<transparent::OutPoint, transparent::OrderedUtxo>,
/// A precomputed list of the hashes of the transactions in this block, in the same order as
/// `block.transactions`.
pub(super) transaction_hashes: Arc<[transaction::Hash]>,
/// The treestate associated with the block.
pub(super) treestate: Treestate,
}
impl FinalizedBlock {
/// Constructs [`FinalizedBlock`] from [`CheckpointVerifiedBlock`] and its [`Treestate`].
pub fn from_checkpoint_verified(block: CheckpointVerifiedBlock, treestate: Treestate) -> Self {
Self::from_semantically_verified(SemanticallyVerifiedBlock::from(block), treestate)
}
/// Constructs [`FinalizedBlock`] from [`ContextuallyVerifiedBlock`] and its [`Treestate`].
pub fn from_contextually_verified(
block: ContextuallyVerifiedBlock,
treestate: Treestate,
) -> Self {
Self::from_semantically_verified(SemanticallyVerifiedBlock::from(block), treestate)
}
/// Constructs [`FinalizedBlock`] from [`SemanticallyVerifiedBlock`] and its [`Treestate`].
fn from_semantically_verified(block: SemanticallyVerifiedBlock, treestate: Treestate) -> Self {
Self {
block: block.block,
hash: block.hash,
height: block.height,
new_outputs: block.new_outputs,
transaction_hashes: block.transaction_hashes,
treestate,
}
}
}
impl FinalizableBlock {
/// Create a new [`FinalizableBlock`] given a [`ContextuallyVerifiedBlock`].
pub fn new(contextually_verified: ContextuallyVerifiedBlock, treestate: Treestate) -> Self {

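The core of the change above is that the removed `SemanticallyVerifiedBlockWithTrees` wrapper is replaced by `FinalizedBlock`, whose fields are `pub(super)` and which can only be obtained through constructors that take an already-verified block together with its `Treestate` (with `Treestate::default()` standing for the genesis treestate). A minimal standalone sketch of that pattern follows; the types are simplified stand-ins, not Zebra's real definitions.

mod state {
    // Stand-in for the note commitment trees plus history tree; the default
    // value plays the role of the genesis treestate.
    #[derive(Debug, Default)]
    pub struct Treestate;

    pub struct CheckpointVerifiedBlock(pub String);
    pub struct ContextuallyVerifiedBlock(pub String);

    // Fields stay private to this module, so a FinalizedBlock can only be
    // built from inputs that have already been verified.
    pub struct FinalizedBlock {
        block: String,
        treestate: Treestate,
    }

    impl FinalizedBlock {
        pub fn from_checkpoint_verified(block: CheckpointVerifiedBlock, treestate: Treestate) -> Self {
            Self { block: block.0, treestate }
        }

        pub fn from_contextually_verified(block: ContextuallyVerifiedBlock, treestate: Treestate) -> Self {
            Self { block: block.0, treestate }
        }

        pub fn describe(&self) -> String {
            format!("committing {:?} with treestate {:?}", self.block, self.treestate)
        }
    }
}

fn main() {
    use state::*;

    // Checkpoint path: the genesis (default) treestate, as in the database
    // round-trip test later in this diff.
    let genesis = FinalizedBlock::from_checkpoint_verified(
        CheckpointVerifiedBlock("block 0".into()),
        Treestate::default(),
    );

    // Non-finalized path: a treestate computed by the caller.
    let next = FinalizedBlock::from_contextually_verified(
        ContextuallyVerifiedBlock("block 1".into()),
        Treestate::default(),
    );

    println!("{}", genesis.describe());
    println!("{}", next.describe());
}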

@ -23,7 +23,7 @@ use std::{
use zebra_chain::{block, parallel::tree::NoteCommitmentTrees, parameters::Network};
use crate::{
request::{FinalizableBlock, SemanticallyVerifiedBlockWithTrees, Treestate},
request::{FinalizableBlock, FinalizedBlock, Treestate},
service::{check, QueuedCheckpointVerified},
BoxError, CheckpointVerifiedBlock, CloneError, Config,
};
@ -305,17 +305,15 @@ impl FinalizedState {
let sapling_root = note_commitment_trees.sapling.root();
let orchard_root = note_commitment_trees.orchard.root();
history_tree_mut.push(self.network(), block.clone(), sapling_root, orchard_root)?;
let treestate = Treestate {
note_commitment_trees,
history_tree,
};
(
checkpoint_verified.height,
checkpoint_verified.hash,
SemanticallyVerifiedBlockWithTrees {
verified: checkpoint_verified.0,
treestate: Treestate {
note_commitment_trees,
history_tree,
},
},
FinalizedBlock::from_checkpoint_verified(checkpoint_verified, treestate),
Some(prev_note_commitment_trees),
)
}
@ -325,10 +323,7 @@ impl FinalizedState {
} => (
contextually_verified.height,
contextually_verified.hash,
SemanticallyVerifiedBlockWithTrees {
verified: contextually_verified.into(),
treestate,
},
FinalizedBlock::from_contextually_verified(contextually_verified, treestate),
prev_note_commitment_trees,
),
};
@ -339,7 +334,7 @@ impl FinalizedState {
// Assert that callers (including unit tests) get the chain order correct
if self.db.is_empty() {
assert_eq!(
committed_tip_hash, finalized.verified.block.header.previous_block_hash,
committed_tip_hash, finalized.block.header.previous_block_hash,
"the first block added to an empty state must be a genesis block, source: {source}",
);
assert_eq!(
@ -355,13 +350,13 @@ impl FinalizedState {
);
assert_eq!(
committed_tip_hash, finalized.verified.block.header.previous_block_hash,
committed_tip_hash, finalized.block.header.previous_block_hash,
"committed block must be a child of the finalized tip, source: {source}",
);
}
#[cfg(feature = "elasticsearch")]
let finalized_block = finalized.verified.block.clone();
let finalized_inner_block = finalized.block.clone();
let note_commitment_trees = finalized.treestate.note_commitment_trees.clone();
let result =
@ -371,7 +366,7 @@ impl FinalizedState {
if result.is_ok() {
// Save blocks to elasticsearch if the feature is enabled.
#[cfg(feature = "elasticsearch")]
self.elasticsearch(&finalized_block);
self.elasticsearch(&finalized_inner_block);
// TODO: move the stop height check to the syncer (#3442)
if self.is_at_stop_height(height) {


@ -30,7 +30,7 @@ use zebra_chain::{
};
use crate::{
request::SemanticallyVerifiedBlockWithTrees,
request::FinalizedBlock,
service::finalized_state::{
disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk},
disk_format::{
@ -39,7 +39,7 @@ use crate::{
},
zebra_db::{metrics::block_precommit_metrics, ZebraDb},
},
BoxError, HashOrHeight, SemanticallyVerifiedBlock,
BoxError, HashOrHeight,
};
#[cfg(test)]
@ -292,13 +292,12 @@ impl ZebraDb {
/// - Propagates any errors from updating history and note commitment trees
pub(in super::super) fn write_block(
&mut self,
finalized: SemanticallyVerifiedBlockWithTrees,
finalized: FinalizedBlock,
prev_note_commitment_trees: Option<NoteCommitmentTrees>,
network: Network,
source: &str,
) -> Result<block::Hash, BoxError> {
let tx_hash_indexes: HashMap<transaction::Hash, usize> = finalized
.verified
.transaction_hashes
.iter()
.enumerate()
@ -311,12 +310,11 @@ impl ZebraDb {
// simplify the spent_utxos location lookup code,
// and remove the extra new_outputs_by_out_loc argument
let new_outputs_by_out_loc: BTreeMap<OutputLocation, transparent::Utxo> = finalized
.verified
.new_outputs
.iter()
.map(|(outpoint, ordered_utxo)| {
(
lookup_out_loc(finalized.verified.height, outpoint, &tx_hash_indexes),
lookup_out_loc(finalized.height, outpoint, &tx_hash_indexes),
ordered_utxo.utxo.clone(),
)
})
@ -325,7 +323,6 @@ impl ZebraDb {
// Get a list of the spent UTXOs, before we delete any from the database
let spent_utxos: Vec<(transparent::OutPoint, OutputLocation, transparent::Utxo)> =
finalized
.verified
.block
.transactions
.iter()
@ -337,13 +334,12 @@ impl ZebraDb {
// Some utxos are spent in the same block, so they will be in
// `tx_hash_indexes` and `new_outputs`
self.output_location(&outpoint).unwrap_or_else(|| {
lookup_out_loc(finalized.verified.height, &outpoint, &tx_hash_indexes)
lookup_out_loc(finalized.height, &outpoint, &tx_hash_indexes)
}),
self.utxo(&outpoint)
.map(|ordered_utxo| ordered_utxo.utxo)
.or_else(|| {
finalized
.verified
.new_outputs
.get(&outpoint)
.map(|ordered_utxo| ordered_utxo.utxo.clone())
@ -368,7 +364,6 @@ impl ZebraDb {
.values()
.chain(
finalized
.verified
.new_outputs
.values()
.map(|ordered_utxo| &ordered_utxo.utxo),
@ -403,7 +398,7 @@ impl ZebraDb {
tracing::trace!(?source, "committed block from");
Ok(finalized.verified.hash)
Ok(finalized.hash)
}
/// Writes the given batch to the database.
@ -446,7 +441,7 @@ impl DiskWriteBatch {
&mut self,
zebra_db: &ZebraDb,
network: Network,
finalized: &SemanticallyVerifiedBlockWithTrees,
finalized: &FinalizedBlock,
new_outputs_by_out_loc: BTreeMap<OutputLocation, transparent::Utxo>,
spent_utxos_by_outpoint: HashMap<transparent::OutPoint, transparent::Utxo>,
spent_utxos_by_out_loc: BTreeMap<OutputLocation, transparent::Utxo>,
@ -456,7 +451,7 @@ impl DiskWriteBatch {
) -> Result<(), BoxError> {
let db = &zebra_db.db;
// Commit block, transaction, and note commitment tree data.
self.prepare_block_header_and_transaction_data_batch(db, &finalized.verified)?;
self.prepare_block_header_and_transaction_data_batch(db, finalized)?;
// The consensus rules are silent on shielded transactions in the genesis block,
// because there aren't any in the mainnet or testnet genesis blocks.
@ -464,7 +459,7 @@ impl DiskWriteBatch {
// which is already present from height 1 to the first shielded transaction.
//
// In Zebra we include the nullifiers and note commitments in the genesis block because it simplifies our code.
self.prepare_shielded_transaction_batch(db, &finalized.verified)?;
self.prepare_shielded_transaction_batch(db, finalized)?;
self.prepare_trees_batch(zebra_db, finalized, prev_note_commitment_trees)?;
// # Consensus
@ -477,12 +472,12 @@ impl DiskWriteBatch {
// So we ignore the genesis UTXO, transparent address index, and value pool updates
// for the genesis block. This also ignores genesis shielded value pool updates, but there
// aren't any of those on mainnet or testnet.
if !finalized.verified.height.is_min() {
if !finalized.height.is_min() {
// Commit transaction indexes
self.prepare_transparent_transaction_batch(
db,
network,
&finalized.verified,
finalized,
&new_outputs_by_out_loc,
&spent_utxos_by_outpoint,
&spent_utxos_by_out_loc,
@ -492,18 +487,14 @@ impl DiskWriteBatch {
// Commit UTXOs and value pools
self.prepare_chain_value_pools_batch(
db,
&finalized.verified,
finalized,
spent_utxos_by_outpoint,
value_pool,
)?;
}
// The block has passed contextual validation, so update the metrics
block_precommit_metrics(
&finalized.verified.block,
finalized.verified.hash,
finalized.verified.height,
);
block_precommit_metrics(&finalized.block, finalized.hash, finalized.height);
Ok(())
}
@ -518,7 +509,7 @@ impl DiskWriteBatch {
pub fn prepare_block_header_and_transaction_data_batch(
&mut self,
db: &DiskDb,
finalized: &SemanticallyVerifiedBlock,
finalized: &FinalizedBlock,
) -> Result<(), BoxError> {
// Blocks
let block_header_by_height = db.cf_handle("block_header_by_height").unwrap();
@ -530,7 +521,7 @@ impl DiskWriteBatch {
let hash_by_tx_loc = db.cf_handle("hash_by_tx_loc").unwrap();
let tx_loc_by_hash = db.cf_handle("tx_loc_by_hash").unwrap();
let SemanticallyVerifiedBlock {
let FinalizedBlock {
block,
hash,
height,


@ -26,6 +26,7 @@ use zebra_chain::{
use zebra_test::vectors::{MAINNET_BLOCKS, TESTNET_BLOCKS};
use crate::{
request::{FinalizedBlock, Treestate},
service::finalized_state::{disk_db::DiskWriteBatch, ZebraDb},
CheckpointVerifiedBlock, Config,
};
@ -108,7 +109,7 @@ fn test_block_db_round_trip_with(
// Now, use the database
let original_block = Arc::new(original_block);
let finalized = if original_block.coinbase_height().is_some() {
let checkpoint_verified = if original_block.coinbase_height().is_some() {
original_block.clone().into()
} else {
// Fake a zero height
@ -119,6 +120,10 @@ fn test_block_db_round_trip_with(
)
};
let dummy_treestate = Treestate::default();
let finalized =
FinalizedBlock::from_checkpoint_verified(checkpoint_verified, dummy_treestate);
// Skip validation by writing the block directly to the database
let mut batch = DiskWriteBatch::new();
batch


@ -19,12 +19,13 @@ use zebra_chain::{
};
use crate::{
request::FinalizedBlock,
service::finalized_state::{
disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk},
disk_format::RawBytes,
zebra_db::ZebraDb,
},
BoxError, SemanticallyVerifiedBlock,
BoxError,
};
impl ZebraDb {
@ -128,13 +129,13 @@ impl DiskWriteBatch {
pub fn prepare_chain_value_pools_batch(
&mut self,
db: &DiskDb,
finalized: &SemanticallyVerifiedBlock,
finalized: &FinalizedBlock,
utxos_spent_by_block: HashMap<transparent::OutPoint, transparent::Utxo>,
value_pool: ValueBalance<NonNegative>,
) -> Result<(), BoxError> {
let tip_chain_value_pool = db.cf_handle("tip_chain_value_pool").unwrap();
let SemanticallyVerifiedBlock { block, .. } = finalized;
let FinalizedBlock { block, .. } = finalized;
let new_pool = value_pool.add_block(block.borrow(), &utxos_spent_by_block)?;
self.zs_insert(&tip_chain_value_pool, (), new_pool);


@ -27,13 +27,13 @@ use zebra_chain::{
};
use crate::{
request::SemanticallyVerifiedBlockWithTrees,
request::{FinalizedBlock, Treestate},
service::finalized_state::{
disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk},
disk_format::RawBytes,
zebra_db::ZebraDb,
},
BoxError, SemanticallyVerifiedBlock,
BoxError,
};
// Doc-only items
@ -436,9 +436,9 @@ impl DiskWriteBatch {
pub fn prepare_shielded_transaction_batch(
&mut self,
db: &DiskDb,
finalized: &SemanticallyVerifiedBlock,
finalized: &FinalizedBlock,
) -> Result<(), BoxError> {
let SemanticallyVerifiedBlock { block, .. } = finalized;
let FinalizedBlock { block, .. } = finalized;
// Index each transaction's shielded data
for transaction in &block.transactions {
@ -491,11 +491,18 @@ impl DiskWriteBatch {
pub fn prepare_trees_batch(
&mut self,
zebra_db: &ZebraDb,
finalized: &SemanticallyVerifiedBlockWithTrees,
finalized: &FinalizedBlock,
prev_note_commitment_trees: Option<NoteCommitmentTrees>,
) -> Result<(), BoxError> {
let height = finalized.verified.height;
let trees = finalized.treestate.note_commitment_trees.clone();
let FinalizedBlock {
height,
treestate:
Treestate {
note_commitment_trees,
history_tree,
},
..
} = finalized;
let prev_sprout_tree = prev_note_commitment_trees.as_ref().map_or_else(
|| zebra_db.sprout_tree_for_tip(),
@ -511,29 +518,29 @@ impl DiskWriteBatch {
);
// Update the Sprout tree and store its anchor only if it has changed
if height.is_min() || prev_sprout_tree != trees.sprout {
self.update_sprout_tree(zebra_db, &trees.sprout)
if height.is_min() || prev_sprout_tree != note_commitment_trees.sprout {
self.update_sprout_tree(zebra_db, &note_commitment_trees.sprout)
}
// Store the Sapling tree, anchor, and any new subtrees only if they have changed
if height.is_min() || prev_sapling_tree != trees.sapling {
self.create_sapling_tree(zebra_db, &height, &trees.sapling);
if height.is_min() || prev_sapling_tree != note_commitment_trees.sapling {
self.create_sapling_tree(zebra_db, height, &note_commitment_trees.sapling);
if let Some(subtree) = trees.sapling_subtree {
if let Some(subtree) = note_commitment_trees.sapling_subtree {
self.insert_sapling_subtree(zebra_db, &subtree);
}
}
// Store the Orchard tree, anchor, and any new subtrees only if they have changed
if height.is_min() || prev_orchard_tree != trees.orchard {
self.create_orchard_tree(zebra_db, &height, &trees.orchard);
if height.is_min() || prev_orchard_tree != note_commitment_trees.orchard {
self.create_orchard_tree(zebra_db, height, &note_commitment_trees.orchard);
if let Some(subtree) = trees.orchard_subtree {
if let Some(subtree) = note_commitment_trees.orchard_subtree {
self.insert_orchard_subtree(zebra_db, &subtree);
}
}
self.update_history_tree(zebra_db, &finalized.treestate.history_tree);
self.update_history_tree(zebra_db, history_tree);
Ok(())
}


@ -25,6 +25,7 @@ use zebra_chain::{
};
use crate::{
request::FinalizedBlock,
service::finalized_state::{
disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk},
disk_format::{
@ -36,7 +37,7 @@ use crate::{
},
zebra_db::ZebraDb,
},
BoxError, SemanticallyVerifiedBlock,
BoxError,
};
impl ZebraDb {
@ -347,13 +348,13 @@ impl DiskWriteBatch {
&mut self,
db: &DiskDb,
network: Network,
finalized: &SemanticallyVerifiedBlock,
finalized: &FinalizedBlock,
new_outputs_by_out_loc: &BTreeMap<OutputLocation, transparent::Utxo>,
spent_utxos_by_outpoint: &HashMap<transparent::OutPoint, transparent::Utxo>,
spent_utxos_by_out_loc: &BTreeMap<OutputLocation, transparent::Utxo>,
mut address_balances: HashMap<transparent::Address, AddressBalanceLocation>,
) -> Result<(), BoxError> {
let SemanticallyVerifiedBlock { block, height, .. } = finalized;
let FinalizedBlock { block, height, .. } = finalized;
// Update created and spent transparent outputs
self.prepare_new_transparent_outputs_batch(