Reject V5 transactions before NU5 activation (#2285)

* Add a `Transaction::version` getter

Returns the version of the transaction as a `u32`.

* Add `Transaction::is_overwintered` helper method

Returns whether the `fOverwintered` flag should be set for the transaction's
version.

* Use new helpers to serialize transaction version

Reduce the repeated code and make it less error-prone with future
changes.

* Add getter methods to `transaction::Request` type

Refactor to move the type deconstruction code into the `Request` type.
The main objective is to make it easier to split the call handler into
methods that receive the request directly.

* Refactor to create `verify_v4_transaction` helper

Split the code specific to V4 transactions into a separate helper
method.

* Create `verify_v5_transaction` helper method

Prepare a separate method to have the validation code.

* Add `UnsupportedByNetworkUpgrade` error variant

An error for when a transaction's version isn't supported by the network
upgrade of the block it's included in, or by the current network upgrade if
the transaction is for the mempool.

* Verify a V5 transaction's network upgrade

For now, only NU5 supports V5 transactions.

* Test that V5 transaction is rejected on Canopy

Create a fake V5 transaction and try to verify it using a block height
from Canopy's activation. The verifier should reject the transaction
with an error saying that the network upgrade does not support that
transaction version.

* Test if V5 tx. is accepted after NU5 activation

Create a fake V5 transaction and pretend it is placed in a block that
has a height after the NU5 activation. The test should succeed, but
since the NU5 activation height has not been specified yet (neither for
the testnet nor the mainnet), for now this test is marked as
`should_panic`.

* Add `TODO` comment to the code

Add more detail to what's left to do, and link to the appropriate PRs.

* Use `u32` to store transaction version

Use a type consistent with how the version is specified.

Co-authored-by: teor <teor@riseup.net>

Co-authored-by: teor <teor@riseup.net>
This commit is contained in:
Janito Vaqueiro Ferreira Filho 2021-06-14 21:15:59 -03:00 committed by GitHub
parent c8711bd416
commit 0e89236405
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 380 additions and 199 deletions

View File

@ -151,6 +151,25 @@ impl Transaction {
// header // header
/// Return if the `fOverwintered` flag of this transaction is set.
pub fn is_overwintered(&self) -> bool {
    // Versions 3 and later set the fOverwintered flag; versions 1 and 2 do not.
    matches!(
        self,
        Transaction::V3 { .. } | Transaction::V4 { .. } | Transaction::V5 { .. }
    )
}
/// Return the version of this transaction.
pub fn version(&self) -> u32 {
match self {
Transaction::V1 { .. } => 1,
Transaction::V2 { .. } => 2,
Transaction::V3 { .. } => 3,
Transaction::V4 { .. } => 4,
Transaction::V5 { .. } => 5,
}
}
/// Get this transaction's lock time. /// Get this transaction's lock time.
pub fn lock_time(&self) -> LockTime { pub fn lock_time(&self) -> LockTime {
match self { match self {

View File

@ -371,13 +371,19 @@ impl ZcashSerialize for Transaction {
// //
// Since we checkpoint on Canopy activation, we won't ever need // Since we checkpoint on Canopy activation, we won't ever need
// to check the smaller pre-Sapling transaction size limit. // to check the smaller pre-Sapling transaction size limit.
// header: Write version and set the fOverwintered bit if necessary
let overwintered_flag = if self.is_overwintered() { 1 << 31 } else { 0 };
let version = overwintered_flag | self.version();
writer.write_u32::<LittleEndian>(version)?;
match self { match self {
Transaction::V1 { Transaction::V1 {
inputs, inputs,
outputs, outputs,
lock_time, lock_time,
} => { } => {
writer.write_u32::<LittleEndian>(1)?;
inputs.zcash_serialize(&mut writer)?; inputs.zcash_serialize(&mut writer)?;
outputs.zcash_serialize(&mut writer)?; outputs.zcash_serialize(&mut writer)?;
lock_time.zcash_serialize(&mut writer)?; lock_time.zcash_serialize(&mut writer)?;
@ -388,7 +394,6 @@ impl ZcashSerialize for Transaction {
lock_time, lock_time,
joinsplit_data, joinsplit_data,
} => { } => {
writer.write_u32::<LittleEndian>(2)?;
inputs.zcash_serialize(&mut writer)?; inputs.zcash_serialize(&mut writer)?;
outputs.zcash_serialize(&mut writer)?; outputs.zcash_serialize(&mut writer)?;
lock_time.zcash_serialize(&mut writer)?; lock_time.zcash_serialize(&mut writer)?;
@ -405,8 +410,6 @@ impl ZcashSerialize for Transaction {
expiry_height, expiry_height,
joinsplit_data, joinsplit_data,
} => { } => {
// Write version 3 and set the fOverwintered bit.
writer.write_u32::<LittleEndian>(3 | (1 << 31))?;
writer.write_u32::<LittleEndian>(OVERWINTER_VERSION_GROUP_ID)?; writer.write_u32::<LittleEndian>(OVERWINTER_VERSION_GROUP_ID)?;
inputs.zcash_serialize(&mut writer)?; inputs.zcash_serialize(&mut writer)?;
outputs.zcash_serialize(&mut writer)?; outputs.zcash_serialize(&mut writer)?;
@ -426,8 +429,6 @@ impl ZcashSerialize for Transaction {
sapling_shielded_data, sapling_shielded_data,
joinsplit_data, joinsplit_data,
} => { } => {
// Write version 4 and set the fOverwintered bit.
writer.write_u32::<LittleEndian>(4 | (1 << 31))?;
writer.write_u32::<LittleEndian>(SAPLING_VERSION_GROUP_ID)?; writer.write_u32::<LittleEndian>(SAPLING_VERSION_GROUP_ID)?;
inputs.zcash_serialize(&mut writer)?; inputs.zcash_serialize(&mut writer)?;
outputs.zcash_serialize(&mut writer)?; outputs.zcash_serialize(&mut writer)?;
@ -492,8 +493,6 @@ impl ZcashSerialize for Transaction {
// Transaction V5 spec: // Transaction V5 spec:
// https://zips.z.cash/protocol/nu5.pdf#txnencodingandconsensus // https://zips.z.cash/protocol/nu5.pdf#txnencodingandconsensus
// header: Write version 5 and set the fOverwintered bit
writer.write_u32::<LittleEndian>(5 | (1 << 31))?;
writer.write_u32::<LittleEndian>(TX_V5_VERSION_GROUP_ID)?; writer.write_u32::<LittleEndian>(TX_V5_VERSION_GROUP_ID)?;
// header: Write the nConsensusBranchId // header: Write the nConsensusBranchId

View File

@ -47,6 +47,9 @@ pub enum TransactionError {
#[error("transaction version number MUST be >= 4")] #[error("transaction version number MUST be >= 4")]
WrongVersion, WrongVersion,
#[error("transaction version {0} not supported by the network upgrade {1:?}")]
UnsupportedByNetworkUpgrade(u32, zebra_chain::parameters::NetworkUpgrade),
#[error("must have at least one input: transparent, shielded spend, or joinsplit")] #[error("must have at least one input: transparent, shielded spend, or joinsplit")]
NoInputs, NoInputs,

View File

@ -16,6 +16,8 @@ use tracing::Instrument;
use zebra_chain::{ use zebra_chain::{
block, block,
parameters::{Network, NetworkUpgrade}, parameters::{Network, NetworkUpgrade},
primitives::Groth16Proof,
sapling,
transaction::{self, HashType, Transaction}, transaction::{self, HashType, Transaction},
transparent, transparent,
}; };
@ -87,6 +89,29 @@ pub enum Request {
}, },
} }
impl Request {
/// Return the transaction to be verified.
pub fn transaction(&self) -> Arc<Transaction> {
match self {
Request::Block { transaction, .. } => transaction.clone(),
Request::Mempool { transaction, .. } => transaction.clone(),
}
}
/// Return the set of known UTXOs that the transaction's transparent inputs
/// may spend.
pub fn known_utxos(&self) -> Arc<HashMap<transparent::OutPoint, zs::Utxo>> {
match self {
Request::Block { known_utxos, .. } => known_utxos.clone(),
Request::Mempool { known_utxos, .. } => known_utxos.clone(),
}
}
/// Return the network upgrade to verify the transaction against.
///
/// For block requests, this is the upgrade active at the block's height on
/// `network`; for mempool requests, it is the upgrade supplied with the request.
pub fn upgrade(&self, network: Network) -> NetworkUpgrade {
match self {
Request::Block { height, .. } => NetworkUpgrade::current(network, *height),
Request::Mempool { upgrade, .. } => *upgrade,
}
}
}
impl<ZS> Service<Request> for Verifier<ZS> impl<ZS> Service<Request> for Verifier<ZS>
where where
ZS: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static, ZS: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static,
@ -112,34 +137,15 @@ where
unimplemented!(); unimplemented!();
} }
let (tx, known_utxos, upgrade) = match req { let script_verifier = self.script_verifier.clone();
Request::Block { let network = self.network;
transaction,
known_utxos,
height,
} => {
let upgrade = NetworkUpgrade::current(self.network, height);
(transaction, known_utxos, upgrade)
}
Request::Mempool {
transaction,
known_utxos,
upgrade,
} => (transaction, known_utxos, upgrade),
};
let mut spend_verifier = primitives::groth16::SPEND_VERIFIER.clone();
let mut output_verifier = primitives::groth16::OUTPUT_VERIFIER.clone();
let mut ed25519_verifier = primitives::ed25519::VERIFIER.clone();
let mut redjubjub_verifier = primitives::redjubjub::VERIFIER.clone();
let mut script_verifier = self.script_verifier.clone();
let tx = req.transaction();
let span = tracing::debug_span!("tx", hash = %tx.hash()); let span = tracing::debug_span!("tx", hash = %tx.hash());
async move { async move {
tracing::trace!(?tx); tracing::trace!(?tx);
match &*tx { match tx.as_ref() {
Transaction::V1 { .. } | Transaction::V2 { .. } | Transaction::V3 { .. } => { Transaction::V1 { .. } | Transaction::V2 { .. } | Transaction::V3 { .. } => {
tracing::debug!(?tx, "got transaction with wrong version"); tracing::debug!(?tx, "got transaction with wrong version");
Err(TransactionError::WrongVersion) Err(TransactionError::WrongVersion)
@ -153,175 +159,248 @@ where
sapling_shielded_data, sapling_shielded_data,
.. ..
} => { } => {
// A set of asynchronous checks which must all succeed. Self::verify_v4_transaction(
// We finish by waiting on these below. req,
let mut async_checks = FuturesUnordered::new(); network,
script_verifier,
// Do basic checks first inputs,
check::has_inputs_and_outputs(&tx)?; joinsplit_data,
sapling_shielded_data,
// Handle transparent inputs and outputs. )
if tx.is_coinbase() { .await
check::coinbase_tx_no_prevout_joinsplit_spend(&tx)?;
} else {
// feed all of the inputs to the script and shielded verifiers
// the script_verifier also checks transparent sighashes, using its own implementation
let cached_ffi_transaction =
Arc::new(CachedFfiTransaction::new(tx.clone()));
for input_index in 0..inputs.len() {
let rsp = script_verifier.ready_and().await?.call(script::Request {
upgrade,
known_utxos: known_utxos.clone(),
cached_ffi_transaction: cached_ffi_transaction.clone(),
input_index,
});
async_checks.push(rsp);
}
}
let shielded_sighash = tx.sighash(
upgrade,
HashType::ALL,
None,
);
if let Some(joinsplit_data) = joinsplit_data {
// XXX create a method on JoinSplitData
// that prepares groth16::Items with the correct proofs
// and proof inputs, handling interstitial treestates
// correctly.
// Then, pass those items to self.joinsplit to verify them.
// Consensus rule: The joinSplitSig MUST represent a
// valid signature, under joinSplitPubKey, of the
// sighash.
//
// Queue the validation of the JoinSplit signature while
// adding the resulting future to our collection of
// async checks that (at a minimum) must pass for the
// transaction to verify.
//
// https://zips.z.cash/protocol/protocol.pdf#sproutnonmalleability
// https://zips.z.cash/protocol/protocol.pdf#txnencodingandconsensus
let rsp = ed25519_verifier
.ready_and()
.await?
.call((joinsplit_data.pub_key, joinsplit_data.sig, &shielded_sighash).into());
async_checks.push(rsp.boxed());
}
if let Some(sapling_shielded_data) = sapling_shielded_data {
for spend in sapling_shielded_data.spends_per_anchor() {
// Consensus rule: cv and rk MUST NOT be of small
// order, i.e. [h_J]cv MUST NOT be 𝒪_J and [h_J]rk
// MUST NOT be 𝒪_J.
//
// https://zips.z.cash/protocol/protocol.pdf#spenddesc
check::spend_cv_rk_not_small_order(&spend)?;
// Consensus rule: The proof π_ZKSpend MUST be valid
// given a primary input formed from the other
// fields except spendAuthSig.
//
// Queue the verification of the Groth16 spend proof
// for each Spend description while adding the
// resulting future to our collection of async
// checks that (at a minimum) must pass for the
// transaction to verify.
let spend_rsp = spend_verifier
.ready_and()
.await?
.call(primitives::groth16::ItemWrapper::from(&spend).into());
async_checks.push(spend_rsp.boxed());
// Consensus rule: The spend authorization signature
// MUST be a valid SpendAuthSig signature over
// SigHash using rk as the validating key.
//
// Queue the validation of the RedJubjub spend
// authorization signature for each Spend
// description while adding the resulting future to
// our collection of async checks that (at a
// minimum) must pass for the transaction to verify.
let rsp = redjubjub_verifier
.ready_and()
.await?
.call((spend.rk, spend.spend_auth_sig, &shielded_sighash).into());
async_checks.push(rsp.boxed());
}
for output in sapling_shielded_data.outputs() {
// Consensus rule: cv and wpk MUST NOT be of small
// order, i.e. [h_J]cv MUST NOT be 𝒪_J and [h_J]wpk
// MUST NOT be 𝒪_J.
//
// https://zips.z.cash/protocol/protocol.pdf#outputdesc
check::output_cv_epk_not_small_order(output)?;
// Consensus rule: The proof π_ZKOutput MUST be
// valid given a primary input formed from the other
// fields except C^enc and C^out.
//
// Queue the verification of the Groth16 output
// proof for each Output description while adding
// the resulting future to our collection of async
// checks that (at a minimum) must pass for the
// transaction to verify.
let output_rsp = output_verifier
.ready_and()
.await?
.call(primitives::groth16::ItemWrapper::from(output).into());
async_checks.push(output_rsp.boxed());
}
let bvk = sapling_shielded_data.binding_verification_key();
// TODO: enable async verification and remove this block - #1939
{
let item: zebra_chain::primitives::redjubjub::batch::Item = (bvk, sapling_shielded_data.binding_sig, &shielded_sighash).into();
item.verify_single().unwrap_or_else(|binding_sig_error| {
let binding_sig_error = binding_sig_error.to_string();
tracing::warn!(%binding_sig_error, "ignoring");
metrics::counter!("zebra.error.sapling.binding",
1,
"kind" => binding_sig_error);
});
// Ignore errors until binding signatures are fixed
//.map_err(|e| BoxError::from(Box::new(e)))?;
}
let _rsp = redjubjub_verifier
.ready_and()
.await?
.call((bvk, sapling_shielded_data.binding_sig, &shielded_sighash).into())
.boxed();
// TODO: stop ignoring binding signature errors - #1939
// async_checks.push(rsp);
}
// Finally, wait for all asynchronous checks to complete
// successfully, or fail verification if they error.
while let Some(check) = async_checks.next().await {
tracing::trace!(?check, remaining = async_checks.len());
check?;
}
Ok(tx.hash())
}
Transaction::V5 { .. } => {
unimplemented!("v5 transaction validation as specified in ZIP-216, ZIP-224, ZIP-225, and ZIP-244")
} }
Transaction::V5 { .. } => Self::verify_v5_transaction(req, network).await,
} }
} }
.instrument(span) .instrument(span)
.boxed() .boxed()
} }
} }
impl<ZS> Verifier<ZS>
where
ZS: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static,
ZS::Future: Send + 'static,
{
/// Verify a V4 transaction.
///
/// Runs the synchronous checks, queues the script, JoinSplit, and Sapling
/// proof/signature checks as asynchronous futures, then waits for all of
/// them to succeed. Returns the transaction hash on success.
async fn verify_v4_transaction(
request: Request,
network: Network,
mut script_verifier: script::Verifier<ZS>,
inputs: &[transparent::Input],
joinsplit_data: &Option<transaction::JoinSplitData<Groth16Proof>>,
sapling_shielded_data: &Option<sapling::ShieldedData<sapling::PerSpendAnchor>>,
) -> Result<transaction::Hash, TransactionError> {
// Batch verifiers for the Groth16 proofs and the signature schemes used
// by V4 transactions.
let mut spend_verifier = primitives::groth16::SPEND_VERIFIER.clone();
let mut output_verifier = primitives::groth16::OUTPUT_VERIFIER.clone();
let mut ed25519_verifier = primitives::ed25519::VERIFIER.clone();
let mut redjubjub_verifier = primitives::redjubjub::VERIFIER.clone();
// A set of asynchronous checks which must all succeed.
// We finish by waiting on these below.
let mut async_checks = FuturesUnordered::new();
let tx = request.transaction();
let upgrade = request.upgrade(network);
// Do basic checks first
check::has_inputs_and_outputs(&tx)?;
// Handle transparent inputs and outputs.
if tx.is_coinbase() {
check::coinbase_tx_no_prevout_joinsplit_spend(&tx)?;
} else {
// feed all of the inputs to the script and shielded verifiers
// the script_verifier also checks transparent sighashes, using its own implementation
let cached_ffi_transaction = Arc::new(CachedFfiTransaction::new(tx.clone()));
for input_index in 0..inputs.len() {
let rsp = script_verifier.ready_and().await?.call(script::Request {
upgrade,
known_utxos: request.known_utxos(),
cached_ffi_transaction: cached_ffi_transaction.clone(),
input_index,
});
async_checks.push(rsp);
}
}
// The sighash used by all of the shielded signature checks below.
let shielded_sighash = tx.sighash(upgrade, HashType::ALL, None);
if let Some(joinsplit_data) = joinsplit_data {
// XXX create a method on JoinSplitData
// that prepares groth16::Items with the correct proofs
// and proof inputs, handling interstitial treestates
// correctly.
// Then, pass those items to self.joinsplit to verify them.
// Consensus rule: The joinSplitSig MUST represent a
// valid signature, under joinSplitPubKey, of the
// sighash.
//
// Queue the validation of the JoinSplit signature while
// adding the resulting future to our collection of
// async checks that (at a minimum) must pass for the
// transaction to verify.
//
// https://zips.z.cash/protocol/protocol.pdf#sproutnonmalleability
// https://zips.z.cash/protocol/protocol.pdf#txnencodingandconsensus
let rsp = ed25519_verifier.ready_and().await?.call(
(
joinsplit_data.pub_key,
joinsplit_data.sig,
&shielded_sighash,
)
.into(),
);
async_checks.push(rsp.boxed());
}
if let Some(sapling_shielded_data) = sapling_shielded_data {
for spend in sapling_shielded_data.spends_per_anchor() {
// Consensus rule: cv and rk MUST NOT be of small
// order, i.e. [h_J]cv MUST NOT be 𝒪_J and [h_J]rk
// MUST NOT be 𝒪_J.
//
// https://zips.z.cash/protocol/protocol.pdf#spenddesc
check::spend_cv_rk_not_small_order(&spend)?;
// Consensus rule: The proof π_ZKSpend MUST be valid
// given a primary input formed from the other
// fields except spendAuthSig.
//
// Queue the verification of the Groth16 spend proof
// for each Spend description while adding the
// resulting future to our collection of async
// checks that (at a minimum) must pass for the
// transaction to verify.
let spend_rsp = spend_verifier
.ready_and()
.await?
.call(primitives::groth16::ItemWrapper::from(&spend).into());
async_checks.push(spend_rsp.boxed());
// Consensus rule: The spend authorization signature
// MUST be a valid SpendAuthSig signature over
// SigHash using rk as the validating key.
//
// Queue the validation of the RedJubjub spend
// authorization signature for each Spend
// description while adding the resulting future to
// our collection of async checks that (at a
// minimum) must pass for the transaction to verify.
let rsp = redjubjub_verifier
.ready_and()
.await?
.call((spend.rk, spend.spend_auth_sig, &shielded_sighash).into());
async_checks.push(rsp.boxed());
}
for output in sapling_shielded_data.outputs() {
// Consensus rule: cv and wpk MUST NOT be of small
// order, i.e. [h_J]cv MUST NOT be 𝒪_J and [h_J]wpk
// MUST NOT be 𝒪_J.
//
// https://zips.z.cash/protocol/protocol.pdf#outputdesc
check::output_cv_epk_not_small_order(output)?;
// Consensus rule: The proof π_ZKOutput MUST be
// valid given a primary input formed from the other
// fields except C^enc and C^out.
//
// Queue the verification of the Groth16 output
// proof for each Output description while adding
// the resulting future to our collection of async
// checks that (at a minimum) must pass for the
// transaction to verify.
let output_rsp = output_verifier
.ready_and()
.await?
.call(primitives::groth16::ItemWrapper::from(output).into());
async_checks.push(output_rsp.boxed());
}
let bvk = sapling_shielded_data.binding_verification_key();
// TODO: enable async verification and remove this block - #1939
{
let item: zebra_chain::primitives::redjubjub::batch::Item =
(bvk, sapling_shielded_data.binding_sig, &shielded_sighash).into();
item.verify_single().unwrap_or_else(|binding_sig_error| {
let binding_sig_error = binding_sig_error.to_string();
tracing::warn!(%binding_sig_error, "ignoring");
metrics::counter!("zebra.error.sapling.binding",
1,
"kind" => binding_sig_error);
});
// Ignore errors until binding signatures are fixed
//.map_err(|e| BoxError::from(Box::new(e)))?;
}
let _rsp = redjubjub_verifier
.ready_and()
.await?
.call((bvk, sapling_shielded_data.binding_sig, &shielded_sighash).into())
.boxed();
// TODO: stop ignoring binding signature errors - #1939
// async_checks.push(rsp);
}
// Finally, wait for all asynchronous checks to complete
// successfully, or fail verification if they error.
while let Some(check) = async_checks.next().await {
tracing::trace!(?check, remaining = async_checks.len());
check?;
}
Ok(tx.hash())
}
/// Verify a V5 transaction.
///
/// Currently only checks that the request's network upgrade supports V5
/// transactions; the remaining V5 validation is not yet implemented, so
/// this panics via `unimplemented!` after the network upgrade check passes.
async fn verify_v5_transaction(
request: Request,
network: Network,
) -> Result<transaction::Hash, TransactionError> {
Self::verify_v5_transaction_network_upgrade(
&request.transaction(),
request.upgrade(network),
)?;
// TODO:
// - verify transparent pool (#1981)
// - verify sapling shielded pool (#1981)
// - verify orchard shielded pool (ZIP-224) (#2105)
// - ZIP-216 (#1798)
// - ZIP-244 (#1874)
unimplemented!("V5 transaction validation is not yet complete");
}
/// Check if the V5 `transaction` is supported by `upgrade`.
///
/// Only NU5 supports V5 transactions; every earlier upgrade returns
/// [`TransactionError::UnsupportedByNetworkUpgrade`].
fn verify_v5_transaction_network_upgrade(
transaction: &Transaction,
upgrade: NetworkUpgrade,
) -> Result<(), TransactionError> {
match upgrade {
// Supports V5 transactions
NetworkUpgrade::Nu5 => Ok(()),
// Does not support V5 transactions
NetworkUpgrade::Genesis
| NetworkUpgrade::BeforeOverwinter
| NetworkUpgrade::Overwinter
| NetworkUpgrade::Sapling
| NetworkUpgrade::Blossom
| NetworkUpgrade::Heartwood
| NetworkUpgrade::Canopy => Err(TransactionError::UnsupportedByNetworkUpgrade(
transaction.version(),
upgrade,
)),
}
}
}

View File

@ -1,15 +1,19 @@
use std::{collections::HashMap, sync::Arc};
use tower::{service_fn, ServiceExt};
use zebra_chain::{ use zebra_chain::{
orchard, orchard,
parameters::Network, parameters::{Network, NetworkUpgrade},
transaction::{ transaction::{
arbitrary::{fake_v5_transactions_for_network, insert_fake_orchard_shielded_data}, arbitrary::{fake_v5_transactions_for_network, insert_fake_orchard_shielded_data},
Transaction, Transaction,
}, },
}; };
use super::check; use super::{check, Request, Verifier};
use crate::error::TransactionError; use crate::{error::TransactionError, script};
use color_eyre::eyre::Report; use color_eyre::eyre::Report;
#[test] #[test]
@ -160,3 +164,80 @@ fn v5_coinbase_transaction_with_enable_spends_flag_fails_validation() {
Err(TransactionError::CoinbaseHasEnableSpendsOrchard) Err(TransactionError::CoinbaseHasEnableSpendsOrchard)
); );
} }
/// Test that a V5 transaction is rejected when it is placed in a block at
/// Canopy's activation height, on both mainnet and testnet.
///
/// The verifier should return `UnsupportedByNetworkUpgrade`, because Canopy
/// does not support V5 transactions.
#[tokio::test]
async fn v5_transaction_is_rejected_before_nu5_activation() {
const V5_TRANSACTION_VERSION: u32 = 5;
let canopy = NetworkUpgrade::Canopy;
let networks = vec![
(Network::Mainnet, zebra_test::vectors::MAINNET_BLOCKS.iter()),
(Network::Testnet, zebra_test::vectors::TESTNET_BLOCKS.iter()),
];
for (network, blocks) in networks {
// The verifier should reject before any state queries are made.
let state_service = service_fn(|_| async { unreachable!("Service should not be called") });
let script_verifier = script::Verifier::new(state_service);
let verifier = Verifier::new(network, script_verifier);
// Use the last fake V5 transaction from the network's test vectors.
let transaction = fake_v5_transactions_for_network(network, blocks)
.rev()
.next()
.expect("At least one fake V5 transaction in the test vectors");
let result = verifier
.oneshot(Request::Block {
transaction: Arc::new(transaction),
known_utxos: Arc::new(HashMap::new()),
height: canopy
.activation_height(network)
.expect("Canopy activation height is specified"),
})
.await;
assert_eq!(
result,
Err(TransactionError::UnsupportedByNetworkUpgrade(
V5_TRANSACTION_VERSION,
canopy
))
);
}
}
/// Test that a V5 transaction is accepted when it is placed in a block at the
/// NU5 activation height, on both mainnet and testnet.
///
/// Currently panics because the NU5 activation heights are not yet defined,
/// so `activation_height` fails its `expect` — hence `should_panic`.
#[tokio::test]
// TODO: Remove `should_panic` once the NU5 activation heights for testnet and mainnet have been
// defined.
#[should_panic]
async fn v5_transaction_is_accepted_after_nu5_activation() {
let nu5 = NetworkUpgrade::Nu5;
let networks = vec![
(Network::Mainnet, zebra_test::vectors::MAINNET_BLOCKS.iter()),
(Network::Testnet, zebra_test::vectors::TESTNET_BLOCKS.iter()),
];
for (network, blocks) in networks {
// The verifier should not need any state queries for this check.
let state_service = service_fn(|_| async { unreachable!("Service should not be called") });
let script_verifier = script::Verifier::new(state_service);
let verifier = Verifier::new(network, script_verifier);
// Use the last fake V5 transaction from the network's test vectors.
let transaction = fake_v5_transactions_for_network(network, blocks)
.rev()
.next()
.expect("At least one fake V5 transaction in the test vectors");
let expected_hash = transaction.hash();
let result = verifier
.oneshot(Request::Block {
transaction: Arc::new(transaction),
known_utxos: Arc::new(HashMap::new()),
height: nu5
.activation_height(network)
.expect("NU5 activation height is specified"),
})
.await;
assert_eq!(result, Ok(expected_hash));
}
}