fix(lint): Box large error types to resolve the clippy result_large_err lint (#5759)
* Box errors to deal with large error warnings, add accessor methods for error properties
* Remove or explain some large enum variant lints
* Turn some tickets into TODOs
* Allow missing docs on single-field error enum variants

Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
Parent: cefc98baac
Commit: 4078e244d3
@@ -59,8 +59,6 @@ use crate::{
 /// activation, we do not validate any pre-Sapling transaction types.
 #[derive(Clone, Debug, PartialEq, Eq)]
 #[cfg_attr(any(test, feature = "proptest-impl"), derive(Serialize))]
-// XXX consider boxing the Optional fields of V4 and V5 txs
-#[allow(clippy::large_enum_variant)]
 pub enum Transaction {
     /// A fully transparent transaction (`version = 1`).
     V1 {

@@ -71,12 +71,24 @@ pub enum VerifyBlockError {
     Time(zebra_chain::block::BlockTimeError),

     #[error("unable to commit block after semantic verification")]
+    // TODO: make this into a concrete type, and add it to is_duplicate_request() (#2908)
     Commit(#[source] BoxError),

     #[error("invalid transaction")]
     Transaction(#[from] TransactionError),
 }

+impl VerifyBlockError {
+    /// Returns `true` if this is definitely a duplicate request.
+    /// Some duplicate requests might not be detected, and therefore return `false`.
+    pub fn is_duplicate_request(&self) -> bool {
+        match self {
+            VerifyBlockError::Block { source, .. } => source.is_duplicate_request(),
+            _ => false,
+        }
+    }
+}
+
 /// The maximum allowed number of legacy signature check operations in a block.
 ///
 /// This consensus rule is not documented, so Zebra follows the `zcashd` implementation.

@@ -91,12 +91,42 @@ where
 }

 /// An error while semantically verifying a block.
+//
+// One or both of these error variants are at least 140 bytes
 #[derive(Debug, Display, Error)]
+#[allow(missing_docs)]
 pub enum VerifyChainError {
-    /// block could not be checkpointed
-    Checkpoint(#[source] VerifyCheckpointError),
-    /// block could not be verified
-    Block(#[source] VerifyBlockError),
+    /// Block could not be checkpointed
+    Checkpoint { source: Box<VerifyCheckpointError> },
+    /// Block could not be full-verified
+    Block { source: Box<VerifyBlockError> },
 }

+impl From<VerifyCheckpointError> for VerifyChainError {
+    fn from(err: VerifyCheckpointError) -> Self {
+        VerifyChainError::Checkpoint {
+            source: Box::new(err),
+        }
+    }
+}
+
+impl From<VerifyBlockError> for VerifyChainError {
+    fn from(err: VerifyBlockError) -> Self {
+        VerifyChainError::Block {
+            source: Box::new(err),
+        }
+    }
+}
+
+impl VerifyChainError {
+    /// Returns `true` if this is definitely a duplicate request.
+    /// Some duplicate requests might not be detected, and therefore return `false`.
+    pub fn is_duplicate_request(&self) -> bool {
+        match self {
+            VerifyChainError::Checkpoint { source, .. } => source.is_duplicate_request(),
+            VerifyChainError::Block { source, .. } => source.is_duplicate_request(),
+        }
+    }
+}
+
 impl<S, V> Service<Arc<Block>> for ChainVerifier<S, V>

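The "at least 140 bytes" comment above is the kind of figure clippy's `large_enum_variant` and `result_large_err` lints react to: an enum is as large as its largest variant, and every `Result` that carries it pays that cost on every call. An illustrative check with made-up payload sizes, not Zebra's real types:

use std::mem::size_of;

#[allow(dead_code)]
enum Unboxed {
    Small(u8),
    // The whole enum must reserve space for this 160-byte payload.
    Large([u8; 160]),
}

#[allow(dead_code)]
enum Boxed {
    Small(u8),
    // Boxing shrinks the variant to one pointer.
    Large(Box<[u8; 160]>),
}

fn main() {
    // The unboxed enum is at least 160 bytes; the boxed one is roughly
    // pointer-sized (plus a possible discriminant), and so is Result<(), Boxed>.
    println!("Unboxed: {} bytes", size_of::<Unboxed>());
    println!("Boxed:   {} bytes", size_of::<Boxed>());
    println!("Result<(), Unboxed>: {} bytes", size_of::<Result<(), Unboxed>>());
    println!("Result<(), Boxed>:   {} bytes", size_of::<Result<(), Boxed>>());
}

Boxing trades that stack footprint for one heap allocation on the (rare) error path, which is why this diff boxes the checkpoint and block errors instead of allowing the lint.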
@@ -132,28 +162,19 @@ where
         // The chain verifier holds one slot in each verifier, for each concurrent task.
         // Therefore, any shared buffers or batches polled by these verifiers should double
         // their bounds. (For example, the state service buffer.)
-        ready!(self
-            .checkpoint
-            .poll_ready(cx)
-            .map_err(VerifyChainError::Checkpoint))?;
-        ready!(self.block.poll_ready(cx).map_err(VerifyChainError::Block))?;
+        ready!(self.checkpoint.poll_ready(cx))?;
+        ready!(self.block.poll_ready(cx))?;
         Poll::Ready(Ok(()))
     }

     fn call(&mut self, block: Arc<Block>) -> Self::Future {
         match block.coinbase_height() {
-            Some(height) if height <= self.max_checkpoint_height => self
-                .checkpoint
-                .call(block)
-                .map_err(VerifyChainError::Checkpoint)
-                .boxed(),
+            Some(height) if height <= self.max_checkpoint_height => {
+                self.checkpoint.call(block).map_err(Into::into).boxed()
+            }
             // This also covers blocks with no height, which the block verifier
             // will reject immediately.
-            _ => self
-                .block
-                .call(block)
-                .map_err(VerifyChainError::Block)
-                .boxed(),
+            _ => self.block.call(block).map_err(Into::into).boxed(),
         }
     }
 }

@@ -880,7 +880,7 @@ pub enum VerifyCheckpointError {
     #[error(transparent)]
     CheckpointList(BoxError),
     #[error(transparent)]
-    VerifyBlock(BoxError),
+    VerifyBlock(VerifyBlockError),
     #[error("too many queued blocks at this height")]
     QueuedLimit,
     #[error("the block hash does not match the chained checkpoint hash, expected {expected:?} found {found:?}")]

@@ -894,7 +894,7 @@ pub enum VerifyCheckpointError {

 impl From<VerifyBlockError> for VerifyCheckpointError {
     fn from(err: VerifyBlockError) -> VerifyCheckpointError {
-        VerifyCheckpointError::VerifyBlock(err.into())
+        VerifyCheckpointError::VerifyBlock(err)
     }
 }

@@ -910,6 +910,20 @@ impl From<equihash::Error> for VerifyCheckpointError {
     }
 }

+impl VerifyCheckpointError {
+    /// Returns `true` if this is definitely a duplicate request.
+    /// Some duplicate requests might not be detected, and therefore return `false`.
+    pub fn is_duplicate_request(&self) -> bool {
+        match self {
+            VerifyCheckpointError::AlreadyVerified { .. } => true,
+            // TODO: make this duplicate-incomplete
+            VerifyCheckpointError::NewerRequest { .. } => true,
+            VerifyCheckpointError::VerifyBlock(block_error) => block_error.is_duplicate_request(),
+            _ => false,
+        }
+    }
+}
+
 /// The CheckpointVerifier service implementation.
 ///
 /// After verification, the block futures resolve to their hashes.

@@ -184,9 +184,17 @@ pub enum TransactionError {

     #[error("could not validate nullifiers and anchors on best chain")]
     #[cfg_attr(any(test, feature = "proptest-impl"), proptest(skip))]
-    ValidateNullifiersAndAnchorsError(#[from] ValidateContextError),
+    // This error variant is at least 128 bytes
+    ValidateNullifiersAndAnchorsError(Box<ValidateContextError>),
 }

+impl From<ValidateContextError> for TransactionError {
+    fn from(err: ValidateContextError) -> Self {
+        TransactionError::ValidateNullifiersAndAnchorsError(Box::new(err))
+    }
+}
+
+// TODO: use a dedicated variant and From impl for each concrete type, and update callers (#5732)
 impl From<BoxError> for TransactionError {
     fn from(mut err: BoxError) -> Self {
         // TODO: handle redpallas::Error, ScriptInvalid, InvalidSignature

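The `From<BoxError> for TransactionError` impl shown in these hunks works on a type-erased `Box<dyn Error>`, and the TODOs (#5732, plus the `redpallas::Error` / `ScriptInvalid` / `InvalidSignature` note) are about recovering concrete types from it. A hedged sketch of the usual downcasting technique, using a made-up `ScriptInvalid` stand-in and a simplified `TxError` enum rather than Zebra's real `TransactionError`:

use std::error::Error;
use std::fmt;

type BoxError = Box<dyn Error + Send + Sync + 'static>;

// Hypothetical concrete error we want to recover from the type-erased box.
#[derive(Debug)]
struct ScriptInvalid;

impl fmt::Display for ScriptInvalid {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "script invalid")
    }
}

impl Error for ScriptInvalid {}

#[derive(Debug)]
enum TxError {
    // Recognized concrete error, stored directly.
    Script(ScriptInvalid),
    // Everything else stays type-erased.
    Other(BoxError),
}

impl From<BoxError> for TxError {
    fn from(err: BoxError) -> Self {
        // Try to recover the concrete type; fall back to the boxed error.
        match err.downcast::<ScriptInvalid>() {
            Ok(script_err) => TxError::Script(*script_err),
            Err(other) => TxError::Other(other),
        }
    }
}

fn main() {
    let boxed: BoxError = Box::new(ScriptInvalid);
    match TxError::from(boxed) {
        TxError::Script(_) => println!("recovered the concrete error"),
        TxError::Other(err) => println!("still type-erased: {err}"),
    }
}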
@@ -212,12 +220,6 @@ impl From<BoxError> for TransactionError {
     }
 }

-impl From<SubsidyError> for BlockError {
-    fn from(err: SubsidyError) -> BlockError {
-        BlockError::Transaction(TransactionError::Subsidy(err))
-    }
-}
-
 #[derive(Error, Clone, Debug, PartialEq, Eq)]
 pub enum BlockError {
     #[error("block contains invalid transactions")]

@@ -290,3 +292,17 @@ pub enum BlockError {
         source: amount::Error,
     },
 }
+
+impl From<SubsidyError> for BlockError {
+    fn from(err: SubsidyError) -> BlockError {
+        BlockError::Transaction(TransactionError::Subsidy(err))
+    }
+}
+
+impl BlockError {
+    /// Returns `true` if this is definitely a duplicate request.
+    /// Some duplicate requests might not be detected, and therefore return `false`.
+    pub fn is_duplicate_request(&self) -> bool {
+        matches!(self, BlockError::AlreadyInChain(..))
+    }
+}

@@ -371,6 +371,7 @@ where
             Ok(response)
         }

+        // TODO: use a generic error constructor (#5548)
         #[allow(clippy::unwrap_in_result)]
         fn get_blockchain_info(&self) -> Result<GetBlockChainInfo> {
             let network = self.network;

@@ -473,6 +474,7 @@ where
             Ok(response)
         }

+        // TODO: use a generic error constructor (#5548)
         fn get_address_balance(
             &self,
             address_strings: AddressStrings,

@@ -499,6 +501,7 @@ where
             .boxed()
         }

+        // TODO: use HexData to handle transaction data, and a generic error constructor (#5548)
         fn send_raw_transaction(
             &self,
             raw_transaction_hex: String,

@@ -553,6 +556,7 @@ where
             .boxed()
         }

+        // TODO: use a generic error constructor (#5548)
         fn get_block(&self, height: String, verbosity: u8) -> BoxFuture<Result<GetBlock>> {
             let mut state = self.state.clone();

@@ -621,6 +625,7 @@ where
             .boxed()
         }

+        // TODO: use a generic error constructor (#5548)
         fn get_best_block_hash(&self) -> Result<GetBlockHash> {
             self.latest_chain_tip
                 .best_tip_hash()

@@ -632,6 +637,7 @@ where
             })
         }

+        // TODO: use a generic error constructor (#5548)
         fn get_raw_mempool(&self) -> BoxFuture<Result<Vec<String>>> {
             let mut mempool = self.mempool.clone();

@@ -667,6 +673,7 @@ where
             .boxed()
         }

+        // TODO: use HexData to handle the transaction ID, and a generic error constructor (#5548)
         fn get_raw_transaction(
             &self,
             txid_hex: String,

@@ -750,6 +757,7 @@ where
             .boxed()
         }

+        // TODO: use a generic error constructor (#5548)
         fn z_get_treestate(&self, hash_or_height: String) -> BoxFuture<Result<GetTreestate>> {
             let mut state = self.state.clone();

@@ -864,6 +872,7 @@ where
             .boxed()
         }

+        // TODO: use a generic error constructor (#5548)
         fn get_address_tx_ids(
             &self,
             request: GetAddressTxIdsRequest,

@@ -928,6 +937,7 @@ where
             .boxed()
         }

+        // TODO: use a generic error constructor (#5548)
         fn get_address_utxos(
             &self,
             address_strings: AddressStrings,

@@ -22,8 +22,8 @@ use zebra_chain::{
     transparent,
 };
 use zebra_consensus::{
-    funding_stream_address, funding_stream_values, miner_subsidy, new_coinbase_script, BlockError,
-    VerifyBlockError, VerifyChainError, VerifyCheckpointError, MAX_BLOCK_SIGOPS,
+    funding_stream_address, funding_stream_values, miner_subsidy, new_coinbase_script,
+    VerifyChainError, MAX_BLOCK_SIGOPS,
 };
 use zebra_node_services::mempool;

@@ -261,6 +261,7 @@ where
         best_chain_tip_height(&self.latest_chain_tip).map(|height| height.0)
     }

+    // TODO: use a generic error constructor (#5548)
     fn get_block_hash(&self, index: i32) -> BoxFuture<Result<GetBlockHash>> {
         let mut state = self.state.clone();
         let latest_chain_tip = self.latest_chain_tip.clone();

@@ -294,6 +295,7 @@ where
         .boxed()
     }

+    // TODO: use HexData to handle block proposal data, and a generic error constructor (#5548)
     fn get_block_template(
         &self,
         options: Option<types::get_block_template_opts::JsonParameters>,

@@ -498,13 +500,10 @@ where
                 .map(|boxed_chain_error| *boxed_chain_error),
         };

-        Ok(match chain_error {
-            Ok(
-                VerifyChainError::Checkpoint(VerifyCheckpointError::AlreadyVerified { .. })
-                | VerifyChainError::Block(VerifyBlockError::Block {
-                    source: BlockError::AlreadyInChain(..),
-                }),
-            ) => submit_block::ErrorResponse::Duplicate,
+        let response = match chain_error {
+            Ok(source) if source.is_duplicate_request() => {
+                submit_block::ErrorResponse::Duplicate
+            }

             // Currently, these match arms return Reject for the older duplicate in a queue,
             // but queued duplicates should be DuplicateInconclusive.

@@ -526,8 +525,9 @@ where
             // This match arm is currently unreachable, but if future changes add extra error types,
             // we want to turn them into `Rejected`.
             Err(_unknown_error_type) => submit_block::ErrorResponse::Rejected,
-        }
-        .into())
+        };
+
+        Ok(response.into())
         }
         .boxed()
     }

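At the RPC layer, the rewritten `submit_block` arms above are where the new accessor pays off: instead of matching through checkpoint and block error internals, the handler asks the chain error whether it was a duplicate and maps that to the JSON-RPC response. A condensed sketch of that decision, with stand-in types for the verifier error and for `submit_block::ErrorResponse`:

// Stand-ins for zebra-rpc's submit_block::ErrorResponse and the verifier error;
// only the duplicate check mirrors the real code.
#[derive(Debug, PartialEq)]
enum ErrorResponse {
    Duplicate,
    Rejected,
}

#[derive(Debug)]
struct ChainError {
    duplicate: bool,
}

impl ChainError {
    fn is_duplicate_request(&self) -> bool {
        self.duplicate
    }
}

// Map a verification outcome to an RPC response: a recognized chain error that is
// a duplicate becomes Duplicate, anything else (including unknown error types) is Rejected.
fn map_submit_block_error(chain_error: Result<ChainError, ()>) -> ErrorResponse {
    match chain_error {
        Ok(source) if source.is_duplicate_request() => ErrorResponse::Duplicate,
        _ => ErrorResponse::Rejected,
    }
}

fn main() {
    assert_eq!(
        map_submit_block_error(Ok(ChainError { duplicate: true })),
        ErrorResponse::Duplicate
    );
    assert_eq!(map_submit_block_error(Err(())), ErrorResponse::Rejected);
}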
@@ -83,6 +83,8 @@ type InboundTxDownloads = TxDownloads<Timeout<Outbound>, Timeout<TxVerifier>, St
 ///
 /// Indicates whether it is enabled or disabled and, if enabled, contains
 /// the necessary data to run it.
+//
+// Zebra only has one mempool, so the enum variant size difference doesn't matter.
 #[allow(clippy::large_enum_variant)]
 enum ActiveState {
     /// The Mempool is disabled.

@@ -19,9 +19,6 @@ use zebra_chain::{
     chain_tip::ChainTip,
     parameters::genesis_hash,
 };
-use zebra_consensus::{
-    chain::VerifyChainError, BlockError, VerifyBlockError, VerifyCheckpointError,
-};
 use zebra_network as zn;
 use zebra_state as zs;

@@ -1008,7 +1005,6 @@ where
     ///
     /// See [`Self::handle_response`] for more details.
     #[allow(unknown_lints)]
-    #[allow(clippy::result_large_err)]
     fn handle_block_response(
         &mut self,
         response: Result<(Height, block::Hash), BlockDownloadVerifyError>,

@@ -1027,7 +1023,6 @@ where
     ///
     /// See [`Self::handle_response`] for more details.
     #[allow(unknown_lints)]
-    #[allow(clippy::result_large_err)]
     fn handle_hash_response(
         response: Result<IndexSet<block::Hash>, BlockDownloadVerifyError>,
     ) -> Result<IndexSet<block::Hash>, BlockDownloadVerifyError> {

@@ -1044,7 +1039,6 @@ where
     ///
     /// Returns `Err` if an unexpected error occurred, to force the synchronizer to restart.
     #[allow(unknown_lints)]
-    #[allow(clippy::result_large_err)]
     fn handle_response<T>(
         response: Result<T, BlockDownloadVerifyError>,
     ) -> Result<(), BlockDownloadVerifyError> {

@@ -1098,23 +1092,10 @@ where
     fn should_restart_sync(e: &BlockDownloadVerifyError) -> bool {
         match e {
             // Structural matches: downcasts
-            BlockDownloadVerifyError::Invalid {
-                error: VerifyChainError::Checkpoint(VerifyCheckpointError::AlreadyVerified { .. }),
-                ..
-            } => {
+            BlockDownloadVerifyError::Invalid { error, .. } if error.is_duplicate_request() => {
                 debug!(error = ?e, "block was already verified, possibly from a previous sync run, continuing");
                 false
             }
-            BlockDownloadVerifyError::Invalid {
-                error:
-                    VerifyChainError::Block(VerifyBlockError::Block {
-                        source: BlockError::AlreadyInChain(_, _),
-                    }),
-                ..
-            } => {
-                debug!(error = ?e, "block is already in chain, possibly from a previous sync run, continuing");
-                false
-            }

             // Structural matches: direct
             BlockDownloadVerifyError::CancelledDuringDownload { .. }

@@ -1140,12 +1121,15 @@ where
             }

             // String matches
-            BlockDownloadVerifyError::Invalid {
-                error: VerifyChainError::Block(VerifyBlockError::Commit(ref source)),
-                ..
-            } if format!("{source:?}").contains("block is already committed to the state")
-                || format!("{source:?}")
-                    .contains("block has already been sent to be committed to the state") =>
+            //
+            // We want to match VerifyChainError::Block(VerifyBlockError::Commit(ref source)),
+            // but that type is boxed.
+            // TODO:
+            // - turn this check into a function on VerifyChainError, like is_duplicate_request()
+            BlockDownloadVerifyError::Invalid { error, .. }
+                if format!("{error:?}").contains("block is already committed to the state")
+                    || format!("{error:?}")
+                        .contains("block has already been sent to be committed to the state") =>
             {
                 // TODO: improve this by checking the type (#2908)
                 debug!(error = ?e, "block is already committed or pending a commit, possibly from a previous sync run, continuing");

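The TODO in the final hunk suggests the natural follow-up: hide the remaining string check behind another accessor on `VerifyChainError`, in the style of `is_duplicate_request()`, so `should_restart_sync()` no longer formats the error itself. One possible shape for that helper, sketched on stand-in types; this method does not exist in the commit:

// Stand-ins for VerifyChainError / VerifyBlockError; the real types live in zebra-consensus.
type BoxError = Box<dyn std::error::Error + Send + Sync + 'static>;

#[allow(dead_code)]
#[derive(Debug)]
enum BlockVerifyError {
    Commit(BoxError),
    Other,
}

#[allow(dead_code)]
#[derive(Debug)]
enum ChainError {
    Block { source: Box<BlockVerifyError> },
    Other,
}

impl ChainError {
    /// Hypothetical helper: `true` if the underlying commit error says the block
    /// is already committed (or queued to be committed) to the state.
    fn is_already_committed(&self) -> bool {
        match self {
            ChainError::Block { source } => match source.as_ref() {
                // Still a string check today, but hidden behind one method
                // that callers like should_restart_sync() could reuse.
                BlockVerifyError::Commit(commit_error) => {
                    let msg = format!("{commit_error:?}");
                    msg.contains("block is already committed to the state")
                        || msg.contains("block has already been sent to be committed to the state")
                }
                _ => false,
            },
            _ => false,
        }
    }
}

fn main() {
    let err = ChainError::Block {
        source: Box::new(BlockVerifyError::Commit(
            "block is already committed to the state".into(),
        )),
    };
    assert!(err.is_already_committed());
}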