Deduplicate metrics dependencies (#1561)
## Motivation

This PR is motivated by the regression identified in https://github.com/ZcashFoundation/zebra/issues/1349. That issue notes that metrics stopped working for most of the crates other than `zebrad`.

## Solution

This PR resolves the regression by deduplicating the `metrics` crate dependency. During a recent change we upgraded the `metrics` version in `zebrad` and a couple of our other crates, but never updated the dependencies in `zebra-state`, `zebra-consensus`, or `zebra-network`. As a result, the metrics macros in those crates looked up the current metrics exporter through the wrong crate: we installed the exporter via `metrics` `0.13`, but the macros resolved against the `0.12` copy, which holds a separate, never-initialized instance of the exporter static. The macros therefore saw no exporter (`None`) and silently gave up.

## Related Issues

closes https://github.com/ZcashFoundation/zebra/issues/1349

## Follow-up Work

I noticed we have quite a few duplicate dependencies in our tree. We might be able to save some compilation time by auditing those and deduplicating them as much as possible.

- https://github.com/ZcashFoundation/zebra/issues/1582

Co-authored-by: teor <teor@riseup.net>
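To make the failure mode described under "Solution" concrete, here is a minimal, self-contained sketch. It does not use the real `metrics` crate APIs; the module names, statics, and functions below are purely illustrative. Each duplicated copy of a library carries its own recorder slot, so installing an exporter through one copy leaves the other copy's slot empty, and that copy's macros silently drop every sample:

```rust
// Illustrative sketch only: module names and statics are hypothetical and do
// not use the real `metrics` crate APIs. It models the failure mode above:
// two compiled copies of a library each carry their own recorder slot, so an
// exporter installed through one copy is invisible to the other copy's macros.
mod metrics_v0_13 {
    use std::sync::atomic::{AtomicBool, Ordering};

    pub static RECORDER_INSTALLED: AtomicBool = AtomicBool::new(false);

    pub fn install_exporter() {
        RECORDER_INSTALLED.store(true, Ordering::SeqCst);
    }
}

mod metrics_v0_12 {
    use std::sync::atomic::{AtomicBool, Ordering};

    // A *different* static, baked into the duplicate copy of the dependency.
    pub static RECORDER_INSTALLED: AtomicBool = AtomicBool::new(false);

    pub fn gauge(name: &str, value: f64) {
        if !RECORDER_INSTALLED.load(Ordering::SeqCst) {
            // From this copy's point of view no exporter exists,
            // so the sample is silently dropped.
            return;
        }
        println!("recorded {} = {}", name, value);
    }
}

fn main() {
    // zebrad installs the exporter through the 0.13 copy...
    metrics_v0_13::install_exporter();
    // ...but a crate still compiled against the 0.12 copy emits through that
    // copy's statics, so nothing is ever recorded.
    metrics_v0_12::gauge("state.memory.queued.block.count", 42.0);
}
```

Once every crate requests the same `metrics = "0.13.0-alpha.8"`, Cargo resolves a single copy of the package, which is why the `Cargo.lock` dependency entries in the diff below drop their version qualifiers. Remaining duplicate dependencies in the tree can be listed with `cargo tree --duplicates`.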
This commit is contained in: parent caca450904, commit 15698245e1
```diff
@@ -1714,15 +1714,6 @@ dependencies = [
  "autocfg",
 ]
 
-[[package]]
-name = "metrics"
-version = "0.12.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "51b70227ece8711a1aa2f99655efd795d0cff297a5b9fe39645a93aacf6ad39d"
-dependencies = [
- "metrics-core",
-]
-
 [[package]]
 name = "metrics"
 version = "0.13.0-alpha.8"
@@ -1734,12 +1725,6 @@ dependencies = [
  "sharded-slab",
 ]
 
-[[package]]
-name = "metrics-core"
-version = "0.5.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7c064b3a1ff41f4bf6c91185c8a0caeccf8a8a27e9d0f92cc54cf3dbec812f48"
-
 [[package]]
 name = "metrics-exporter-prometheus"
 version = "0.1.0-alpha.7"
@@ -1747,7 +1732,7 @@ source = "git+https://github.com/ZcashFoundation/metrics?rev=971133128e5aebe3ad1
 dependencies = [
  "hdrhistogram 7.1.0",
  "hyper 0.14.0-dev",
- "metrics 0.13.0-alpha.8",
+ "metrics",
  "metrics-util",
  "parking_lot",
  "thiserror",
@@ -1778,7 +1763,7 @@ dependencies = [
  "crossbeam-utils 0.8.0",
  "dashmap",
  "indexmap",
- "metrics 0.13.0-alpha.8",
+ "metrics",
 ]
 
 [[package]]
@@ -3921,7 +3906,7 @@ dependencies = [
  "futures",
  "futures-util",
  "jubjub 0.5.1",
- "metrics 0.12.1",
+ "metrics",
  "once_cell",
  "rand 0.7.3",
  "redjubjub",
@@ -3953,7 +3938,7 @@ dependencies = [
  "futures",
  "hex",
  "indexmap",
- "metrics 0.12.1",
+ "metrics",
  "pin-project 0.4.27",
  "proptest",
  "proptest-derive",
@@ -3998,7 +3983,7 @@ dependencies = [
  "futures",
  "hex",
  "lazy_static",
- "metrics 0.12.1",
+ "metrics",
  "once_cell",
  "primitive-types",
  "proptest",
@@ -4066,7 +4051,7 @@ dependencies = [
  "gumdrop",
  "hyper 0.14.0-dev",
  "inferno",
- "metrics 0.13.0-alpha.8",
+ "metrics",
  "metrics-exporter-prometheus",
  "once_cell",
  "pin-project 0.4.27",
```
```diff
@@ -16,7 +16,7 @@ serde = { version = "1", features = ["serde_derive"] }
 
 futures = "0.3.7"
 futures-util = "0.3.6"
-metrics = "0.12"
+metrics = "0.13.0-alpha.8"
 thiserror = "1.0.23"
 tokio = { version = "0.3.4", features = ["time", "sync", "stream", "tracing"] }
 tower = { version = "0.4", features = ["timeout", "util", "buffer"] }
```
```diff
@@ -203,8 +203,8 @@ where
         if height >= checkpoint_list.max_height() {
             (None, Progress::FinalCheckpoint)
         } else {
-            metrics::gauge!("checkpoint.verified.height", height.0 as i64);
-            metrics::gauge!("checkpoint.processing.next.height", height.0 as i64);
+            metrics::gauge!("checkpoint.verified.height", height.0 as f64);
+            metrics::gauge!("checkpoint.processing.next.height", height.0 as f64);
             (Some(hash), Progress::InitialTip(height))
         }
     }
@@ -298,7 +298,7 @@ where
         }
         metrics::gauge!(
             "checkpoint.queued.continuous.height",
-            pending_height.0 as i64
+            pending_height.0 as f64
         );
 
         // Now find the start of the checkpoint range
@@ -320,11 +320,11 @@ where
         if let Some(block::Height(target_checkpoint)) = target_checkpoint {
             metrics::gauge!(
                 "checkpoint.processing.next.height",
-                target_checkpoint as i64
+                target_checkpoint as f64
             );
         } else {
             // Use the start height if there is no potential next checkpoint
-            metrics::gauge!("checkpoint.processing.next.height", start_height.0 as i64);
+            metrics::gauge!("checkpoint.processing.next.height", start_height.0 as f64);
             metrics::counter!("checkpoint.waiting.count", 1);
         }
 
@@ -393,12 +393,12 @@ where
     /// Increase the current checkpoint height to `verified_height`,
     fn update_progress(&mut self, verified_height: block::Height) {
         if let Some(max_height) = self.queued.keys().next_back() {
-            metrics::gauge!("checkpoint.queued.max.height", max_height.0 as i64);
+            metrics::gauge!("checkpoint.queued.max.height", max_height.0 as f64);
         } else {
-            // use -1 as a sentinel value for "None", because 0 is a valid height
-            metrics::gauge!("checkpoint.queued.max.height", -1);
+            // use f64::NAN as a sentinel value for "None", because 0 is a valid height
+            metrics::gauge!("checkpoint.queued.max.height", f64::NAN);
         }
-        metrics::gauge!("checkpoint.queued_slots", self.queued.len() as i64);
+        metrics::gauge!("checkpoint.queued_slots", self.queued.len() as f64);
 
         // Ignore blocks that are below the previous checkpoint, or otherwise
         // have invalid heights.
@@ -413,10 +413,10 @@ where
 
         // Ignore heights that aren't checkpoint heights
         if verified_height == self.checkpoint_list.max_height() {
-            metrics::gauge!("checkpoint.verified.height", verified_height.0 as i64);
+            metrics::gauge!("checkpoint.verified.height", verified_height.0 as f64);
             self.verifier_progress = FinalCheckpoint;
         } else if self.checkpoint_list.contains(verified_height) {
-            metrics::gauge!("checkpoint.verified.height", verified_height.0 as i64);
+            metrics::gauge!("checkpoint.verified.height", verified_height.0 as f64);
             self.verifier_progress = PreviousCheckpoint(verified_height);
             // We're done with the initial tip hash now
             self.initial_tip_hash = None;
@@ -525,7 +525,7 @@ where
                 .keys()
                 .next_back()
                 .expect("queued has at least one entry")
-                .0 as i64
+                .0 as f64
         );
 
         let is_checkpoint = self.checkpoint_list.contains(height);
@@ -853,7 +853,7 @@ where
         let rx = self.queue_block(block.clone());
         self.process_checkpoint_range();
 
-        metrics::gauge!("checkpoint.queued_slots", self.queued.len() as i64);
+        metrics::gauge!("checkpoint.queued_slots", self.queued.len() as f64);
 
         // Because the checkpoint verifier duplicates state from the state
        // service (it tracks which checkpoints have been verified), we must
```
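The source hunks above and below all follow the same pattern: the 0.13-alpha `gauge!` calls in this diff take `f64` values, so integer heights are cast with `as f64`, and `f64::NAN` replaces `-1` as the "no value" sentinel because `0` is a valid height. A standalone sketch of that convention (the helper function is hypothetical, not part of the PR):

```rust
/// Hypothetical helper showing the gauge-value convention adopted in this PR:
/// heights become f64, and f64::NAN stands in for "no height yet",
/// because 0 is a valid block height.
fn height_gauge_value(height: Option<u32>) -> f64 {
    match height {
        Some(h) => h as f64,
        None => f64::NAN,
    }
}

fn main() {
    assert_eq!(height_gauge_value(Some(0)), 0.0);
    assert!(height_gauge_value(None).is_nan());
}
```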
```diff
@@ -26,7 +26,7 @@ tokio = { version = "0.3.4", features = ["net", "time", "stream", "tracing", "ma
 tokio-util = { version = "0.5", features = ["codec"] }
 tower = { version = "0.4", features = ["retry", "discover", "load", "load-shed", "timeout", "util", "buffer"] }
 
-metrics = "0.12"
+metrics = "0.13.0-alpha.8"
 tracing = "0.1"
 tracing-futures = "0.2"
 tracing-error = { version = "0.1.2", features = ["traced-error"] }
```
```diff
@@ -149,9 +149,9 @@ where
     }
 
     pub fn next(&mut self) -> Option<MetaAddr> {
-        metrics::gauge!("candidate_set.disconnected", self.disconnected.len() as i64);
-        metrics::gauge!("candidate_set.gossiped", self.gossiped.len() as i64);
-        metrics::gauge!("candidate_set.failed", self.failed.len() as i64);
+        metrics::gauge!("candidate_set.disconnected", self.disconnected.len() as f64);
+        metrics::gauge!("candidate_set.gossiped", self.gossiped.len() as f64);
+        metrics::gauge!("candidate_set.failed", self.failed.len() as f64);
         debug!(
             disconnected_peers = self.disconnected.len(),
             gossiped_peers = self.gossiped.len(),
```
```diff
@@ -272,7 +272,13 @@ where
     let mut crawl_timer = tokio::time::interval(new_peer_interval);
 
     loop {
-        metrics::gauge!("crawler.in_flight_handshakes", handshakes.len() as i64 - 1);
+        metrics::gauge!(
+            "crawler.in_flight_handshakes",
+            handshakes
+                .len()
+                .checked_sub(1)
+                .expect("the pool always contains an unresolved future") as f64
+        );
         // This is a little awkward because there's no select3.
         match select(
             select(demand_rx.next(), crawl_timer.next()),
```
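A note on the in-flight-handshakes gauge above: the old code computed `handshakes.len() as i64 - 1`, which quietly reports `-1` if the pool is ever empty, while the new code asserts the invariant with `checked_sub(1)` and `expect`, so a broken invariant panics instead of producing a bogus gauge value. A tiny sketch of that guard (the function name is illustrative, not from the PR):

```rust
/// Illustrative sketch of the guard used above: the handshake pool is expected
/// to always hold one permanently-unresolved future, so len() - 1 is the
/// number of real in-flight handshakes.
fn in_flight_handshakes(pool_len: usize) -> f64 {
    pool_len
        .checked_sub(1)
        .expect("the pool always contains an unresolved future") as f64
}

fn main() {
    assert_eq!(in_flight_handshakes(1), 0.0);
    assert_eq!(in_flight_handshakes(4), 3.0);
}
```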
```diff
@@ -1,7 +1,6 @@
 use std::net::SocketAddr;
 use std::{
     collections::HashMap,
-    convert::TryInto,
     fmt::Debug,
     future::Future,
     marker::PhantomData,
```
```diff
@@ -385,9 +384,9 @@ where
         let num_ready = self.ready_services.len();
         let num_unready = self.unready_services.len();
         let num_peers = num_ready + num_unready;
-        metrics::gauge!("pool.num_ready", num_ready.try_into().unwrap());
-        metrics::gauge!("pool.num_unready", num_unready.try_into().unwrap());
-        metrics::gauge!("pool.num_peers", num_peers.try_into().unwrap());
+        metrics::gauge!("pool.num_ready", num_ready as f64);
+        metrics::gauge!("pool.num_unready", num_unready as f64);
+        metrics::gauge!("pool.num_peers", num_peers as f64);
     }
 }
 
```
```diff
@@ -16,7 +16,7 @@ lazy_static = "1.4.0"
 serde = { version = "1", features = ["serde_derive"] }
 
 futures = "0.3.7"
-metrics = "0.12"
+metrics = "0.13.0-alpha.8"
 tower = { version = "0.4", features = ["buffer", "util"] }
 tracing = "0.1"
 thiserror = "1.0.23"
```
```diff
@@ -123,10 +123,13 @@ impl FinalizedState {
             self.max_queued_height = std::cmp::max(self.max_queued_height, height.0 as _);
         }
 
-        metrics::gauge!("state.finalized.queued.max.height", self.max_queued_height);
+        metrics::gauge!(
+            "state.finalized.queued.max.height",
+            self.max_queued_height as f64
+        );
         metrics::gauge!(
             "state.finalized.queued.block.count",
-            self.queued_by_prev_hash.len() as _
+            self.queued_by_prev_hash.len() as f64
         );
     }
 
```
```diff
@@ -141,12 +141,12 @@ impl QueuedBlocks {
     /// Update metrics after the queue is modified
     fn update_metrics(&self) {
         if let Some(max_height) = self.by_height.keys().next_back() {
-            metrics::gauge!("state.memory.queued.max.height", max_height.0 as i64);
+            metrics::gauge!("state.memory.queued.max.height", max_height.0 as f64);
         } else {
-            // use -1 as a sentinel value for "None", because 0 is a valid height
-            metrics::gauge!("state.memory.queued.max.height", -1);
+            // use f64::NAN as a sentinel value for "None", because 0 is a valid height
+            metrics::gauge!("state.memory.queued.max.height", f64::NAN);
         }
-        metrics::gauge!("state.memory.queued.block.count", self.blocks.len() as _);
+        metrics::gauge!("state.memory.queued.block.count", self.blocks.len() as f64);
     }
 
     /// Try to look up this UTXO in any queued block.
```