state: implement Request::Block with sled
commit f4db12efcb
parent 2cbc60aeae
@@ -13,7 +13,7 @@ use zebra_chain::{
     parameters::Network,
 };
 
-use crate::{BoxError, Config, HashOrHeight, MemoryState, Request, Response, SledState};
+use crate::{BoxError, Config, MemoryState, Request, Response, SledState};
 
 // todo: put this somewhere
 pub struct QueuedBlock {
@@ -85,8 +85,13 @@ impl Service<Request> for StateService {
                     .boxed()
             }
             Request::Transaction(hash) => unimplemented!(),
-            Request::Block(HashOrHeight::Hash(hash)) => unimplemented!(),
-            Request::Block(HashOrHeight::Height(height)) => unimplemented!(),
+            Request::Block(hash_or_height) => {
+                //todo: handle in memory and sled
+                self.sled
+                    .block(hash_or_height)
+                    .map_ok(|block| Response::Block(block))
+                    .boxed()
+            }
         }
     }
 }
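The match arm above now hands the whole `HashOrHeight` value to the sled layer instead of destructuring it per variant. That helper is defined elsewhere in the crate; judging from the patterns used in this diff, it is probably shaped roughly like the sketch below (not part of the commit):

use zebra_chain::block;

// Assumed shape of the crate-local `HashOrHeight` helper: a single identifier
// type that can name a block either by hash or by height.
pub enum HashOrHeight {
    /// Identify a block by its hash.
    Hash(block::Hash),
    /// Identify a block by its height in the best chain.
    Height(block::Height),
}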
@@ -1,14 +1,14 @@
 //! The primary implementation of the `zebra_state::Service` built upon sled
 
-use std::{collections::HashMap, convert::TryInto, future::Future};
+use std::{collections::HashMap, convert::TryInto, future::Future, sync::Arc};
 
-use zebra_chain::serialization::ZcashSerialize;
+use zebra_chain::serialization::{ZcashDeserialize, ZcashSerialize};
 use zebra_chain::{
-    block::{self},
+    block::{self, Block},
     parameters::Network,
 };
 
-use crate::{BoxError, Config, QueuedBlock};
+use crate::{BoxError, Config, HashOrHeight, QueuedBlock};
 
 pub struct SledState {
     /// Queued blocks that arrived out of order, indexed by their parent block hash.
@@ -81,7 +81,7 @@ impl SledState {
            &self.height_by_hash,
            &self.block_by_height,
        )
-            .transaction(|(hash_by_height, height_by_hash, block_by_height)| {
+            .transaction(move |(hash_by_height, height_by_hash, block_by_height)| {
                // TODO: do serialization above
                // for some reason this wouldn't move into the closure (??)
                let block_bytes = block
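The only change in this hunk is the `move` on the transaction closure, likely needed because sled may run the closure more than once on conflict: it is an `Fn`, so it has to own (or cheaply clone) what it captures rather than borrow it from the caller. The bytes it writes are Zcash consensus serialization, and the read path added further down undoes that; here is a minimal round-trip sketch (not part of the commit, using the `ZcashSerialize`/`ZcashDeserialize` traits this diff already imports):

use std::sync::Arc;

use zebra_chain::block::Block;
use zebra_chain::serialization::{ZcashDeserialize, ZcashSerialize};

/// Serialize a block the way the sled trees store it, then read it back.
/// Illustration only; errors are collapsed into a boxed error.
fn roundtrip(block: &Block) -> Result<Arc<Block>, Box<dyn std::error::Error>> {
    // Write the consensus wire format into an in-memory buffer, the same
    // bytes `block_bytes` holds inside the transaction closure.
    let mut bytes = Vec::new();
    block.zcash_serialize(&mut bytes)?;
    // Deserializing straight into `Arc<Block>` mirrors the new read path.
    Ok(Arc::<Block>::zcash_deserialize(bytes.as_slice())?)
}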
@@ -151,6 +151,31 @@ impl SledState {
            Ok(Some(tip_height.0 - height.0))
        }
    }
+
+    pub fn block(
+        &self,
+        hash_or_height: HashOrHeight,
+    ) -> impl Future<Output = Result<Option<Arc<Block>>, BoxError>> {
+        let height_by_hash = self.height_by_hash.clone();
+        let block_by_height = self.block_by_height.clone();
+
+        async move {
+            let height = match hash_or_height {
+                HashOrHeight::Height(height) => height,
+                HashOrHeight::Hash(hash) => match height_by_hash.get(&hash.0)? {
+                    Some(bytes) => {
+                        block::Height(u32::from_be_bytes(bytes.as_ref().try_into().unwrap()))
+                    }
+                    None => return Ok(None),
+                },
+            };
+
+            match block_by_height.get(&height.0.to_be_bytes())? {
+                Some(bytes) => Ok(Some(Arc::<Block>::zcash_deserialize(bytes.as_ref())?)),
+                None => Ok(None),
+            }
+        }
+    }
 }
 
 // Split into a helper function to be called synchronously or asynchronously.
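A hypothetical call site for the new method, assuming it is exercised from an async context inside zebra-state (the `genesis_block` helper below is illustrative, not from the commit):

use std::sync::Arc;

use zebra_chain::block;

use crate::{BoxError, HashOrHeight, SledState};

/// Illustrative helper: look up the genesis block through the new query path.
async fn genesis_block(state: &SledState) -> Result<Option<Arc<block::Block>>, BoxError> {
    // `SledState::block` clones the two sled trees it needs, so the returned
    // future owns its data and can be awaited (or boxed) freely.
    state.block(HashOrHeight::Height(block::Height(0))).await
}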