// darkfi/blockchain/mod.rs

1/* This file is part of DarkFi (https://dark.fi)
2 *
3 * Copyright (C) 2020-2026 Dyne.org foundation
4 *
5 * This program is free software: you can redistribute it and/or modify
6 * it under the terms of the GNU Affero General Public License as
7 * published by the Free Software Foundation, either version 3 of the
8 * License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13 * GNU Affero General Public License for more details.
14 *
15 * You should have received a copy of the GNU Affero General Public License
16 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
17 */
18
19use std::{
20    slice,
21    sync::{Arc, Mutex},
22};
23
24use darkfi_sdk::{
25    monotree::{self, Monotree},
26    tx::TransactionHash,
27};
28use darkfi_serial::{deserialize, Decodable};
29use sled_overlay::{
30    sled,
31    sled::{IVec, Transactional},
32};
33use tracing::debug;
34
35#[cfg(feature = "async-serial")]
36use darkfi_serial::{deserialize_async, AsyncDecodable};
37
38use crate::{tx::Transaction, util::time::Timestamp, Error, Result};
39
40/// Block related definitions and storage implementations
41pub mod block_store;
42pub use block_store::{
43    Block, BlockDifficulty, BlockInfo, BlockStore, BlockStoreOverlay, SLED_BLOCK_DIFFICULTY_TREE,
44    SLED_BLOCK_ORDER_TREE, SLED_BLOCK_STATE_INVERSE_DIFF_TREE, SLED_BLOCK_TREE,
45};
46
47/// Header definition and storage implementation
48pub mod header_store;
49pub use header_store::{
50    Header, HeaderHash, HeaderStore, HeaderStoreOverlay, SLED_HEADER_TREE, SLED_SYNC_HEADER_TREE,
51};
52
53/// Transactions related storage implementations
54pub mod tx_store;
55pub use tx_store::{
56    TxStore, TxStoreOverlay, SLED_PENDING_TX_ORDER_TREE, SLED_PENDING_TX_TREE,
57    SLED_TX_LOCATION_TREE, SLED_TX_TREE,
58};
59
60/// Contracts and Wasm storage implementations
61pub mod contract_store;
62pub use contract_store::{
63    ContractStore, ContractStoreOverlay, SLED_BINCODE_TREE, SLED_CONTRACTS_TREE,
64    SLED_CONTRACTS_TREES_TREE,
65};
66
67/// Monero definitions needed for merge mining
68pub mod monero;
69
/// Structure holding all sled trees that define the concept of Blockchain.
///
/// Cloning is shallow: it clones the `sled` handles and the store wrappers
/// around them, not the underlying data.
#[derive(Clone)]
pub struct Blockchain {
    /// Main pointer to the sled db connection
    pub sled_db: sled::Db,
    /// Headers sled tree
    pub headers: HeaderStore,
    /// Blocks sled tree
    pub blocks: BlockStore,
    /// Transactions related sled trees
    pub transactions: TxStore,
    /// Contracts related sled trees
    pub contracts: ContractStore,
}
84
85impl Blockchain {
86    /// Instantiate a new `Blockchain` with the given `sled` database.
87    pub fn new(db: &sled::Db) -> Result<Self> {
88        let headers = HeaderStore::new(db)?;
89        let blocks = BlockStore::new(db)?;
90        let transactions = TxStore::new(db)?;
91        let contracts = ContractStore::new(db)?;
92
93        Ok(Self { sled_db: db.clone(), headers, blocks, transactions, contracts })
94    }
95
96    /// Insert a given [`BlockInfo`] into the blockchain database.
97    /// This functions wraps all the logic of separating the block into specific
98    /// data that can be fed into the different trees of the database.
99    /// Upon success, the functions returns the block hash that
100    /// were given and appended to the ledger.
101    pub fn add_block(&self, block: &BlockInfo) -> Result<HeaderHash> {
102        let mut trees = vec![];
103        let mut batches = vec![];
104
105        // Store header
106        let (headers_batch, _) = self.headers.insert_batch(slice::from_ref(&block.header));
107        trees.push(self.headers.main.clone());
108        batches.push(headers_batch);
109
110        // Store block
111        let blk: Block = Block::from_block_info(block);
112        let (bocks_batch, block_hashes) = self.blocks.insert_batch(&[blk]);
113        let block_hash = block_hashes[0];
114        let block_hash_vec = [block_hash];
115        trees.push(self.blocks.main.clone());
116        batches.push(bocks_batch);
117
118        // Store block order
119        let blocks_order_batch =
120            self.blocks.insert_batch_order(&[block.header.height], &block_hash_vec);
121        trees.push(self.blocks.order.clone());
122        batches.push(blocks_order_batch);
123
124        // Store transactions
125        let (txs_batch, txs_hashes) = self.transactions.insert_batch(&block.txs);
126        trees.push(self.transactions.main.clone());
127        batches.push(txs_batch);
128
129        // Store transactions_locations
130        let txs_locations_batch =
131            self.transactions.insert_batch_location(&txs_hashes, block.header.height);
132        trees.push(self.transactions.location.clone());
133        batches.push(txs_locations_batch);
134
135        // Perform an atomic transaction over the trees and apply the batches.
136        self.atomic_write(&trees, &batches)?;
137
138        Ok(block_hash)
139    }
140
141    /// Check if the given [`BlockInfo`] is in the database and all trees.
142    pub fn has_block(&self, block: &BlockInfo) -> Result<bool> {
143        let blockhash = match self.blocks.get_order(&[block.header.height], true) {
144            Ok(v) => v[0].unwrap(),
145            Err(_) => return Ok(false),
146        };
147
148        // Check if we have all transactions
149        let txs: Vec<TransactionHash> = block.txs.iter().map(|tx| tx.hash()).collect();
150        if self.transactions.get(&txs, true).is_err() {
151            return Ok(false)
152        }
153
154        // Check provided info produces the same hash
155        Ok(blockhash == block.hash())
156    }
157
158    /// Retrieve [`BlockInfo`]s by given hashes. Fails if any of them is not found.
159    pub fn get_blocks_by_hash(&self, hashes: &[HeaderHash]) -> Result<Vec<BlockInfo>> {
160        let blocks = self.blocks.get(hashes, true)?;
161        let blocks: Vec<Block> = blocks.iter().map(|x| x.clone().unwrap()).collect();
162        let ret = self.get_blocks_infos(&blocks)?;
163
164        Ok(ret)
165    }
166
167    /// Retrieve all [`BlockInfo`] for given slice of [`Block`].
168    /// Fails if any of them is not found
169    fn get_blocks_infos(&self, blocks: &[Block]) -> Result<Vec<BlockInfo>> {
170        let mut ret = Vec::with_capacity(blocks.len());
171        for block in blocks {
172            let headers = self.headers.get(&[block.header], true)?;
173            // Since we used strict get, its safe to unwrap here
174            let header = headers[0].clone().unwrap();
175
176            let txs = self.transactions.get(&block.txs, true)?;
177            let txs = txs.iter().map(|x| x.clone().unwrap()).collect();
178
179            let info = BlockInfo::new(header, txs, block.signature);
180            ret.push(info);
181        }
182
183        Ok(ret)
184    }
185
186    /// Retrieve [`BlockInfo`]s by given heights. Does not fail if any of them are not found.
187    pub fn get_blocks_by_heights(&self, heights: &[u32]) -> Result<Vec<BlockInfo>> {
188        debug!(target: "blockchain", "get_blocks_by_heights(): {heights:?}");
189        let blockhashes = self.blocks.get_order(heights, false)?;
190
191        let mut hashes = vec![];
192        for i in blockhashes.into_iter().flatten() {
193            hashes.push(i);
194        }
195
196        self.get_blocks_by_hash(&hashes)
197    }
198
199    /// Retrieve [`Header`]s by given hashes. Fails if any of them is not found.
200    pub fn get_headers_by_hash(&self, hashes: &[HeaderHash]) -> Result<Vec<Header>> {
201        let headers = self.headers.get(hashes, true)?;
202        let ret: Vec<Header> = headers.iter().map(|x| x.clone().unwrap()).collect();
203
204        Ok(ret)
205    }
206
207    /// Retrieve [`Header`]s by given heights. Fails if any of them is not found.
208    pub fn get_headers_by_heights(&self, heights: &[u32]) -> Result<Vec<Header>> {
209        debug!(target: "blockchain", "get_headers_by_heights(): {heights:?}");
210        let blockhashes = self.blocks.get_order(heights, true)?;
211
212        let mut hashes = vec![];
213        for i in blockhashes.into_iter().flatten() {
214            hashes.push(i);
215        }
216
217        self.get_headers_by_hash(&hashes)
218    }
219
220    /// Retrieve n headers before given block height.
221    pub fn get_headers_before(&self, height: u32, n: usize) -> Result<Vec<Header>> {
222        debug!(target: "blockchain", "get_headers_before(): {height} -> {n}");
223        let hashes = self.blocks.get_before(height, n)?;
224        let headers = self.headers.get(&hashes, true)?;
225        Ok(headers.iter().map(|h| h.clone().unwrap()).collect())
226    }
227
    /// Retrieve stored blocks count.
    ///
    /// Delegates to the blocks store; see [`Self::is_empty`] for the
    /// zero-blocks check.
    pub fn len(&self) -> usize {
        self.blocks.len()
    }
232
    /// Retrieve stored txs count.
    ///
    /// Delegates to the transactions store.
    pub fn txs_len(&self) -> usize {
        self.transactions.len()
    }
237
    /// Check if blockchain contains any blocks.
    ///
    /// Delegates to the blocks store emptiness check.
    pub fn is_empty(&self) -> bool {
        self.blocks.is_empty()
    }
242
    /// Retrieve genesis (first) block height and hash.
    ///
    /// Delegates to the blocks store first-record lookup; presumably
    /// errors on an empty chain — verify against `BlockStore::get_first`.
    pub fn genesis(&self) -> Result<(u32, HeaderHash)> {
        self.blocks.get_first()
    }
247
248    /// Retrieve genesis (first) block info.
249    pub fn genesis_block(&self) -> Result<BlockInfo> {
250        let (_, hash) = self.genesis()?;
251        Ok(self.get_blocks_by_hash(&[hash])?[0].clone())
252    }
253
    /// Retrieve the last block height and hash.
    ///
    /// Delegates to the blocks store last-record lookup.
    pub fn last(&self) -> Result<(u32, HeaderHash)> {
        self.blocks.get_last()
    }
258
259    /// Retrieve the last block header.
260    pub fn last_header(&self) -> Result<Header> {
261        let (_, hash) = self.last()?;
262        Ok(self.headers.get(&[hash], true)?[0].clone().unwrap())
263    }
264
265    /// Retrieve the last block info.
266    pub fn last_block(&self) -> Result<BlockInfo> {
267        let (_, hash) = self.last()?;
268        Ok(self.get_blocks_by_hash(&[hash])?[0].clone())
269    }
270
271    /// Retrieve the last block difficulty. If the tree is empty,
272    /// returns `BlockDifficulty::genesis` difficulty.
273    pub fn last_block_difficulty(&self) -> Result<BlockDifficulty> {
274        if let Some(found) = self.blocks.get_last_difficulty()? {
275            return Ok(found)
276        }
277
278        let genesis_block = self.genesis_block()?;
279        Ok(BlockDifficulty::genesis(genesis_block.header.timestamp))
280    }
281
282    /// Check if block order for the given height is in the database.
283    pub fn has_height(&self, height: u32) -> Result<bool> {
284        let vec = match self.blocks.get_order(&[height], true) {
285            Ok(v) => v,
286            Err(_) => return Ok(false),
287        };
288        Ok(!vec.is_empty())
289    }
290
291    /// Insert a given slice of pending transactions into the blockchain database.
292    /// On success, the function returns the transaction hashes in the same order
293    /// as the input transactions.
294    pub fn add_pending_txs(&self, txs: &[Transaction]) -> Result<Vec<TransactionHash>> {
295        let (txs_batch, txs_hashes) = self.transactions.insert_batch_pending(txs);
296        let txs_order_batch = self.transactions.insert_batch_pending_order(&txs_hashes)?;
297
298        // Perform an atomic transaction over the trees and apply the batches.
299        let trees = [self.transactions.pending.clone(), self.transactions.pending_order.clone()];
300        let batches = [txs_batch, txs_order_batch];
301        self.atomic_write(&trees, &batches)?;
302
303        Ok(txs_hashes)
304    }
305
306    /// Retrieve all transactions from the pending tx store.
307    /// Be careful as this will try to load everything in memory.
308    pub fn get_pending_txs(&self) -> Result<Vec<Transaction>> {
309        let txs = self.transactions.get_all_pending()?;
310        let indexes = self.transactions.get_all_pending_order()?;
311        if txs.len() != indexes.len() {
312            return Err(Error::InvalidInputLengths)
313        }
314
315        let mut ret = Vec::with_capacity(txs.len());
316        for index in indexes {
317            ret.push(txs.get(&index.1).unwrap().clone());
318        }
319
320        Ok(ret)
321    }
322
323    /// Remove a given slice of pending transactions from the blockchain database.
324    pub fn remove_pending_txs(&self, txs: &[Transaction]) -> Result<()> {
325        let txs_hashes: Vec<TransactionHash> = txs.iter().map(|tx| tx.hash()).collect();
326        self.remove_pending_txs_hashes(&txs_hashes)
327    }
328
329    /// Remove a given slice of pending transactions hashes from the blockchain database.
330    pub fn remove_pending_txs_hashes(&self, txs: &[TransactionHash]) -> Result<()> {
331        let indexes = self.transactions.get_all_pending_order()?;
332        // We could do indexes.iter().map(|x| txs.contains(x.1)).collect.map(|x| x.0).collect
333        // but this is faster since we don't do the second iteration
334        let mut removed_indexes = vec![];
335        for index in indexes {
336            if txs.contains(&index.1) {
337                removed_indexes.push(index.0);
338            }
339        }
340
341        let txs_batch = self.transactions.remove_batch_pending(txs);
342        let txs_order_batch = self.transactions.remove_batch_pending_order(&removed_indexes);
343
344        // Perform an atomic transaction over the trees and apply the batches.
345        let trees = [self.transactions.pending.clone(), self.transactions.pending_order.clone()];
346        let batches = [txs_batch, txs_order_batch];
347        self.atomic_write(&trees, &batches)?;
348
349        Ok(())
350    }
351
352    /// Auxiliary function to write to multiple trees completely atomic.
353    fn atomic_write(&self, trees: &[sled::Tree], batches: &[sled::Batch]) -> Result<()> {
354        if trees.len() != batches.len() {
355            return Err(Error::InvalidInputLengths)
356        }
357
358        trees.transaction(|trees| {
359            for (index, tree) in trees.iter().enumerate() {
360                tree.apply_batch(&batches[index])?;
361            }
362
363            Ok::<(), sled::transaction::ConflictableTransactionError<sled::Error>>(())
364        })?;
365
366        Ok(())
367    }
368
369    /// Retrieve all blocks contained in the blockchain in order.
370    /// Be careful as this will try to load everything in memory.
371    pub fn get_all(&self) -> Result<Vec<BlockInfo>> {
372        let order = self.blocks.get_all_order()?;
373        let order: Vec<HeaderHash> = order.iter().map(|x| x.1).collect();
374        let blocks = self.get_blocks_by_hash(&order)?;
375
376        Ok(blocks)
377    }
378
379    /// Retrieve [`BlockInfo`]s by given heights range.
380    pub fn get_by_range(&self, start: u32, end: u32) -> Result<Vec<BlockInfo>> {
381        let blockhashes = self.blocks.get_order_by_range(start, end)?;
382        let hashes: Vec<HeaderHash> = blockhashes.into_iter().map(|(_, hash)| hash).collect();
383        self.get_blocks_by_hash(&hashes)
384    }
385
386    /// Retrieve last 'N' [`BlockInfo`]s from the blockchain.
387    pub fn get_last_n(&self, n: usize) -> Result<Vec<BlockInfo>> {
388        let records = self.blocks.get_last_n_orders(n)?;
389
390        let mut last_n = vec![];
391        for record in records {
392            let header_hash = record.1;
393            let blocks = self.get_blocks_by_hash(&[header_hash])?;
394            for block in blocks {
395                last_n.push(block.clone());
396            }
397        }
398
399        Ok(last_n)
400    }
401
    /// Auxiliary function to reset the blockchain and consensus state
    /// to the provided block height.
    ///
    /// Walks the stored state inverse diffs from the current last height
    /// down to `height + 1`, applying each one over an overlay so the
    /// database ends up at the requested height. A no-op if `height` is
    /// at or beyond the current tip.
    pub fn reset_to_height(&self, height: u32) -> Result<()> {
        // First we grab the last block height
        let (last, _) = self.last()?;

        // Check if request height is after our last height
        if height >= last {
            return Ok(())
        }

        // Grab all state inverse diffs until requested height,
        // going backwards.
        let heights: Vec<u32> = (height + 1..=last).rev().collect();
        let inverse_diffs = self.blocks.get_state_inverse_diff(&heights, true)?;

        // Create an overlay to apply the reverse diffs
        let overlay = BlockchainOverlay::new(self)?;

        // Apply the inverse diffs sequence
        let overlay_lock = overlay.lock().unwrap();
        let mut lock = overlay_lock.overlay.lock().unwrap();
        for inverse_diff in inverse_diffs {
            // Since we used strict retrieval it's safe to unwrap here
            let inverse_diff = inverse_diff.unwrap();
            lock.add_diff(&inverse_diff)?;
            lock.apply_diff(&inverse_diff)?;
            // Flush so each applied diff is persisted before moving
            // on to the next one
            self.sled_db.flush()?;
        }
        drop(lock);
        drop(overlay_lock);

        Ok(())
    }
436
    /// Generate a Monotree(SMT) containing all contracts states
    /// roots, along with the wasm bincodes monotree root.
    ///
    /// Delegates to the contracts store, handing it the main sled db
    /// connection.
    ///
    /// Note: native contracts zkas tree and wasm bincodes are excluded.
    pub fn get_state_monotree(&self) -> Result<Monotree<monotree::MemoryDb>> {
        self.contracts.get_state_monotree(&self.sled_db)
    }
444
    /// Grab the RandomX VM current and next key, based on provided key
    /// changing height and delay. Optionally, a height can be provided
    /// to get the keys before it.
    ///
    /// NOTE: the height calculation logic is verified using test:
    ///       test_randomx_keys_retrieval_logic
    pub fn get_randomx_vm_keys(
        &self,
        key_change_height: &u32,
        key_change_delay: &u32,
        height: Option<u32>,
    ) -> Result<(HeaderHash, HeaderHash)> {
        // Grab last known block header. When a height is provided, use
        // the header right before it (or height 0 itself).
        let last = match height {
            Some(h) => &self.get_headers_by_heights(&[if h != 0 { h - 1 } else { 0 }])?[0],
            None => &self.last_header()?,
        };

        // Check if we passed the first key change height
        if &last.height <= key_change_height {
            // Genesis is our current
            let current = self.genesis()?.1;

            // Check if last known block header is the next key
            let next = if &last.height == key_change_height { last.hash() } else { current };

            return Ok((current, next))
        }

        // Find the current and next key based on distance of last
        // known block header height from the key change height.
        let distance = last.height % key_change_height;

        // When distance is 0, current key is the block header
        // located at last_height - key_change_height height, while
        // last known block header is the next key.
        if distance == 0 {
            return Ok((
                self.get_headers_by_heights(&[last.height - key_change_height])?[0].hash(),
                last.hash(),
            ))
        }

        // When distance is less than key change delay, current key
        // is the block header located at last_height - (distance + key_change_height)
        // height, while the block header located at last_height - distance
        // height is the next key.
        if &distance < key_change_delay {
            return Ok((
                self.get_headers_by_heights(&[last.height - (distance + key_change_height)])?[0]
                    .hash(),
                self.get_headers_by_heights(&[last.height - distance])?[0].hash(),
            ))
        }

        // When distance is greater or equal to key change delay,
        // current key is the block header located at last_height - distance
        // height and we don't know the next key.
        let current = self.get_headers_by_heights(&[last.height - distance])?[0].hash();
        Ok((current, current))
    }
506}
507
/// Atomic pointer to sled db overlay, shared across threads via
/// `Arc<Mutex<_>>`.
pub type SledDbOverlayPtr = Arc<Mutex<sled_overlay::SledDbOverlay>>;

/// Atomic pointer to blockchain overlay, shared across threads via
/// `Arc<Mutex<_>>`.
pub type BlockchainOverlayPtr = Arc<Mutex<BlockchainOverlay>>;
513
/// Overlay structure over a [`Blockchain`] instance.
///
/// Writes go to the in-memory overlay instead of the underlying sled db,
/// so they can be checkpointed and reverted (see [`Self::checkpoint`]
/// and [`Self::revert_to_checkpoint`]).
pub struct BlockchainOverlay {
    /// Main [`sled_overlay::SledDbOverlay`] to the sled db connection
    pub overlay: SledDbOverlayPtr,
    /// Headers overlay
    pub headers: HeaderStoreOverlay,
    /// Blocks overlay
    pub blocks: BlockStoreOverlay,
    /// Transactions overlay
    pub transactions: TxStoreOverlay,
    /// Contract overlay
    pub contracts: ContractStoreOverlay,
}
527
528impl BlockchainOverlay {
529    /// Instantiate a new `BlockchainOverlay` over the given [`Blockchain`] instance.
530    pub fn new(blockchain: &Blockchain) -> Result<BlockchainOverlayPtr> {
531        // Here we configure all our blockchain sled trees to be protected in the overlay
532        let protected_trees = vec![
533            SLED_BLOCK_TREE,
534            SLED_BLOCK_ORDER_TREE,
535            SLED_BLOCK_DIFFICULTY_TREE,
536            SLED_BLOCK_STATE_INVERSE_DIFF_TREE,
537            SLED_HEADER_TREE,
538            SLED_SYNC_HEADER_TREE,
539            SLED_TX_TREE,
540            SLED_TX_LOCATION_TREE,
541            SLED_PENDING_TX_TREE,
542            SLED_PENDING_TX_ORDER_TREE,
543            SLED_CONTRACTS_TREE,
544            SLED_CONTRACTS_TREES_TREE,
545            SLED_BINCODE_TREE,
546        ];
547        let overlay = Arc::new(Mutex::new(sled_overlay::SledDbOverlay::new(
548            &blockchain.sled_db,
549            protected_trees,
550        )));
551        let headers = HeaderStoreOverlay::new(&overlay)?;
552        let blocks = BlockStoreOverlay::new(&overlay)?;
553        let transactions = TxStoreOverlay::new(&overlay)?;
554        let contracts = ContractStoreOverlay::new(&overlay)?;
555
556        Ok(Arc::new(Mutex::new(Self { overlay, headers, blocks, transactions, contracts })))
557    }
558
    /// Check if blockchain contains any blocks.
    ///
    /// Unlike [`Blockchain::is_empty`], this is fallible since it reads
    /// through the overlay.
    pub fn is_empty(&self) -> Result<bool> {
        self.blocks.is_empty()
    }
563
    /// Retrieve the last block height and hash.
    ///
    /// Delegates to the blocks overlay last-record lookup.
    pub fn last(&self) -> Result<(u32, HeaderHash)> {
        self.blocks.get_last()
    }
568
569    /// Retrieve the last block info.
570    pub fn last_block(&self) -> Result<BlockInfo> {
571        let (_, hash) = self.last()?;
572        Ok(self.get_blocks_by_hash(&[hash])?[0].clone())
573    }
574
575    /// Retrieve the last block height.
576    pub fn last_block_height(&self) -> Result<u32> {
577        Ok(self.last()?.0)
578    }
579
580    /// Retrieve the last block timestamp.
581    pub fn last_block_timestamp(&self) -> Result<Timestamp> {
582        let (_, hash) = self.last()?;
583        Ok(self.get_blocks_by_hash(&[hash])?[0].header.timestamp)
584    }
585
586    /// Insert a given [`BlockInfo`] into the overlay.
587    /// This functions wraps all the logic of separating the block into specific
588    /// data that can be fed into the different trees of the overlay.
589    /// Upon success, the functions returns the block hash that
590    /// were given and appended to the overlay.
591    /// Since we are adding to the overlay, we don't need to exeucte
592    /// the writes atomically.
593    pub fn add_block(&self, block: &BlockInfo) -> Result<HeaderHash> {
594        // Store header
595        self.headers.insert(slice::from_ref(&block.header))?;
596
597        // Store block
598        let blk: Block = Block::from_block_info(block);
599        let txs_hashes = blk.txs.clone();
600        let block_hash = self.blocks.insert(&[blk])?[0];
601        let block_hash_vec = [block_hash];
602
603        // Store block order
604        self.blocks.insert_order(&[block.header.height], &block_hash_vec)?;
605
606        // Store transactions
607        self.transactions.insert(&block.txs)?;
608
609        // Store transactions locations
610        self.transactions.insert_location(&txs_hashes, block.header.height)?;
611
612        Ok(block_hash)
613    }
614
615    /// Check if the given [`BlockInfo`] is in the database and all trees.
616    pub fn has_block(&self, block: &BlockInfo) -> Result<bool> {
617        let blockhash = match self.blocks.get_order(&[block.header.height], true) {
618            Ok(v) => v[0].unwrap(),
619            Err(_) => return Ok(false),
620        };
621
622        // Check if we have all transactions
623        let txs: Vec<TransactionHash> = block.txs.iter().map(|tx| tx.hash()).collect();
624        if self.transactions.get(&txs, true).is_err() {
625            return Ok(false)
626        }
627
628        // Check provided info produces the same hash
629        Ok(blockhash == block.hash())
630    }
631
632    /// Retrieve [`Header`]s by given hashes. Fails if any of them is not found.
633    pub fn get_headers_by_hash(&self, hashes: &[HeaderHash]) -> Result<Vec<Header>> {
634        let headers = self.headers.get(hashes, true)?;
635        let ret: Vec<Header> = headers.iter().map(|x| x.clone().unwrap()).collect();
636
637        Ok(ret)
638    }
639
640    /// Retrieve [`BlockInfo`]s by given hashes. Fails if any of them is not found.
641    pub fn get_blocks_by_hash(&self, hashes: &[HeaderHash]) -> Result<Vec<BlockInfo>> {
642        let blocks = self.blocks.get(hashes, true)?;
643        let blocks: Vec<Block> = blocks.iter().map(|x| x.clone().unwrap()).collect();
644        let ret = self.get_blocks_infos(&blocks)?;
645
646        Ok(ret)
647    }
648
649    /// Retrieve all [`BlockInfo`] for given slice of [`Block`].
650    /// Fails if any of them is not found
651    fn get_blocks_infos(&self, blocks: &[Block]) -> Result<Vec<BlockInfo>> {
652        let mut ret = Vec::with_capacity(blocks.len());
653        for block in blocks {
654            let headers = self.headers.get(&[block.header], true)?;
655            // Since we used strict get, its safe to unwrap here
656            let header = headers[0].clone().unwrap();
657
658            let txs = self.transactions.get(&block.txs, true)?;
659            let txs = txs.iter().map(|x| x.clone().unwrap()).collect();
660
661            let info = BlockInfo::new(header, txs, block.signature);
662            ret.push(info);
663        }
664
665        Ok(ret)
666    }
667
668    /// Retrieve [`Block`]s by given hashes and return their transactions hashes.
669    pub fn get_blocks_txs_hashes(&self, hashes: &[HeaderHash]) -> Result<Vec<TransactionHash>> {
670        let blocks = self.blocks.get(hashes, true)?;
671        let mut ret = vec![];
672        for block in blocks {
673            ret.extend_from_slice(&block.unwrap().txs);
674        }
675
676        Ok(ret)
677    }
678
    /// Checkpoint overlay so we can revert to it, if needed.
    ///
    /// Grabs the overlay lock and records the current overlay state via
    /// `SledDbOverlay::checkpoint`.
    pub fn checkpoint(&self) {
        self.overlay.lock().unwrap().checkpoint();
    }
683
684    /// Revert to current overlay checkpoint.
685    pub fn revert_to_checkpoint(&self) -> Result<()> {
686        self.overlay.lock().unwrap().revert_to_checkpoint()?;
687
688        Ok(())
689    }
690
691    /// Auxiliary function to create a full clone using SledDbOverlay::clone,
692    /// generating new pointers for the underlying overlays.
693    pub fn full_clone(&self) -> Result<BlockchainOverlayPtr> {
694        let overlay = Arc::new(Mutex::new(self.overlay.lock().unwrap().clone()));
695        let headers = HeaderStoreOverlay::new(&overlay)?;
696        let blocks = BlockStoreOverlay::new(&overlay)?;
697        let transactions = TxStoreOverlay::new(&overlay)?;
698        let contracts = ContractStoreOverlay::new(&overlay)?;
699
700        Ok(Arc::new(Mutex::new(Self { overlay, headers, blocks, transactions, contracts })))
701    }
702
    /// Generate a Monotree(SMT) containing all contracts states
    /// roots, along with the wasm bincodes monotree root.
    /// A clone is used so we are not affected by the opened trees
    /// during roots computing.
    ///
    /// Note: native contracts zkas tree and wasm bincodes are excluded.
    pub fn get_state_monotree(&self) -> Result<Monotree<monotree::MemoryDb>> {
        // Compute over a full clone so this overlay's trees stay untouched
        self.full_clone()?.lock().unwrap().contracts.get_state_monotree()
    }
712}
713
714/// Parse a sled record in the form of a tuple (`key`, `value`).
715pub fn parse_record<T1: Decodable, T2: Decodable>(record: (IVec, IVec)) -> Result<(T1, T2)> {
716    let key = deserialize(&record.0)?;
717    let value = deserialize(&record.1)?;
718
719    Ok((key, value))
720}
721
722/// Parse a sled record with a u32 key, encoded in Big Endian bytes,
723/// in the form of a tuple (`key`, `value`).
724pub fn parse_u32_key_record<T: Decodable>(record: (IVec, IVec)) -> Result<(u32, T)> {
725    let key_bytes: [u8; 4] = record.0.as_ref().try_into().unwrap();
726    let key = u32::from_be_bytes(key_bytes);
727    let value = deserialize(&record.1)?;
728
729    Ok((key, value))
730}
731
732/// Parse a sled record with a u64 key, encoded in Big Endian bytes,
733/// in the form of a tuple (`key`, `value`).
734pub fn parse_u64_key_record<T: Decodable>(record: (IVec, IVec)) -> Result<(u64, T)> {
735    let key_bytes: [u8; 8] = record.0.as_ref().try_into().unwrap();
736    let key = u64::from_be_bytes(key_bytes);
737    let value = deserialize(&record.1)?;
738
739    Ok((key, value))
740}
741
#[cfg(feature = "async-serial")]
/// Parse a sled record in the form of a tuple (`key`, `value`).
pub async fn parse_record_async<T1: AsyncDecodable, T2: AsyncDecodable>(
    record: (IVec, IVec),
) -> Result<(T1, T2)> {
    let (key_bytes, value_bytes) = record;
    let key = deserialize_async(&key_bytes).await?;
    let value = deserialize_async(&value_bytes).await?;

    Ok((key, value))
}
752
#[cfg(feature = "async-serial")]
/// Parse a sled record with a u32 key, encoded in Big Endian bytes,
/// in the form of a tuple (`key`, `value`).
pub async fn parse_u32_key_record_async<T: AsyncDecodable>(
    record: (IVec, IVec),
) -> Result<(u32, T)> {
    // Keys in these trees are always 4 big-endian bytes, hence the unwrap
    let key = u32::from_be_bytes(record.0.as_ref().try_into().unwrap());
    Ok((key, deserialize_async(&record.1).await?))
}
765
#[cfg(feature = "async-serial")]
/// Parse a sled record with a u64 key, encoded in Big Endian bytes,
/// in the form of a tuple (`key`, `value`).
pub async fn parse_u64_key_record_async<T: AsyncDecodable>(
    record: (IVec, IVec),
) -> Result<(u64, T)> {
    // Keys in these trees are always 8 big-endian bytes, hence the unwrap
    let key = u64::from_be_bytes(record.0.as_ref().try_into().unwrap());
    Ok((key, deserialize_async(&record.1).await?))
}
778
779#[cfg(test)]
780mod tests {
781    use crate::validator::pow::{RANDOMX_KEY_CHANGE_DELAY, RANDOMX_KEY_CHANGING_HEIGHT};
782
783    /// Compute the RandomX VM current and next key heights, based on
784    /// provided key changing height and delay.
785    fn get_randomx_vm_keys_heights(last: u32) -> (u32, u32) {
786        // Check if we passed the first key change height
787        if last <= RANDOMX_KEY_CHANGING_HEIGHT {
788            // Genesis is our current
789            let current = 0;
790
791            // Check if last height is the next key height
792            let next = if last == RANDOMX_KEY_CHANGING_HEIGHT { last } else { current };
793
794            return (current, next)
795        }
796
797        // Find the current and next key based on distance of last
798        // height from the key change height.
799        let distance = last % RANDOMX_KEY_CHANGING_HEIGHT;
800
801        // When distance is 0, current key is the last_height - RANDOMX_KEY_CHANGING_HEIGHT
802        // height, while last is the next key.
803        if distance == 0 {
804            return (last - RANDOMX_KEY_CHANGING_HEIGHT, last)
805        }
806
807        // When distance is less than key change delay, current key
808        // is the last_height - (distance + RANDOMX_KEY_CHANGING_HEIGHT) height,
809        // while the last_height - distance height is the next key.
810        if distance < RANDOMX_KEY_CHANGE_DELAY {
811            return (last - (distance + RANDOMX_KEY_CHANGING_HEIGHT), last - distance)
812        }
813
814        // When distance is greater or equal to key change delay,
815        // current key is the last_height - distance height and we
816        // don't know the next key height.
817        let current = last - distance;
818        (current, current)
819    }
820
    /// Exercise `get_randomx_vm_keys_heights` over every branch of the
    /// height calculation logic (the same logic used by
    /// `Blockchain::get_randomx_vm_keys`).
    #[test]
    fn test_randomx_keys_retrieval_logic() {
        // last < RANDOMX_KEY_CHANGING_HEIGHT(2048)
        let (current, next) = get_randomx_vm_keys_heights(2047);
        assert_eq!(current, 0);
        assert_eq!(next, 0);

        // last == RANDOMX_KEY_CHANGING_HEIGHT(2048)
        let (current, next) = get_randomx_vm_keys_heights(2048);
        assert_eq!(current, 0);
        assert_eq!(next, 2048);

        // last > RANDOMX_KEY_CHANGING_HEIGHT(2048)
        // last % RANDOMX_KEY_CHANGING_HEIGHT(2048) == 0
        let (current, next) = get_randomx_vm_keys_heights(4096);
        assert_eq!(current, 2048);
        assert_eq!(next, 4096);

        // last % RANDOMX_KEY_CHANGING_HEIGHT(2048) < RANDOMX_KEY_CHANGE_DELAY(64)
        let (current, next) = get_randomx_vm_keys_heights(4097);
        assert_eq!(current, 2048);
        assert_eq!(next, 4096);

        // last % RANDOMX_KEY_CHANGING_HEIGHT(2048) == RANDOMX_KEY_CHANGE_DELAY(64)
        let (current, next) = get_randomx_vm_keys_heights(4160);
        assert_eq!(current, 4096);
        assert_eq!(next, 4096);

        // last % RANDOMX_KEY_CHANGING_HEIGHT(2048) > RANDOMX_KEY_CHANGE_DELAY(64)
        let (current, next) = get_randomx_vm_keys_heights(4161);
        assert_eq!(current, 4096);
        assert_eq!(next, 4096);
    }
854}