darkfi/blockchain/mod.rs

/* This file is part of DarkFi (https://dark.fi)
 *
 * Copyright (C) 2020-2026 Dyne.org foundation
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */

use std::{
    slice,
    sync::{Arc, Mutex},
};

use darkfi_sdk::tx::TransactionHash;
use darkfi_serial::{deserialize, Decodable};
use sled_overlay::{
    sled,
    sled::{IVec, Transactional},
};
use tracing::debug;

#[cfg(feature = "async-serial")]
use darkfi_serial::{deserialize_async, AsyncDecodable};

use crate::{tx::Transaction, util::time::Timestamp, Error, Result};

/// Block related definitions and storage implementations
pub mod block_store;
pub use block_store::{
    Block, BlockDifficulty, BlockInfo, BlockStore, BlockStoreOverlay, SLED_BLOCK_DIFFICULTY_TREE,
    SLED_BLOCK_ORDER_TREE, SLED_BLOCK_STATE_INVERSE_DIFF_TREE, SLED_BLOCK_TREE,
};

/// Header definition and storage implementation
pub mod header_store;
pub use header_store::{
    Header, HeaderHash, HeaderStore, HeaderStoreOverlay, SLED_HEADER_TREE, SLED_SYNC_HEADER_TREE,
};

/// Transactions related storage implementations
pub mod tx_store;
pub use tx_store::{
    TxStore, TxStoreOverlay, SLED_PENDING_TX_ORDER_TREE, SLED_PENDING_TX_TREE,
    SLED_TX_LOCATION_TREE, SLED_TX_TREE,
};

/// Contracts and Wasm storage implementations
pub mod contract_store;
pub use contract_store::{
    ContractStore, ContractStoreOverlay, SLED_BINCODE_TREE, SLED_CONTRACTS_TREE,
    SLED_CONTRACTS_TREES_TREE,
};

/// Monero definitions needed for merge mining
pub mod monero;

/// Structure holding all sled trees that define the concept of Blockchain.
#[derive(Clone)]
pub struct Blockchain {
    /// Main pointer to the sled db connection
    pub sled_db: sled::Db,
    /// Headers sled tree
    pub headers: HeaderStore,
    /// Blocks sled tree
    pub blocks: BlockStore,
    /// Transactions related sled trees
    pub transactions: TxStore,
    /// Contracts related sled trees
    pub contracts: ContractStore,
}

impl Blockchain {
    /// Instantiate a new `Blockchain` with the given `sled` database.
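    ///
    /// A minimal usage sketch (not compiled as a doctest; the database path below
    /// is only a placeholder):
    ///
    /// ```ignore
    /// // Open (or create) a sled database and build the blockchain handle on top of it.
    /// let db = sled::open("/tmp/darkfi_example_db")?;
    /// let blockchain = Blockchain::new(&db)?;
    /// assert!(blockchain.is_empty());
    /// ```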
    pub fn new(db: &sled::Db) -> Result<Self> {
        let headers = HeaderStore::new(db)?;
        let blocks = BlockStore::new(db)?;
        let transactions = TxStore::new(db)?;
        let contracts = ContractStore::new(db)?;

        Ok(Self { sled_db: db.clone(), headers, blocks, transactions, contracts })
    }

    /// Insert a given [`BlockInfo`] into the blockchain database.
    /// This function wraps all the logic of separating the block into specific
    /// data that can be fed into the different trees of the database.
    /// Upon success, the function returns the hash of the block that
    /// was appended to the ledger.
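    ///
    /// A minimal sketch of the intended call pattern (illustrative only;
    /// `some_valid_block()` is a hypothetical helper standing in for however
    /// the caller obtained a fully formed block):
    ///
    /// ```ignore
    /// let block: BlockInfo = some_valid_block();
    /// let hash = blockchain.add_block(&block)?;
    /// assert!(blockchain.has_block(&block)?);
    /// ```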
    pub fn add_block(&self, block: &BlockInfo) -> Result<HeaderHash> {
        let mut trees = vec![];
        let mut batches = vec![];

        // Store header
        let (headers_batch, _) = self.headers.insert_batch(slice::from_ref(&block.header));
        trees.push(self.headers.main.clone());
        batches.push(headers_batch);

        // Store block
        let blk: Block = Block::from_block_info(block);
        let (blocks_batch, block_hashes) = self.blocks.insert_batch(&[blk]);
        let block_hash = block_hashes[0];
        let block_hash_vec = [block_hash];
        trees.push(self.blocks.main.clone());
        batches.push(blocks_batch);

        // Store block order
        let blocks_order_batch =
            self.blocks.insert_batch_order(&[block.header.height], &block_hash_vec);
        trees.push(self.blocks.order.clone());
        batches.push(blocks_order_batch);

        // Store transactions
        let (txs_batch, txs_hashes) = self.transactions.insert_batch(&block.txs);
        trees.push(self.transactions.main.clone());
        batches.push(txs_batch);

        // Store transactions locations
        let txs_locations_batch =
            self.transactions.insert_batch_location(&txs_hashes, block.header.height);
        trees.push(self.transactions.location.clone());
        batches.push(txs_locations_batch);

        // Perform an atomic transaction over the trees and apply the batches.
        self.atomic_write(&trees, &batches)?;

        Ok(block_hash)
    }

    /// Check if the given [`BlockInfo`] is in the database and all trees.
    pub fn has_block(&self, block: &BlockInfo) -> Result<bool> {
        let blockhash = match self.blocks.get_order(&[block.header.height], true) {
            Ok(v) => v[0].unwrap(),
            Err(_) => return Ok(false),
        };

        // Check if we have all transactions
        let txs: Vec<TransactionHash> = block.txs.iter().map(|tx| tx.hash()).collect();
        if self.transactions.get(&txs, true).is_err() {
            return Ok(false)
        }

        // Check provided info produces the same hash
        Ok(blockhash == block.hash())
    }

    /// Retrieve [`BlockInfo`]s by given hashes. Fails if any of them is not found.
    pub fn get_blocks_by_hash(&self, hashes: &[HeaderHash]) -> Result<Vec<BlockInfo>> {
        let blocks = self.blocks.get(hashes, true)?;
        let blocks: Vec<Block> = blocks.iter().map(|x| x.clone().unwrap()).collect();
        let ret = self.get_blocks_infos(&blocks)?;

        Ok(ret)
    }

    /// Retrieve all [`BlockInfo`] for given slice of [`Block`].
    /// Fails if any of them is not found.
    fn get_blocks_infos(&self, blocks: &[Block]) -> Result<Vec<BlockInfo>> {
        let mut ret = Vec::with_capacity(blocks.len());
        for block in blocks {
            let headers = self.headers.get(&[block.header], true)?;
            // Since we used strict get, it's safe to unwrap here
            let header = headers[0].clone().unwrap();

            let txs = self.transactions.get(&block.txs, true)?;
            let txs = txs.iter().map(|x| x.clone().unwrap()).collect();

            let info = BlockInfo::new(header, txs, block.signature);
            ret.push(info);
        }

        Ok(ret)
    }

    /// Retrieve [`BlockInfo`]s by given heights. Does not fail if any of them are not found.
    pub fn get_blocks_by_heights(&self, heights: &[u32]) -> Result<Vec<BlockInfo>> {
        debug!(target: "blockchain", "get_blocks_by_heights(): {heights:?}");
        let blockhashes = self.blocks.get_order(heights, false)?;

        let mut hashes = vec![];
        for i in blockhashes.into_iter().flatten() {
            hashes.push(i);
        }

        self.get_blocks_by_hash(&hashes)
    }

    /// Retrieve [`Header`]s by given hashes. Fails if any of them is not found.
    pub fn get_headers_by_hash(&self, hashes: &[HeaderHash]) -> Result<Vec<Header>> {
        let headers = self.headers.get(hashes, true)?;
        let ret: Vec<Header> = headers.iter().map(|x| x.clone().unwrap()).collect();

        Ok(ret)
    }

    /// Retrieve [`Header`]s by given heights. Fails if any of them is not found.
    pub fn get_headers_by_heights(&self, heights: &[u32]) -> Result<Vec<Header>> {
        debug!(target: "blockchain", "get_headers_by_heights(): {heights:?}");
        let blockhashes = self.blocks.get_order(heights, true)?;

        let mut hashes = vec![];
        for i in blockhashes.into_iter().flatten() {
            hashes.push(i);
        }

        self.get_headers_by_hash(&hashes)
    }

    /// Retrieve n headers before given block height.
    pub fn get_headers_before(&self, height: u32, n: usize) -> Result<Vec<Header>> {
        debug!(target: "blockchain", "get_headers_before(): {height} -> {n}");
        let hashes = self.blocks.get_before(height, n)?;
        let headers = self.headers.get(&hashes, true)?;
        Ok(headers.iter().map(|h| h.clone().unwrap()).collect())
    }

    /// Retrieve stored blocks count
    pub fn len(&self) -> usize {
        self.blocks.len()
    }

    /// Retrieve stored txs count
    pub fn txs_len(&self) -> usize {
        self.transactions.len()
    }

    /// Check if blockchain contains any blocks
    pub fn is_empty(&self) -> bool {
        self.blocks.is_empty()
    }

    /// Retrieve genesis (first) block height and hash.
    pub fn genesis(&self) -> Result<(u32, HeaderHash)> {
        self.blocks.get_first()
    }

    /// Retrieve genesis (first) block info.
    pub fn genesis_block(&self) -> Result<BlockInfo> {
        let (_, hash) = self.genesis()?;
        Ok(self.get_blocks_by_hash(&[hash])?[0].clone())
    }

    /// Retrieve the last block height and hash.
    pub fn last(&self) -> Result<(u32, HeaderHash)> {
        self.blocks.get_last()
    }

    /// Retrieve the last block header.
    pub fn last_header(&self) -> Result<Header> {
        let (_, hash) = self.last()?;
        Ok(self.headers.get(&[hash], true)?[0].clone().unwrap())
    }

    /// Retrieve the last block info.
    pub fn last_block(&self) -> Result<BlockInfo> {
        let (_, hash) = self.last()?;
        Ok(self.get_blocks_by_hash(&[hash])?[0].clone())
    }

    /// Retrieve the last block difficulty. If the tree is empty,
    /// returns `BlockDifficulty::genesis` difficulty.
    pub fn last_block_difficulty(&self) -> Result<BlockDifficulty> {
        if let Some(found) = self.blocks.get_last_difficulty()? {
            return Ok(found)
        }

        let genesis_block = self.genesis_block()?;
        Ok(BlockDifficulty::genesis(genesis_block.header.timestamp))
    }

    /// Check if block order for the given height is in the database.
    pub fn has_height(&self, height: u32) -> Result<bool> {
        let vec = match self.blocks.get_order(&[height], true) {
            Ok(v) => v,
            Err(_) => return Ok(false),
        };
        Ok(!vec.is_empty())
    }

    /// Insert a given slice of pending transactions into the blockchain database.
    /// On success, the function returns the transaction hashes in the same order
    /// as the input transactions.
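    ///
    /// A minimal sketch (illustrative only; `txs` stands in for transactions
    /// received from the network or a wallet):
    ///
    /// ```ignore
    /// let hashes = blockchain.add_pending_txs(&txs)?;
    /// assert_eq!(hashes.len(), txs.len());
    /// assert_eq!(hashes[0], txs[0].hash());
    /// ```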
    pub fn add_pending_txs(&self, txs: &[Transaction]) -> Result<Vec<TransactionHash>> {
        let (txs_batch, txs_hashes) = self.transactions.insert_batch_pending(txs);
        let txs_order_batch = self.transactions.insert_batch_pending_order(&txs_hashes)?;

        // Perform an atomic transaction over the trees and apply the batches.
        let trees = [self.transactions.pending.clone(), self.transactions.pending_order.clone()];
        let batches = [txs_batch, txs_order_batch];
        self.atomic_write(&trees, &batches)?;

        Ok(txs_hashes)
    }

    /// Retrieve all transactions from the pending tx store.
    /// Be careful as this will try to load everything in memory.
    pub fn get_pending_txs(&self) -> Result<Vec<Transaction>> {
        let txs = self.transactions.get_all_pending()?;
        let indexes = self.transactions.get_all_pending_order()?;
        if txs.len() != indexes.len() {
            return Err(Error::InvalidInputLengths)
        }

        let mut ret = Vec::with_capacity(txs.len());
        for index in indexes {
            ret.push(txs.get(&index.1).unwrap().clone());
        }

        Ok(ret)
    }

    /// Remove a given slice of pending transactions from the blockchain database.
    pub fn remove_pending_txs(&self, txs: &[Transaction]) -> Result<()> {
        let txs_hashes: Vec<TransactionHash> = txs.iter().map(|tx| tx.hash()).collect();
        self.remove_pending_txs_hashes(&txs_hashes)
    }

    /// Remove a given slice of pending transaction hashes from the blockchain database.
    pub fn remove_pending_txs_hashes(&self, txs: &[TransactionHash]) -> Result<()> {
        let indexes = self.transactions.get_all_pending_order()?;
        // We could do indexes.iter().filter(|x| txs.contains(&x.1)).collect() and then
        // map the result to its order indexes, but this loop avoids the second iteration
        let mut removed_indexes = vec![];
        for index in indexes {
            if txs.contains(&index.1) {
                removed_indexes.push(index.0);
            }
        }

        let txs_batch = self.transactions.remove_batch_pending(txs);
        let txs_order_batch = self.transactions.remove_batch_pending_order(&removed_indexes);

        // Perform an atomic transaction over the trees and apply the batches.
        let trees = [self.transactions.pending.clone(), self.transactions.pending_order.clone()];
        let batches = [txs_batch, txs_order_batch];
        self.atomic_write(&trees, &batches)?;

        Ok(())
    }

    /// Remove all transactions from the pending tx store that are not in the
    /// provided slice and rebuild the order of the remaining ones.
    pub fn reset_pending_txs(&self, exclude_txs: &[TransactionHash]) -> Result<()> {
        let mut txs = vec![];
        let mut removed_txs = vec![];
        for tx in self.transactions.get_all_pending()?.keys() {
            if exclude_txs.contains(tx) {
                txs.push(*tx);
                continue
            }
            removed_txs.push(*tx);
        }
        let indexes: Vec<u64> =
            self.transactions.get_all_pending_order()?.iter().map(|(k, _)| *k).collect();

        let txs_batch = self.transactions.remove_batch_pending(&removed_txs);
        let txs_order_batch = self.transactions.remove_batch_pending_order(&indexes);
        let txs_new_order_batch = self.transactions.insert_batch_pending_order(&txs)?;

        // Perform an atomic transaction over the trees and apply the batches.
        let trees = [
            self.transactions.pending.clone(),
            self.transactions.pending_order.clone(),
            self.transactions.pending_order.clone(),
        ];
        let batches = [txs_batch, txs_order_batch, txs_new_order_batch];
        self.atomic_write(&trees, &batches)?;

        Ok(())
    }

    /// Auxiliary function to write to multiple trees in a single atomic operation.
    fn atomic_write(&self, trees: &[sled::Tree], batches: &[sled::Batch]) -> Result<()> {
        if trees.len() != batches.len() {
            return Err(Error::InvalidInputLengths)
        }

        trees.transaction(|trees| {
            for (index, tree) in trees.iter().enumerate() {
                tree.apply_batch(&batches[index])?;
            }

            Ok::<(), sled::transaction::ConflictableTransactionError<sled::Error>>(())
        })?;

        Ok(())
    }

    /// Retrieve all blocks contained in the blockchain in order.
    /// Be careful as this will try to load everything in memory.
    pub fn get_all(&self) -> Result<Vec<BlockInfo>> {
        let order = self.blocks.get_all_order()?;
        let order: Vec<HeaderHash> = order.iter().map(|x| x.1).collect();
        let blocks = self.get_blocks_by_hash(&order)?;

        Ok(blocks)
    }

    /// Retrieve [`BlockInfo`]s by given heights range.
    pub fn get_by_range(&self, start: u32, end: u32) -> Result<Vec<BlockInfo>> {
        let blockhashes = self.blocks.get_order_by_range(start, end)?;
        let hashes: Vec<HeaderHash> = blockhashes.into_iter().map(|(_, hash)| hash).collect();
        self.get_blocks_by_hash(&hashes)
    }

    /// Retrieve last 'N' [`BlockInfo`]s from the blockchain.
    pub fn get_last_n(&self, n: usize) -> Result<Vec<BlockInfo>> {
        let records = self.blocks.get_last_n_orders(n)?;

        let mut last_n = vec![];
        for record in records {
            let header_hash = record.1;
            let blocks = self.get_blocks_by_hash(&[header_hash])?;
            for block in blocks {
                last_n.push(block.clone());
            }
        }

        Ok(last_n)
    }

    /// Auxiliary function to reset the blockchain and consensus state
    /// to the provided block height.
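    ///
    /// A minimal sketch (illustrative only; assumes the chain currently holds
    /// more than ten blocks):
    ///
    /// ```ignore
    /// // Roll the database back so that block 10 becomes the tip again.
    /// blockchain.reset_to_height(10)?;
    /// assert_eq!(blockchain.last()?.0, 10);
    /// ```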
    pub fn reset_to_height(&self, height: u32) -> Result<()> {
        // First we grab the last block height
        let (last, _) = self.last()?;

        // Check if the requested height is at or after our last height
        if height >= last {
            return Ok(())
        }

        // Grab all state inverse diffs until requested height,
        // going backwards.
        let heights: Vec<u32> = (height + 1..=last).rev().collect();
        let inverse_diffs = self.blocks.get_state_inverse_diff(&heights, true)?;

        // Create an overlay to apply the inverse diffs
        let overlay = BlockchainOverlay::new(self)?;

        // Apply the inverse diffs sequence
        let overlay_lock = overlay.lock().unwrap();
        let mut lock = overlay_lock.overlay.lock().unwrap();
        for inverse_diff in inverse_diffs {
            // Since we used strict retrieval, it's safe to unwrap here
            let inverse_diff = inverse_diff.unwrap();
            lock.add_diff(&inverse_diff)?;
            lock.apply_diff(&inverse_diff)?;
            self.sled_db.flush()?;
        }
        drop(lock);
        drop(overlay_lock);

        Ok(())
    }

    /// Grab the RandomX VM current and next key, based on the provided key
    /// change height and delay. Optionally, a height can be provided
    /// to get the keys before it.
    ///
    /// NOTE: the height calculation logic is verified by the
    /// `test_randomx_keys_retrieval_logic` test below.
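    ///
    /// A worked sketch of the height arithmetic (illustrative numbers only,
    /// matching the unit test):
    ///
    /// ```ignore
    /// // With key_change_height = 2048 and key_change_delay = 64:
    /// // - last header at 2047: genesis is the current key, the next key is unknown,
    /// // - last header at 4097: the block at 2048 is current, the block at 4096 is next,
    /// // - last header at 4161: the block at 4096 is current, the next key is unknown.
    /// let (current, next) = blockchain.get_randomx_vm_keys(&2048, &64, None)?;
    /// ```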
    pub fn get_randomx_vm_keys(
        &self,
        key_change_height: &u32,
        key_change_delay: &u32,
        height: Option<u32>,
    ) -> Result<(HeaderHash, Option<HeaderHash>)> {
        // Grab last known block header
        let last = match height {
            Some(h) => &self.get_headers_by_heights(&[if h != 0 { h - 1 } else { 0 }])?[0],
            None => &self.last_header()?,
        };

        // Check if we passed the first key change height
        if &last.height <= key_change_height {
            // Genesis is our current
            let current = self.genesis()?.1;

            // Check if last known block header is the next key
            let next = if &last.height == key_change_height { Some(last.hash()) } else { None };

            return Ok((current, next))
        }

        // Find the current and next key based on distance of last
        // known block header height from the key change height.
        let distance = last.height % key_change_height;

        // When distance is 0, current key is the block header
        // located at last_height - key_change_height height, while
        // last known block header is the next key.
        if distance == 0 {
            return Ok((
                self.get_headers_by_heights(&[last.height - key_change_height])?[0].hash(),
                Some(last.hash()),
            ))
        }

        // When distance is less than key change delay, current key
        // is the block header located at last_height - (distance + key_change_height)
        // height, while the block header located at last_height - distance
        // height is the next key.
        if &distance < key_change_delay {
            return Ok((
                self.get_headers_by_heights(&[last.height - (distance + key_change_height)])?[0]
                    .hash(),
                Some(self.get_headers_by_heights(&[last.height - distance])?[0].hash()),
            ))
        }

        // When distance is greater or equal to key change delay,
        // current key is the block header located at last_height - distance
        // height and we don't know the next key.
        let current = self.get_headers_by_heights(&[last.height - distance])?[0].hash();
        Ok((current, None))
    }
}

/// Atomic pointer to sled db overlay.
pub type SledDbOverlayPtr = Arc<Mutex<sled_overlay::SledDbOverlay>>;

/// Atomic pointer to blockchain overlay.
pub type BlockchainOverlayPtr = Arc<Mutex<BlockchainOverlay>>;

/// Overlay structure over a [`Blockchain`] instance.
pub struct BlockchainOverlay {
    /// Main [`sled_overlay::SledDbOverlay`] to the sled db connection
    pub overlay: SledDbOverlayPtr,
    /// Headers overlay
    pub headers: HeaderStoreOverlay,
    /// Blocks overlay
    pub blocks: BlockStoreOverlay,
    /// Transactions overlay
    pub transactions: TxStoreOverlay,
    /// Contract overlay
    pub contracts: ContractStoreOverlay,
}

impl BlockchainOverlay {
    /// Instantiate a new `BlockchainOverlay` over the given [`Blockchain`] instance.
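    ///
    /// A minimal sketch (illustrative only; `blockchain` and `block` are placeholders):
    ///
    /// ```ignore
    /// let overlay = BlockchainOverlay::new(&blockchain)?;
    /// // Writes go to the in-memory overlay; the underlying sled db is not touched
    /// // until the caller explicitly applies the collected changes.
    /// overlay.lock().unwrap().add_block(&block)?;
    /// ```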
    pub fn new(blockchain: &Blockchain) -> Result<BlockchainOverlayPtr> {
        // Here we configure all our blockchain sled trees to be protected in the overlay
        let protected_trees = vec![
            SLED_BLOCK_TREE,
            SLED_BLOCK_ORDER_TREE,
            SLED_BLOCK_DIFFICULTY_TREE,
            SLED_BLOCK_STATE_INVERSE_DIFF_TREE,
            SLED_HEADER_TREE,
            SLED_SYNC_HEADER_TREE,
            SLED_TX_TREE,
            SLED_TX_LOCATION_TREE,
            SLED_PENDING_TX_TREE,
            SLED_PENDING_TX_ORDER_TREE,
            SLED_CONTRACTS_TREE,
            SLED_CONTRACTS_TREES_TREE,
            SLED_BINCODE_TREE,
        ];
        let overlay = Arc::new(Mutex::new(sled_overlay::SledDbOverlay::new(
            &blockchain.sled_db,
            protected_trees,
        )));
        let headers = HeaderStoreOverlay::new(&overlay)?;
        let blocks = BlockStoreOverlay::new(&overlay)?;
        let transactions = TxStoreOverlay::new(&overlay)?;
        let contracts = ContractStoreOverlay::new(&overlay)?;

        Ok(Arc::new(Mutex::new(Self { overlay, headers, blocks, transactions, contracts })))
    }

    /// Check if blockchain contains any blocks
    pub fn is_empty(&self) -> Result<bool> {
        self.blocks.is_empty()
    }

    /// Retrieve the last block height and hash.
    pub fn last(&self) -> Result<(u32, HeaderHash)> {
        self.blocks.get_last()
    }

    /// Retrieve the last block info.
    pub fn last_block(&self) -> Result<BlockInfo> {
        let (_, hash) = self.last()?;
        Ok(self.get_blocks_by_hash(&[hash])?[0].clone())
    }

    /// Retrieve the last block height.
    pub fn last_block_height(&self) -> Result<u32> {
        Ok(self.last()?.0)
    }

    /// Retrieve the last block timestamp.
    pub fn last_block_timestamp(&self) -> Result<Timestamp> {
        let (_, hash) = self.last()?;
        Ok(self.get_blocks_by_hash(&[hash])?[0].header.timestamp)
    }

    /// Insert a given [`BlockInfo`] into the overlay.
    /// This function wraps all the logic of separating the block into specific
    /// data that can be fed into the different trees of the overlay.
    /// Upon success, the function returns the hash of the block that
    /// was appended to the overlay.
    /// Since we are adding to the overlay, we don't need to execute
    /// the writes atomically.
    pub fn add_block(&self, block: &BlockInfo) -> Result<HeaderHash> {
        // Store header
        self.headers.insert(slice::from_ref(&block.header))?;

        // Store block
        let blk: Block = Block::from_block_info(block);
        let txs_hashes = blk.txs.clone();
        let block_hash = self.blocks.insert(&[blk])?[0];
        let block_hash_vec = [block_hash];

        // Store block order
        self.blocks.insert_order(&[block.header.height], &block_hash_vec)?;

        // Store transactions
        self.transactions.insert(&block.txs)?;

        // Store transactions locations
        self.transactions.insert_location(&txs_hashes, block.header.height)?;

        Ok(block_hash)
    }

    /// Check if the given [`BlockInfo`] is in the database and all trees.
    pub fn has_block(&self, block: &BlockInfo) -> Result<bool> {
        let blockhash = match self.blocks.get_order(&[block.header.height], true) {
            Ok(v) => v[0].unwrap(),
            Err(_) => return Ok(false),
        };

        // Check if we have all transactions
        let txs: Vec<TransactionHash> = block.txs.iter().map(|tx| tx.hash()).collect();
        if self.transactions.get(&txs, true).is_err() {
            return Ok(false)
        }

        // Check provided info produces the same hash
        Ok(blockhash == block.hash())
    }

    /// Retrieve [`Header`]s by given hashes. Fails if any of them is not found.
    pub fn get_headers_by_hash(&self, hashes: &[HeaderHash]) -> Result<Vec<Header>> {
        let headers = self.headers.get(hashes, true)?;
        let ret: Vec<Header> = headers.iter().map(|x| x.clone().unwrap()).collect();

        Ok(ret)
    }

    /// Retrieve [`BlockInfo`]s by given hashes. Fails if any of them is not found.
    pub fn get_blocks_by_hash(&self, hashes: &[HeaderHash]) -> Result<Vec<BlockInfo>> {
        let blocks = self.blocks.get(hashes, true)?;
        let blocks: Vec<Block> = blocks.iter().map(|x| x.clone().unwrap()).collect();
        let ret = self.get_blocks_infos(&blocks)?;

        Ok(ret)
    }

    /// Retrieve all [`BlockInfo`] for given slice of [`Block`].
    /// Fails if any of them is not found.
    fn get_blocks_infos(&self, blocks: &[Block]) -> Result<Vec<BlockInfo>> {
        let mut ret = Vec::with_capacity(blocks.len());
        for block in blocks {
            let headers = self.headers.get(&[block.header], true)?;
            // Since we used strict get, it's safe to unwrap here
            let header = headers[0].clone().unwrap();

            let txs = self.transactions.get(&block.txs, true)?;
            let txs = txs.iter().map(|x| x.clone().unwrap()).collect();

            let info = BlockInfo::new(header, txs, block.signature);
            ret.push(info);
        }

        Ok(ret)
    }

    /// Retrieve [`Block`]s by given hashes and return their transaction hashes.
    pub fn get_blocks_txs_hashes(&self, hashes: &[HeaderHash]) -> Result<Vec<TransactionHash>> {
        let blocks = self.blocks.get(hashes, true)?;
        let mut ret = vec![];
        for block in blocks {
            ret.extend_from_slice(&block.unwrap().txs);
        }

        Ok(ret)
    }

    /// Checkpoint overlay so we can revert to it, if needed.
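    ///
    /// A minimal sketch of the checkpoint/revert pattern (illustrative only;
    /// `overlay` and `block` are placeholders):
    ///
    /// ```ignore
    /// overlay.checkpoint();
    /// if overlay.add_block(&block).is_err() {
    ///     // Drop everything written to the overlay since the checkpoint.
    ///     overlay.revert_to_checkpoint();
    /// }
    /// ```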
    pub fn checkpoint(&self) {
        self.overlay.lock().unwrap().checkpoint();
    }

    /// Revert to current overlay checkpoint.
    pub fn revert_to_checkpoint(&self) {
        self.overlay.lock().unwrap().revert_to_checkpoint();
    }

    /// Auxiliary function to create a full clone using SledDbOverlay::clone,
    /// generating new pointers for the underlying overlays.
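    ///
    /// A minimal sketch (illustrative only; useful when a fork needs to be
    /// validated without touching the original overlay):
    ///
    /// ```ignore
    /// let cloned = overlay.full_clone()?;
    /// // Changes made through `cloned` do not affect `overlay`.
    /// cloned.lock().unwrap().add_block(&candidate_block)?;
    /// ```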
    pub fn full_clone(&self) -> Result<BlockchainOverlayPtr> {
        let overlay = Arc::new(Mutex::new(self.overlay.lock().unwrap().clone()));
        let headers = HeaderStoreOverlay::new(&overlay)?;
        let blocks = BlockStoreOverlay::new(&overlay)?;
        let transactions = TxStoreOverlay::new(&overlay)?;
        let contracts = ContractStoreOverlay::new(&overlay)?;

        Ok(Arc::new(Mutex::new(Self { overlay, headers, blocks, transactions, contracts })))
    }
}

/// Parse a sled record in the form of a tuple (`key`, `value`).
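///
/// A minimal sketch (illustrative only; assumes both the key and the value in
/// the iterated tree were written with `darkfi_serial::serialize`):
///
/// ```ignore
/// for record in tree.iter() {
///     let (hash, block): (HeaderHash, Block) = parse_record(record?)?;
/// }
/// ```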
pub fn parse_record<T1: Decodable, T2: Decodable>(record: (IVec, IVec)) -> Result<(T1, T2)> {
    let key = deserialize(&record.0)?;
    let value = deserialize(&record.1)?;

    Ok((key, value))
}

/// Parse a sled record with a u32 key, encoded in Big Endian bytes,
/// in the form of a tuple (`key`, `value`).
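///
/// A minimal sketch (illustrative only; the block order tree, for example, maps
/// big-endian `u32` block heights to [`HeaderHash`] values):
///
/// ```ignore
/// for record in order_tree.iter() {
///     let (height, hash): (u32, HeaderHash) = parse_u32_key_record(record?)?;
/// }
/// ```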
pub fn parse_u32_key_record<T: Decodable>(record: (IVec, IVec)) -> Result<(u32, T)> {
    let key_bytes: [u8; 4] = record.0.as_ref().try_into().unwrap();
    let key = u32::from_be_bytes(key_bytes);
    let value = deserialize(&record.1)?;

    Ok((key, value))
}

/// Parse a sled record with a u64 key, encoded in Big Endian bytes,
/// in the form of a tuple (`key`, `value`).
pub fn parse_u64_key_record<T: Decodable>(record: (IVec, IVec)) -> Result<(u64, T)> {
    let key_bytes: [u8; 8] = record.0.as_ref().try_into().unwrap();
    let key = u64::from_be_bytes(key_bytes);
    let value = deserialize(&record.1)?;

    Ok((key, value))
}

#[cfg(feature = "async-serial")]
/// Parse a sled record in the form of a tuple (`key`, `value`).
pub async fn parse_record_async<T1: AsyncDecodable, T2: AsyncDecodable>(
    record: (IVec, IVec),
) -> Result<(T1, T2)> {
    let key = deserialize_async(&record.0).await?;
    let value = deserialize_async(&record.1).await?;

    Ok((key, value))
}

#[cfg(feature = "async-serial")]
/// Parse a sled record with a u32 key, encoded in Big Endian bytes,
/// in the form of a tuple (`key`, `value`).
pub async fn parse_u32_key_record_async<T: AsyncDecodable>(
    record: (IVec, IVec),
) -> Result<(u32, T)> {
    let key_bytes: [u8; 4] = record.0.as_ref().try_into().unwrap();
    let key = u32::from_be_bytes(key_bytes);
    let value = deserialize_async(&record.1).await?;

    Ok((key, value))
}

#[cfg(feature = "async-serial")]
/// Parse a sled record with a u64 key, encoded in Big Endian bytes,
/// in the form of a tuple (`key`, `value`).
pub async fn parse_u64_key_record_async<T: AsyncDecodable>(
    record: (IVec, IVec),
) -> Result<(u64, T)> {
    let key_bytes: [u8; 8] = record.0.as_ref().try_into().unwrap();
    let key = u64::from_be_bytes(key_bytes);
    let value = deserialize_async(&record.1).await?;

    Ok((key, value))
}

#[cfg(test)]
mod tests {
    use crate::validator::pow::{RANDOMX_KEY_CHANGE_DELAY, RANDOMX_KEY_CHANGING_HEIGHT};

    /// Compute the RandomX VM current and next key heights, based on
    /// provided key changing height and delay.
    fn get_randomx_vm_keys_heights(last: u32) -> (u32, Option<u32>) {
        // Check if we passed the first key change height
        if last <= RANDOMX_KEY_CHANGING_HEIGHT {
            // Genesis is our current
            let current = 0;

            // Check if last height is the next key height
            let next = if last == RANDOMX_KEY_CHANGING_HEIGHT { Some(last) } else { None };

            return (current, next)
        }

        // Find the current and next key based on distance of last
        // height from the key change height.
        let distance = last % RANDOMX_KEY_CHANGING_HEIGHT;

        // When distance is 0, current key is the last_height - RANDOMX_KEY_CHANGING_HEIGHT
        // height, while last is the next key.
        if distance == 0 {
            return (last - RANDOMX_KEY_CHANGING_HEIGHT, Some(last))
        }

        // When distance is less than key change delay, current key
        // is the last_height - (distance + RANDOMX_KEY_CHANGING_HEIGHT) height,
        // while the last_height - distance height is the next key.
        if distance < RANDOMX_KEY_CHANGE_DELAY {
            return (last - (distance + RANDOMX_KEY_CHANGING_HEIGHT), Some(last - distance))
        }

        // When distance is greater or equal to key change delay,
        // current key is the last_height - distance height and we
        // don't know the next key height.
        let current = last - distance;
        (current, None)
    }

    #[test]
    fn test_randomx_keys_retrieval_logic() {
        // last < RANDOMX_KEY_CHANGING_HEIGHT(2048)
        let (current, next) = get_randomx_vm_keys_heights(2047);
        assert_eq!(current, 0);
        assert!(next.is_none());

        // last == RANDOMX_KEY_CHANGING_HEIGHT(2048)
        let (current, next) = get_randomx_vm_keys_heights(2048);
        assert_eq!(current, 0);
        assert_eq!(next, Some(2048));

        // last > RANDOMX_KEY_CHANGING_HEIGHT(2048)
        // last % RANDOMX_KEY_CHANGING_HEIGHT(2048) == 0
        let (current, next) = get_randomx_vm_keys_heights(4096);
        assert_eq!(current, 2048);
        assert_eq!(next, Some(4096));

        // last % RANDOMX_KEY_CHANGING_HEIGHT(2048) < RANDOMX_KEY_CHANGE_DELAY(64)
        let (current, next) = get_randomx_vm_keys_heights(4097);
        assert_eq!(current, 2048);
        assert_eq!(next, Some(4096));

        // last % RANDOMX_KEY_CHANGING_HEIGHT(2048) == RANDOMX_KEY_CHANGE_DELAY(64)
        let (current, next) = get_randomx_vm_keys_heights(4160);
        assert_eq!(current, 4096);
        assert!(next.is_none());

        // last % RANDOMX_KEY_CHANGING_HEIGHT(2048) > RANDOMX_KEY_CHANGE_DELAY(64)
        let (current, next) = get_randomx_vm_keys_heights(4161);
        assert_eq!(current, 4096);
        assert!(next.is_none());
    }
}