darkfi/validator/
consensus.rs

/* This file is part of DarkFi (https://dark.fi)
 *
 * Copyright (C) 2020-2026 Dyne.org foundation
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */

use std::collections::{HashMap, HashSet};

use darkfi_sdk::{
    crypto::MerkleTree,
    monotree::{self, Monotree},
    tx::TransactionHash,
};
use darkfi_serial::{async_trait, SerialDecodable, SerialEncodable};
use num_bigint::BigUint;
use sled_overlay::database::SledDbOverlayStateDiff;
use smol::lock::RwLock;
use tracing::{debug, error, info, warn};

use crate::{
    blockchain::{
        block_store::{BlockDifficulty, BlockRanks},
        BlockInfo, Blockchain, BlockchainOverlay, BlockchainOverlayPtr, Header, HeaderHash,
    },
    runtime::vm_runtime::GAS_LIMIT,
    tx::{Transaction, MAX_TX_CALLS},
    validator::{
        pow::{PoWModule, RANDOMX_KEY_CHANGE_DELAY, RANDOMX_KEY_CHANGING_HEIGHT},
        utils::{best_fork_index, block_rank, find_extended_fork_index},
        verification::{verify_proposal, verify_transaction},
    },
    zk::VerifyingKey,
    Error, Result,
};

/// Gas limit for the total block transactions (50 full transactions).
pub const BLOCK_GAS_LIMIT: u64 = GAS_LIMIT * MAX_TX_CALLS as u64 * 50;
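// Note: this budget is what `Fork::unproposed_txs` enforces when assembling a
// block: it sums each candidate transaction's gas and stops once the next one
// would push the total past `BLOCK_GAS_LIMIT`.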

/// This struct represents the information required by the consensus algorithm.
pub struct Consensus {
    /// Canonical (confirmed) blockchain
    pub blockchain: Blockchain,
    /// Fork size (length) after which it can be confirmed
    pub confirmation_threshold: usize,
    /// Fork chains containing block proposals
    pub forks: RwLock<Vec<Fork>>,
    /// Canonical blockchain PoW module state
    pub module: RwLock<PoWModule>,
    /// Lock to restrict when proposal appends can happen
    pub append_lock: RwLock<()>,
}

impl Consensus {
    /// Generate a new Consensus state.
    pub fn new(
        blockchain: Blockchain,
        confirmation_threshold: usize,
        pow_target: u32,
        pow_fixed_difficulty: Option<BigUint>,
    ) -> Result<Self> {
        let forks = RwLock::new(vec![]);

        let module = RwLock::new(PoWModule::new(
            blockchain.clone(),
            pow_target,
            pow_fixed_difficulty,
            None,
        )?);

        let append_lock = RwLock::new(());

        Ok(Self { blockchain, confirmation_threshold, forks, module, append_lock })
    }

    /// Generate a new empty fork.
    pub async fn generate_empty_fork(&self) -> Result<()> {
        debug!(target: "validator::consensus::generate_empty_fork", "Generating new empty fork...");
        let mut forks = self.forks.write().await;
        // Check if we already have an empty fork
        for fork in forks.iter() {
            if fork.proposals.is_empty() {
                debug!(target: "validator::consensus::generate_empty_fork", "An empty fork already exists.");
                drop(forks);
                return Ok(())
            }
        }
        let fork = Fork::new(self.blockchain.clone(), self.module.read().await.clone()).await?;
        forks.push(fork);
        drop(forks);
        debug!(target: "validator::consensus::generate_empty_fork", "Fork generated!");
        Ok(())
    }

    /// Given a proposal, the node verifies it and finds which fork it extends.
    /// If the proposal extends the canonical blockchain, a new fork chain is created.
    pub async fn append_proposal(&self, proposal: &Proposal, verify_fees: bool) -> Result<()> {
        debug!(target: "validator::consensus::append_proposal", "Appending proposal {}", proposal.hash);

        // Check if proposal already exists
        let lock = self.forks.read().await;
        for fork in lock.iter() {
            for p in fork.proposals.iter().rev() {
                if p == &proposal.hash {
                    drop(lock);
                    debug!(target: "validator::consensus::append_proposal", "Proposal {} already exists", proposal.hash);
                    return Err(Error::ProposalAlreadyExists)
                }
            }
        }
        // Check if proposal is canonical
        if let Ok(canonical_headers) =
            self.blockchain.blocks.get_order(&[proposal.block.header.height], true)
        {
            if canonical_headers[0].unwrap() == proposal.hash {
                drop(lock);
                debug!(target: "validator::consensus::append_proposal", "Proposal {} already exists", proposal.hash);
                return Err(Error::ProposalAlreadyExists)
            }
        }
        drop(lock);

        // Verify proposal and grab corresponding fork
        let (mut fork, index) = verify_proposal(self, proposal, verify_fees).await?;

        // Append proposal to the fork
        fork.append_proposal(proposal).await?;

        // TODO: to keep memory usage low, we should only append forks that
        // are higher ranking than our current best one

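        // The forks read lock was released before verification, so the fork
        // set may have changed in the meantime; the equality check below only
        // replaces the fork at `index` if its proposal sequence still matches
        // the one we extended, otherwise the extended fork is pushed as new.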
        // If a fork index was found, replace forks with the mutated one,
        // otherwise push the new fork.
        let mut lock = self.forks.write().await;
        match index {
            Some(i) => {
                if i < lock.len() && lock[i].proposals == fork.proposals[..fork.proposals.len() - 1]
                {
                    lock[i] = fork;
                } else {
                    lock.push(fork);
                }
            }
            None => {
                lock.push(fork);
            }
        }
        drop(lock);

        info!(target: "validator::consensus::append_proposal", "Appended proposal {}", proposal.hash);

        Ok(())
    }

    /// Given a proposal, find the fork chain it extends, and return its full clone.
    /// If the proposal extends a fork at a point other than its tail, a new fork is
    /// created and we re-apply the proposals up to the extending one. If the proposal
    /// extends canonical, a new fork is created. Additionally, we return the fork
    /// index if a new fork was not created, so the caller can replace the fork.
    pub async fn find_extended_fork(&self, proposal: &Proposal) -> Result<(Fork, Option<usize>)> {
        // Grab a lock over current forks
        let forks = self.forks.read().await;

        // Check if proposal extends any fork
        let found = find_extended_fork_index(&forks, proposal);
        if found.is_err() {
            if let Err(Error::ProposalAlreadyExists) = found {
                return Err(Error::ProposalAlreadyExists)
            }

            // Check if proposal extends canonical
            let (last_height, last_block) = self.blockchain.last()?;
            if proposal.block.header.previous != last_block ||
                proposal.block.header.height <= last_height
            {
                return Err(Error::ExtendedChainIndexNotFound)
            }

            // Check if we have an empty fork to use
            for (f_index, fork) in forks.iter().enumerate() {
                if fork.proposals.is_empty() {
                    return Ok((forks[f_index].full_clone()?, Some(f_index)))
                }
            }

            // Generate a new fork extending canonical
            let fork = Fork::new(self.blockchain.clone(), self.module.read().await.clone()).await?;
            return Ok((fork, None))
        }

        let (f_index, p_index) = found.unwrap();
        let original_fork = &forks[f_index];
        // Check if proposal extends fork at last proposal
        if p_index == (original_fork.proposals.len() - 1) {
            return Ok((original_fork.full_clone()?, Some(f_index)))
        }

        // Rebuild fork
        let mut fork = Fork::new(self.blockchain.clone(), self.module.read().await.clone()).await?;
        fork.proposals = original_fork.proposals[..p_index + 1].to_vec();
        fork.diffs = original_fork.diffs[..p_index + 1].to_vec();

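        // Replay the retained proposals one by one: apply each stored overlay
        // diff, then feed the block back through the PoW module so the rebuilt
        // fork's difficulty state and cached ranks match the original fork up
        // to the extended proposal.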
        // Retrieve proposals blocks from original fork
        let blocks = &original_fork.overlay.lock().unwrap().get_blocks_by_hash(&fork.proposals)?;
        for (index, block) in blocks.iter().enumerate() {
            // Apply block diffs
            fork.overlay.lock().unwrap().overlay.lock().unwrap().add_diff(&fork.diffs[index])?;

            // Grab next mine target and difficulty
            let (next_target, next_difficulty) = fork.module.next_mine_target_and_difficulty()?;

            // Calculate block rank
            let (target_distance_sq, hash_distance_sq) = block_rank(block, &next_target)?;

            // Update PoW module
            fork.module.append(&block.header, &next_difficulty)?;

            // Update fork ranks
            fork.targets_rank += target_distance_sq;
            fork.hashes_rank += hash_distance_sq;
        }

        // Rebuild fork contracts states monotree
        fork.compute_monotree()?;

        // Drop forks lock
        drop(forks);

        Ok((fork, None))
    }

    /// Check if the best fork's proposals can be confirmed.
    /// Consensus confirmation logic:
    /// - If the current best fork has reached a length greater than the security threshold,
    ///   and no other fork exists with the same rank, the first proposal(s) in that fork
    ///   can be appended to the canonical blockchain (confirmed).
    ///
    /// When the best fork can be confirmed, its first block(s) should be appended to
    /// canonical, and the forks should be rebuilt.
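    /// This method only performs the check and returns the index of the confirmable
    /// fork; appending the block(s) to canonical and rebuilding the forks is left to
    /// the caller.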
    pub async fn confirmation(&self) -> Result<Option<usize>> {
        debug!(target: "validator::consensus::confirmation", "Started confirmation check");

        // Grab best fork
        let forks = self.forks.read().await;
        let index = best_fork_index(&forks)?;
        let fork = &forks[index];

        // Check its length
        let length = fork.proposals.len();
        if length < self.confirmation_threshold {
            debug!(target: "validator::consensus::confirmation", "Nothing to confirm yet, best fork size: {length}");
            drop(forks);
            return Ok(None)
        }

        // Drop forks lock
        drop(forks);

        Ok(Some(index))
    }

    /// Auxiliary function to retrieve the fork header hash of the provided height.
    /// The fork is identified by the provided header hash.
    pub async fn get_fork_header_hash(
        &self,
        height: u32,
        fork_header: &HeaderHash,
    ) -> Result<Option<HeaderHash>> {
        // Grab a lock over current forks
        let forks = self.forks.read().await;

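        // Scan each fork's proposals from newest to oldest; the identifying
        // hash is typically at or near the fork tip, so the reverse scan
        // usually finds it early.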
        // Find the fork containing the provided header
        let mut found = None;
        'outer: for (index, fork) in forks.iter().enumerate() {
            for p in fork.proposals.iter().rev() {
                if p == fork_header {
                    found = Some(index);
                    break 'outer
                }
            }
        }
        if found.is_none() {
            drop(forks);
            return Ok(None)
        }
        let index = found.unwrap();

        // Grab header if it exists
        let header = forks[index].overlay.lock().unwrap().blocks.get_order(&[height], false)?[0];

        // Drop forks lock
        drop(forks);

        Ok(header)
    }

    /// Auxiliary function to retrieve the fork headers of the provided hashes.
    /// The fork is identified by the provided header hash. If the fork doesn't
    /// exist, an empty vector is returned.
    pub async fn get_fork_headers(
        &self,
        headers: &[HeaderHash],
        fork_header: &HeaderHash,
    ) -> Result<Vec<Header>> {
        // Grab a lock over current forks
        let forks = self.forks.read().await;

        // Find the fork containing the provided header
        let mut found = None;
        'outer: for (index, fork) in forks.iter().enumerate() {
            for p in fork.proposals.iter().rev() {
                if p == fork_header {
                    found = Some(index);
                    break 'outer
                }
            }
        }
        let Some(index) = found else {
            drop(forks);
            return Ok(vec![])
        };

        // Grab headers
        let headers = forks[index].overlay.lock().unwrap().get_headers_by_hash(headers)?;

        // Drop forks lock
        drop(forks);

        Ok(headers)
    }

    /// Auxiliary function to retrieve the fork proposals of the provided hashes.
    /// The fork is identified by the provided header hash. If the fork doesn't
    /// exist, an empty vector is returned.
    pub async fn get_fork_proposals(
        &self,
        headers: &[HeaderHash],
        fork_header: &HeaderHash,
    ) -> Result<Vec<Proposal>> {
        // Grab a lock over current forks
        let forks = self.forks.read().await;

        // Find the fork containing the provided header
        let mut found = None;
        'outer: for (index, fork) in forks.iter().enumerate() {
            for p in fork.proposals.iter().rev() {
                if p == fork_header {
                    found = Some(index);
                    break 'outer
                }
            }
        }
        let Some(index) = found else {
            drop(forks);
            return Ok(vec![])
        };

        // Grab proposals
        let blocks = forks[index].overlay.lock().unwrap().get_blocks_by_hash(headers)?;
        let mut proposals = Vec::with_capacity(blocks.len());
        for block in blocks {
            proposals.push(Proposal::new(block));
        }

        // Drop forks lock
        drop(forks);

        Ok(proposals)
    }

    /// Auxiliary function to retrieve a fork's proposals, starting from the provided tip.
    /// If the provided tip is too far behind, unknown, or the fork doesn't exist, an
    /// empty vector is returned. The fork is identified by the optionally provided
    /// header hash. If it's `None`, we use our best fork.
    pub async fn get_fork_proposals_after(
        &self,
        tip: HeaderHash,
        fork_tip: Option<HeaderHash>,
        limit: u32,
    ) -> Result<Vec<Proposal>> {
        // Grab a lock over current forks
        let forks = self.forks.read().await;

        // Create return vector
        let mut proposals = vec![];

        // Grab fork index to use
        let index = match fork_tip {
            Some(fork_tip) => {
                let mut found = None;
                'outer: for (index, fork) in forks.iter().enumerate() {
                    for p in fork.proposals.iter().rev() {
                        if p == &fork_tip {
                            found = Some(index);
                            break 'outer
                        }
                    }
                }
                if found.is_none() {
                    drop(forks);
                    return Ok(proposals)
                }
                found.unwrap()
            }
            None => best_fork_index(&forks)?,
        };

        // Check tip exists
        let Ok(existing_tips) = forks[index].overlay.lock().unwrap().get_blocks_by_hash(&[tip])
        else {
            drop(forks);
            return Ok(proposals)
        };

        // Check tip is not far behind
        let last_block_height = forks[index].overlay.lock().unwrap().last()?.0;
        if last_block_height - existing_tips[0].header.height >= limit {
            drop(forks);
            return Ok(proposals)
        }

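        // The response is assembled from two sources: canonical blocks
        // confirmed after the requested tip, followed by every proposal
        // currently held by the selected fork.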
434        let headers = self.blockchain.blocks.get_all_after(existing_tips[0].header.height)?;
435        let blocks = self.blockchain.get_blocks_by_hash(&headers)?;
436        for block in blocks {
437            proposals.push(Proposal::new(block));
438        }
439        let blocks =
440            forks[index].overlay.lock().unwrap().get_blocks_by_hash(&forks[index].proposals)?;
441        for block in blocks {
442            proposals.push(Proposal::new(block));
443        }
444
445        // Drop forks lock
446        drop(forks);
447
448        Ok(proposals)
449    }
450
    /// Auxiliary function to grab the current mining RandomX key,
    /// based on the next block height.
    /// If no forks exist, returns the canonical key.
    pub async fn current_mining_randomx_key(&self) -> Result<HeaderHash> {
        // Grab a lock over current forks
        let forks = self.forks.read().await;

        // Grab next block height and current keys.
        // If no forks exist, use canonical keys
        let (next_block_height, rx_keys) = if forks.is_empty() {
            let (next_block_height, _) = self.blockchain.last()?;
            (next_block_height + 1, self.module.read().await.darkfi_rx_keys)
        } else {
            // Grab best fork and its last proposal
            let fork = &forks[best_fork_index(&forks)?];
            let last = fork.last_proposal()?;
            (last.block.header.height + 1, fork.module.darkfi_rx_keys)
        };

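        // `darkfi_rx_keys` holds what is effectively a (current, next) key
        // pair: element 0 is the active RandomX key and element 1 the
        // upcoming one, selected below based on the next block height.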
        // We only use the next key when the next block is the
        // height changing one.
        if next_block_height > RANDOMX_KEY_CHANGING_HEIGHT &&
            next_block_height % RANDOMX_KEY_CHANGING_HEIGHT == RANDOMX_KEY_CHANGE_DELAY
        {
            Ok(rx_keys.1)
        } else {
            Ok(rx_keys.0)
        }
    }

    /// Auxiliary function to grab a full clone of the current best fork.
    pub async fn best_current_fork(&self) -> Result<Fork> {
        let forks = self.forks.read().await;
        let index = best_fork_index(&forks)?;
        forks[index].full_clone()
    }

    /// Auxiliary function to retrieve the current best fork's last header.
    /// If no forks exist, grab the last header from canonical.
    pub async fn best_fork_last_header(&self) -> Result<(u32, HeaderHash)> {
        // Grab a lock over current forks
        let forks = self.forks.read().await;

        // Check if node has any forks
        if forks.is_empty() {
            drop(forks);
            return self.blockchain.last()
        }

        // Grab best fork
        let fork = &forks[best_fork_index(&forks)?];

        // Grab its last header
        let last = fork.last_proposal()?;
        drop(forks);
        Ok((last.block.header.height, last.hash))
    }

    /// Auxiliary function to purge current forks and reset the ones starting
    /// with the provided prefix, excluding the provided confirmed fork.
    /// Additionally, remove confirmed transactions from the forks' mempools,
    /// along with the unproposed transactions sled trees.
    /// This function assumes that the prefix blocks have already been appended
    /// to the canonical chain from the confirmed fork.
    pub async fn reset_forks(
        &self,
        prefix: &[HeaderHash],
        confirmed_fork_index: &usize,
        confirmed_txs: &[Transaction],
    ) -> Result<()> {
        // Grab a lock over current forks
        let mut forks = self.forks.write().await;

        // Find all the forks that start with the provided prefix,
        // excluding the confirmed fork index, and remove their prefixed
        // proposals, and their corresponding diffs.
        // If a fork doesn't start with the provided prefix, drop it.
        // Additionally, keep track of all the referenced trees in
        // overlays that are valid.
        let excess = prefix.len();
        let prefix_last_index = excess - 1;
        let prefix_last = prefix.last().unwrap();
        let mut keep = vec![true; forks.len()];
        let mut referenced_trees = HashSet::new();
        let mut referenced_txs = HashSet::new();
        let confirmed_txs_hashes: Vec<TransactionHash> =
            confirmed_txs.iter().map(|tx| tx.hash()).collect();
        for (index, fork) in forks.iter_mut().enumerate() {
            if &index == confirmed_fork_index {
                // Store its tree references
                let fork_overlay = fork.overlay.lock().unwrap();
                let overlay = fork_overlay.overlay.lock().unwrap();
                for tree in &overlay.state.initial_tree_names {
                    referenced_trees.insert(tree.clone());
                }
                for tree in &overlay.state.new_tree_names {
                    referenced_trees.insert(tree.clone());
                }
                for tree in overlay.state.dropped_trees.keys() {
                    referenced_trees.insert(tree.clone());
                }
                // Remove confirmed proposals txs from fork's mempool
                fork.mempool.retain(|tx| !confirmed_txs_hashes.contains(tx));
                // Store its txs references
                for tx in &fork.mempool {
                    referenced_txs.insert(*tx);
                }
                drop(overlay);
                drop(fork_overlay);
                continue
            }

            if fork.proposals.is_empty() ||
                prefix_last_index >= fork.proposals.len() ||
                &fork.proposals[prefix_last_index] != prefix_last
            {
                keep[index] = false;
                continue
            }

            // Remove confirmed proposals txs from fork's mempool
            fork.mempool.retain(|tx| !confirmed_txs_hashes.contains(tx));
            // Store its txs references
            for tx in &fork.mempool {
                referenced_txs.insert(*tx);
            }

            // Remove the committed differences
            let rest_proposals = fork.proposals.split_off(excess);
            let rest_diffs = fork.diffs.split_off(excess);
            let mut diffs = fork.diffs.clone();
            fork.proposals = rest_proposals;
            fork.diffs = rest_diffs;
            for diff in diffs.iter_mut() {
                fork.overlay.lock().unwrap().overlay.lock().unwrap().remove_diff(diff);
            }

            // Store its tree references
            let fork_overlay = fork.overlay.lock().unwrap();
            let overlay = fork_overlay.overlay.lock().unwrap();
            for tree in &overlay.state.initial_tree_names {
                referenced_trees.insert(tree.clone());
            }
            for tree in &overlay.state.new_tree_names {
                referenced_trees.insert(tree.clone());
            }
            for tree in overlay.state.dropped_trees.keys() {
                referenced_trees.insert(tree.clone());
            }
            drop(overlay);
            drop(fork_overlay);
        }

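        // Second pass: with the surviving forks' references collected above,
        // gather the trees and pending transactions that are only referenced
        // by the forks being dropped, so they can be removed afterwards.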
        // Find the trees and pending txs that are no longer referenced by valid forks
        let mut dropped_trees = HashSet::new();
        let mut dropped_txs = HashSet::new();
        for (index, fork) in forks.iter_mut().enumerate() {
            if keep[index] {
                continue
            }
            for tx in &fork.mempool {
                if !referenced_txs.contains(tx) {
                    dropped_txs.insert(*tx);
                }
            }
            let fork_overlay = fork.overlay.lock().unwrap();
            let overlay = fork_overlay.overlay.lock().unwrap();
            for tree in &overlay.state.initial_tree_names {
                if !referenced_trees.contains(tree) {
                    dropped_trees.insert(tree.clone());
                }
            }
            for tree in &overlay.state.new_tree_names {
                if !referenced_trees.contains(tree) {
                    dropped_trees.insert(tree.clone());
                }
            }
            for tree in overlay.state.dropped_trees.keys() {
                if !referenced_trees.contains(tree) {
                    dropped_trees.insert(tree.clone());
                }
            }
            drop(overlay);
            drop(fork_overlay);
        }

        // Drop unreferenced trees from the database
        for tree in dropped_trees {
            self.blockchain.sled_db.drop_tree(tree)?;
        }

        // Drop invalid forks
        let mut iter = keep.iter();
        forks.retain(|_| *iter.next().unwrap());

        // Remove confirmed proposals txs from the unproposed txs sled tree
        self.blockchain.remove_pending_txs_hashes(&confirmed_txs_hashes)?;

        // Remove unreferenced txs from the unproposed txs sled tree
        self.blockchain.remove_pending_txs_hashes(&Vec::from_iter(dropped_txs))?;

        // Drop forks lock
        drop(forks);

        Ok(())
    }

    /// Auxiliary function to fully purge current forks and leave only a new empty fork.
    pub async fn purge_forks(&self) -> Result<()> {
        debug!(target: "validator::consensus::purge_forks", "Purging current forks...");
        let mut forks = self.forks.write().await;
        *forks = vec![Fork::new(self.blockchain.clone(), self.module.read().await.clone()).await?];
        drop(forks);
        debug!(target: "validator::consensus::purge_forks", "Forks purged!");
        Ok(())
    }

    /// Auxiliary function to reset the PoW module.
    pub async fn reset_pow_module(&self) -> Result<()> {
        debug!(target: "validator::consensus::reset_pow_module", "Resetting PoW module...");

        let mut module = self.module.write().await;
        *module = PoWModule::new(
            self.blockchain.clone(),
            module.target,
            module.fixed_difficulty.clone(),
            None,
        )?;
        drop(module);
        debug!(target: "validator::consensus::reset_pow_module", "PoW module reset successfully!");
        Ok(())
    }

    /// Auxiliary function to check current contracts states
    /// Monotree (SMT) validity in all active forks and canonical.
    pub async fn healthcheck(&self) -> Result<()> {
        // Grab a lock over current forks
        let lock = self.forks.read().await;

        // Rebuild current canonical contract states monotree
        let state_monotree = self.blockchain.get_state_monotree()?;

        // Check that the root matches last block header state root
        let Some(state_root) = state_monotree.get_headroot()? else {
            return Err(Error::ContractsStatesRootNotFoundError);
        };
        let last_block_state_root = self.blockchain.last_header()?.state_root;
        if state_root != last_block_state_root {
            return Err(Error::ContractsStatesRootError(
                blake3::Hash::from_bytes(state_root).to_string(),
                blake3::Hash::from_bytes(last_block_state_root).to_string(),
            ));
        }

        // Check each fork health
        for fork in lock.iter() {
            fork.healthcheck()?;
        }

        Ok(())
    }
}

/// This struct represents a block proposal, used for consensus.
#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
pub struct Proposal {
    /// Block hash
    pub hash: HeaderHash,
    /// Block data
    pub block: BlockInfo,
}

impl Proposal {
    pub fn new(block: BlockInfo) -> Self {
        let hash = block.hash();
        Self { hash, block }
    }
}

impl From<Proposal> for BlockInfo {
    fn from(proposal: Proposal) -> BlockInfo {
        proposal.block
    }
}

/// Struct representing a forked blockchain state.
///
/// An overlay over the original blockchain is used, containing all pending to-write
/// records. Additionally, each fork keeps a vector of valid pending transaction hashes,
/// in order of arrival, and the proposal hashes sequence, for validations.
#[derive(Clone)]
pub struct Fork {
    /// Canonical (confirmed) blockchain
    pub blockchain: Blockchain,
    /// Overlay cache over canonical Blockchain
    pub overlay: BlockchainOverlayPtr,
    /// Current PoW module state
    pub module: PoWModule,
    /// Current contracts states Monotree (SMT)
    pub state_monotree: Monotree<monotree::MemoryDb>,
    /// Fork proposal hashes sequence
    pub proposals: Vec<HeaderHash>,
    /// Fork proposal overlay diffs sequence
    pub diffs: Vec<SledDbOverlayStateDiff>,
    /// Valid pending transaction hashes
    pub mempool: Vec<TransactionHash>,
    /// Current fork mining targets rank, cached for better performance
    pub targets_rank: BigUint,
    /// Current fork hashes rank, cached for better performance
    pub hashes_rank: BigUint,
}

impl Fork {
    pub async fn new(blockchain: Blockchain, module: PoWModule) -> Result<Self> {
        let mempool = blockchain.get_pending_txs()?.iter().map(|tx| tx.hash()).collect();
        let overlay = BlockchainOverlay::new(&blockchain)?;
        // Build current contract states monotree
        let state_monotree = overlay.lock().unwrap().get_state_monotree()?;
        // Retrieve last block difficulty to access current ranks
        let last_difficulty = blockchain.last_block_difficulty()?;
        let targets_rank = last_difficulty.ranks.targets_rank;
        let hashes_rank = last_difficulty.ranks.hashes_rank;
        Ok(Self {
            blockchain,
            overlay,
            module,
            state_monotree,
            proposals: vec![],
            diffs: vec![],
            mempool,
            targets_rank,
            hashes_rank,
        })
    }

    /// Auxiliary function to append a proposal and update current fork rank.
    pub async fn append_proposal(&mut self, proposal: &Proposal) -> Result<()> {
        // Grab next mine target and difficulty
        let (next_target, next_difficulty) = self.module.next_mine_target_and_difficulty()?;

        // Calculate block rank
        let (target_distance_sq, hash_distance_sq) = block_rank(&proposal.block, &next_target)?;

        // Update fork ranks
        self.targets_rank += target_distance_sq.clone();
        self.hashes_rank += hash_distance_sq.clone();

        // Generate block difficulty and update PoW module
        let cumulative_difficulty =
            self.module.cumulative_difficulty.clone() + next_difficulty.clone();
        let ranks = BlockRanks::new(
            target_distance_sq,
            self.targets_rank.clone(),
            hash_distance_sq,
            self.hashes_rank.clone(),
        );
        let block_difficulty = BlockDifficulty::new(
            proposal.block.header.height,
            proposal.block.header.timestamp,
            next_difficulty,
            cumulative_difficulty,
            ranks,
        );
        self.module.append_difficulty(&self.overlay, &proposal.block.header, block_difficulty)?;

        // Push proposal's hash
        self.proposals.push(proposal.hash);

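        // Each recorded diff is expected to hold just this proposal's overlay
        // changes (the previously recorded diffs are passed in so they can be
        // excluded); `find_extended_fork` re-applies them in order when
        // rebuilding a fork.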
        // Push proposal overlay diff
        self.diffs.push(self.overlay.lock().unwrap().overlay.lock().unwrap().diff(&self.diffs)?);

        Ok(())
    }

    /// Auxiliary function to retrieve the last proposal.
    pub fn last_proposal(&self) -> Result<Proposal> {
        let block = if let Some(last) = self.proposals.last() {
            self.overlay.lock().unwrap().get_blocks_by_hash(&[*last])?[0].clone()
        } else {
            self.overlay.lock().unwrap().last_block()?
        };

        Ok(Proposal::new(block))
    }

    /// Auxiliary function to compute the fork's next block height.
    pub fn get_next_block_height(&self) -> Result<u32> {
        let proposal = self.last_proposal()?;
        Ok(proposal.block.header.height + 1)
    }

    /// Auxiliary function to retrieve unproposed valid transactions,
    /// along with their total gas used and total paid fees.
    ///
    /// Note: Always remember to purge new trees from the overlay if not needed.
    pub async fn unproposed_txs(
        &self,
        verifying_block_height: u32,
        verify_fees: bool,
    ) -> Result<(Vec<Transaction>, u64, u64)> {
        // Check if our mempool is not empty
        if self.mempool.is_empty() {
            return Ok((vec![], 0, 0))
        }

        // Transactions Merkle tree
        let mut tree = MerkleTree::new(1);

        // Total gas accumulators
        let mut total_gas_used = 0;
        let mut total_gas_paid = 0;

        // Map of ZK proof verifying keys for the current transaction batch
        let mut vks: HashMap<[u8; 32], HashMap<String, VerifyingKey>> = HashMap::new();

        // Grab all current proposals transactions hashes
        let proposals_txs = self.overlay.lock().unwrap().get_blocks_txs_hashes(&self.proposals)?;

        // Iterate through all pending transactions in the fork's mempool
        let mut unproposed_txs = vec![];
        for tx in &self.mempool {
            // If the hash is contained in the proposals transactions vec, skip it
            if proposals_txs.contains(tx) {
                continue
            }

            // Retrieve the actual unproposed transaction
            let unproposed_tx =
                self.blockchain.transactions.get_pending(&[*tx], true)?[0].clone().unwrap();

            // Update the verifying keys map
            for call in &unproposed_tx.calls {
                vks.entry(call.data.contract_id.to_bytes()).or_default();
            }

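            // Each candidate is verified against a checkpoint of the overlay,
            // so a failing transaction can be rolled back without discarding
            // the state changes accumulated by the candidates accepted so far.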
            // Verify the transaction against current state
            self.overlay.lock().unwrap().checkpoint();
            let gas_data = match verify_transaction(
                &self.overlay,
                verifying_block_height,
                self.module.target,
                &unproposed_tx,
                &mut tree,
                &mut vks,
                verify_fees,
            )
            .await
            {
                Ok(gas_values) => gas_values,
                Err(e) => {
                    debug!(target: "validator::consensus::unproposed_txs", "Transaction verification failed: {e}");
                    self.overlay.lock().unwrap().overlay.lock().unwrap().purge_new_trees()?;
                    self.overlay.lock().unwrap().revert_to_checkpoint()?;
                    continue
                }
            };

            // Store the gas used by the verified transaction
            let tx_gas_used = gas_data.total_gas_used();

            // Calculate current accumulated gas usage
            let accumulated_gas_usage = total_gas_used + tx_gas_used;

            // Check gas limit - if accumulated gas used exceeds it, break out of loop
            if accumulated_gas_usage > BLOCK_GAS_LIMIT {
                warn!(
                    target: "validator::consensus::unproposed_txs",
                    "Retrieving transaction {tx} would exceed configured unproposed transaction gas limit: {accumulated_gas_usage} - {BLOCK_GAS_LIMIT}"
                );
                self.overlay.lock().unwrap().overlay.lock().unwrap().purge_new_trees()?;
                self.overlay.lock().unwrap().revert_to_checkpoint()?;
                break
            }

            // Update accumulated total gas
            total_gas_used += tx_gas_used;
            total_gas_paid += gas_data.paid;

            // Push the verified transaction into the unproposed transactions vector
            unproposed_txs.push(unproposed_tx);
        }

        Ok((unproposed_txs, total_gas_used, total_gas_paid))
    }

    /// Auxiliary function to create a full clone using BlockchainOverlay::full_clone.
    /// Changes to this copy don't affect the original fork overlay records, since the
    /// underlying overlay pointer has been updated to the cloned one.
    pub fn full_clone(&self) -> Result<Self> {
        let blockchain = self.blockchain.clone();
        let overlay = self.overlay.lock().unwrap().full_clone()?;
        let module = self.module.clone();
        let state_monotree = self.state_monotree.clone();
        let proposals = self.proposals.clone();
        let diffs = self.diffs.clone();
        let mempool = self.mempool.clone();
        let targets_rank = self.targets_rank.clone();
        let hashes_rank = self.hashes_rank.clone();

        Ok(Self {
            blockchain,
            overlay,
            module,
            state_monotree,
            proposals,
            diffs,
            mempool,
            targets_rank,
            hashes_rank,
        })
    }

    /// Build current contract states monotree.
    pub fn compute_monotree(&mut self) -> Result<()> {
        self.state_monotree = self.overlay.lock().unwrap().get_state_monotree()?;
        Ok(())
    }

    /// Auxiliary function to check current contracts states
    /// Monotree (SMT) validity.
    ///
    /// Note: This should be executed on fresh forks and/or when
    ///       a fork doesn't contain changes over the last appended
    ///       proposal.
    pub fn healthcheck(&self) -> Result<()> {
        // Rebuild current contract states monotree
        let state_monotree = self.overlay.lock().unwrap().get_state_monotree()?;

        // Check that it matches the fork's tree
        let Some(state_root) = state_monotree.get_headroot()? else {
            return Err(Error::ContractsStatesRootNotFoundError);
        };
        let Some(fork_state_root) = self.state_monotree.get_headroot()? else {
            return Err(Error::ContractsStatesRootNotFoundError);
        };
        if state_root != fork_state_root {
            return Err(Error::ContractsStatesRootError(
                blake3::Hash::from_bytes(state_root).to_string(),
                blake3::Hash::from_bytes(fork_state_root).to_string(),
            ));
        }

        // Check that the root matches last block header state root
        let last_block_state_root = self.last_proposal()?.block.header.state_root;
        if state_root != last_block_state_root {
            return Err(Error::ContractsStatesRootError(
                blake3::Hash::from_bytes(state_root).to_string(),
                blake3::Hash::from_bytes(last_block_state_root).to_string(),
            ));
        }

        Ok(())
    }

    /// Auxiliary function to purge all new trees from the fork
    /// overlay.
    pub fn purge_new_trees(&self) {
        if let Err(e) = self.overlay.lock().unwrap().overlay.lock().unwrap().purge_new_trees() {
            error!(target: "validator::consensus::fork::purge_new_trees", "Purging new trees in the overlay failed: {e}");
        }
    }
}