use std::collections::{HashMap, HashSet};

use darkfi_sdk::{
    crypto::MerkleTree,
    monotree::{self, Monotree},
    tx::TransactionHash,
};
use darkfi_serial::{async_trait, SerialDecodable, SerialEncodable};
use num_bigint::BigUint;
use sled_overlay::database::SledDbOverlayStateDiff;
use smol::lock::RwLock;
use tracing::{debug, error, info, warn};

use crate::{
    blockchain::{
        block_store::{BlockDifficulty, BlockRanks},
        BlockInfo, Blockchain, BlockchainOverlay, BlockchainOverlayPtr, Header, HeaderHash,
    },
    runtime::vm_runtime::GAS_LIMIT,
    tx::{Transaction, MAX_TX_CALLS},
    validator::{
        pow::{PoWModule, RANDOMX_KEY_CHANGE_DELAY, RANDOMX_KEY_CHANGING_HEIGHT},
        utils::{best_fork_index, block_rank, find_extended_fork_index},
        verification::{verify_proposal, verify_transaction},
    },
    zk::VerifyingKey,
    Error, Result,
};

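/// Gas limit for a single block, expressed as a multiple of the per-call gas
/// limit and the maximum number of calls a transaction may contain.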
pub const BLOCK_GAS_LIMIT: u64 = GAS_LIMIT * MAX_TX_CALLS as u64 * 50;

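/// This struct represents the node consensus state: the canonical blockchain,
/// the currently tracked forks and the Proof of Work module used to rank them.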
pub struct Consensus {
    /// Canonical (confirmed) blockchain
    pub blockchain: Blockchain,
    /// Fork length required before confirmation can occur
    pub confirmation_threshold: usize,
    /// Currently tracked forks
    pub forks: RwLock<Vec<Fork>>,
    /// Canonical blockchain PoW module state
    pub module: RwLock<PoWModule>,
    /// Lock used to prevent concurrent appends
    pub append_lock: RwLock<()>,
}

impl Consensus {
    /// Generate a new Consensus state.
    pub fn new(
        blockchain: Blockchain,
        confirmation_threshold: usize,
        pow_target: u32,
        pow_fixed_difficulty: Option<BigUint>,
    ) -> Result<Self> {
        let forks = RwLock::new(vec![]);

        let module = RwLock::new(PoWModule::new(
            blockchain.clone(),
            pow_target,
            pow_fixed_difficulty,
            None,
        )?);

        let append_lock = RwLock::new(());

        Ok(Self { blockchain, confirmation_threshold, forks, module, append_lock })
    }

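    /// Generate an empty fork, if one does not already exist.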
    pub async fn generate_empty_fork(&self) -> Result<()> {
        debug!(target: "validator::consensus::generate_empty_fork", "Generating new empty fork...");
        let mut forks = self.forks.write().await;
        // Check if an empty fork already exists
        for fork in forks.iter() {
            if fork.proposals.is_empty() {
                debug!(target: "validator::consensus::generate_empty_fork", "An empty fork already exists.");
                drop(forks);
                return Ok(())
            }
        }
        let fork = Fork::new(self.blockchain.clone(), self.module.read().await.clone()).await?;
        forks.push(fork);
        drop(forks);
        debug!(target: "validator::consensus::generate_empty_fork", "Fork generated!");
        Ok(())
    }

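    /// Given a proposal, verify it and, if it extends a known fork or the
    /// canonical chain, append it to that fork. Returns an error if the
    /// proposal already exists.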
    pub async fn append_proposal(&self, proposal: &Proposal, verify_fees: bool) -> Result<()> {
        debug!(target: "validator::consensus::append_proposal", "Appending proposal {}", proposal.hash);

        // Check if the proposal already exists in any of the tracked forks
        let lock = self.forks.read().await;
        for fork in lock.iter() {
            for p in fork.proposals.iter().rev() {
                if p == &proposal.hash {
                    drop(lock);
                    debug!(target: "validator::consensus::append_proposal", "Proposal {} already exists", proposal.hash);
                    return Err(Error::ProposalAlreadyExists)
                }
            }
        }
        // Check if the proposal is already part of the canonical chain
        if let Ok(canonical_headers) =
            self.blockchain.blocks.get_order(&[proposal.block.header.height], true)
        {
            if canonical_headers[0].unwrap() == proposal.hash {
                drop(lock);
                debug!(target: "validator::consensus::append_proposal", "Proposal {} already exists", proposal.hash);
                return Err(Error::ProposalAlreadyExists)
            }
        }
        drop(lock);

        // Verify the proposal and grab the fork it extends, along with that fork's index
        let (mut fork, index) = verify_proposal(self, proposal, verify_fees).await?;

        // Append the proposal to the fork
        fork.append_proposal(proposal).await?;

        // If the fork at the recorded index still holds the same proposal sequence
        // (minus the one just appended), replace it in place; otherwise push the
        // extended fork as a new entry.
        let mut lock = self.forks.write().await;
        match index {
            Some(i) => {
                if i < lock.len() && lock[i].proposals == fork.proposals[..fork.proposals.len() - 1]
                {
                    lock[i] = fork;
                } else {
                    lock.push(fork);
                }
            }
            None => {
                lock.push(fork);
            }
        }
        drop(lock);

        info!(target: "validator::consensus::append_proposal", "Appended proposal {}", proposal.hash);

        Ok(())
    }

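    /// Given a proposal, find the fork chain it extends and return a clone of
    /// that fork along with its index. If the proposal extends a fork somewhere
    /// before its tip, a new fork containing only the corresponding prefix is
    /// rebuilt instead.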
    pub async fn find_extended_fork(&self, proposal: &Proposal) -> Result<(Fork, Option<usize>)> {
        let forks = self.forks.read().await;

        // Check if the proposal extends any of the tracked forks
        let found = find_extended_fork_index(&forks, proposal);
        if found.is_err() {
            if let Err(Error::ProposalAlreadyExists) = found {
                return Err(Error::ProposalAlreadyExists)
            }

            // The proposal must extend canonical: it has to reference our last
            // block and have a greater height
            let (last_height, last_block) = self.blockchain.last()?;
            if proposal.block.header.previous != last_block ||
                proposal.block.header.height <= last_height
            {
                return Err(Error::ExtendedChainIndexNotFound)
            }

            // Reuse an existing empty fork if one is available
            for (f_index, fork) in forks.iter().enumerate() {
                if fork.proposals.is_empty() {
                    return Ok((forks[f_index].full_clone()?, Some(f_index)))
                }
            }

            // Otherwise, create a new empty fork
            let fork = Fork::new(self.blockchain.clone(), self.module.read().await.clone()).await?;
            return Ok((fork, None))
        }

        let (f_index, p_index) = found.unwrap();
        let original_fork = &forks[f_index];
        // If the proposal extends the fork at its last proposal, return a full clone
        if p_index == (original_fork.proposals.len() - 1) {
            return Ok((original_fork.full_clone()?, Some(f_index)))
        }

        // Rebuild a fork containing the proposals up to the extended one
        let mut fork = Fork::new(self.blockchain.clone(), self.module.read().await.clone()).await?;
        fork.proposals = original_fork.proposals[..p_index + 1].to_vec();
        fork.diffs = original_fork.diffs[..p_index + 1].to_vec();

        // Replay the retained proposals to rebuild the fork's overlay, PoW module and ranks
        let blocks = &original_fork.overlay.lock().unwrap().get_blocks_by_hash(&fork.proposals)?;
        for (index, block) in blocks.iter().enumerate() {
            fork.overlay.lock().unwrap().overlay.lock().unwrap().add_diff(&fork.diffs[index])?;

            // Grab the next mine target and difficulty
            let (next_target, next_difficulty) = fork.module.next_mine_target_and_difficulty()?;

            // Calculate the block rank distances
            let (target_distance_sq, hash_distance_sq) = block_rank(block, &next_target)?;

            // Update the fork's PoW module
            fork.module.append(&block.header, &next_difficulty)?;

            // Update the fork ranks
            fork.targets_rank += target_distance_sq;
            fork.hashes_rank += hash_distance_sq;
        }

        // Rebuild the fork contracts states monotree
        fork.compute_monotree()?;

        drop(forks);

        Ok((fork, None))
    }

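    /// Check if the best fork is long enough for confirmation. If it is,
    /// return its index, otherwise return None.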
    pub async fn confirmation(&self) -> Result<Option<usize>> {
        debug!(target: "validator::consensus::confirmation", "Started confirmation check");

        // Grab the best fork
        let forks = self.forks.read().await;
        let index = best_fork_index(&forks)?;
        let fork = &forks[index];

        // Check its length against the confirmation threshold
        let length = fork.proposals.len();
        if length < self.confirmation_threshold {
            debug!(target: "validator::consensus::confirmation", "Nothing to confirm yet, best fork size: {length}");
            drop(forks);
            return Ok(None)
        }

        drop(forks);

        Ok(Some(index))
    }

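    /// Grab the header hash at the given height from the fork that contains
    /// the provided fork header, if such a fork exists.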
    pub async fn get_fork_header_hash(
        &self,
        height: u32,
        fork_header: &HeaderHash,
    ) -> Result<Option<HeaderHash>> {
        let forks = self.forks.read().await;

        // Find the fork containing the requested fork header
        let mut found = None;
        'outer: for (index, fork) in forks.iter().enumerate() {
            for p in fork.proposals.iter().rev() {
                if p == fork_header {
                    found = Some(index);
                    break 'outer
                }
            }
        }
        if found.is_none() {
            drop(forks);
            return Ok(None)
        }
        let index = found.unwrap();

        // Grab the header hash at the requested height from the fork's overlay
        let header = forks[index].overlay.lock().unwrap().blocks.get_order(&[height], false)?[0];

        drop(forks);

        Ok(header)
    }

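    /// Grab the requested headers from the fork that contains the provided
    /// fork header. Returns an empty vector if no such fork is found.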
    pub async fn get_fork_headers(
        &self,
        headers: &[HeaderHash],
        fork_header: &HeaderHash,
    ) -> Result<Vec<Header>> {
        let forks = self.forks.read().await;

        // Find the fork containing the requested fork header
        let mut found = None;
        'outer: for (index, fork) in forks.iter().enumerate() {
            for p in fork.proposals.iter().rev() {
                if p == fork_header {
                    found = Some(index);
                    break 'outer
                }
            }
        }
        let Some(index) = found else {
            drop(forks);
            return Ok(vec![])
        };

        // Grab the headers from the fork's overlay
        let headers = forks[index].overlay.lock().unwrap().get_headers_by_hash(headers)?;

        drop(forks);

        Ok(headers)
    }

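    /// Grab the requested blocks as proposals from the fork that contains the
    /// provided fork header. Returns an empty vector if no such fork is found.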
    pub async fn get_fork_proposals(
        &self,
        headers: &[HeaderHash],
        fork_header: &HeaderHash,
    ) -> Result<Vec<Proposal>> {
        let forks = self.forks.read().await;

        // Find the fork containing the requested fork header
        let mut found = None;
        'outer: for (index, fork) in forks.iter().enumerate() {
            for p in fork.proposals.iter().rev() {
                if p == fork_header {
                    found = Some(index);
                    break 'outer
                }
            }
        }
        let Some(index) = found else {
            drop(forks);
            return Ok(vec![])
        };

        // Grab the blocks from the fork's overlay and wrap them as proposals
        let blocks = forks[index].overlay.lock().unwrap().get_blocks_by_hash(headers)?;
        let mut proposals = Vec::with_capacity(blocks.len());
        for block in blocks {
            proposals.push(Proposal::new(block));
        }

        drop(forks);

        Ok(proposals)
    }

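    /// Grab all proposals after the provided tip from the fork containing the
    /// provided fork tip (or from the best fork if none is given), as long as
    /// the distance from that tip does not exceed the given limit.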
    pub async fn get_fork_proposals_after(
        &self,
        tip: HeaderHash,
        fork_tip: Option<HeaderHash>,
        limit: u32,
    ) -> Result<Vec<Proposal>> {
        let forks = self.forks.read().await;

        let mut proposals = vec![];

        // Find the fork containing the provided fork tip, or use the best fork
        let index = match fork_tip {
            Some(fork_tip) => {
                let mut found = None;
                'outer: for (index, fork) in forks.iter().enumerate() {
                    for p in fork.proposals.iter().rev() {
                        if p == &fork_tip {
                            found = Some(index);
                            break 'outer
                        }
                    }
                }
                if found.is_none() {
                    drop(forks);
                    return Ok(proposals)
                }
                found.unwrap()
            }
            None => best_fork_index(&forks)?,
        };

        // Check that the provided tip is known to the fork's overlay
        let Ok(existing_tips) = forks[index].overlay.lock().unwrap().get_blocks_by_hash(&[tip])
        else {
            drop(forks);
            return Ok(proposals)
        };

        // Bail out if the distance from the tip exceeds the requested limit
        let last_block_height = forks[index].overlay.lock().unwrap().last()?.0;
        if last_block_height - existing_tips[0].header.height >= limit {
            drop(forks);
            return Ok(proposals)
        }

        // Collect the canonical blocks after the tip, followed by the fork's own proposals
        let headers = self.blockchain.blocks.get_all_after(existing_tips[0].header.height)?;
        let blocks = self.blockchain.get_blocks_by_hash(&headers)?;
        for block in blocks {
            proposals.push(Proposal::new(block));
        }
        let blocks =
            forks[index].overlay.lock().unwrap().get_blocks_by_hash(&forks[index].proposals)?;
        for block in blocks {
            proposals.push(Proposal::new(block));
        }

        drop(forks);

        Ok(proposals)
    }

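    /// Compute the RandomX key to use when mining the next block, based on the
    /// best fork's (or canonical chain's) next height and the key-change schedule.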
    pub async fn current_mining_randomx_key(&self) -> Result<HeaderHash> {
        let forks = self.forks.read().await;

        // Grab the next block height and the RandomX keys from the best fork,
        // falling back to canonical if no forks exist
        let (next_block_height, rx_keys) = if forks.is_empty() {
            let (next_block_height, _) = self.blockchain.last()?;
            (next_block_height + 1, self.module.read().await.darkfi_rx_keys)
        } else {
            let fork = &forks[best_fork_index(&forks)?];
            let last = fork.last_proposal()?;
            (last.block.header.height + 1, fork.module.darkfi_rx_keys)
        };

        // Select which of the two RandomX keys applies to the next block height
        if next_block_height > RANDOMX_KEY_CHANGING_HEIGHT &&
            next_block_height % RANDOMX_KEY_CHANGING_HEIGHT == RANDOMX_KEY_CHANGE_DELAY
        {
            Ok(rx_keys.1)
        } else {
            Ok(rx_keys.0)
        }
    }

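    /// Return a full clone of the current best fork.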
    pub async fn best_current_fork(&self) -> Result<Fork> {
        let forks = self.forks.read().await;
        let index = best_fork_index(&forks)?;
        forks[index].full_clone()
    }

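    /// Return the last header height and hash of the best fork, or of the
    /// canonical chain if no forks exist.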
    pub async fn best_fork_last_header(&self) -> Result<(u32, HeaderHash)> {
        let forks = self.forks.read().await;

        // Fall back to canonical if no forks exist
        if forks.is_empty() {
            drop(forks);
            return self.blockchain.last()
        }

        // Grab the best fork and its last proposal
        let fork = &forks[best_fork_index(&forks)?];

        let last = fork.last_proposal()?;
        drop(forks);
        Ok((last.block.header.height, last.hash))
    }

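    /// Rebuild the forks set after a confirmation: keep only forks sharing the
    /// confirmed prefix, strip that prefix (proposals and state diffs) from the
    /// other surviving forks, and drop the sled trees and pending transactions
    /// no longer referenced by any kept fork.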
    pub async fn reset_forks(
        &self,
        prefix: &[HeaderHash],
        confirmed_fork_index: &usize,
        confirmed_txs: &[Transaction],
    ) -> Result<()> {
        let mut forks = self.forks.write().await;

        // Gather the confirmed prefix information and the bookkeeping sets used
        // to decide which forks, sled trees and pending transactions survive
        let excess = prefix.len();
        let prefix_last_index = excess - 1;
        let prefix_last = prefix.last().unwrap();
        let mut keep = vec![true; forks.len()];
        let mut referenced_trees = HashSet::new();
        let mut referenced_txs = HashSet::new();
        let confirmed_txs_hashes: Vec<TransactionHash> =
            confirmed_txs.iter().map(|tx| tx.hash()).collect();
        for (index, fork) in forks.iter_mut().enumerate() {
            // For the confirmed fork, just record its referenced trees and
            // retain its unconfirmed mempool transactions
            if &index == confirmed_fork_index {
                let fork_overlay = fork.overlay.lock().unwrap();
                let overlay = fork_overlay.overlay.lock().unwrap();
                for tree in &overlay.state.initial_tree_names {
                    referenced_trees.insert(tree.clone());
                }
                for tree in &overlay.state.new_tree_names {
                    referenced_trees.insert(tree.clone());
                }
                for tree in overlay.state.dropped_trees.keys() {
                    referenced_trees.insert(tree.clone());
                }
                fork.mempool.retain(|tx| !confirmed_txs_hashes.contains(tx));
                for tx in &fork.mempool {
                    referenced_txs.insert(*tx);
                }
                drop(overlay);
                drop(fork_overlay);
                continue
            }

            // Forks that don't share the confirmed prefix are marked for removal
            if fork.proposals.is_empty() ||
                prefix_last_index >= fork.proposals.len() ||
                &fork.proposals[prefix_last_index] != prefix_last
            {
                keep[index] = false;
                continue
            }

            // Drop confirmed transactions from the fork's mempool and keep the rest referenced
            fork.mempool.retain(|tx| !confirmed_txs_hashes.contains(tx));
            for tx in &fork.mempool {
                referenced_txs.insert(*tx);
            }

            // Strip the confirmed prefix proposals and their diffs from the fork,
            // and remove those diffs from the fork's overlay
            let rest_proposals = fork.proposals.split_off(excess);
            let rest_diffs = fork.diffs.split_off(excess);
            let mut diffs = fork.diffs.clone();
            fork.proposals = rest_proposals;
            fork.diffs = rest_diffs;
            for diff in diffs.iter_mut() {
                fork.overlay.lock().unwrap().overlay.lock().unwrap().remove_diff(diff);
            }

            // Record the trees referenced by the surviving fork
            let fork_overlay = fork.overlay.lock().unwrap();
            let overlay = fork_overlay.overlay.lock().unwrap();
            for tree in &overlay.state.initial_tree_names {
                referenced_trees.insert(tree.clone());
            }
            for tree in &overlay.state.new_tree_names {
                referenced_trees.insert(tree.clone());
            }
            for tree in overlay.state.dropped_trees.keys() {
                referenced_trees.insert(tree.clone());
            }
            drop(overlay);
            drop(fork_overlay);
        }

        // Collect the trees and transactions only referenced by forks marked for removal
        let mut dropped_trees = HashSet::new();
        let mut dropped_txs = HashSet::new();
        for (index, fork) in forks.iter_mut().enumerate() {
            if keep[index] {
                continue
            }
            for tx in &fork.mempool {
                if !referenced_txs.contains(tx) {
                    dropped_txs.insert(*tx);
                }
            }
            let fork_overlay = fork.overlay.lock().unwrap();
            let overlay = fork_overlay.overlay.lock().unwrap();
            for tree in &overlay.state.initial_tree_names {
                if !referenced_trees.contains(tree) {
                    dropped_trees.insert(tree.clone());
                }
            }
            for tree in &overlay.state.new_tree_names {
                if !referenced_trees.contains(tree) {
                    dropped_trees.insert(tree.clone());
                }
            }
            for tree in overlay.state.dropped_trees.keys() {
                if !referenced_trees.contains(tree) {
                    dropped_trees.insert(tree.clone());
                }
            }
            drop(overlay);
            drop(fork_overlay);
        }

        // Drop the sled trees that are no longer referenced
        for tree in dropped_trees {
            self.blockchain.sled_db.drop_tree(tree)?;
        }

        // Retain only the forks sharing the confirmed prefix
        let mut iter = keep.iter();
        forks.retain(|_| *iter.next().unwrap());

        // Remove the confirmed and dropped transactions from the pending txs store
        self.blockchain.remove_pending_txs_hashes(&confirmed_txs_hashes)?;

        self.blockchain.remove_pending_txs_hashes(&Vec::from_iter(dropped_txs))?;

        drop(forks);

        Ok(())
    }

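    /// Drop all current forks and start over with a single empty fork.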
    pub async fn purge_forks(&self) -> Result<()> {
        debug!(target: "validator::consensus::purge_forks", "Purging current forks...");
        let mut forks = self.forks.write().await;
        *forks = vec![Fork::new(self.blockchain.clone(), self.module.read().await.clone()).await?];
        drop(forks);
        debug!(target: "validator::consensus::purge_forks", "Forks purged!");
        Ok(())
    }

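    /// Rebuild the canonical PoW module from the current blockchain state,
    /// keeping the configured target and fixed difficulty.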
    pub async fn reset_pow_module(&self) -> Result<()> {
        debug!(target: "validator::consensus::reset_pow_module", "Resetting PoW module...");

        let mut module = self.module.write().await;
        *module = PoWModule::new(
            self.blockchain.clone(),
            module.target,
            module.fixed_difficulty.clone(),
            None,
        )?;
        drop(module);
        debug!(target: "validator::consensus::reset_pow_module", "PoW module reset successfully!");
        Ok(())
    }

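    /// Verify that the canonical contracts states monotree root matches the one
    /// recorded in the last block header, then run the healthcheck of every fork.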
    pub async fn healthcheck(&self) -> Result<()> {
        let lock = self.forks.read().await;

        // Grab the canonical contracts states monotree
        let state_monotree = self.blockchain.get_state_monotree()?;

        // Its root must match the state root of the last block header
        let Some(state_root) = state_monotree.get_headroot()? else {
            return Err(Error::ContractsStatesRootNotFoundError);
        };
        let last_block_state_root = self.blockchain.last_header()?.state_root;
        if state_root != last_block_state_root {
            return Err(Error::ContractsStatesRootError(
                blake3::Hash::from_bytes(state_root).to_string(),
                blake3::Hash::from_bytes(last_block_state_root).to_string(),
            ));
        }

        // Check each fork
        for fork in lock.iter() {
            fork.healthcheck()?;
        }

        Ok(())
    }
}

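/// This struct represents a block proposal, identified by its header hash.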
#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
pub struct Proposal {
    /// Block header hash
    pub hash: HeaderHash,
    /// The proposed block
    pub block: BlockInfo,
}

impl Proposal {
    pub fn new(block: BlockInfo) -> Self {
        let hash = block.hash();
        Self { hash, block }
    }
}

impl From<Proposal> for BlockInfo {
    fn from(proposal: Proposal) -> BlockInfo {
        proposal.block
    }
}

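/// This struct represents a fork chain to be appended to the canonical blockchain:
/// it holds its own overlay over the canonical state, a PoW module clone, the
/// sequence of proposals, their state diffs and the fork's ranking information.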
#[derive(Clone)]
pub struct Fork {
    /// Canonical (confirmed) blockchain
    pub blockchain: Blockchain,
    /// Overlay over the canonical blockchain state
    pub overlay: BlockchainOverlayPtr,
    /// Fork-specific PoW module state
    pub module: PoWModule,
    /// Fork contracts states monotree
    pub state_monotree: Monotree<monotree::MemoryDb>,
    /// Hashes of the proposals in this fork
    pub proposals: Vec<HeaderHash>,
    /// Overlay state diff of each proposal
    pub diffs: Vec<SledDbOverlayStateDiff>,
    /// Hashes of pending transactions known to this fork
    pub mempool: Vec<TransactionHash>,
    /// Accumulated squared target distances (fork rank)
    pub targets_rank: BigUint,
    /// Accumulated squared hash distances (fork rank)
    pub hashes_rank: BigUint,
}

impl Fork {
    pub async fn new(blockchain: Blockchain, module: PoWModule) -> Result<Self> {
        let mempool = blockchain.get_pending_txs()?.iter().map(|tx| tx.hash()).collect();
        let overlay = BlockchainOverlay::new(&blockchain)?;
        // Build the fork contracts states monotree from the overlay
        let state_monotree = overlay.lock().unwrap().get_state_monotree()?;
        // Initialize the fork ranks from the last canonical block difficulty
        let last_difficulty = blockchain.last_block_difficulty()?;
        let targets_rank = last_difficulty.ranks.targets_rank;
        let hashes_rank = last_difficulty.ranks.hashes_rank;
        Ok(Self {
            blockchain,
            overlay,
            module,
            state_monotree,
            proposals: vec![],
            diffs: vec![],
            mempool,
            targets_rank,
            hashes_rank,
        })
    }

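    /// Auxiliary function to append a proposal to the fork: update the fork's
    /// ranks and PoW module, record the proposal hash and store the overlay
    /// state diff it produced.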
    pub async fn append_proposal(&mut self, proposal: &Proposal) -> Result<()> {
        // Grab the next mine target and difficulty
        let (next_target, next_difficulty) = self.module.next_mine_target_and_difficulty()?;

        // Calculate the block rank distances
        let (target_distance_sq, hash_distance_sq) = block_rank(&proposal.block, &next_target)?;

        // Update the fork ranks
        self.targets_rank += target_distance_sq.clone();
        self.hashes_rank += hash_distance_sq.clone();

        // Generate the block difficulty record and update the fork's PoW module
        let cumulative_difficulty =
            self.module.cumulative_difficulty.clone() + next_difficulty.clone();
        let ranks = BlockRanks::new(
            target_distance_sq,
            self.targets_rank.clone(),
            hash_distance_sq,
            self.hashes_rank.clone(),
        );
        let block_difficulty = BlockDifficulty::new(
            proposal.block.header.height,
            proposal.block.header.timestamp,
            next_difficulty,
            cumulative_difficulty,
            ranks,
        );
        self.module.append_difficulty(&self.overlay, &proposal.block.header, block_difficulty)?;

        // Record the proposal hash
        self.proposals.push(proposal.hash);

        // Store the overlay state diff produced by this proposal
        self.diffs.push(self.overlay.lock().unwrap().overlay.lock().unwrap().diff(&self.diffs)?);

        Ok(())
    }

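    /// Auxiliary function to retrieve the last proposal of the fork, falling
    /// back to the last canonical block if the fork is empty.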
    pub fn last_proposal(&self) -> Result<Proposal> {
        let block = if let Some(last) = self.proposals.last() {
            self.overlay.lock().unwrap().get_blocks_by_hash(&[*last])?[0].clone()
        } else {
            self.overlay.lock().unwrap().last_block()?
        };

        Ok(Proposal::new(block))
    }

    /// Auxiliary function to compute the height of the fork's next block.
    pub fn get_next_block_height(&self) -> Result<u32> {
        let proposal = self.last_proposal()?;
        Ok(proposal.block.header.height + 1)
    }

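    /// Auxiliary function to retrieve the fork's unproposed pending transactions:
    /// verify each mempool transaction that is not already part of the fork's
    /// proposals, stopping once the block gas limit would be exceeded. Returns
    /// the selected transactions along with the total gas used and gas paid.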
    pub async fn unproposed_txs(
        &self,
        verifying_block_height: u32,
        verify_fees: bool,
    ) -> Result<(Vec<Transaction>, u64, u64)> {
        // Nothing to do if the fork mempool is empty
        if self.mempool.is_empty() {
            return Ok((vec![], 0, 0))
        }

        // Merkle tree used while verifying the candidate transactions
        let mut tree = MerkleTree::new(1);

        // Total gas accumulators
        let mut total_gas_used = 0;
        let mut total_gas_paid = 0;

        // Map of verifying keys for the contracts called by the transactions
        let mut vks: HashMap<[u8; 32], HashMap<String, VerifyingKey>> = HashMap::new();

        // Transactions already included in the fork's proposals
        let proposals_txs = self.overlay.lock().unwrap().get_blocks_txs_hashes(&self.proposals)?;

        // Iterate the fork mempool and verify each unproposed transaction
        let mut unproposed_txs = vec![];
        for tx in &self.mempool {
            // Skip transactions already proposed
            if proposals_txs.contains(tx) {
                continue
            }

            // Retrieve the pending transaction
            let unproposed_tx =
                self.blockchain.transactions.get_pending(&[*tx], true)?[0].clone().unwrap();

            // Make sure we have a verifying keys entry for each called contract
            for call in &unproposed_tx.calls {
                vks.entry(call.data.contract_id.to_bytes()).or_default();
            }

            // Verify the transaction against a checkpointed overlay
            self.overlay.lock().unwrap().checkpoint();
            let gas_data = match verify_transaction(
                &self.overlay,
                verifying_block_height,
                self.module.target,
                &unproposed_tx,
                &mut tree,
                &mut vks,
                verify_fees,
            )
            .await
            {
                Ok(gas_values) => gas_values,
                Err(e) => {
                    debug!(target: "validator::consensus::unproposed_txs", "Transaction verification failed: {e}");
                    self.overlay.lock().unwrap().overlay.lock().unwrap().purge_new_trees()?;
                    self.overlay.lock().unwrap().revert_to_checkpoint()?;
                    continue
                }
            };

            // Gas consumed by this transaction
            let tx_gas_used = gas_data.total_gas_used();

            // Gas usage if this transaction were included
            let accumulated_gas_usage = total_gas_used + tx_gas_used;

            // Stop once the block gas limit would be exceeded
            if accumulated_gas_usage > BLOCK_GAS_LIMIT {
                warn!(
                    target: "validator::consensus::unproposed_txs",
                    "Retrieving transaction {tx} would exceed configured unproposed transaction gas limit: {accumulated_gas_usage} - {BLOCK_GAS_LIMIT}"
                );
                self.overlay.lock().unwrap().overlay.lock().unwrap().purge_new_trees()?;
                self.overlay.lock().unwrap().revert_to_checkpoint()?;
                break
            }

            // Update the gas accumulators
            total_gas_used += tx_gas_used;
            total_gas_paid += gas_data.paid;

            unproposed_txs.push(unproposed_tx);
        }

        Ok((unproposed_txs, total_gas_used, total_gas_paid))
    }

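    /// Auxiliary function to create a full clone of the fork, cloning the
    /// underlying overlay as well rather than just its pointer.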
    pub fn full_clone(&self) -> Result<Self> {
        let blockchain = self.blockchain.clone();
        let overlay = self.overlay.lock().unwrap().full_clone()?;
        let module = self.module.clone();
        let state_monotree = self.state_monotree.clone();
        let proposals = self.proposals.clone();
        let diffs = self.diffs.clone();
        let mempool = self.mempool.clone();
        let targets_rank = self.targets_rank.clone();
        let hashes_rank = self.hashes_rank.clone();

        Ok(Self {
            blockchain,
            overlay,
            module,
            state_monotree,
            proposals,
            diffs,
            mempool,
            targets_rank,
            hashes_rank,
        })
    }

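    /// Auxiliary function to recompute the fork's contracts states monotree
    /// from its current overlay.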
    pub fn compute_monotree(&mut self) -> Result<()> {
        self.state_monotree = self.overlay.lock().unwrap().get_state_monotree()?;
        Ok(())
    }

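    /// Verify that the monotree computed from the fork's overlay matches both
    /// the fork's cached state monotree root and the state root recorded in
    /// its last proposal header.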
    pub fn healthcheck(&self) -> Result<()> {
        // Recompute the monotree from the fork's overlay
        let state_monotree = self.overlay.lock().unwrap().get_state_monotree()?;

        // Its root must match the fork's cached monotree root
        let Some(state_root) = state_monotree.get_headroot()? else {
            return Err(Error::ContractsStatesRootNotFoundError);
        };
        let Some(fork_state_root) = self.state_monotree.get_headroot()? else {
            return Err(Error::ContractsStatesRootNotFoundError);
        };
        if state_root != fork_state_root {
            return Err(Error::ContractsStatesRootError(
                blake3::Hash::from_bytes(state_root).to_string(),
                blake3::Hash::from_bytes(fork_state_root).to_string(),
            ));
        }

        // It must also match the state root of the fork's last proposal header
        let last_block_state_root = self.last_proposal()?.block.header.state_root;
        if state_root != last_block_state_root {
            return Err(Error::ContractsStatesRootError(
                blake3::Hash::from_bytes(state_root).to_string(),
                blake3::Hash::from_bytes(last_block_state_root).to_string(),
            ));
        }

        Ok(())
    }

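    /// Auxiliary function to purge the new trees created in the fork's overlay,
    /// logging an error instead of returning one if the purge fails.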
    pub fn purge_new_trees(&self) {
        if let Err(e) = self.overlay.lock().unwrap().overlay.lock().unwrap().purge_new_trees() {
            error!(target: "validator::consensus::fork::purge_new_trees", "Purging new trees in the overlay failed: {e}");
        }
    }
}