1use std::collections::{BTreeSet, HashMap, HashSet};
20
21use darkfi_sdk::{crypto::MerkleTree, tx::TransactionHash};
22use darkfi_serial::{async_trait, deserialize, SerialDecodable, SerialEncodable};
23use num_bigint::BigUint;
24use sled_overlay::{database::SledDbOverlayStateDiff, sled::IVec};
25use tracing::{debug, info, warn};
26
27use crate::{
28 blockchain::{
29 block_store::{BlockDifficulty, BlockRanks},
30 BlockInfo, Blockchain, BlockchainOverlay, BlockchainOverlayPtr, Header, HeaderHash,
31 },
32 runtime::vm_runtime::GAS_LIMIT,
33 tx::{Transaction, MAX_TX_CALLS},
34 validator::{
35 pow::{PoWModule, RANDOMX_KEY_CHANGE_DELAY, RANDOMX_KEY_CHANGING_HEIGHT},
36 utils::{best_fork_index, block_rank, find_extended_fork_index, worst_fork_index},
37 verification::{verify_proposal, verify_transaction},
38 },
39 zk::VerifyingKey,
40 Error, Result,
41};
42
/// Total gas limit for a block's transactions: the per-transaction gas
/// limit, times the maximum calls per transaction, times 50.
pub const BLOCK_GAS_LIMIT: u64 = GAS_LIMIT * MAX_TX_CALLS as u64 * 50;
45
/// State tracked by the consensus algorithm: the canonical chain plus a
/// bounded set of competing fork chains.
pub struct Consensus {
    /// Canonical (confirmed) blockchain
    pub blockchain: Blockchain,
    /// Number of proposals the best fork must hold before confirmation
    pub confirmation_threshold: usize,
    /// Fork chains containing block proposals
    pub forks: Vec<Fork>,
    /// Maximum number of forks tracked concurrently (clamped to >= 1 in `new`)
    max_forks: usize,
    /// PoW module state over the canonical blockchain
    pub module: PoWModule,
}
60
61impl Consensus {
62 pub fn new(
64 blockchain: Blockchain,
65 confirmation_threshold: usize,
66 max_forks: usize,
67 pow_target: u32,
68 pow_fixed_difficulty: Option<BigUint>,
69 ) -> Result<Self> {
70 let max_forks = if max_forks == 0 { 1 } else { max_forks };
71 let module = PoWModule::new(blockchain.clone(), pow_target, pow_fixed_difficulty, None)?;
72
73 Ok(Self { blockchain, confirmation_threshold, forks: vec![], max_forks, module })
74 }
75
76 pub async fn generate_empty_fork(&mut self) -> Result<()> {
80 debug!(target: "validator::consensus::generate_empty_fork", "Generating new empty fork...");
81 for fork in &self.forks {
83 if fork.proposals.is_empty() {
84 debug!(target: "validator::consensus::generate_empty_fork", "An empty fork already exists.");
85 return Ok(())
86 }
87 }
88 let fork = Fork::new(self.blockchain.clone(), self.module.clone()).await?;
89 self.push_fork(fork);
90 debug!(target: "validator::consensus::generate_empty_fork", "Fork generated!");
91
92 Ok(())
93 }
94
95 fn push_fork(&mut self, fork: Fork) {
100 if self.forks.len() < self.max_forks {
102 self.forks.push(fork);
103 return
104 }
105
106 let index = worst_fork_index(&self.forks).unwrap();
111
112 if fork.targets_rank < self.forks[index].targets_rank {
114 return
115 }
116
117 if fork.targets_rank == self.forks[index].targets_rank &&
119 fork.hashes_rank <= self.forks[index].hashes_rank
120 {
121 return
122 }
123
124 self.forks[index] = fork;
126 }
127
    /// Verify a block proposal and append it to the fork it extends.
    ///
    /// Rejects the proposal when its hash already exists in any tracked
    /// fork, or matches the canonical block at the same height.
    /// `verify_proposal` yields the fork to append to together with,
    /// when applicable, the index of the fork it was derived from
    /// (presumably a clone of it — see `find_extended_fork`).
    pub async fn append_proposal(
        &mut self,
        proposal: &Proposal,
        is_new: bool,
        verify_fees: bool,
    ) -> Result<()> {
        debug!(target: "validator::consensus::append_proposal", "Appending proposal {}", proposal.hash);

        // Reject if the proposal already exists in any tracked fork.
        for fork in &self.forks {
            for p in fork.proposals.iter().rev() {
                if p == &proposal.hash {
                    debug!(target: "validator::consensus::append_proposal", "Proposal {} already exists", proposal.hash);
                    return Err(Error::ProposalAlreadyExists)
                }
            }
        }
        // Reject if the proposal is already canonical at its height.
        if let Ok(canonical_headers) =
            self.blockchain.blocks.get_order(&[proposal.block.header.height], true)
        {
            if canonical_headers[0].unwrap() == proposal.hash {
                debug!(target: "validator::consensus::append_proposal", "Proposal {} already exists", proposal.hash);
                return Err(Error::ProposalAlreadyExists)
            }
        }

        // Verify and grab the fork the proposal extends.
        let (mut fork, index) = verify_proposal(self, proposal, is_new, verify_fees).await?;

        // Append the proposal to that fork.
        fork.append_proposal(proposal).await?;

        // If the proposal extended an existing fork's tip, replace that fork
        // in place; otherwise push the extended fork as a new one.
        match index {
            Some(i) => {
                // Guard: the stored fork must still be the exact prefix of
                // the extended fork (minus the freshly appended proposal).
                if i < self.forks.len() &&
                    self.forks[i].proposals == fork.proposals[..fork.proposals.len() - 1]
                {
                    self.forks[i] = fork;
                } else {
                    self.push_fork(fork);
                }
            }
            None => {
                self.push_fork(fork);
            }
        }

        info!(target: "validator::consensus::append_proposal", "Appended proposal {} - {}", proposal.hash, proposal.block.header.height);

        Ok(())
    }
185
    /// Find the fork chain the given proposal extends, returning a clone
    /// of it together with, when the extension is at a fork's tip, the
    /// index of the original fork.
    ///
    /// A mid-fork extension rebuilds a fresh fork containing only the
    /// proposals up to (and including) the extended one, with index `None`.
    pub async fn find_extended_fork(&self, proposal: &Proposal) -> Result<(Fork, Option<usize>)> {
        let found = find_extended_fork_index(&self.forks, proposal);
        if found.is_err() {
            // Propagate duplicates directly.
            if let Err(Error::ProposalAlreadyExists) = found {
                return Err(Error::ProposalAlreadyExists)
            }

            // No fork was extended; the proposal must extend the canonical
            // chain tip directly, at a strictly greater height.
            let (last_height, last_block) = self.blockchain.last()?;
            if proposal.block.header.previous != last_block ||
                proposal.block.header.height <= last_height
            {
                return Err(Error::ExtendedChainIndexNotFound)
            }

            // Reuse an existing empty fork if one is tracked.
            for (f_index, fork) in self.forks.iter().enumerate() {
                if fork.proposals.is_empty() {
                    return Ok((self.forks[f_index].full_clone()?, Some(f_index)))
                }
            }

            // Otherwise start a fresh fork off canonical.
            let fork = Fork::new(self.blockchain.clone(), self.module.clone()).await?;
            return Ok((fork, None))
        }

        // A fork was extended; at its tip, simply clone the whole fork.
        let (f_index, p_index) = found.unwrap();
        let original_fork = &self.forks[f_index];
        if p_index == (original_fork.proposals.len() - 1) {
            return Ok((original_fork.full_clone()?, Some(f_index)))
        }

        // Mid-fork extension: rebuild a fork holding only the proposals up
        // to the extended one, along with their overlay diffs.
        let mut fork = Fork::new(self.blockchain.clone(), self.module.clone()).await?;
        fork.proposals = original_fork.proposals[..p_index + 1].to_vec();
        fork.diffs = original_fork.diffs[..p_index + 1].to_vec();

        // Replay each retained proposal: apply its overlay diff, recompute
        // its rank contribution, and advance the fork's PoW module.
        let blocks = &original_fork.overlay.lock().unwrap().get_blocks_by_hash(&fork.proposals)?;
        for (index, block) in blocks.iter().enumerate() {
            fork.overlay.lock().unwrap().overlay.lock().unwrap().add_diff(&fork.diffs[index])?;

            let (next_difficulty, target_distance_sq, hash_distance_sq) =
                block_rank(&mut fork.module, block)?;

            fork.module.append(&block.header, &next_difficulty)?;

            fork.targets_rank += target_distance_sq;
            fork.hashes_rank += hash_distance_sq;
        }

        Ok((fork, None))
    }
252
253 pub async fn confirmation(&self) -> Result<Option<usize>> {
263 debug!(target: "validator::consensus::confirmation", "Started confirmation check");
264
265 let index = best_fork_index(&self.forks)?;
267
268 if self.forks[index].proposals.len() < self.confirmation_threshold {
270 debug!(target: "validator::consensus::confirmation", "Nothing to confirm yet, best fork size: {}", self.forks[index].proposals.len());
271 return Ok(None)
272 }
273
274 for (f_index, fork) in self.forks.iter().enumerate() {
276 if f_index == index {
278 continue
279 }
280
281 if fork.targets_rank != self.forks[index].targets_rank {
283 continue
284 }
285
286 if fork.hashes_rank == self.forks[index].hashes_rank {
288 debug!(target: "validator::consensus::confirmation", "Competing best forks found");
289 return Ok(None)
290 }
291 }
292
293 Ok(Some(index))
294 }
295
296 fn find_fork_by_header(&self, fork_header: &HeaderHash) -> Option<usize> {
299 for (index, fork) in self.forks.iter().enumerate() {
300 for p in fork.proposals.iter().rev() {
301 if p == fork_header {
302 return Some(index)
303 }
304 }
305 }
306 None
307 }
308
309 pub async fn get_fork_header_hash(
312 &self,
313 height: u32,
314 fork_header: &HeaderHash,
315 ) -> Result<Option<HeaderHash>> {
316 let Some(index) = self.find_fork_by_header(fork_header) else { return Ok(None) };
318
319 let header =
321 self.forks[index].overlay.lock().unwrap().blocks.get_order(&[height], false)?[0];
322
323 Ok(header)
324 }
325
326 pub async fn get_fork_headers(
330 &self,
331 headers: &[HeaderHash],
332 fork_header: &HeaderHash,
333 ) -> Result<Vec<Header>> {
334 let Some(index) = self.find_fork_by_header(fork_header) else { return Ok(vec![]) };
336
337 let headers = self.forks[index].overlay.lock().unwrap().get_headers_by_hash(headers)?;
339
340 Ok(headers)
341 }
342
343 pub async fn get_fork_proposals(
347 &self,
348 headers: &[HeaderHash],
349 fork_header: &HeaderHash,
350 ) -> Result<Vec<Proposal>> {
351 let Some(index) = self.find_fork_by_header(fork_header) else { return Ok(vec![]) };
353
354 let blocks = self.forks[index].overlay.lock().unwrap().get_blocks_by_hash(headers)?;
356 let mut proposals = Vec::with_capacity(blocks.len());
357 for block in blocks {
358 proposals.push(Proposal::new(block));
359 }
360
361 Ok(proposals)
362 }
363
    /// Collect the proposals of a fork that come after the given tip.
    ///
    /// The fork is selected by `fork_tip` when provided, otherwise the
    /// best ranking fork is used. Returns an empty vector when the tip is
    /// unknown to the fork's overlay, or lags `limit` or more blocks
    /// behind the fork's last block.
    pub async fn get_fork_proposals_after(
        &self,
        tip: HeaderHash,
        fork_tip: Option<HeaderHash>,
        limit: u32,
    ) -> Result<Vec<Proposal>> {
        let mut proposals = vec![];

        // Resolve which fork to read from.
        let index = match fork_tip {
            Some(fork_tip) => {
                let Some(found) = self.find_fork_by_header(&fork_tip) else { return Ok(proposals) };
                found
            }
            None => best_fork_index(&self.forks)?,
        };

        // The requested tip must be known to the fork's overlay.
        let Ok(existing_tips) =
            self.forks[index].overlay.lock().unwrap().get_blocks_by_hash(&[tip])
        else {
            return Ok(proposals)
        };

        // Bail out when the tip lags `limit` or more blocks behind.
        let last_block_height = self.forks[index].overlay.lock().unwrap().last()?.0;
        if last_block_height.saturating_sub(existing_tips[0].header.height) >= limit {
            return Ok(proposals)
        }

        // First gather canonical blocks above the tip's height...
        let headers = self.blockchain.blocks.get_all_after(existing_tips[0].header.height)?;
        let blocks = self.blockchain.get_blocks_by_hash(&headers)?;
        for block in blocks {
            proposals.push(Proposal::new(block));
        }
        // ...then the fork's own proposals above the tip's height.
        let blocks = self.forks[index]
            .overlay
            .lock()
            .unwrap()
            .get_blocks_by_hash(&self.forks[index].proposals)?;
        for block in blocks {
            if block.header.height > existing_tips[0].header.height {
                proposals.push(Proposal::new(block));
            }
        }

        Ok(proposals)
    }
420
    /// Grab the RandomX key to use when mining the next block.
    ///
    /// The next block height and key pair come from the best fork, or from
    /// the canonical chain when no forks are tracked.
    pub async fn current_mining_randomx_key(&self) -> Result<HeaderHash> {
        let (next_block_height, rx_keys) = if self.forks.is_empty() {
            // No forks: next block extends the canonical tip.
            let (next_block_height, _) = self.blockchain.last()?;
            (next_block_height + 1, self.module.darkfi_rx_keys)
        } else {
            // Otherwise it extends the best fork's last proposal.
            let index = best_fork_index(&self.forks)?;
            let fork = &self.forks[index];
            let last = fork.last_proposal()?;
            (last.block.header.height + 1, fork.module.darkfi_rx_keys)
        };

        // NOTE(review): at heights hitting the key-change delay offset past a
        // key changing height, mine with the secondary (upcoming) key — the
        // secondary key must have been set by then; confirm against the
        // RandomX key rotation schedule.
        if next_block_height > RANDOMX_KEY_CHANGING_HEIGHT &&
            next_block_height % RANDOMX_KEY_CHANGING_HEIGHT == RANDOMX_KEY_CHANGE_DELAY
        {
            Ok(rx_keys.1.ok_or_else(|| Error::ParseFailed("darkfi_rx_keys.1 unwrap() error"))?)
        } else {
            Ok(rx_keys.0)
        }
    }
448
449 pub async fn best_current_fork(&self) -> Result<Fork> {
451 let index = best_fork_index(&self.forks)?;
452 self.forks[index].full_clone()
453 }
454
455 pub async fn best_fork_last_header(&self) -> Result<(u32, HeaderHash)> {
458 if self.forks.is_empty() {
460 return self.blockchain.last()
461 }
462
463 let index = best_fork_index(&self.forks)?;
465 let fork = &self.forks[index];
466
467 let last = fork.last_proposal()?;
469 Ok((last.block.header.height, last.hash))
470 }
471
    /// Rebase the tracked forks after a confirmation.
    ///
    /// `prefix` holds the confirmed proposal hashes (assumed non-empty —
    /// the `unwrap()` and index arithmetic below panic otherwise). The
    /// confirmed fork only sheds the confirmed transactions; every other
    /// fork either gets its confirmed prefix (and the prefix's overlay
    /// diffs) stripped, or is dropped when it doesn't share the prefix.
    pub async fn reset_forks(
        &mut self,
        prefix: &[HeaderHash],
        confirmed_fork_index: &usize,
        confirmed_txs: &[Transaction],
    ) -> Result<()> {
        // Number of confirmed proposals stripped from surviving forks.
        let excess = prefix.len();
        let prefix_last_index = excess - 1;
        let prefix_last = prefix.last().unwrap();
        // Marks which forks survive the reset, parallel to `self.forks`.
        let mut keep = vec![true; self.forks.len()];
        let confirmed_txs_hashes: Vec<TransactionHash> =
            confirmed_txs.iter().map(|tx| tx.hash()).collect();
        for (index, fork) in self.forks.iter_mut().enumerate() {
            if &index == confirmed_fork_index {
                // The confirmed fork only drops the now-confirmed txs.
                fork.mempool.retain(|tx| !confirmed_txs_hashes.contains(tx));
                continue
            }

            // Drop forks that don't extend the confirmed prefix.
            if fork.proposals.is_empty() ||
                prefix_last_index >= fork.proposals.len() ||
                &fork.proposals[prefix_last_index] != prefix_last
            {
                keep[index] = false;
                continue
            }

            fork.mempool.retain(|tx| !confirmed_txs_hashes.contains(tx));

            // Strip the confirmed prefix from the fork's proposals/diffs.
            let rest_proposals = fork.proposals.split_off(excess);
            let rest_diffs = fork.diffs.split_off(excess);
            // After split_off, `fork.diffs` holds the confirmed prefix
            // diffs; clone them before swapping in the remainder so they
            // can be removed from the fork's overlay below.
            let mut diffs = fork.diffs.clone();
            fork.proposals = rest_proposals;
            fork.diffs = rest_diffs;
            for diff in diffs.iter_mut() {
                fork.overlay.lock().unwrap().overlay.lock().unwrap().remove_diff(diff);
            }
        }

        // Retain only surviving forks, preserving their order.
        let mut iter = keep.iter();
        self.forks.retain(|_| *iter.next().unwrap());

        // Drop confirmed txs from the canonical pending store.
        self.blockchain.remove_pending_txs_hashes(&confirmed_txs_hashes)?;

        Ok(())
    }
541
542 pub async fn purge_forks(&mut self) -> Result<()> {
545 debug!(target: "validator::consensus::purge_forks", "Purging current forks...");
546 self.forks = vec![Fork::new(self.blockchain.clone(), self.module.clone()).await?];
547 debug!(target: "validator::consensus::purge_forks", "Forks purged!");
548
549 Ok(())
550 }
551
552 pub async fn reset_pow_module(&mut self) -> Result<()> {
554 debug!(target: "validator::consensus::reset_pow_module", "Resetting PoW module...");
555 self.module = PoWModule::new(
556 self.blockchain.clone(),
557 self.module.target,
558 self.module.fixed_difficulty.clone(),
559 None,
560 )?;
561 debug!(target: "validator::consensus::reset_pow_module", "PoW module reset successfully!");
562
563 Ok(())
564 }
565
566 pub async fn healthcheck(&self) -> Result<()> {
569 let state_root = self.blockchain.contracts.get_state_monotree_root()?;
571
572 let last_block_state_root = self.blockchain.last_header()?.state_root;
574 if state_root != last_block_state_root {
575 return Err(Error::ContractsStatesRootError(
576 blake3::Hash::from_bytes(state_root).to_string(),
577 blake3::Hash::from_bytes(last_block_state_root).to_string(),
578 ));
579 }
580
581 for fork in &self.forks {
583 fork.healthcheck()?;
584 }
585
586 Ok(())
587 }
588
    /// Drop sled trees that no fork (nor canonical state) references.
    ///
    /// Each fork — or a fresh fork over canonical, when none are tracked —
    /// adds its referenced tree names into `referenced_trees`; every other
    /// 32-byte-named tree in the database is dropped.
    pub async fn purge_unreferenced_trees(
        &self,
        referenced_trees: &mut BTreeSet<IVec>,
    ) -> Result<()> {
        if self.forks.is_empty() {
            // No forks: build the reference set from canonical state.
            let fork = Fork::new(self.blockchain.clone(), self.module.clone()).await?;
            fork.referenced_trees(referenced_trees);
        } else {
            for fork in &self.forks {
                fork.referenced_trees(referenced_trees);
            }
        }

        // Walk every tree currently present in the sled database.
        let current_trees = self.blockchain.sled_db.tree_names();

        for tree in current_trees {
            if referenced_trees.contains(&tree) {
                continue
            }

            // Only trees whose name deserializes to 32 bytes are eligible
            // for dropping (presumably contract trees — other system trees
            // are skipped by this check).
            let Ok(tree) = deserialize::<[u8; 32]>(&tree) else { continue };

            debug!(target: "validator::consensus::purge_unreferenced_trees", "Dropping unreferenced tree: {}", blake3::Hash::from(tree));
            self.blockchain.sled_db.drop_tree(tree)?;
        }

        Ok(())
    }
629
630 pub async fn purge_unproposed_pending_txs(
633 &mut self,
634 mut proposed_txs: HashSet<TransactionHash>,
635 ) -> Result<()> {
636 for fork in &self.forks {
638 let proposals_txs =
640 fork.overlay.lock().unwrap().get_blocks_txs_hashes(&fork.proposals)?;
641 for tx in proposals_txs {
642 proposed_txs.insert(tx);
643 }
644 }
645
646 for fork in self.forks.iter_mut() {
649 fork.mempool.retain(|tx| proposed_txs.contains(tx));
650 }
651
652 let proposed_txs: Vec<TransactionHash> = proposed_txs.into_iter().collect();
654 self.blockchain.reset_pending_txs(&proposed_txs)?;
655
656 Ok(())
657 }
658}
659
/// A block proposal exchanged and tracked during consensus.
#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
pub struct Proposal {
    /// Block header hash (cached from `block.hash()`)
    pub hash: HeaderHash,
    /// The proposed block itself
    pub block: BlockInfo,
}
668
669impl Proposal {
670 pub fn new(block: BlockInfo) -> Self {
671 let hash = block.hash();
672 Self { hash, block }
673 }
674}
675
676impl From<Proposal> for BlockInfo {
677 fn from(proposal: Proposal) -> BlockInfo {
678 proposal.block
679 }
680}
681
/// A fork chain of block proposals built on top of the canonical chain.
#[derive(Clone)]
pub struct Fork {
    /// Canonical (confirmed) blockchain
    pub blockchain: Blockchain,
    /// Overlay over canonical holding this fork's state changes
    pub overlay: BlockchainOverlayPtr,
    /// Fork-local PoW module state
    pub module: PoWModule,
    /// Hashes of this fork's proposals, in append order
    pub proposals: Vec<HeaderHash>,
    /// Overlay state diff produced by each proposal, parallel to `proposals`
    pub diffs: Vec<SledDbOverlayStateDiff>,
    /// Hashes of pending transactions this fork considers for inclusion
    pub mempool: Vec<TransactionHash>,
    /// Accumulated squared target distances (fork rank component)
    pub targets_rank: BigUint,
    /// Accumulated squared hash distances (fork rank component)
    pub hashes_rank: BigUint,
}
707
708impl Fork {
709 pub async fn new(blockchain: Blockchain, module: PoWModule) -> Result<Self> {
710 let mempool = blockchain.get_pending_txs()?.iter().map(|tx| tx.hash()).collect();
711 let overlay = BlockchainOverlay::new(&blockchain)?;
712 let last_difficulty = blockchain.last_block_difficulty()?;
714 let targets_rank = last_difficulty.ranks.targets_rank;
715 let hashes_rank = last_difficulty.ranks.hashes_rank;
716 Ok(Self {
717 blockchain,
718 overlay,
719 module,
720 proposals: vec![],
721 diffs: vec![],
722 mempool,
723 targets_rank,
724 hashes_rank,
725 })
726 }
727
    /// Append a proposal to the fork, updating its ranks, the PoW module's
    /// difficulty record, and the list of overlay diffs.
    pub async fn append_proposal(&mut self, proposal: &Proposal) -> Result<()> {
        // Rank the proposal's block against the fork's PoW module.
        let (next_difficulty, target_distance_sq, hash_distance_sq) =
            block_rank(&mut self.module, &proposal.block)?;

        // Fold the block's distances into the fork's cumulative ranks.
        self.targets_rank += target_distance_sq.clone();
        self.hashes_rank += hash_distance_sq.clone();

        // Build and record the block's difficulty entry, carrying the
        // updated cumulative difficulty and ranks.
        let cumulative_difficulty =
            self.module.cumulative_difficulty.clone() + next_difficulty.clone();
        let ranks = BlockRanks::new(
            target_distance_sq,
            self.targets_rank.clone(),
            hash_distance_sq,
            self.hashes_rank.clone(),
        );
        let block_difficulty = BlockDifficulty::new(
            proposal.block.header.height,
            proposal.block.header.timestamp,
            next_difficulty,
            cumulative_difficulty,
            ranks,
        );
        self.module.append_difficulty(&self.overlay, &proposal.block.header, block_difficulty)?;

        // Track the proposal hash.
        self.proposals.push(proposal.hash);

        // Capture the overlay state diff this proposal produced, relative
        // to the diffs already recorded.
        self.diffs.push(self.overlay.lock().unwrap().overlay.lock().unwrap().diff(&self.diffs)?);

        Ok(())
    }
765
766 pub fn last_proposal(&self) -> Result<Proposal> {
768 let block = if let Some(last) = self.proposals.last() {
769 self.overlay.lock().unwrap().get_blocks_by_hash(&[*last])?[0].clone()
770 } else {
771 self.overlay.lock().unwrap().last_block()?
772 };
773
774 Ok(Proposal::new(block))
775 }
776
777 pub fn get_next_block_height(&self) -> Result<u32> {
779 let proposal = self.last_proposal()?;
780 Ok(proposal.block.header.height + 1)
781 }
782
783 pub async fn unproposed_txs(
789 &mut self,
790 verifying_block_height: u32,
791 verify_fees: bool,
792 ) -> Result<(Vec<Transaction>, u64, u64)> {
793 if self.mempool.is_empty() {
795 return Ok((vec![], 0, 0))
796 }
797
798 let mut tree = MerkleTree::new(1);
800
801 let mut total_gas_used = 0_u64;
803 let mut total_gas_paid = 0_u64;
804
805 let mut vks: HashMap<[u8; 32], HashMap<String, VerifyingKey>> = HashMap::new();
808
809 let proposals_txs = self.overlay.lock().unwrap().get_blocks_txs_hashes(&self.proposals)?;
811
812 let mut unproposed_txs = vec![];
815 let mut erroneous_txs = vec![];
816 for tx in &self.mempool {
817 if proposals_txs.contains(tx) {
820 continue
821 }
822
823 let unproposed_tx = match self.blockchain.transactions.get_pending(&[*tx], true) {
825 Ok(txs) => txs[0].clone().unwrap(),
826 Err(e) => {
827 debug!(target: "validator::consensus::unproposed_txs", "Transaction retrieval failed: {e}");
828 erroneous_txs.push(*tx);
829 continue
830 }
831 };
832
833 for call in &unproposed_tx.calls {
835 vks.entry(call.data.contract_id.to_bytes()).or_default();
836 }
837
838 self.overlay.lock().unwrap().checkpoint();
840 let gas_data = match verify_transaction(
841 &self.overlay,
842 verifying_block_height,
843 self.module.target,
844 &unproposed_tx,
845 &mut tree,
846 &mut vks,
847 verify_fees,
848 )
849 .await
850 {
851 Ok(gas_values) => gas_values,
852 Err(e) => {
853 debug!(target: "validator::consensus::unproposed_txs", "Transaction verification failed: {e}");
854 self.overlay.lock().unwrap().revert_to_checkpoint();
855 erroneous_txs.push(*tx);
856 continue
857 }
858 };
859
860 let tx_gas_used = gas_data.total_gas_used();
862
863 let accumulated_gas_usage = total_gas_used.saturating_add(tx_gas_used);
865
866 if accumulated_gas_usage > BLOCK_GAS_LIMIT {
869 warn!(
870 target: "validator::consensus::unproposed_txs",
871 "Retrieving transaction {tx} would exceed configured unproposed transaction gas limit: {accumulated_gas_usage} - {BLOCK_GAS_LIMIT}"
872 );
873 self.overlay.lock().unwrap().revert_to_checkpoint();
874 break
875 }
876
877 total_gas_used = total_gas_used.saturating_add(tx_gas_used);
879 total_gas_paid = total_gas_paid.saturating_add(gas_data.paid);
880
881 unproposed_txs.push(unproposed_tx);
883 }
884
885 self.mempool.retain(|tx| !erroneous_txs.contains(tx));
887
888 Ok((unproposed_txs, total_gas_used, total_gas_paid))
889 }
890
891 pub fn full_clone(&self) -> Result<Self> {
896 let blockchain = self.blockchain.clone();
897 let overlay = self.overlay.lock().unwrap().full_clone()?;
898 let module = self.module.clone();
899 let proposals = self.proposals.clone();
900 let diffs = self.diffs.clone();
901 let mempool = self.mempool.clone();
902 let targets_rank = self.targets_rank.clone();
903 let hashes_rank = self.hashes_rank.clone();
904
905 Ok(Self {
906 blockchain,
907 overlay,
908 module,
909 proposals,
910 diffs,
911 mempool,
912 targets_rank,
913 hashes_rank,
914 })
915 }
916
    /// Check the fork's contract states health.
    ///
    /// Errors when the overlay's contracts monotree root doesn't match the
    /// state root recorded in the fork's last proposal header.
    pub fn healthcheck(&self) -> Result<()> {
        let state_root = self.overlay.lock().unwrap().contracts.get_state_monotree_root()?;

        let last_block_state_root = self.last_proposal()?.block.header.state_root;
        if state_root != last_block_state_root {
            return Err(Error::ContractsStatesRootError(
                blake3::Hash::from_bytes(state_root).to_string(),
                blake3::Hash::from_bytes(last_block_state_root).to_string(),
            ));
        }

        Ok(())
    }
938
939 pub fn referenced_trees(&self, trees: &mut BTreeSet<IVec>) {
942 let fork_overlay = self.overlay.lock().unwrap();
944 let overlay = fork_overlay.overlay.lock().unwrap();
945
946 for initial_tree in &overlay.state.initial_tree_names {
948 trees.insert(initial_tree.clone());
949 }
950
951 for new_tree in &overlay.state.new_tree_names {
953 trees.insert(new_tree.clone());
954 }
955
956 for dropped_tree in overlay.state.dropped_trees.keys() {
958 trees.insert(dropped_tree.clone());
959 }
960
961 for protected_tree in &overlay.state.protected_tree_names {
963 trees.insert(protected_tree.clone());
964 }
965 }
966}