darkfi/blockchain/
header_store.rs

/* This file is part of DarkFi (https://dark.fi)
 *
 * Copyright (C) 2020-2026 Dyne.org foundation
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */
18
19use std::{fmt, str::FromStr};
20
21use darkfi_sdk::{
22    blockchain::block_version,
23    crypto::{MerkleNode, MerkleTree},
24    monotree::{Hash as StateHash, EMPTY_HASH},
25};
26#[cfg(feature = "async-serial")]
27use darkfi_serial::{async_trait, FutAsyncWriteExt};
28use darkfi_serial::{deserialize, serialize, Encodable, SerialDecodable, SerialEncodable};
29use sled_overlay::sled;
30
31use crate::{util::time::Timestamp, Error, Result};
32
33use super::{
34    monero::{extract_aux_merkle_root, MoneroPowData},
35    parse_record, parse_u32_key_record, SledDbOverlayPtr,
36};
37
/// Struct representing the Proof of Work used in a block.
#[derive(Clone, Debug, SerialEncodable, SerialDecodable)]
// The `Monero` variant carries a full `MoneroPowData` payload, which trips
// clippy's `large_enum_variant` lint against the unit `DarkFi` variant;
// the allowance keeps the payload inline instead of boxing it.
#[allow(clippy::large_enum_variant)]
pub enum PowData {
    /// Native DarkFi PoW
    DarkFi,
    /// Monero merge mining PoW
    Monero(MoneroPowData),
}
47
48impl fmt::Display for PowData {
49    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
50        match self {
51            Self::DarkFi => write!(f, "PoW: DarkFi"),
52            Self::Monero(_) => write!(f, "PoW: Monero"),
53        }
54    }
55}
56
/// Hash identifying a block [`Header`], wrapping a raw 32-byte blake3 digest.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, SerialEncodable, SerialDecodable)]
// We have to introduce a type rather than using an alias so we can restrict API access.
pub struct HeaderHash(pub [u8; 32]);
60
61impl HeaderHash {
62    pub fn new(data: [u8; 32]) -> Self {
63        Self(data)
64    }
65
66    #[inline]
67    pub fn inner(&self) -> &[u8; 32] {
68        &self.0
69    }
70
71    pub fn as_string(&self) -> String {
72        blake3::Hash::from_bytes(self.0).to_string()
73    }
74
75    pub fn as_slice(&self) -> &[u8] {
76        self.0.as_slice()
77    }
78}
79
80impl FromStr for HeaderHash {
81    type Err = Error;
82
83    fn from_str(header_hash_str: &str) -> Result<Self> {
84        Ok(Self(*blake3::Hash::from_str(header_hash_str)?.as_bytes()))
85    }
86}
87
88impl fmt::Display for HeaderHash {
89    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
90        write!(f, "{}", self.as_string())
91    }
92}
93
/// Block header holding the metadata a block commits to: the version,
/// previous block hash, height, nonce, timestamp, transactions root,
/// contract-states root, and the Proof of Work data.
#[derive(Clone, Debug, SerialEncodable, SerialDecodable)]
pub struct Header {
    /// Block version
    pub version: u8,
    /// Previous block hash
    pub previous: HeaderHash,
    /// Block height
    pub height: u32,
    /// The block's nonce. This value changes arbitrarily with mining.
    pub nonce: u32,
    /// Block creation timestamp
    pub timestamp: Timestamp,
    /// Merkle tree root of the transactions hashes contained in this block
    pub transactions_root: MerkleNode,
    /// Contracts states Monotree(SMT) root this block commits to
    pub state_root: StateHash,
    /// Block Proof of Work type
    pub pow_data: PowData,
}
114
impl Header {
    /// Generates a new header with default transactions and state root,
    /// using DarkFi native Proof of Work data.
    pub fn new(previous: HeaderHash, height: u32, nonce: u32, timestamp: Timestamp) -> Self {
        // The block version is derived from the height, not supplied by the caller.
        let version = block_version(height);
        // Root of an empty Merkle tree, i.e. a header that commits to no transactions yet.
        let transactions_root = MerkleTree::new(1).root(0).unwrap();
        // Empty Monotree (SMT) root for the contract states.
        let state_root = *EMPTY_HASH;
        let pow_data = PowData::DarkFi;
        Self {
            version,
            previous,
            height,
            nonce,
            timestamp,
            transactions_root,
            state_root,
            pow_data,
        }
    }

    /// Compute the header's hash over its full serialized form,
    /// including the Proof of Work data.
    pub fn hash(&self) -> HeaderHash {
        let mut hasher = blake3::Hasher::new();

        // Blake3 hasher .update() method never fails.
        // This call returns a Result due to how the Write trait is specified.
        // Calling expect() here should be safe.
        self.encode(&mut hasher).expect("blake3 hasher");

        HeaderHash(hasher.finalize().into())
    }

    /// Compute the header's template hash, which excludes its Proof of Work data.
    ///
    /// NOTE(review): fields are encoded here with timestamp before nonce, which
    /// differs from the struct's declared field order used by the derived
    /// encoding in [`Header::hash`]. Presumably intentional (only internal
    /// consistency matters for this hash) — do not "fix" the order, as it
    /// would change the template hash of every existing block.
    pub fn template_hash(&self) -> HeaderHash {
        let mut hasher = blake3::Hasher::new();

        // Blake3 hasher .update() method never fails.
        // This call returns a Result due to how the Write trait is specified.
        // Calling expect() here should be safe.
        self.version.encode(&mut hasher).expect("blake3 hasher");
        self.previous.encode(&mut hasher).expect("blake3 hasher");
        self.height.encode(&mut hasher).expect("blake3 hasher");
        self.timestamp.encode(&mut hasher).expect("blake3 hasher");
        self.nonce.encode(&mut hasher).expect("blake3 hasher");
        self.transactions_root.encode(&mut hasher).expect("blake3 hasher");
        self.state_root.encode(&mut hasher).expect("blake3 hasher");

        HeaderHash(hasher.finalize().into())
    }

    /// Validate PowData from the header.
    ///
    /// Returns `true` for native DarkFi PoW (verified elsewhere). For Monero
    /// merge mining, checks the coinbase merkle root and that the aux-chain
    /// merkle proof commits to this header's template hash.
    pub fn validate_powdata(&self) -> bool {
        match &self.pow_data {
            // For native DarkFi PoW, this is handled so we just return `true`.
            PowData::DarkFi => true,
            // For Monero PoW, we have to check a few things.
            PowData::Monero(powdata) => {
                // First, the coinbase transaction must carry a valid merkle root.
                if !powdata.is_coinbase_valid_merkle_root() {
                    return false
                }

                // Verify that MoneroPowData correctly corresponds to this header.
                // The aux merkle root must be extractable from the coinbase tx extra.
                let Ok(Some(merkle_root)) = extract_aux_merkle_root(&powdata.coinbase_tx_extra)
                else {
                    return false
                };

                // The aux-chain merkle proof must resolve to that root when fed
                // this header's template hash (the hash excluding PoW data,
                // since the PoW data itself contains this proof).
                let aux_hash = monero::Hash::from(self.template_hash().inner());
                powdata.aux_chain_merkle_proof.calculate_root(&aux_hash) == merkle_root
            }
        }
    }

    /// Create a block hashing blob from this header.
    pub fn to_block_hashing_blob(&self) -> Vec<u8> {
        // For XMRig, we need to pad the blob so that our nonce ends
        // up at byte offset 39: 2 pad bytes + version + previous + height
        // precede the nonce in the serialized header (assuming 1-, 32- and
        // 4-byte encodings respectively — TODO confirm against darkfi_serial).
        let mut blob = vec![0x00, 0x00];
        blob.extend_from_slice(&serialize(self));
        blob
    }
}
197
198impl Default for Header {
199    /// Represents the genesis header on current timestamp.
200    fn default() -> Self {
201        Header::new(
202            HeaderHash::new(blake3::hash(b"Let there be dark!").into()),
203            0u32,
204            0u32,
205            Timestamp::current_time(),
206        )
207    }
208}
209
210impl fmt::Display for Header {
211    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
212        let s = format!(
213            "{} {{\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {:?}\n}}",
214            "Header",
215            "Hash",
216            self.hash(),
217            "Version",
218            self.version,
219            "Previous",
220            self.previous,
221            "Height",
222            self.height,
223            "Timestamp",
224            self.timestamp,
225            "Nonce",
226            self.nonce,
227            "Transactions Root",
228            self.transactions_root,
229            "State Root",
230            blake3::Hash::from_bytes(self.state_root),
231            "Proof of Work data",
232            self.pow_data,
233        );
234
235        write!(f, "{s}")
236    }
237}
238
/// Name of the `sled` tree holding all confirmed block headers.
pub const SLED_HEADER_TREE: &[u8] = b"_headers";
/// Name of the `sled` tree holding pending headers during sync.
pub const SLED_SYNC_HEADER_TREE: &[u8] = b"_sync_headers";
241
/// The `HeaderStore` is a structure representing all `sled` trees related
/// to storing the blockchain's blocks' header information.
#[derive(Clone)]
pub struct HeaderStore {
    /// Main `sled` tree, storing all the blockchain's blocks' headers,
    /// where the key is the headers' hash, and value is the serialized header.
    pub main: sled::Tree,
    /// The `sled` tree storing all the node pending headers while syncing,
    /// where the key is the height number, and the value is the serialized
    /// header.
    pub sync: sled::Tree,
}
254
255impl HeaderStore {
256    /// Opens a new or existing `HeaderStore` on the given sled database.
257    pub fn new(db: &sled::Db) -> Result<Self> {
258        let main = db.open_tree(SLED_HEADER_TREE)?;
259        let sync = db.open_tree(SLED_SYNC_HEADER_TREE)?;
260        Ok(Self { main, sync })
261    }
262
263    /// Insert a slice of [`Header`] into the store's main tree.
264    pub fn insert(&self, headers: &[Header]) -> Result<Vec<HeaderHash>> {
265        let (batch, ret) = self.insert_batch(headers);
266        self.main.apply_batch(batch)?;
267        Ok(ret)
268    }
269
270    /// Insert a slice of [`Header`] into the store's sync tree.
271    pub fn insert_sync(&self, headers: &[Header]) -> Result<()> {
272        let batch = self.insert_batch_sync(headers);
273        self.sync.apply_batch(batch)?;
274        Ok(())
275    }
276
277    /// Generate the sled batch corresponding to an insert to the main
278    /// tree, so caller can handle the write operation.
279    /// The header's hash() function output is used as the key,
280    /// while value is the serialized [`Header`] itself.
281    /// On success, the function returns the header hashes in the same
282    /// order, along with the corresponding operation batch.
283    pub fn insert_batch(&self, headers: &[Header]) -> (sled::Batch, Vec<HeaderHash>) {
284        let mut ret = Vec::with_capacity(headers.len());
285        let mut batch = sled::Batch::default();
286
287        for header in headers {
288            let headerhash = header.hash();
289            batch.insert(headerhash.inner(), serialize(header));
290            ret.push(headerhash);
291        }
292
293        (batch, ret)
294    }
295
296    /// Generate the sled batch corresponding to an insert to the sync
297    /// tree, so caller can handle the write operation.
298    /// The header height is used as the key, while value is the serialized
299    /// [`Header`] itself.
300    pub fn insert_batch_sync(&self, headers: &[Header]) -> sled::Batch {
301        let mut batch = sled::Batch::default();
302
303        for header in headers {
304            batch.insert(&header.height.to_be_bytes(), serialize(header));
305        }
306
307        batch
308    }
309
310    /// Check if the store's main tree contains a given header hash.
311    pub fn contains(&self, headerhash: &HeaderHash) -> Result<bool> {
312        Ok(self.main.contains_key(headerhash.inner())?)
313    }
314
315    /// Fetch given header hashes from the store's main tree.
316    /// The resulting vector contains `Option`, which is `Some` if the header
317    /// was found in the store's main tree, and otherwise it is `None`, if it
318    /// has not. The second parameter is a boolean which tells the function to
319    /// fail in case at least one header was not found.
320    pub fn get(&self, headerhashes: &[HeaderHash], strict: bool) -> Result<Vec<Option<Header>>> {
321        let mut ret = Vec::with_capacity(headerhashes.len());
322
323        for hash in headerhashes {
324            if let Some(found) = self.main.get(hash.inner())? {
325                let header = deserialize(&found)?;
326                ret.push(Some(header));
327                continue
328            }
329            if strict {
330                return Err(Error::HeaderNotFound(hash.as_string()))
331            }
332            ret.push(None);
333        }
334
335        Ok(ret)
336    }
337
338    /// Retrieve all headers from the store's main tree in the form of a tuple
339    /// (`headerhash`, `header`).
340    /// Be careful as this will try to load everything in memory.
341    pub fn get_all(&self) -> Result<Vec<(HeaderHash, Header)>> {
342        let mut headers = vec![];
343
344        for header in self.main.iter() {
345            headers.push(parse_record(header.unwrap())?);
346        }
347
348        Ok(headers)
349    }
350
351    /// Retrieve all headers from the store's sync tree in the form of a tuple
352    /// (`height`, `header`).
353    /// Be careful as this will try to load everything in memory.
354    pub fn get_all_sync(&self) -> Result<Vec<(u32, Header)>> {
355        let mut headers = vec![];
356
357        for record in self.sync.iter() {
358            headers.push(parse_u32_key_record(record.unwrap())?);
359        }
360
361        Ok(headers)
362    }
363
364    /// Fetch the fisrt header in the store's sync tree, based on the `Ord`
365    /// implementation for `Vec<u8>`.
366    pub fn get_first_sync(&self) -> Result<Option<Header>> {
367        let Some(found) = self.sync.first()? else { return Ok(None) };
368        let (_, header) = parse_u32_key_record(found)?;
369
370        Ok(Some(header))
371    }
372
373    /// Fetch the last header in the store's sync tree, based on the `Ord`
374    /// implementation for `Vec<u8>`.
375    pub fn get_last_sync(&self) -> Result<Option<Header>> {
376        let Some(found) = self.sync.last()? else { return Ok(None) };
377        let (_, header) = parse_u32_key_record(found)?;
378
379        Ok(Some(header))
380    }
381
382    /// Fetch n hashes after given height. In the iteration, if a header
383    /// height is not found, the iteration stops and the function returns what
384    /// it has found so far in the store's sync tree.
385    pub fn get_after_sync(&self, height: u32, n: usize) -> Result<Vec<Header>> {
386        let mut ret = vec![];
387
388        let mut key = height;
389        let mut counter = 0;
390        while counter < n {
391            if let Some(found) = self.sync.get_gt(key.to_be_bytes())? {
392                let (height, hash) = parse_u32_key_record(found)?;
393                key = height;
394                ret.push(hash);
395                counter += 1;
396                continue
397            }
398            break
399        }
400
401        Ok(ret)
402    }
403
404    /// Retrieve store's sync tree records count.
405    pub fn len_sync(&self) -> usize {
406        self.sync.len()
407    }
408
409    /// Check if store's sync tree contains any records.
410    pub fn is_empty_sync(&self) -> bool {
411        self.sync.is_empty()
412    }
413
414    /// Remove a slice of [`u32`] from the store's sync tree.
415    pub fn remove_sync(&self, heights: &[u32]) -> Result<()> {
416        let batch = self.remove_batch_sync(heights);
417        self.sync.apply_batch(batch)?;
418        Ok(())
419    }
420
421    /// Remove all records from the store's sync tree.
422    pub fn remove_all_sync(&self) -> Result<()> {
423        let headers = self.get_all_sync()?;
424        let heights = headers.iter().map(|h| h.0).collect::<Vec<u32>>();
425        let batch = self.remove_batch_sync(&heights);
426        self.sync.apply_batch(batch)?;
427        Ok(())
428    }
429
430    /// Generate the sled batch corresponding to a remove from the store's sync
431    /// tree, so caller can handle the write operation.
432    pub fn remove_batch_sync(&self, heights: &[u32]) -> sled::Batch {
433        let mut batch = sled::Batch::default();
434
435        for height in heights {
436            batch.remove(&height.to_be_bytes());
437        }
438
439        batch
440    }
441}
442
/// Overlay structure over a [`HeaderStore`] instance, buffering writes
/// through a shared sled overlay instead of committing them directly.
pub struct HeaderStoreOverlay(SledDbOverlayPtr);
445
446impl HeaderStoreOverlay {
447    pub fn new(overlay: &SledDbOverlayPtr) -> Result<Self> {
448        overlay.lock().unwrap().open_tree(SLED_HEADER_TREE, true)?;
449        Ok(Self(overlay.clone()))
450    }
451
452    /// Insert a slice of [`Header`] into the overlay.
453    /// The header's hash() function output is used as the key,
454    /// while value is the serialized [`Header`] itself.
455    /// On success, the function returns the header hashes in the same order.
456    pub fn insert(&self, headers: &[Header]) -> Result<Vec<HeaderHash>> {
457        let mut ret = Vec::with_capacity(headers.len());
458        let mut lock = self.0.lock().unwrap();
459
460        for header in headers {
461            let headerhash = header.hash();
462            lock.insert(SLED_HEADER_TREE, headerhash.inner(), &serialize(header))?;
463            ret.push(headerhash);
464        }
465
466        Ok(ret)
467    }
468
469    /// Fetch given headerhashes from the overlay.
470    /// The resulting vector contains `Option`, which is `Some` if the header
471    /// was found in the overlay, and otherwise it is `None`, if it has not.
472    /// The second parameter is a boolean which tells the function to fail in
473    /// case at least one header was not found.
474    pub fn get(&self, headerhashes: &[HeaderHash], strict: bool) -> Result<Vec<Option<Header>>> {
475        let mut ret = Vec::with_capacity(headerhashes.len());
476        let lock = self.0.lock().unwrap();
477
478        for hash in headerhashes {
479            if let Some(found) = lock.get(SLED_HEADER_TREE, hash.inner())? {
480                let header = deserialize(&found)?;
481                ret.push(Some(header));
482                continue
483            }
484            if strict {
485                return Err(Error::HeaderNotFound(hash.as_string()))
486            }
487            ret.push(None);
488        }
489
490        Ok(ret)
491    }
492}