1use std::{fmt, str::FromStr};
20
21use darkfi_sdk::{
22 blockchain::block_version,
23 crypto::{MerkleNode, MerkleTree},
24 monotree::{Hash as StateHash, EMPTY_HASH},
25};
26#[cfg(feature = "async-serial")]
27use darkfi_serial::{async_trait, FutAsyncWriteExt};
28use darkfi_serial::{deserialize, serialize, Encodable, SerialDecodable, SerialEncodable};
29use sled_overlay::sled;
30
31use crate::{util::time::Timestamp, Error, Result};
32
33use super::{
34 monero::{extract_aux_merkle_root, MoneroPowData},
35 parse_record, parse_u32_key_record, SledDbOverlayPtr,
36};
37
/// Proof-of-Work data carried by a block [`Header`].
#[derive(Clone, Debug, SerialEncodable, SerialDecodable)]
// The Monero variant is much larger than the unit DarkFi variant,
// hence the explicit clippy allow.
#[allow(clippy::large_enum_variant)]
pub enum PowData {
    /// Native DarkFi Proof-of-Work (no extra payload)
    DarkFi,
    /// Merge-mined Monero Proof-of-Work, carrying the Monero-side proof
    Monero(MoneroPowData),
}
47
48impl fmt::Display for PowData {
49 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
50 match self {
51 Self::DarkFi => write!(f, "PoW: DarkFi"),
52 Self::Monero(_) => write!(f, "PoW: Monero"),
53 }
54 }
55}
56
/// Newtype wrapping a header's 32-byte blake3 digest.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, SerialEncodable, SerialDecodable)]
pub struct HeaderHash(pub [u8; 32]);
60
61impl HeaderHash {
62 pub fn new(data: [u8; 32]) -> Self {
63 Self(data)
64 }
65
66 #[inline]
67 pub fn inner(&self) -> &[u8; 32] {
68 &self.0
69 }
70
71 pub fn as_string(&self) -> String {
72 blake3::Hash::from_bytes(self.0).to_string()
73 }
74
75 pub fn as_slice(&self) -> &[u8] {
76 self.0.as_slice()
77 }
78}
79
80impl FromStr for HeaderHash {
81 type Err = Error;
82
83 fn from_str(header_hash_str: &str) -> Result<Self> {
84 Ok(Self(*blake3::Hash::from_str(header_hash_str)?.as_bytes()))
85 }
86}
87
88impl fmt::Display for HeaderHash {
89 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
90 write!(f, "{}", self.as_string())
91 }
92}
93
/// Block header structure.
#[derive(Clone, Debug, SerialEncodable, SerialDecodable)]
pub struct Header {
    /// Block version, derived from the block height (see `Header::new`)
    pub version: u8,
    /// Hash of the previous block's header
    pub previous: HeaderHash,
    /// Block height
    pub height: u32,
    /// Mining nonce
    pub nonce: u32,
    /// Block creation timestamp
    pub timestamp: Timestamp,
    /// Merkle root of the block's transactions
    pub transactions_root: MerkleNode,
    /// Monotree state root
    pub state_root: StateHash,
    /// Proof-of-Work data attached to this header
    pub pow_data: PowData,
}
114
impl Header {
    /// Instantiate a new [`Header`] on top of `previous` for the given height.
    ///
    /// The version is derived from the height via `block_version`. The
    /// transactions root defaults to an empty Merkle tree root, the state
    /// root to the empty monotree hash, and the PoW data to native DarkFi.
    pub fn new(previous: HeaderHash, height: u32, nonce: u32, timestamp: Timestamp) -> Self {
        let version = block_version(height);
        // Root of a fresh single-capacity tree; root(0) is always Some here.
        let transactions_root = MerkleTree::new(1).root(0).unwrap();
        let state_root = *EMPTY_HASH;
        let pow_data = PowData::DarkFi;
        Self {
            version,
            previous,
            height,
            nonce,
            timestamp,
            transactions_root,
            state_root,
            pow_data,
        }
    }

    /// Compute the header's blake3 hash over its full canonical encoding,
    /// including the `pow_data` field.
    pub fn hash(&self) -> HeaderHash {
        let mut hasher = blake3::Hasher::new();

        // Writing into a blake3 hasher is infallible, hence the expect.
        self.encode(&mut hasher).expect("blake3 hasher");

        HeaderHash(hasher.finalize().into())
    }

    /// Compute the header's template hash: a blake3 hash over every field
    /// EXCEPT `pow_data`, in the fixed order below. Note the encoding order
    /// differs from the struct's field order (timestamp before nonce); this
    /// order is consensus-relevant and must not change.
    ///
    /// `validate_powdata` uses this hash as the aux-chain hash for Monero
    /// merge mining, which is why the PoW proof itself is excluded.
    pub fn template_hash(&self) -> HeaderHash {
        let mut hasher = blake3::Hasher::new();

        self.version.encode(&mut hasher).expect("blake3 hasher");
        self.previous.encode(&mut hasher).expect("blake3 hasher");
        self.height.encode(&mut hasher).expect("blake3 hasher");
        self.timestamp.encode(&mut hasher).expect("blake3 hasher");
        self.nonce.encode(&mut hasher).expect("blake3 hasher");
        self.transactions_root.encode(&mut hasher).expect("blake3 hasher");
        self.state_root.encode(&mut hasher).expect("blake3 hasher");

        HeaderHash(hasher.finalize().into())
    }

    /// Validate the header's PoW data.
    ///
    /// Native DarkFi PoW data carries no payload and is trivially valid.
    /// Monero merge-mining data is valid iff:
    /// 1. the coinbase transaction commits to a valid merkle root,
    /// 2. an aux-chain merkle root can be extracted from the coinbase
    ///    transaction's extra field, and
    /// 3. the aux-chain merkle proof links this header's template hash
    ///    to that extracted root.
    pub fn validate_powdata(&self) -> bool {
        match &self.pow_data {
            PowData::DarkFi => true,
            PowData::Monero(powdata) => {
                if !powdata.is_coinbase_valid_merkle_root() {
                    return false
                }

                // Malformed or absent aux merkle root means invalid PoW data.
                let Ok(Some(merkle_root)) = extract_aux_merkle_root(&powdata.coinbase_tx_extra)
                else {
                    return false
                };

                let aux_hash = monero::Hash::from(self.template_hash().inner());
                powdata.aux_chain_merkle_proof.calculate_root(&aux_hash) == merkle_root
            }
        }
    }

    /// Serialize the header into a block hashing blob, prefixed with two
    /// zero bytes.
    // NOTE(review): the meaning of the 2-byte 0x00 prefix isn't visible
    // here — presumably a version/reserved marker for the hashing blob
    // format; confirm against the consumer of this blob.
    pub fn to_block_hashing_blob(&self) -> Vec<u8> {
        let mut blob = vec![0x00, 0x00];
        blob.extend_from_slice(&serialize(self));
        blob
    }
}
197
198impl Default for Header {
199 fn default() -> Self {
201 Header::new(
202 HeaderHash::new(blake3::hash(b"Let there be dark!").into()),
203 0u32,
204 0u32,
205 Timestamp::current_time(),
206 )
207 }
208}
209
210impl fmt::Display for Header {
211 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
212 let s = format!(
213 "{} {{\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {:?}\n}}",
214 "Header",
215 "Hash",
216 self.hash(),
217 "Version",
218 self.version,
219 "Previous",
220 self.previous,
221 "Height",
222 self.height,
223 "Timestamp",
224 self.timestamp,
225 "Nonce",
226 self.nonce,
227 "Transactions Root",
228 self.transactions_root,
229 "State Root",
230 blake3::Hash::from_bytes(self.state_root),
231 "Proof of Work data",
232 self.pow_data,
233 );
234
235 write!(f, "{s}")
236 }
237}
238
/// Name of the sled tree storing canonical headers, keyed by header hash
pub const SLED_HEADER_TREE: &[u8] = b"_headers";
/// Name of the sled tree storing headers during sync, keyed by height
pub const SLED_SYNC_HEADER_TREE: &[u8] = b"_sync_headers";
241
/// Sled-backed store for block headers.
#[derive(Clone)]
pub struct HeaderStore {
    /// Main tree: headers keyed by their blake3 header hash
    pub main: sled::Tree,
    /// Sync tree: in-flight headers keyed by big-endian encoded height,
    /// so iteration order follows chain order
    pub sync: sled::Tree,
}
254
255impl HeaderStore {
256 pub fn new(db: &sled::Db) -> Result<Self> {
258 let main = db.open_tree(SLED_HEADER_TREE)?;
259 let sync = db.open_tree(SLED_SYNC_HEADER_TREE)?;
260 Ok(Self { main, sync })
261 }
262
263 pub fn insert(&self, headers: &[Header]) -> Result<Vec<HeaderHash>> {
265 let (batch, ret) = self.insert_batch(headers);
266 self.main.apply_batch(batch)?;
267 Ok(ret)
268 }
269
270 pub fn insert_sync(&self, headers: &[Header]) -> Result<()> {
272 let batch = self.insert_batch_sync(headers);
273 self.sync.apply_batch(batch)?;
274 Ok(())
275 }
276
277 pub fn insert_batch(&self, headers: &[Header]) -> (sled::Batch, Vec<HeaderHash>) {
284 let mut ret = Vec::with_capacity(headers.len());
285 let mut batch = sled::Batch::default();
286
287 for header in headers {
288 let headerhash = header.hash();
289 batch.insert(headerhash.inner(), serialize(header));
290 ret.push(headerhash);
291 }
292
293 (batch, ret)
294 }
295
296 pub fn insert_batch_sync(&self, headers: &[Header]) -> sled::Batch {
301 let mut batch = sled::Batch::default();
302
303 for header in headers {
304 batch.insert(&header.height.to_be_bytes(), serialize(header));
305 }
306
307 batch
308 }
309
310 pub fn contains(&self, headerhash: &HeaderHash) -> Result<bool> {
312 Ok(self.main.contains_key(headerhash.inner())?)
313 }
314
315 pub fn get(&self, headerhashes: &[HeaderHash], strict: bool) -> Result<Vec<Option<Header>>> {
321 let mut ret = Vec::with_capacity(headerhashes.len());
322
323 for hash in headerhashes {
324 if let Some(found) = self.main.get(hash.inner())? {
325 let header = deserialize(&found)?;
326 ret.push(Some(header));
327 continue
328 }
329 if strict {
330 return Err(Error::HeaderNotFound(hash.as_string()))
331 }
332 ret.push(None);
333 }
334
335 Ok(ret)
336 }
337
338 pub fn get_all(&self) -> Result<Vec<(HeaderHash, Header)>> {
342 let mut headers = vec![];
343
344 for header in self.main.iter() {
345 headers.push(parse_record(header.unwrap())?);
346 }
347
348 Ok(headers)
349 }
350
351 pub fn get_all_sync(&self) -> Result<Vec<(u32, Header)>> {
355 let mut headers = vec![];
356
357 for record in self.sync.iter() {
358 headers.push(parse_u32_key_record(record.unwrap())?);
359 }
360
361 Ok(headers)
362 }
363
364 pub fn get_first_sync(&self) -> Result<Option<Header>> {
367 let Some(found) = self.sync.first()? else { return Ok(None) };
368 let (_, header) = parse_u32_key_record(found)?;
369
370 Ok(Some(header))
371 }
372
373 pub fn get_last_sync(&self) -> Result<Option<Header>> {
376 let Some(found) = self.sync.last()? else { return Ok(None) };
377 let (_, header) = parse_u32_key_record(found)?;
378
379 Ok(Some(header))
380 }
381
382 pub fn get_after_sync(&self, height: u32, n: usize) -> Result<Vec<Header>> {
386 let mut ret = vec![];
387
388 let mut key = height;
389 let mut counter = 0;
390 while counter < n {
391 if let Some(found) = self.sync.get_gt(key.to_be_bytes())? {
392 let (height, hash) = parse_u32_key_record(found)?;
393 key = height;
394 ret.push(hash);
395 counter += 1;
396 continue
397 }
398 break
399 }
400
401 Ok(ret)
402 }
403
404 pub fn len_sync(&self) -> usize {
406 self.sync.len()
407 }
408
409 pub fn is_empty_sync(&self) -> bool {
411 self.sync.is_empty()
412 }
413
414 pub fn remove_sync(&self, heights: &[u32]) -> Result<()> {
416 let batch = self.remove_batch_sync(heights);
417 self.sync.apply_batch(batch)?;
418 Ok(())
419 }
420
421 pub fn remove_all_sync(&self) -> Result<()> {
423 let headers = self.get_all_sync()?;
424 let heights = headers.iter().map(|h| h.0).collect::<Vec<u32>>();
425 let batch = self.remove_batch_sync(&heights);
426 self.sync.apply_batch(batch)?;
427 Ok(())
428 }
429
430 pub fn remove_batch_sync(&self, heights: &[u32]) -> sled::Batch {
433 let mut batch = sled::Batch::default();
434
435 for height in heights {
436 batch.remove(&height.to_be_bytes());
437 }
438
439 batch
440 }
441}
442
/// Overlay view over the main headers tree, backed by a shared sled overlay pointer.
pub struct HeaderStoreOverlay(SledDbOverlayPtr);
445
446impl HeaderStoreOverlay {
447 pub fn new(overlay: &SledDbOverlayPtr) -> Result<Self> {
448 overlay.lock().unwrap().open_tree(SLED_HEADER_TREE, true)?;
449 Ok(Self(overlay.clone()))
450 }
451
452 pub fn insert(&self, headers: &[Header]) -> Result<Vec<HeaderHash>> {
457 let mut ret = Vec::with_capacity(headers.len());
458 let mut lock = self.0.lock().unwrap();
459
460 for header in headers {
461 let headerhash = header.hash();
462 lock.insert(SLED_HEADER_TREE, headerhash.inner(), &serialize(header))?;
463 ret.push(headerhash);
464 }
465
466 Ok(ret)
467 }
468
469 pub fn get(&self, headerhashes: &[HeaderHash], strict: bool) -> Result<Vec<Option<Header>>> {
475 let mut ret = Vec::with_capacity(headerhashes.len());
476 let lock = self.0.lock().unwrap();
477
478 for hash in headerhashes {
479 if let Some(found) = lock.get(SLED_HEADER_TREE, hash.inner())? {
480 let header = deserialize(&found)?;
481 ret.push(Some(header));
482 continue
483 }
484 if strict {
485 return Err(Error::HeaderNotFound(hash.as_string()))
486 }
487 ret.push(None);
488 }
489
490 Ok(ret)
491 }
492}