// darkfi/blockchain/monero/fixed_array.rs
use std::{
    io::{self, Read, Write},
    marker::PhantomData,
    ops::{Deref, DerefMut},
};

#[cfg(feature = "async-serial")]
use darkfi_serial::{
    async_trait, AsyncDecodable, AsyncEncodable, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt,
};
use darkfi_serial::{Decodable, Encodable, ReadExt, SerialDecodable, SerialEncodable, WriteExt};
30
/// Maximum number of bytes a [`FixedByteArray`] can hold.
const MAX_ARR_SIZE: usize = 60;

/// A fixed-capacity, stack-allocated byte buffer.
///
/// Stores up to `MAX_ARR_SIZE` (60) bytes inline together with a one-byte
/// length, so `size_of::<FixedByteArray>() == MAX_ARR_SIZE + 1` and no heap
/// allocation is ever performed.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct FixedByteArray {
    /// Backing storage; only the first `len` bytes are meaningful.
    elems: [u8; MAX_ARR_SIZE],
    /// Number of initialized bytes in `elems` (always <= MAX_ARR_SIZE).
    len: u8,
}

impl FixedByteArray {
    /// Creates an empty array.
    pub fn new() -> Self {
        Default::default()
    }

    /// Returns the initialized prefix of the buffer.
    pub fn as_slice(&self) -> &[u8] {
        &self.elems[..self.len()]
    }

    /// Returns `true` when the buffer holds exactly `MAX_ARR_SIZE` bytes.
    #[inline]
    pub fn is_full(&self) -> bool {
        self.len() == MAX_ARR_SIZE
    }

    /// Number of initialized bytes.
    #[inline]
    pub fn len(&self) -> usize {
        self.len as usize
    }

    /// Returns `true` when no bytes are stored.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Copies the initialized bytes into a freshly allocated `Vec<u8>`.
    pub fn to_vec(&self) -> Vec<u8> {
        self.as_slice().to_vec()
    }

    /// Builds a `FixedByteArray` holding a copy of `bytes`.
    ///
    /// # Errors
    ///
    /// Returns `io::ErrorKind::OutOfMemory` if `bytes` is longer than
    /// `MAX_ARR_SIZE`.
    pub fn from_bytes(bytes: &[u8]) -> io::Result<Self> {
        if bytes.len() > MAX_ARR_SIZE {
            return Err(io::Error::new(io::ErrorKind::OutOfMemory, "Slice too large"))
        }

        // The bound check above guarantees `bytes.len()` fits both the
        // backing array and a `u8` (MAX_ARR_SIZE = 60 < 256), so the cast
        // and the slice copy below cannot fail.
        let len = bytes.len() as u8;
        let mut elems = [0u8; MAX_ARR_SIZE];
        elems[..bytes.len()].copy_from_slice(bytes);
        Ok(Self { elems, len })
    }
}

impl Deref for FixedByteArray {
    type Target = [u8];

    /// Dereferences to the initialized prefix only, never the spare capacity.
    fn deref(&self) -> &Self::Target {
        &self.elems[..self.len as usize]
    }
}

impl Default for FixedByteArray {
    /// An empty, zero-filled array.
    fn default() -> Self {
        Self { elems: [0u8; MAX_ARR_SIZE], len: 0 }
    }
}
103
104impl Encodable for FixedByteArray {
105 fn encode<S: Write>(&self, s: &mut S) -> io::Result<usize> {
106 let mut n = 1;
107 s.write_u8(self.len)?;
108 let data = self.as_slice();
109 for item in data.iter().take(self.len as usize) {
110 s.write_u8(*item)?;
111 n += 1;
112 }
113
114 Ok(n)
115 }
116}
117
#[cfg(feature = "async-serial")]
#[async_trait]
impl AsyncEncodable for FixedByteArray {
    /// Async mirror of [`Encodable::encode`]: 1-byte length prefix followed
    /// by the initialized bytes. Returns `1 + self.len()` bytes written.
    async fn encode_async<S: AsyncWrite + Unpin + Send>(&self, s: &mut S) -> io::Result<usize> {
        s.write_u8_async(self.len).await?;
        // `as_slice()` is already truncated to `self.len`, so the previous
        // extra `.take(self.len as usize)` was redundant.
        for &byte in self.as_slice() {
            s.write_u8_async(byte).await?;
        }

        Ok(1 + self.len())
    }
}
133
134impl Decodable for FixedByteArray {
135 fn decode<D: Read>(d: &mut D) -> io::Result<Self> {
136 let len = d.read_u8()? as usize;
137 if len > MAX_ARR_SIZE {
138 return Err(io::Error::new(
139 io::ErrorKind::InvalidInput,
140 format!("length exceeded max of 60 bytes for FixedByteArray: {len}"),
141 ));
142 }
143
144 let mut elems = [0u8; MAX_ARR_SIZE];
145 #[allow(clippy::needless_range_loop)]
146 for i in 0..len {
147 elems[i] = d.read_u8()?;
148 }
149
150 Ok(Self { elems, len: len as u8 })
151 }
152}
153
#[cfg(feature = "async-serial")]
#[async_trait]
impl AsyncDecodable for FixedByteArray {
    /// Async mirror of [`Decodable::decode`]: 1-byte length prefix followed
    /// by that many bytes.
    ///
    /// # Errors
    ///
    /// Returns `io::ErrorKind::InvalidInput` if the length prefix exceeds
    /// `MAX_ARR_SIZE`, preventing any out-of-bounds write into `elems`.
    async fn decode_async<D: AsyncRead + Unpin + Send>(d: &mut D) -> io::Result<Self> {
        let len = d.read_u8_async().await? as usize;
        if len > MAX_ARR_SIZE {
            // Interpolate the constant instead of hard-coding "60" so the
            // message cannot drift if MAX_ARR_SIZE ever changes.
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                format!("length exceeded max of {MAX_ARR_SIZE} bytes for FixedByteArray: {len}"),
            ));
        }

        let mut elems = [0u8; MAX_ARR_SIZE];
        // Iterate the destination slots directly; no index arithmetic and
        // no clippy::needless_range_loop suppression needed.
        for elem in elems.iter_mut().take(len) {
            *elem = d.read_u8_async().await?;
        }

        Ok(Self { elems, len: len as u8 })
    }
}
175
/// A `Vec<T>` wrapper whose element count is capped at the const generic
/// `MAX_SIZE`. The cap is enforced by the constructors and `push` in the
/// inherent impl, not by this definition itself.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, SerialEncodable, SerialDecodable)]
pub struct MaxSizeVec<T, const MAX_SIZE: usize>
where
    T: Send + Sync,
{
    // Backing storage; the impls keep `vec.len() <= MAX_SIZE`.
    vec: Vec<T>,
    // NOTE(review): `vec: Vec<T>` already owns `T`, so this `PhantomData<T>`
    // looks redundant — confirm the serial derives don't depend on it before
    // removing the field.
    _marker: PhantomData<T>,
}
185
186impl<T, const MAX_SIZE: usize> Default for MaxSizeVec<T, MAX_SIZE>
187where
188 T: Send + Sync,
189{
190 fn default() -> Self {
191 Self::new()
192 }
193}
194
195impl<T, const MAX_SIZE: usize> MaxSizeVec<T, MAX_SIZE>
196where
197 T: Send + Sync,
198{
199 pub fn new() -> Self {
201 Self { vec: Vec::new(), _marker: PhantomData }
202 }
203
204 pub fn new_with_data(data: Vec<T>) -> io::Result<Self> {
207 if data.len() > MAX_SIZE {
208 return Err(io::Error::new(io::ErrorKind::StorageFull, "Size exceeded"))
209 }
210
211 Ok(Self { vec: data, _marker: PhantomData })
212 }
213
214 pub fn from_items_truncate(items: Vec<T>) -> Self {
216 let len = std::cmp::min(items.len(), MAX_SIZE);
217 Self { vec: items.into_iter().take(len).collect(), _marker: PhantomData }
218 }
219
220 pub fn into_vec(self) -> Vec<T> {
222 self.vec
223 }
224
225 pub fn max_size(&self) -> usize {
227 MAX_SIZE
228 }
229
230 pub fn push(&mut self, item: T) -> io::Result<()> {
232 if self.vec.len() >= MAX_SIZE {
233 return Err(io::Error::new(io::ErrorKind::StorageFull, "Size exceeded"))
234 }
235
236 self.vec.push(item);
237 Ok(())
238 }
239}
240
241impl<T, const MAX_SIZE: usize> AsRef<[T]> for MaxSizeVec<T, MAX_SIZE>
242where
243 T: Send + Sync,
244{
245 fn as_ref(&self) -> &[T] {
246 &self.vec
247 }
248}
249
250impl<T, const MAX_SIZE: usize> Deref for MaxSizeVec<T, MAX_SIZE>
251where
252 T: Send + Sync,
253{
254 type Target = [T];
255
256 fn deref(&self) -> &Self::Target {
257 &self.vec
258 }
259}
260
261impl<T, const MAX_SIZE: usize> DerefMut for MaxSizeVec<T, MAX_SIZE>
262where
263 T: Send + Sync,
264{
265 fn deref_mut(&mut self) -> &mut Self::Target {
266 &mut self.vec
267 }
268}
269
270impl<T, const MAX_SIZE: usize> Iterator for MaxSizeVec<T, MAX_SIZE>
271where
272 T: Send + Sync,
273{
274 type Item = T;
275
276 fn next(&mut self) -> Option<Self::Item> {
277 if self.vec.is_empty() {
278 None
279 } else {
280 Some(self.vec.remove(0))
281 }
282 }
283}
284
285impl<T, const MAX_SIZE: usize> FromIterator<T> for MaxSizeVec<T, MAX_SIZE>
286where
287 T: Send + Sync,
288{
289 fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
290 let mut vec = vec![];
291 for item in iter {
292 if vec.len() >= MAX_SIZE {
293 break
294 }
295 vec.push(item);
296 }
297
298 Self { vec, _marker: PhantomData }
299 }
300}
301
#[cfg(test)]
mod tests {
    use super::*;

    // FixedByteArray must add only the single length byte of overhead on
    // top of its 60-byte backing array.
    #[test]
    fn assert_size() {
        assert_eq!(std::mem::size_of::<FixedByteArray>(), MAX_ARR_SIZE + 1);
    }

    // Decoding a stream whose first byte claims a huge length (0xff) must
    // return an error instead of panicking on an out-of-bounds write.
    #[test]
    fn capacity_overflow_does_not_panic() {
        let data = &[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f];
        let _result = FixedByteArray::decode(&mut data.as_slice()).unwrap_err();
    }

    // A length prefix of exactly MAX_ARR_SIZE decodes successfully; bumping
    // the prefix one past the limit must fail.
    #[test]
    fn length_check() {
        // Every byte is MAX_ARR_SIZE (60), so byte 0 doubles as the prefix.
        let mut buf = [u8::try_from(MAX_ARR_SIZE).unwrap(); MAX_ARR_SIZE + 1];
        let fixed_byte_array = FixedByteArray::decode(&mut buf.as_slice()).unwrap();
        assert_eq!(fixed_byte_array.len(), MAX_ARR_SIZE);
        buf[0] += 1;
        FixedByteArray::decode(&mut buf.as_slice()).unwrap_err();
    }
}