revm_interpreter/interpreter/stack.rs

use crate::InstructionResult;
use core::{fmt, ptr};
use primitives::U256;
use std::vec::Vec;

use super::StackTr;

/// EVM interpreter stack limit.
pub const STACK_LIMIT: usize = 1024;

/// EVM stack of 256-bit words, limited to [`STACK_LIMIT`] entries.
#[derive(Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
pub struct Stack {
    /// The underlying stack data.
    data: Vec<U256>,
}

impl fmt::Display for Stack {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("[")?;
        for (i, x) in self.data.iter().enumerate() {
            if i > 0 {
                f.write_str(", ")?;
            }
            write!(f, "{x}")?;
        }
        f.write_str("]")
    }
}

impl Default for Stack {
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

impl Clone for Stack {
    fn clone(&self) -> Self {
        // Use `Self::new()` so the clone keeps the full `STACK_LIMIT` capacity,
        // then copy the data over.
        let mut new_stack = Self::new();
        new_stack.data.extend_from_slice(&self.data);
        new_stack
    }
}

impl StackTr for Stack {
    fn len(&self) -> usize {
        self.len()
    }

    #[inline]
    fn clear(&mut self) {
        self.data.clear();
    }

    #[inline]
    fn popn<const N: usize>(&mut self) -> Option<[U256; N]> {
        if self.len() < N {
            return None;
        }
        // SAFETY: The stack length is checked above.
        Some(unsafe { self.popn::<N>() })
    }

    #[inline]
    fn popn_top<const POPN: usize>(&mut self) -> Option<([U256; POPN], &mut U256)> {
        if self.len() < POPN + 1 {
            return None;
        }
        // SAFETY: The stack length is checked above.
        Some(unsafe { self.popn_top::<POPN>() })
    }

    fn exchange(&mut self, n: usize, m: usize) -> bool {
        self.exchange(n, m)
    }

    fn dup(&mut self, n: usize) -> bool {
        self.dup(n)
    }

    fn push(&mut self, value: U256) -> bool {
        self.push(value)
    }
}

impl Stack {
    /// Instantiates a new stack with a capacity of [`STACK_LIMIT`].
    #[inline]
    pub fn new() -> Self {
        Self {
            // Note: `push` assumes that the capacity is always `STACK_LIMIT`.
            data: Vec::with_capacity(STACK_LIMIT),
        }
    }

    /// Creates an empty stack with zero capacity.
    ///
    /// Intended only as a cheap placeholder; it does not uphold the
    /// `STACK_LIMIT` capacity assumption made by `push`.
    #[inline]
    pub fn invalid() -> Self {
        Self { data: Vec::new() }
    }

    /// Returns the number of elements on the stack.
    #[inline]
    pub fn len(&self) -> usize {
        self.data.len()
    }

    /// Returns whether the stack is empty.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }

    /// Returns a reference to the underlying data buffer.
    #[inline]
    pub fn data(&self) -> &Vec<U256> {
        &self.data
    }

    /// Returns a mutable reference to the underlying data buffer.
    #[inline]
    pub fn data_mut(&mut self) -> &mut Vec<U256> {
        &mut self.data
    }

    /// Consumes the stack and returns the underlying data buffer.
    #[inline]
    pub fn into_data(self) -> Vec<U256> {
        self.data
    }

    /// Removes the topmost element from the stack and returns it, or
    /// `StackUnderflow` if the stack is empty.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn pop(&mut self) -> Result<U256, InstructionResult> {
        self.data.pop().ok_or(InstructionResult::StackUnderflow)
    }

    /// Removes the topmost element from the stack and returns it.
    ///
    /// # Safety
    ///
    /// The caller must ensure the stack is not empty.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn pop_unsafe(&mut self) -> U256 {
        self.data.pop().unwrap_unchecked()
    }

    /// Returns a mutable reference to the topmost element of the stack.
    ///
    /// # Safety
    ///
    /// The caller must ensure the stack is not empty.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn top_unsafe(&mut self) -> &mut U256 {
        let len = self.data.len();
        self.data.get_unchecked_mut(len - 1)
    }

    /// Pops `N` values from the top of the stack.
    ///
    /// # Safety
    ///
    /// The caller must ensure the stack holds at least `N` elements.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn popn<const N: usize>(&mut self) -> [U256; N] {
        if N == 0 {
            return [U256::ZERO; N];
        }
        let mut result = [U256::ZERO; N];
        for v in &mut result {
            *v = self.data.pop().unwrap_unchecked();
        }
        result
    }

    /// Pops `POPN` values and returns a mutable reference to the new topmost
    /// element.
    ///
    /// # Safety
    ///
    /// The caller must ensure the stack holds at least `POPN + 1` elements.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn popn_top<const POPN: usize>(&mut self) -> ([U256; POPN], &mut U256) {
        let result = self.popn::<POPN>();
        let top = self.top_unsafe();
        (result, top)
    }

    /// Pushes a new element onto the stack.
    ///
    /// Returns `false` (and leaves the stack unchanged) if the stack is full.
    #[inline]
    #[must_use]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn push(&mut self, value: U256) -> bool {
        // Allows the compiler to optimize out the `Vec::push` capacity check.
        assume!(self.data.capacity() == STACK_LIMIT);
        if self.data.len() == STACK_LIMIT {
            return false;
        }
        self.data.push(value);
        true
    }

    /// Returns the element `no_from_top` positions below the top of the stack
    /// without removing it, or `StackUnderflow` if the index is out of range.
    #[inline]
    pub fn peek(&self, no_from_top: usize) -> Result<U256, InstructionResult> {
        if self.data.len() > no_from_top {
            Ok(self.data[self.data.len() - no_from_top - 1])
        } else {
            Err(InstructionResult::StackUnderflow)
        }
    }

    /// Duplicates the `n`th value from the top of the stack onto the top.
    ///
    /// Returns `false` if `n` is out of range or the stack is full.
    ///
    /// # Panics
    ///
    /// Panics if `n` is 0.
    #[inline]
    #[must_use]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn dup(&mut self, n: usize) -> bool {
        assume!(n > 0, "attempted to dup 0");
        let len = self.data.len();
        if len < n || len + 1 > STACK_LIMIT {
            false
        } else {
            unsafe {
                // SAFETY: `len >= n` and `len + 1 <= STACK_LIMIT <= capacity`,
                // so both pointers are in bounds and do not overlap.
                let ptr = self.data.as_mut_ptr().add(len);
                ptr::copy_nonoverlapping(ptr.sub(n), ptr, 1);
                self.data.set_len(len + 1);
            }
            true
        }
    }

    /// Swaps the topmost value with the `n`th value from the top.
    ///
    /// # Panics
    ///
    /// Panics if `n` is 0.
    #[inline(always)]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn swap(&mut self, n: usize) -> bool {
        self.exchange(0, n)
    }

    /// Exchanges two values on the stack, where `n` is the first index from
    /// the top and `n + m` is the second.
    ///
    /// Returns `false` if either index is out of bounds.
    ///
    /// # Panics
    ///
    /// Panics if `m` is 0.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn exchange(&mut self, n: usize, m: usize) -> bool {
        assume!(m > 0, "overlapping exchange");
        let len = self.data.len();
        let n_m_index = n + m;
        if n_m_index >= len {
            return false;
        }
        unsafe {
            // SAFETY: both indices are checked against `len` above, and `m > 0`
            // guarantees the two pointers do not overlap, as required by
            // `swap_nonoverlapping`.
            let top = self.data.as_mut_ptr().add(len - 1);
            core::ptr::swap_nonoverlapping(top.sub(n), top.sub(n_m_index), 1);
        }
        true
    }

    /// Pushes a big-endian byte slice onto the stack, splitting it into
    /// 32-byte words and zero-padding the last word if needed.
    ///
    /// Returns `StackOverflow` if the resulting stack would exceed [`STACK_LIMIT`].
    #[inline]
    pub fn push_slice(&mut self, slice: &[u8]) -> Result<(), InstructionResult> {
        if slice.is_empty() {
            return Ok(());
        }

        let n_words = slice.len().div_ceil(32);
        let new_len = self.data.len() + n_words;
        if new_len > STACK_LIMIT {
            return Err(InstructionResult::StackOverflow);
        }

        // SAFETY: `new_len` is checked against `STACK_LIMIT` above, and the
        // buffer's capacity is assumed to be at least `STACK_LIMIT`.
        unsafe {
            let dst = self.data.as_mut_ptr().add(self.data.len()).cast::<u64>();
            self.data.set_len(new_len);

            let mut i = 0;

            // Write the full 32-byte words.
            let words = slice.chunks_exact(32);
            let partial_last_word = words.remainder();
            for word in words {
                // Write the limbs least-significant first: the slice is
                // big-endian, but limbs are stored in little-endian order.
                for l in word.rchunks_exact(8) {
                    dst.add(i).write(u64::from_be_bytes(l.try_into().unwrap()));
                    i += 1;
                }
            }

            if partial_last_word.is_empty() {
                return Ok(());
            }

            // Write the full limbs of the last, partial word.
            let limbs = partial_last_word.rchunks_exact(8);
            let partial_last_limb = limbs.remainder();
            for l in limbs {
                dst.add(i).write(u64::from_be_bytes(l.try_into().unwrap()));
                i += 1;
            }

            // Write the remaining bytes as a final limb, left-padded with zeros.
            if !partial_last_limb.is_empty() {
                let mut tmp = [0u8; 8];
                tmp[8 - partial_last_limb.len()..].copy_from_slice(partial_last_limb);
                dst.add(i).write(u64::from_be_bytes(tmp));
                i += 1;
            }

            debug_assert_eq!(i.div_ceil(4), n_words, "wrote too much");

            // Zero out the upper limbs of the last word.
            let m = i % 4;
            if m != 0 {
                dst.add(i).write_bytes(0, 4 - m);
            }
        }

        Ok(())
    }

    /// Sets the value `no_from_top` positions below the top of the stack, or
    /// returns `StackUnderflow` if the index is out of range.
    #[inline]
    pub fn set(&mut self, no_from_top: usize, val: U256) -> Result<(), InstructionResult> {
        if self.data.len() > no_from_top {
            let len = self.data.len();
            self.data[len - no_from_top - 1] = val;
            Ok(())
        } else {
            Err(InstructionResult::StackUnderflow)
        }
    }
}

#[cfg(feature = "serde")]
impl<'de> serde::Deserialize<'de> for Stack {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let mut data = Vec::<U256>::deserialize(deserializer)?;
        if data.len() > STACK_LIMIT {
            return Err(serde::de::Error::custom(std::format!(
                "stack size exceeds limit: {} > {}",
                data.len(),
                STACK_LIMIT
            )));
        }
        data.reserve(STACK_LIMIT - data.len());
        Ok(Self { data })
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    fn run(f: impl FnOnce(&mut Stack)) {
        let mut stack = Stack::new();
        // Pre-fill the buffer with garbage so tests catch reads of stale or
        // uninitialized memory, then reset the length to 0.
        unsafe {
            stack.data.set_len(STACK_LIMIT);
            stack.data.fill(U256::MAX);
            stack.data.set_len(0);
        }
        f(&mut stack);
    }

    #[test]
    fn push_slices() {
        run(|stack| {
            stack.push_slice(b"").unwrap();
            assert_eq!(stack.data, []);
        });

        run(|stack| {
            stack.push_slice(&[42]).unwrap();
            assert_eq!(stack.data, [U256::from(42)]);
        });

        let n = 0x1111_2222_3333_4444_5555_6666_7777_8888_u128;
        run(|stack| {
            stack.push_slice(&n.to_be_bytes()).unwrap();
            assert_eq!(stack.data, [U256::from(n)]);
        });

        run(|stack| {
            let b = [U256::from(n).to_be_bytes::<32>(); 2].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::from(n); 2]);
        });

        run(|stack| {
            let b = [&[0; 32][..], &[42u8]].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::ZERO, U256::from(42)]);
        });

        run(|stack| {
            let b = [&[0; 32][..], &n.to_be_bytes()].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::ZERO, U256::from(n)]);
        });

        run(|stack| {
            let b = [&[0; 64][..], &n.to_be_bytes()].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::ZERO, U256::ZERO, U256::from(n)]);
        });
    }
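
    // Illustrative sketch added alongside the existing tests (not part of the
    // original suite): exercises the pop/peek/set error behavior implemented
    // above.
    #[test]
    fn pop_peek_set() {
        run(|stack| {
            // All three report underflow on an empty stack.
            assert_eq!(stack.pop(), Err(InstructionResult::StackUnderflow));
            assert_eq!(stack.peek(0), Err(InstructionResult::StackUnderflow));
            assert_eq!(stack.set(0, U256::ZERO), Err(InstructionResult::StackUnderflow));

            assert!(stack.push(U256::from(1)));
            assert!(stack.push(U256::from(2)));

            // `peek(0)` is the top of the stack, `peek(1)` the value below it.
            assert_eq!(stack.peek(0), Ok(U256::from(2)));
            assert_eq!(stack.peek(1), Ok(U256::from(1)));

            // `set` addresses elements relative to the top as well.
            stack.set(1, U256::from(42)).unwrap();
            assert_eq!(stack.peek(1), Ok(U256::from(42)));

            assert_eq!(stack.pop(), Ok(U256::from(2)));
            assert_eq!(stack.pop(), Ok(U256::from(42)));
        });
    }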

    #[test]
    fn stack_clone() {
        // Clone an empty stack.
        let empty_stack = Stack::new();
        let cloned_empty = empty_stack.clone();
        assert_eq!(empty_stack, cloned_empty);
        assert_eq!(cloned_empty.len(), 0);
        assert_eq!(cloned_empty.data().capacity(), STACK_LIMIT);

        // Clone a partially filled stack.
        let mut partial_stack = Stack::new();
        for i in 0..10 {
            assert!(partial_stack.push(U256::from(i)));
        }
        let mut cloned_partial = partial_stack.clone();
        assert_eq!(partial_stack, cloned_partial);
        assert_eq!(cloned_partial.len(), 10);
        assert_eq!(cloned_partial.data().capacity(), STACK_LIMIT);

        // Modifying the clone must not affect the original.
        assert!(cloned_partial.push(U256::from(100)));
        assert_ne!(partial_stack, cloned_partial);
        assert_eq!(partial_stack.len(), 10);
        assert_eq!(cloned_partial.len(), 11);

        // Clone a full stack.
        let mut full_stack = Stack::new();
        for i in 0..STACK_LIMIT {
            assert!(full_stack.push(U256::from(i)));
        }
        let mut cloned_full = full_stack.clone();
        assert_eq!(full_stack, cloned_full);
        assert_eq!(cloned_full.len(), STACK_LIMIT);
        assert_eq!(cloned_full.data().capacity(), STACK_LIMIT);

        // Pushing to either full stack fails.
        assert!(!full_stack.push(U256::from(100)));
        assert!(!cloned_full.push(U256::from(100)));
    }
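
    // Illustrative sketch added alongside the existing tests (not part of the
    // original suite): demonstrates the dup/swap/exchange indexing implemented
    // above.
    #[test]
    fn dup_swap_exchange() {
        run(|stack| {
            // Out-of-range operations report failure instead of panicking.
            assert!(!stack.dup(1));
            assert!(!stack.swap(1));

            for i in 1..=3 {
                assert!(stack.push(U256::from(i)));
            }
            // Stack, bottom to top: [1, 2, 3].

            // `dup(2)` copies the second value from the top onto the top.
            assert!(stack.dup(2));
            assert_eq!(
                stack.data,
                [U256::from(1), U256::from(2), U256::from(3), U256::from(2)]
            );

            // `swap(1)` exchanges the top with the value directly below it.
            assert!(stack.swap(1));
            assert_eq!(
                stack.data,
                [U256::from(1), U256::from(2), U256::from(2), U256::from(3)]
            );

            // `exchange(n, m)` swaps the `n`th and `(n + m)`th values from the top.
            assert!(stack.exchange(1, 2));
            assert_eq!(
                stack.data,
                [U256::from(2), U256::from(2), U256::from(1), U256::from(3)]
            );

            // Indices past the end are rejected.
            assert!(!stack.exchange(1, 4));
            assert!(!stack.dup(5));
        });
    }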
}