revm_interpreter/interpreter/stack.rs

use crate::InstructionResult;
use core::{fmt, ptr};
use primitives::U256;
use std::vec::Vec;

use super::StackTr;

/// EVM interpreter stack limit.
pub const STACK_LIMIT: usize = 1024;

/// EVM stack with [STACK_LIMIT] capacity of words.
#[derive(Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
pub struct Stack {
    /// The underlying data of the stack.
    data: Vec<U256>,
}

impl fmt::Display for Stack {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("[")?;
        for (i, x) in self.data.iter().enumerate() {
            if i > 0 {
                f.write_str(", ")?;
            }
            write!(f, "{x}")?;
        }
        f.write_str("]")
    }
}

impl Default for Stack {
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

impl Clone for Stack {
    fn clone(&self) -> Self {
        // Use `Self::new()` to ensure the cloned Stack maintains the STACK_LIMIT capacity,
        // and then copy the data. This preserves the invariant that Stack always has
        // STACK_LIMIT capacity, which is crucial for the safety and correctness of other methods.
        let mut new_stack = Self::new();
        new_stack.data.extend_from_slice(&self.data);
        new_stack
    }
}

impl StackTr for Stack {
    fn len(&self) -> usize {
        self.len()
    }

    #[inline]
    fn popn<const N: usize>(&mut self) -> Option<[U256; N]> {
        if self.len() < N {
            return None;
        }
        // SAFETY: Stack length is checked above.
        Some(unsafe { self.popn::<N>() })
    }

    #[inline]
    fn popn_top<const POPN: usize>(&mut self) -> Option<([U256; POPN], &mut U256)> {
        if self.len() < POPN + 1 {
            return None;
        }
        // SAFETY: Stack length is checked above.
        Some(unsafe { self.popn_top::<POPN>() })
    }

    fn exchange(&mut self, n: usize, m: usize) -> bool {
        self.exchange(n, m)
    }

    fn dup(&mut self, n: usize) -> bool {
        self.dup(n)
    }

    fn push(&mut self, value: U256) -> bool {
        self.push(value)
    }
}

impl Stack {
    /// Instantiate a new stack with the [default stack limit][STACK_LIMIT].
    #[inline]
    pub fn new() -> Self {
        Self {
            // SAFETY: Expansion functions assume that capacity is `STACK_LIMIT`.
            data: Vec::with_capacity(STACK_LIMIT),
        }
    }

    /// Returns the length of the stack in words.
    #[inline]
    pub fn len(&self) -> usize {
        self.data.len()
    }

    /// Returns whether the stack is empty.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }

    /// Returns a reference to the underlying data buffer.
    #[inline]
    pub fn data(&self) -> &Vec<U256> {
        &self.data
    }

    /// Returns a mutable reference to the underlying data buffer.
    #[inline]
    pub fn data_mut(&mut self) -> &mut Vec<U256> {
        &mut self.data
    }

    /// Consumes the stack and returns the underlying data buffer.
    #[inline]
    pub fn into_data(self) -> Vec<U256> {
        self.data
    }

    /// Removes the topmost element from the stack and returns it, or `StackUnderflow` if it is
    /// empty.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn pop(&mut self) -> Result<U256, InstructionResult> {
        self.data.pop().ok_or(InstructionResult::StackUnderflow)
    }

    /// Removes the topmost element from the stack and returns it.
    ///
    /// # Safety
    ///
    /// The caller is responsible for checking the length of the stack.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn pop_unsafe(&mut self) -> U256 {
        self.data.pop().unwrap_unchecked()
    }

    /// Peeks the top of the stack.
    ///
    /// # Safety
    ///
    /// The caller is responsible for checking the length of the stack.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn top_unsafe(&mut self) -> &mut U256 {
        let len = self.data.len();
        self.data.get_unchecked_mut(len - 1)
    }

    /// Pops `N` values from the stack.
    ///
    /// # Safety
    ///
    /// The caller is responsible for checking the length of the stack.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn popn<const N: usize>(&mut self) -> [U256; N] {
        if N == 0 {
            return [U256::ZERO; N];
        }
        let mut result = [U256::ZERO; N];
        for v in &mut result {
            *v = self.data.pop().unwrap_unchecked();
        }
        result
    }

    /// Pops `N` values from the stack and returns the top of the stack.
    ///
    /// # Safety
    ///
    /// The caller is responsible for checking the length of the stack.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn popn_top<const POPN: usize>(&mut self) -> ([U256; POPN], &mut U256) {
        let result = self.popn::<POPN>();
        let top = self.top_unsafe();
        (result, top)
    }

    /// Push a new value onto the stack.
    ///
    /// If pushing would exceed the stack limit, returns `false` and leaves the stack
    /// unchanged.
    #[inline]
    #[must_use]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn push(&mut self, value: U256) -> bool {
        // Allows the compiler to optimize out the `Vec::push` capacity check.
        assume!(self.data.capacity() == STACK_LIMIT);
        if self.data.len() == STACK_LIMIT {
            return false;
        }
        self.data.push(value);
        true
    }

    /// Peeks the value at the given index of the stack, where the top of
    /// the stack is at index `0`. If the index is too large,
    /// `InstructionResult::StackUnderflow` is returned.
    #[inline]
    pub fn peek(&self, no_from_top: usize) -> Result<U256, InstructionResult> {
        if self.data.len() > no_from_top {
            Ok(self.data[self.data.len() - no_from_top - 1])
        } else {
            Err(InstructionResult::StackUnderflow)
        }
    }

    /// Duplicates the `N`th value from the top of the stack.
    ///
    /// # Panics
    ///
    /// Panics if `n` is 0.
    #[inline]
    #[must_use]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn dup(&mut self, n: usize) -> bool {
        assume!(n > 0, "attempted to dup 0");
        let len = self.data.len();
        if len < n || len + 1 > STACK_LIMIT {
            false
        } else {
            // SAFETY: Check for out of bounds is done above and it makes this safe to do.
            unsafe {
                let ptr = self.data.as_mut_ptr().add(len);
                ptr::copy_nonoverlapping(ptr.sub(n), ptr, 1);
                self.data.set_len(len + 1);
            }
            true
        }
    }

    /// Swaps the topmost value with the `N`th value from the top.
    ///
    /// # Panics
    ///
    /// Panics if `n` is 0.
    #[inline(always)]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn swap(&mut self, n: usize) -> bool {
        self.exchange(0, n)
    }

    /// Exchanges two values on the stack.
    ///
    /// `n` is the first index, and the second index is calculated as `n + m`.
    ///
    /// # Panics
    ///
    /// Panics if `m` is zero.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn exchange(&mut self, n: usize, m: usize) -> bool {
        assume!(m > 0, "overlapping exchange");
        let len = self.data.len();
        let n_m_index = n + m;
        if n_m_index >= len {
            return false;
        }
        // SAFETY: `n` and `n_m_index` are checked to be within bounds above, and they don't overlap.
        unsafe {
            // Note: `ptr::swap_nonoverlapping` is more efficient than `slice::swap` or `ptr::swap`
            // because it assumes the pointers do not overlap, which eliminates an intermediate copy.
            // That assumption is known to hold here.
            let top = self.data.as_mut_ptr().add(len - 1);
            core::ptr::swap_nonoverlapping(top.sub(n), top.sub(n_m_index), 1);
        }
        true
    }

    /// Pushes an arbitrary-length slice of bytes onto the stack, padding the last word with zeros
    /// if necessary.
    #[inline]
    pub fn push_slice(&mut self, slice: &[u8]) -> Result<(), InstructionResult> {
        if slice.is_empty() {
            return Ok(());
        }

        let n_words = slice.len().div_ceil(32);
        let new_len = self.data.len() + n_words;
        if new_len > STACK_LIMIT {
            return Err(InstructionResult::StackOverflow);
        }

        // SAFETY: Length checked above.
        unsafe {
            let dst = self.data.as_mut_ptr().add(self.data.len()).cast::<u64>();
            self.data.set_len(new_len);

            let mut i = 0;

            // Write full words
            let words = slice.chunks_exact(32);
            let partial_last_word = words.remainder();
            for word in words {
                // Note: We unroll `U256::from_be_bytes` here to write directly into the buffer,
                // instead of creating a 32 byte array on the stack and then copying it over.
                for l in word.rchunks_exact(8) {
                    dst.add(i).write(u64::from_be_bytes(l.try_into().unwrap()));
                    i += 1;
                }
            }

            if partial_last_word.is_empty() {
                return Ok(());
            }

            // Write limbs of partial last word
            let limbs = partial_last_word.rchunks_exact(8);
            let partial_last_limb = limbs.remainder();
            for l in limbs {
                dst.add(i).write(u64::from_be_bytes(l.try_into().unwrap()));
                i += 1;
            }

            // Write partial last limb by padding with zeros
            if !partial_last_limb.is_empty() {
                let mut tmp = [0u8; 8];
                tmp[8 - partial_last_limb.len()..].copy_from_slice(partial_last_limb);
                dst.add(i).write(u64::from_be_bytes(tmp));
                i += 1;
            }

            debug_assert_eq!(i.div_ceil(4), n_words, "wrote too much");

            // Zero out upper bytes of last word
            let m = i % 4; // 32 / 8
            if m != 0 {
                dst.add(i).write_bytes(0, 4 - m);
            }
        }

        Ok(())
    }

    /// Sets a value at the given index of the stack, where the top of the
    /// stack is at index `0`. If the index is too large,
    /// `InstructionResult::StackUnderflow` is returned.
    #[inline]
    pub fn set(&mut self, no_from_top: usize, val: U256) -> Result<(), InstructionResult> {
        if self.data.len() > no_from_top {
            let len = self.data.len();
            self.data[len - no_from_top - 1] = val;
            Ok(())
        } else {
            Err(InstructionResult::StackUnderflow)
        }
    }
}

#[cfg(feature = "serde")]
impl<'de> serde::Deserialize<'de> for Stack {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let mut data = Vec::<U256>::deserialize(deserializer)?;
        if data.len() > STACK_LIMIT {
            return Err(serde::de::Error::custom(std::format!(
                "stack size exceeds limit: {} > {}",
                data.len(),
                STACK_LIMIT
            )));
        }
        data.reserve(STACK_LIMIT - data.len());
        Ok(Self { data })
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    fn run(f: impl FnOnce(&mut Stack)) {
        let mut stack = Stack::new();
        // Fill capacity with non-zero values
        unsafe {
            stack.data.set_len(STACK_LIMIT);
            stack.data.fill(U256::MAX);
            stack.data.set_len(0);
        }
        f(&mut stack);
    }
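
    // Illustrative sketch (hypothetical test, not from the original suite): shows the
    // documented behavior that `popn` yields popped words top-first and that `popn_top`
    // additionally hands back a mutable reference to the new top.
    #[test]
    fn popn_ordering() {
        run(|stack| {
            for i in 1..=4u64 {
                assert!(stack.push(U256::from(i)));
            }
            // SAFETY: Four values were just pushed, so popping two is in bounds.
            let [a, b] = unsafe { stack.popn::<2>() };
            assert_eq!([a, b], [U256::from(4), U256::from(3)]);

            // SAFETY: Two values remain, enough to pop one and still have a live top.
            let ([c], top) = unsafe { stack.popn_top::<1>() };
            assert_eq!(c, U256::from(2));
            assert_eq!(*top, U256::from(1));
        });
    }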

    #[test]
    fn push_slices() {
        // No-op
        run(|stack| {
            stack.push_slice(b"").unwrap();
            assert_eq!(stack.data, []);
        });

        // One word
        run(|stack| {
            stack.push_slice(&[42]).unwrap();
            assert_eq!(stack.data, [U256::from(42)]);
        });

        let n = 0x1111_2222_3333_4444_5555_6666_7777_8888_u128;
        run(|stack| {
            stack.push_slice(&n.to_be_bytes()).unwrap();
            assert_eq!(stack.data, [U256::from(n)]);
        });

        // More than one word
        run(|stack| {
            let b = [U256::from(n).to_be_bytes::<32>(); 2].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::from(n); 2]);
        });

        run(|stack| {
            let b = [&[0; 32][..], &[42u8]].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::ZERO, U256::from(42)]);
        });

        run(|stack| {
            let b = [&[0; 32][..], &n.to_be_bytes()].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::ZERO, U256::from(n)]);
        });

        run(|stack| {
            let b = [&[0; 64][..], &n.to_be_bytes()].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::ZERO, U256::ZERO, U256::from(n)]);
        });
    }
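
    // Illustrative sketch (hypothetical test, not from the original suite): spells out
    // the top-relative indexing used by `dup`, `swap` and `exchange`.
    #[test]
    fn dup_swap_exchange_semantics() {
        run(|stack| {
            assert!(stack.push(U256::from(1)));
            assert!(stack.push(U256::from(2)));
            assert!(stack.push(U256::from(3)));

            // `dup(1)` duplicates the topmost value.
            assert!(stack.dup(1));
            assert_eq!(stack.peek(0), Ok(U256::from(3)));
            assert_eq!(stack.peek(1), Ok(U256::from(3)));

            // `swap(n)` is `exchange(0, n)`: swap the top with the value `n` slots below it.
            assert!(stack.swap(3));
            assert_eq!(stack.peek(0), Ok(U256::from(1)));
            assert_eq!(stack.peek(3), Ok(U256::from(3)));

            // Out-of-range indices leave the stack unchanged and return `false`.
            assert!(!stack.dup(10));
            assert!(!stack.exchange(2, 10));
        });
    }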

    #[test]
    fn stack_clone() {
        // Test cloning an empty stack
        let empty_stack = Stack::new();
        let cloned_empty = empty_stack.clone();
        assert_eq!(empty_stack, cloned_empty);
        assert_eq!(cloned_empty.len(), 0);
        assert_eq!(cloned_empty.data().capacity(), STACK_LIMIT);

        // Test cloning a partially filled stack
        let mut partial_stack = Stack::new();
        for i in 0..10 {
            assert!(partial_stack.push(U256::from(i)));
        }
        let mut cloned_partial = partial_stack.clone();
        assert_eq!(partial_stack, cloned_partial);
        assert_eq!(cloned_partial.len(), 10);
        assert_eq!(cloned_partial.data().capacity(), STACK_LIMIT);

        // Test that modifying the clone doesn't affect the original
        assert!(cloned_partial.push(U256::from(100)));
        assert_ne!(partial_stack, cloned_partial);
        assert_eq!(partial_stack.len(), 10);
        assert_eq!(cloned_partial.len(), 11);

        // Test cloning a full stack
        let mut full_stack = Stack::new();
        for i in 0..STACK_LIMIT {
            assert!(full_stack.push(U256::from(i)));
        }
        let mut cloned_full = full_stack.clone();
        assert_eq!(full_stack, cloned_full);
        assert_eq!(cloned_full.len(), STACK_LIMIT);
        assert_eq!(cloned_full.data().capacity(), STACK_LIMIT);

        // Test that pushing to a full stack (original or clone) fails and returns `false`
        assert!(!full_stack.push(U256::from(100)));
        assert!(!cloned_full.push(U256::from(100)));
    }
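
    // Illustrative sketch (hypothetical test, not from the original suite): `peek` and
    // `set` index from the top of the stack, and out-of-range accesses underflow.
    #[test]
    fn peek_set_and_errors() {
        run(|stack| {
            assert!(stack.push(U256::from(10)));
            assert!(stack.push(U256::from(20)));

            // Index 0 is the top of the stack.
            assert_eq!(stack.peek(0), Ok(U256::from(20)));
            assert_eq!(stack.peek(1), Ok(U256::from(10)));
            assert_eq!(stack.peek(2), Err(InstructionResult::StackUnderflow));

            // `set` uses the same top-relative indexing.
            assert_eq!(stack.set(1, U256::from(11)), Ok(()));
            assert_eq!(stack.peek(1), Ok(U256::from(11)));

            // `pop` returns values in LIFO order and underflows once the stack is empty.
            assert_eq!(stack.pop(), Ok(U256::from(20)));
            assert_eq!(stack.pop(), Ok(U256::from(11)));
            assert_eq!(stack.pop(), Err(InstructionResult::StackUnderflow));
        });
    }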
479}