revm_interpreter/interpreter/stack.rs

use crate::InstructionResult;
use core::{fmt, ptr};
use primitives::U256;
use std::vec::Vec;

use super::StackTr;

/// EVM interpreter stack limit.
pub const STACK_LIMIT: usize = 1024;

/// EVM stack with a fixed capacity of [STACK_LIMIT] words.
#[derive(Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
pub struct Stack {
    /// The underlying data of the stack.
    data: Vec<U256>,
}

impl fmt::Display for Stack {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("[")?;
        for (i, x) in self.data.iter().enumerate() {
            if i > 0 {
                f.write_str(", ")?;
            }
            write!(f, "{x}")?;
        }
        f.write_str("]")
    }
}

impl Default for Stack {
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

impl Clone for Stack {
    fn clone(&self) -> Self {
        // Use `Self::new()` to ensure the cloned Stack is constructed with at least
        // STACK_LIMIT capacity, and then copy the data. This preserves the invariant
        // that Stack has sufficient capacity for operations that rely on it.
        let mut new_stack = Self::new();
        new_stack.data.extend_from_slice(&self.data);
        new_stack
    }
}

impl StackTr for Stack {
    #[inline]
    fn len(&self) -> usize {
        self.len()
    }

    #[inline]
    fn data(&self) -> &[U256] {
        &self.data
    }

    #[inline]
    fn clear(&mut self) {
        self.data.clear();
    }

    #[inline]
    fn popn<const N: usize>(&mut self) -> Option<[U256; N]> {
        if self.len() < N {
            return None;
        }
        // SAFETY: Stack length is checked above.
        Some(unsafe { self.popn::<N>() })
    }

    #[inline]
    fn popn_top<const POPN: usize>(&mut self) -> Option<([U256; POPN], &mut U256)> {
        if self.len() < POPN + 1 {
            return None;
        }
        // SAFETY: Stack length is checked above.
        Some(unsafe { self.popn_top::<POPN>() })
    }

    #[inline]
    fn exchange(&mut self, n: usize, m: usize) -> bool {
        self.exchange(n, m)
    }

    #[inline]
    fn dup(&mut self, n: usize) -> bool {
        self.dup(n)
    }

    #[inline]
    fn push(&mut self, value: U256) -> bool {
        self.push(value)
    }

    #[inline]
    fn push_slice(&mut self, slice: &[u8]) -> bool {
        self.push_slice_(slice)
    }
}

impl Stack {
    /// Instantiate a new stack with the [default stack limit][STACK_LIMIT].
    #[inline]
    pub fn new() -> Self {
        Self {
            // SAFETY: Expansion functions assume that capacity is `STACK_LIMIT`.
            data: Vec::with_capacity(STACK_LIMIT),
        }
    }

    /// Instantiate a new invalid Stack.
    #[inline]
    pub fn invalid() -> Self {
        Self { data: Vec::new() }
    }

    /// Returns the length of the stack in words.
    #[inline]
    pub fn len(&self) -> usize {
        self.data.len()
    }

    /// Returns whether the stack is empty.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }

    /// Returns a reference to the underlying data buffer.
    #[inline]
    pub fn data(&self) -> &Vec<U256> {
        &self.data
    }

    /// Returns a mutable reference to the underlying data buffer.
    #[inline]
    pub fn data_mut(&mut self) -> &mut Vec<U256> {
        &mut self.data
    }

    /// Consumes the stack and returns the underlying data buffer.
    #[inline]
    pub fn into_data(self) -> Vec<U256> {
        self.data
    }

    /// Removes the topmost element from the stack and returns it, or `StackUnderflow` if it is
    /// empty.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn pop(&mut self) -> Result<U256, InstructionResult> {
        self.data.pop().ok_or(InstructionResult::StackUnderflow)
    }

    /// Removes the topmost element from the stack and returns it.
    ///
    /// # Safety
    ///
    /// The caller is responsible for checking the length of the stack.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn pop_unsafe(&mut self) -> U256 {
        assume!(!self.data.is_empty());
        self.data.pop().unwrap_unchecked()
    }

    /// Peeks the top of the stack.
    ///
    /// # Safety
    ///
    /// The caller is responsible for checking the length of the stack.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn top_unsafe(&mut self) -> &mut U256 {
        assume!(!self.data.is_empty());
        self.data.last_mut().unwrap_unchecked()
    }

    /// Pops `N` values from the stack.
    ///
    /// # Safety
    ///
    /// The caller is responsible for checking the length of the stack.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn popn<const N: usize>(&mut self) -> [U256; N] {
        assume!(self.data.len() >= N);
        core::array::from_fn(|_| unsafe { self.pop_unsafe() })
    }

    /// Pops `N` values from the stack and returns them along with a mutable
    /// reference to the new top of the stack.
    ///
    /// # Safety
    ///
    /// The caller is responsible for checking the length of the stack.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn popn_top<const POPN: usize>(&mut self) -> ([U256; POPN], &mut U256) {
        let result = self.popn::<POPN>();
        let top = self.top_unsafe();
        (result, top)
    }

    /// Pushes a new value onto the stack.
    ///
    /// If pushing would exceed the stack limit, returns `false` and leaves the stack
    /// unchanged.
    #[inline]
    #[must_use]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn push(&mut self, value: U256) -> bool {
        // In debug builds, verify we have sufficient capacity provisioned.
        debug_assert!(self.data.capacity() >= STACK_LIMIT);
        if self.data.len() == STACK_LIMIT {
            return false;
        }
        self.data.push(value);
        true
    }

    /// Peeks the value at the given index from the top of the stack, where the top of
    /// the stack is at index `0`. If the index is too large,
    /// `InstructionResult::StackUnderflow` is returned.
    #[inline]
    pub fn peek(&self, no_from_top: usize) -> Result<U256, InstructionResult> {
        if self.data.len() > no_from_top {
            Ok(self.data[self.data.len() - no_from_top - 1])
        } else {
            Err(InstructionResult::StackUnderflow)
        }
    }

    /// Duplicates the `N`th value from the top of the stack.
    ///
    /// # Panics
    ///
    /// Panics if `n` is 0.
    #[inline]
    #[must_use]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn dup(&mut self, n: usize) -> bool {
        assume!(n > 0, "attempted to dup 0");
        let len = self.data.len();
        if len < n || len + 1 > STACK_LIMIT {
            false
        } else {
            // SAFETY: Check for out of bounds is done above and it makes this safe to do.
            unsafe {
                let ptr = self.data.as_mut_ptr().add(len);
                ptr::copy_nonoverlapping(ptr.sub(n), ptr, 1);
                self.data.set_len(len + 1);
            }
            true
        }
    }

    /// Swaps the topmost value with the `N`th value from the top.
    ///
    /// # Panics
    ///
    /// Panics if `n` is 0.
    #[inline(always)]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn swap(&mut self, n: usize) -> bool {
        self.exchange(0, n)
    }

    /// Exchange two values on the stack.
    ///
    /// `n` is the first index, and the second index is calculated as `n + m`.
    ///
    /// # Panics
    ///
    /// Panics if `m` is zero.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn exchange(&mut self, n: usize, m: usize) -> bool {
        assume!(m > 0, "overlapping exchange");
        let len = self.data.len();
        let n_m_index = n + m;
        if n_m_index >= len {
            return false;
        }
        // SAFETY: `n` and `n_m_index` are checked to be within bounds above, and they don't overlap.
        unsafe {
            // Note: `ptr::swap_nonoverlapping` is more efficient than `slice::swap` or `ptr::swap`
            // because it assumes that the pointers do not overlap, which eliminates an
            // intermediate copy; that assumption is known to hold in this context.
            let top = self.data.as_mut_ptr().add(len - 1);
            core::ptr::swap_nonoverlapping(top.sub(n), top.sub(n_m_index), 1);
        }
        true
    }

    /// Pushes an arbitrary-length slice of bytes onto the stack, padding the last word with zeros
    /// if necessary.
    #[inline]
    pub fn push_slice(&mut self, slice: &[u8]) -> Result<(), InstructionResult> {
        if self.push_slice_(slice) {
            Ok(())
        } else {
            Err(InstructionResult::StackOverflow)
        }
    }

    /// Pushes an arbitrary-length slice of bytes onto the stack, padding the last word with zeros
    /// if necessary.
    #[inline]
    fn push_slice_(&mut self, slice: &[u8]) -> bool {
        if slice.is_empty() {
            return true;
        }

        let n_words = slice.len().div_ceil(32);
        let new_len = self.data.len() + n_words;
        if new_len > STACK_LIMIT {
            return false;
        }

        // In debug builds, ensure underlying capacity is sufficient for the write.
        debug_assert!(self.data.capacity() >= new_len);

        // SAFETY: Length checked above.
        unsafe {
            let dst = self.data.as_mut_ptr().add(self.data.len()).cast::<u64>();
            self.data.set_len(new_len);

            let mut i = 0;

            // Write full words
            let words = slice.chunks_exact(32);
            let partial_last_word = words.remainder();
            for word in words {
                // Note: We unroll `U256::from_be_bytes` here to write directly into the buffer,
                // instead of creating a 32 byte array on the stack and then copying it over.
                for l in word.rchunks_exact(8) {
                    dst.add(i).write(u64::from_be_bytes(l.try_into().unwrap()));
                    i += 1;
                }
            }

            if partial_last_word.is_empty() {
                return true;
            }

            // Write limbs of partial last word
            let limbs = partial_last_word.rchunks_exact(8);
            let partial_last_limb = limbs.remainder();
            for l in limbs {
                dst.add(i).write(u64::from_be_bytes(l.try_into().unwrap()));
                i += 1;
            }

            // Write partial last limb by padding with zeros
            if !partial_last_limb.is_empty() {
                let mut tmp = [0u8; 8];
                tmp[8 - partial_last_limb.len()..].copy_from_slice(partial_last_limb);
                dst.add(i).write(u64::from_be_bytes(tmp));
                i += 1;
            }

            debug_assert_eq!(i.div_ceil(4), n_words, "wrote too much");

            // Zero out upper bytes of last word
            let m = i % 4; // 32 / 8
            if m != 0 {
                dst.add(i).write_bytes(0, 4 - m);
            }
        }

        true
    }

    /// Sets the value at the given index from the top of the stack, where the top of
    /// the stack is at index `0`. If the index is too large,
    /// `InstructionResult::StackUnderflow` is returned.
    #[inline]
    pub fn set(&mut self, no_from_top: usize, val: U256) -> Result<(), InstructionResult> {
        if self.data.len() > no_from_top {
            let len = self.data.len();
            self.data[len - no_from_top - 1] = val;
            Ok(())
        } else {
            Err(InstructionResult::StackUnderflow)
        }
    }
}

#[cfg(feature = "serde")]
impl<'de> serde::Deserialize<'de> for Stack {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        #[derive(serde::Deserialize)]
        struct StackSerde {
            data: Vec<U256>,
        }

        let mut stack = StackSerde::deserialize(deserializer)?;
        if stack.data.len() > STACK_LIMIT {
            return Err(serde::de::Error::custom(std::format!(
                "stack size exceeds limit: {} > {}",
                stack.data.len(),
                STACK_LIMIT
            )));
        }
        stack.data.reserve(STACK_LIMIT - stack.data.len());
        Ok(Self { data: stack.data })
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    fn run(f: impl FnOnce(&mut Stack)) {
        let mut stack = Stack::new();
        // Fill capacity with non-zero values
        unsafe {
            stack.data.set_len(STACK_LIMIT);
            stack.data.fill(U256::MAX);
            stack.data.set_len(0);
        }
        f(&mut stack);
    }
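
    // A minimal sketch of the index-from-top convention used by `peek` and `set`:
    // index 0 is the top of the stack, and out-of-range indices report
    // `StackUnderflow` instead of panicking.
    #[test]
    fn peek_and_set_from_top() {
        run(|stack| {
            assert!(stack.push(U256::from(1)));
            assert!(stack.push(U256::from(2)));

            assert_eq!(stack.peek(0), Ok(U256::from(2)));
            assert_eq!(stack.peek(1), Ok(U256::from(1)));
            assert_eq!(stack.peek(2), Err(InstructionResult::StackUnderflow));

            // Overwrite the top value in place, then drain the stack.
            stack.set(0, U256::from(42)).unwrap();
            assert_eq!(stack.pop(), Ok(U256::from(42)));
            assert_eq!(stack.pop(), Ok(U256::from(1)));
            assert_eq!(stack.pop(), Err(InstructionResult::StackUnderflow));
        });
    }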

    #[test]
    fn push_slices() {
        // No-op
        run(|stack| {
            stack.push_slice(b"").unwrap();
            assert!(stack.data.is_empty());
        });

        // One word
        run(|stack| {
            stack.push_slice(&[42]).unwrap();
            assert_eq!(stack.data, [U256::from(42)]);
        });

        let n = 0x1111_2222_3333_4444_5555_6666_7777_8888_u128;
        run(|stack| {
            stack.push_slice(&n.to_be_bytes()).unwrap();
            assert_eq!(stack.data, [U256::from(n)]);
        });

        // More than one word
        run(|stack| {
            let b = [U256::from(n).to_be_bytes::<32>(); 2].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::from(n); 2]);
        });

        run(|stack| {
            let b = [&[0; 32][..], &[42u8]].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::ZERO, U256::from(42)]);
        });

        run(|stack| {
            let b = [&[0; 32][..], &n.to_be_bytes()].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::ZERO, U256::from(n)]);
        });

        run(|stack| {
            let b = [&[0; 64][..], &n.to_be_bytes()].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::ZERO, U256::ZERO, U256::from(n)]);
        });
    }
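
    // A minimal sketch of the stack-manipulation indexing conventions: `dup(n)`
    // copies the n-th value from the top (1-based, mirroring DUPn), `swap(n)` swaps
    // the top with the n-th value below it, and `exchange(n, m)` swaps the values
    // at depths `n` and `n + m` (0-based from the top).
    #[test]
    fn dup_swap_exchange() {
        run(|stack| {
            for i in 1..=3 {
                assert!(stack.push(U256::from(i)));
            }
            // Stack (bottom to top): [1, 2, 3]

            // DUP2: copy the second value from the top.
            assert!(stack.dup(2));
            assert_eq!(stack.data, [U256::from(1), U256::from(2), U256::from(3), U256::from(2)]);

            // SWAP1: swap the two topmost values.
            assert!(stack.swap(1));
            assert_eq!(stack.data, [U256::from(1), U256::from(2), U256::from(2), U256::from(3)]);

            // Swap the values at depths 1 and 1 + 2 = 3 from the top.
            assert!(stack.exchange(1, 2));
            assert_eq!(stack.data, [U256::from(2), U256::from(2), U256::from(1), U256::from(3)]);

            // Out-of-range requests fail and leave the stack unchanged.
            assert!(!stack.dup(5));
            assert!(!stack.exchange(2, 2));
            assert_eq!(stack.len(), 4);
        });
    }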

    #[test]
    fn stack_clone() {
        // Test cloning an empty stack
        let empty_stack = Stack::new();
        let cloned_empty = empty_stack.clone();
        assert_eq!(empty_stack, cloned_empty);
        assert_eq!(cloned_empty.len(), 0);
        assert_eq!(cloned_empty.data().capacity(), STACK_LIMIT);

        // Test cloning a partially filled stack
        let mut partial_stack = Stack::new();
        for i in 0..10 {
            assert!(partial_stack.push(U256::from(i)));
        }
        let mut cloned_partial = partial_stack.clone();
        assert_eq!(partial_stack, cloned_partial);
        assert_eq!(cloned_partial.len(), 10);
        assert_eq!(cloned_partial.data().capacity(), STACK_LIMIT);

        // Test that modifying the clone doesn't affect the original
        assert!(cloned_partial.push(U256::from(100)));
        assert_ne!(partial_stack, cloned_partial);
        assert_eq!(partial_stack.len(), 10);
        assert_eq!(cloned_partial.len(), 11);

        // Test cloning a full stack
        let mut full_stack = Stack::new();
        for i in 0..STACK_LIMIT {
            assert!(full_stack.push(U256::from(i)));
        }
        let mut cloned_full = full_stack.clone();
        assert_eq!(full_stack, cloned_full);
        assert_eq!(cloned_full.len(), STACK_LIMIT);
        assert_eq!(cloned_full.data().capacity(), STACK_LIMIT);

        // Test that pushing to the full original or cloned stack fails (stack overflow)
        assert!(!full_stack.push(U256::from(100)));
        assert!(!cloned_full.push(U256::from(100)));
    }
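
    // A minimal sketch of the unchecked pop helpers: the caller is responsible for
    // the length check, `popn` returns the popped values top-first, and `popn_top`
    // additionally hands back a mutable reference to the new top. The `StackTr`
    // methods wrap the same helpers behind a checked interface.
    #[test]
    fn popn_and_popn_top() {
        run(|stack| {
            for i in 1..=4 {
                assert!(stack.push(U256::from(i)));
            }
            // Stack (bottom to top): [1, 2, 3, 4]

            // SAFETY: Four values were pushed above, so popping two is in bounds.
            let [a, b] = unsafe { stack.popn::<2>() };
            assert_eq!([a, b], [U256::from(4), U256::from(3)]);

            // SAFETY: Two values remain, enough to pop one and borrow the new top.
            let ([c], top) = unsafe { stack.popn_top::<1>() };
            assert_eq!(c, U256::from(2));
            assert_eq!(*top, U256::from(1));
            *top = U256::from(7);
            assert_eq!(stack.pop(), Ok(U256::from(7)));

            // The checked trait variant performs the length check itself.
            assert_eq!(StackTr::popn::<1>(&mut *stack), None);
        });
    }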
}