// revm_interpreter/interpreter/stack.rs

1use crate::InstructionResult;
2use core::{fmt, ptr};
3use primitives::U256;
4use std::vec::Vec;
5
6use super::StackTr;
7
/// EVM interpreter stack limit.
///
/// The EVM caps the operand stack at 1024 words; all expansion paths below
/// check against this bound before writing.
pub const STACK_LIMIT: usize = 1024;

/// EVM stack with [STACK_LIMIT] capacity of words.
#[derive(Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
pub struct Stack {
    /// The underlying data of the stack.
    ///
    /// Invariant: stacks built via [`Stack::new`] always reserve at least
    /// `STACK_LIMIT` capacity; the unsafe fast paths (`push`, `dup`,
    /// `push_slice`) write through raw pointers relying on that reservation.
    // NOTE(review): `Stack::invalid` deliberately breaks this invariant
    // (zero capacity) — confirm such stacks are never pushed to.
    data: Vec<U256>,
}
18
19impl fmt::Display for Stack {
20    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
21        f.write_str("[")?;
22        for (i, x) in self.data.iter().enumerate() {
23            if i > 0 {
24                f.write_str(", ")?;
25            }
26            write!(f, "{x}")?;
27        }
28        f.write_str("]")
29    }
30}
31
32impl Default for Stack {
33    #[inline]
34    fn default() -> Self {
35        Self::new()
36    }
37}
38
39impl Clone for Stack {
40    fn clone(&self) -> Self {
41        // Use `Self::new()` to ensure the cloned Stack is constructed with at least
42        // STACK_LIMIT capacity, and then copy the data. This preserves the invariant
43        // that Stack has sufficient capacity for operations that rely on it.
44        let mut new_stack = Self::new();
45        new_stack.data.extend_from_slice(&self.data);
46        new_stack
47    }
48}
49
impl StackTr for Stack {
    /// Returns the number of words on the stack.
    #[inline]
    fn len(&self) -> usize {
        // Delegates to the inherent `Stack::len`; inherent methods take
        // precedence over trait methods, so this is not a recursive call.
        self.len()
    }

    /// Returns the stack contents as a slice, bottom of the stack first.
    #[inline]
    fn data(&self) -> &[U256] {
        &self.data
    }

    /// Removes all words from the stack, keeping the allocated capacity.
    #[inline]
    fn clear(&mut self) {
        self.data.clear();
    }

    /// Pops `N` words, returning `None` on underflow.
    #[inline]
    fn popn<const N: usize>(&mut self) -> Option<[U256; N]> {
        if self.len() < N {
            return None;
        }
        // SAFETY: Stack length is checked above.
        Some(unsafe { self.popn::<N>() })
    }

    /// Pops `POPN` words and returns a mutable reference to the new top,
    /// or `None` if fewer than `POPN + 1` words are present.
    #[inline]
    fn popn_top<const POPN: usize>(&mut self) -> Option<([U256; POPN], &mut U256)> {
        if self.len() < POPN + 1 {
            return None;
        }
        // SAFETY: Stack length is checked above.
        Some(unsafe { self.popn_top::<POPN>() })
    }

    /// Swaps the word at depth `n` with the word at depth `n + m`;
    /// delegates to the bounds-checked inherent method.
    #[inline]
    fn exchange(&mut self, n: usize, m: usize) -> bool {
        self.exchange(n, m)
    }

    /// Duplicates the `n`th word from the top; delegates to the
    /// bounds-checked inherent method.
    #[inline]
    fn dup(&mut self, n: usize) -> bool {
        self.dup(n)
    }

    /// Pushes a word, returning `false` on stack overflow.
    #[inline]
    fn push(&mut self, value: U256) -> bool {
        self.push(value)
    }

    /// Pushes a byte slice as zero-padded words, returning `false` on overflow.
    #[inline]
    fn push_slice(&mut self, slice: &[u8]) -> bool {
        self.push_slice_(slice)
    }
}
104
impl Stack {
    /// Instantiate a new stack with the [default stack limit][STACK_LIMIT].
    ///
    /// The backing buffer is pre-allocated so pushes up to `STACK_LIMIT`
    /// never reallocate.
    #[inline]
    pub fn new() -> Self {
        Self {
            // SAFETY: Expansion functions assume that capacity is `STACK_LIMIT`.
            data: Vec::with_capacity(STACK_LIMIT),
        }
    }

    /// Instantiate a new invalid Stack.
    ///
    /// The returned stack reserves no capacity, so it violates the invariant
    /// assumed by the unsafe expansion paths (`push`, `dup`, `push_slice`).
    // NOTE(review): confirm callers never push onto a stack created here.
    #[inline]
    pub fn invalid() -> Self {
        Self { data: Vec::new() }
    }

    /// Returns the length of the stack in words.
    #[inline]
    pub fn len(&self) -> usize {
        self.data.len()
    }

    /// Returns whether the stack is empty.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }

    /// Returns a reference to the underlying data buffer.
    ///
    /// Index `0` is the bottom of the stack; the last element is the top.
    #[inline]
    pub fn data(&self) -> &Vec<U256> {
        &self.data
    }

    /// Returns a mutable reference to the underlying data buffer.
    ///
    /// Callers must not shrink the buffer's capacity below `STACK_LIMIT`,
    /// since the unsafe expansion paths rely on it.
    #[inline]
    pub fn data_mut(&mut self) -> &mut Vec<U256> {
        &mut self.data
    }

    /// Consumes the stack and returns the underlying data buffer.
    #[inline]
    pub fn into_data(self) -> Vec<U256> {
        self.data
    }

    /// Removes the topmost element from the stack and returns it, or `StackUnderflow` if it is
    /// empty.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn pop(&mut self) -> Result<U256, InstructionResult> {
        let len = self.data.len();
        if primitives::hints_util::unlikely(len == 0) {
            Err(InstructionResult::StackUnderflow)
        } else {
            // SAFETY: `len > 0`, so slot `len - 1` is initialized. The length
            // is shrunk before the raw read, so the word is moved out exactly
            // once and never observed again through the vector.
            unsafe {
                self.data.set_len(len - 1);
                Ok(core::ptr::read(self.data.as_ptr().add(len - 1)))
            }
        }
    }

    /// Removes the topmost element from the stack and returns it.
    ///
    /// # Safety
    ///
    /// The caller is responsible for checking the length of the stack.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn pop_unsafe(&mut self) -> U256 {
        // The `assume!` lets the optimizer drop `Vec::pop`'s emptiness branch.
        assume!(!self.data.is_empty());
        self.data.pop().unwrap_unchecked()
    }

    /// Peeks the top of the stack.
    ///
    /// # Safety
    ///
    /// The caller is responsible for checking the length of the stack.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn top_unsafe(&mut self) -> &mut U256 {
        assume!(!self.data.is_empty());
        self.data.last_mut().unwrap_unchecked()
    }

    /// Pops `N` values from the stack.
    ///
    /// The returned array is in pop order: index `0` was the top of the stack.
    ///
    /// # Safety
    ///
    /// The caller is responsible for checking the length of the stack.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn popn<const N: usize>(&mut self) -> [U256; N] {
        assume!(self.data.len() >= N);
        core::array::from_fn(|_| unsafe { self.pop_unsafe() })
    }

    /// Pops `N` values from the stack and returns the top of the stack.
    ///
    /// # Safety
    ///
    /// The caller is responsible for checking the length of the stack
    /// (at least `POPN + 1` words must be present).
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn popn_top<const POPN: usize>(&mut self) -> ([U256; POPN], &mut U256) {
        let result = self.popn::<POPN>();
        let top = self.top_unsafe();
        (result, top)
    }

    /// Push a new value onto the stack.
    ///
    /// If it will exceed the stack limit, returns false and leaves the stack
    /// unchanged.
    #[inline]
    #[must_use]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn push(&mut self, value: U256) -> bool {
        // In debug builds, verify we have sufficient capacity provisioned.
        debug_assert!(self.data.capacity() >= STACK_LIMIT);
        let len = self.data.len();
        if len == STACK_LIMIT {
            return false;
        }
        // SAFETY: `len < STACK_LIMIT <= capacity`, so the slot past the end is
        // allocated. Writing directly avoids `Vec::push`'s growth check.
        unsafe {
            let end = self.data.as_mut_ptr().add(len);
            core::ptr::write(end, value);
            self.data.set_len(len + 1);
        }
        true
    }

    /// Peek a value at given index for the stack, where the top of
    /// the stack is at index `0`. If the index is too large,
    /// `InstructionResult::StackUnderflow` is returned.
    #[inline]
    pub fn peek(&self, no_from_top: usize) -> Result<U256, InstructionResult> {
        if self.data.len() > no_from_top {
            Ok(self.data[self.data.len() - no_from_top - 1])
        } else {
            Err(InstructionResult::StackUnderflow)
        }
    }

    /// Duplicates the `N`th value from the top of the stack.
    ///
    /// Returns `false` if fewer than `n` values are present or the stack is full.
    ///
    /// # Panics
    ///
    /// Panics if `n` is 0.
    #[inline]
    #[must_use]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn dup(&mut self, n: usize) -> bool {
        assume!(n > 0, "attempted to dup 0");
        let len = self.data.len();
        if len < n || len + 1 > STACK_LIMIT {
            false
        } else {
            // SAFETY: Check for out of bounds is done above and it makes this safe to do.
            // `ptr` is one past the end (allocated, since len < STACK_LIMIT <= capacity)
            // and `ptr.sub(n)` is in-bounds since n <= len; n > 0 keeps them disjoint.
            unsafe {
                let ptr = self.data.as_mut_ptr().add(len);
                ptr::copy_nonoverlapping(ptr.sub(n), ptr, 1);
                self.data.set_len(len + 1);
            }
            true
        }
    }

    /// Swaps the topmost value with the `N`th value from the top.
    ///
    /// # Panics
    ///
    /// Panics if `n` is 0.
    #[inline(always)]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn swap(&mut self, n: usize) -> bool {
        self.exchange(0, n)
    }

    /// Exchange two values on the stack.
    ///
    /// `n` is the first index, and the second index is calculated as `n + m`.
    /// Both indices count down from the top of the stack.
    ///
    /// # Panics
    ///
    /// Panics if `m` is zero (the two slots would be the same element,
    /// violating `swap_nonoverlapping`'s contract).
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn exchange(&mut self, n: usize, m: usize) -> bool {
        assume!(m > 0, "overlapping exchange");
        let len = self.data.len();
        let n_m_index = n + m;
        if n_m_index >= len {
            return false;
        }
        // SAFETY: `n` and `n_m` are checked to be within bounds, and they don't overlap.
        unsafe {
            // Note: `ptr::swap_nonoverlapping` is more efficient than `slice::swap` or `ptr::swap`
            // because it operates under the assumption that the pointers do not overlap,
            // eliminating an intermediate copy,
            // which is a condition we know to be true in this context.
            let top = self.data.as_mut_ptr().add(len - 1);
            core::ptr::swap_nonoverlapping(top.sub(n), top.sub(n_m_index), 1);
        }
        true
    }

    /// Pushes an arbitrary length slice of bytes onto the stack, padding the last word with zeros
    /// if necessary.
    ///
    /// # Errors
    ///
    /// Returns `InstructionResult::StackOverflow` if the slice does not fit.
    #[inline]
    pub fn push_slice(&mut self, slice: &[u8]) -> Result<(), InstructionResult> {
        if self.push_slice_(slice) {
            Ok(())
        } else {
            Err(InstructionResult::StackOverflow)
        }
    }

    /// Pushes an arbitrary length slice of bytes onto the stack, padding the last word with zeros
    /// if necessary. Returns `false` (leaving the stack unchanged) on overflow.
    #[inline]
    fn push_slice_(&mut self, slice: &[u8]) -> bool {
        if slice.is_empty() {
            return true;
        }

        // One 32-byte word per 32 input bytes, rounding up for a partial word.
        let n_words = slice.len().div_ceil(32);
        let new_len = self.data.len() + n_words;
        if new_len > STACK_LIMIT {
            return false;
        }

        // In debug builds, ensure underlying capacity is sufficient for the write.
        debug_assert!(self.data.capacity() >= new_len);

        // SAFETY: Length checked above.
        unsafe {
            // Write in u64 limbs: each U256 word is built from four big-endian
            // 8-byte chunks, written least-significant limb first (rchunks),
            // matching the in-memory limb order `U256::from_be_bytes` produces.
            // assumes U256 is stored as 4 little-endian-ordered u64 limbs — TODO confirm
            let dst = self.data.as_mut_ptr().add(self.data.len()).cast::<u64>();
            self.data.set_len(new_len);

            // `i` counts u64 limbs written so far.
            let mut i = 0;

            // Write full words
            let words = slice.chunks_exact(32);
            let partial_last_word = words.remainder();
            for word in words {
                // Note: We unroll `U256::from_be_bytes` here to write directly into the buffer,
                // instead of creating a 32 byte array on the stack and then copying it over.
                for l in word.rchunks_exact(8) {
                    dst.add(i).write(u64::from_be_bytes(l.try_into().unwrap()));
                    i += 1;
                }
            }

            if partial_last_word.is_empty() {
                return true;
            }

            // Write limbs of partial last word
            let limbs = partial_last_word.rchunks_exact(8);
            let partial_last_limb = limbs.remainder();
            for l in limbs {
                dst.add(i).write(u64::from_be_bytes(l.try_into().unwrap()));
                i += 1;
            }

            // Write partial last limb by padding with zeros
            if !partial_last_limb.is_empty() {
                let mut tmp = [0u8; 8];
                tmp[8 - partial_last_limb.len()..].copy_from_slice(partial_last_limb);
                dst.add(i).write(u64::from_be_bytes(tmp));
                i += 1;
            }

            // Every word holds 4 limbs, so the limb count must round up to `n_words`.
            debug_assert_eq!(i.div_ceil(4), n_words, "wrote too much");

            // Zero out upper bytes of last word
            let m = i % 4; // limbs written into the final word (32 / 8 = 4 per word)
            if m != 0 {
                dst.add(i).write_bytes(0, 4 - m);
            }
        }

        true
    }

    /// Set a value at given index for the stack, where the top of the
    /// stack is at index `0`. If the index is too large,
    /// `InstructionResult::StackUnderflow` is returned.
    #[inline]
    pub fn set(&mut self, no_from_top: usize, val: U256) -> Result<(), InstructionResult> {
        if self.data.len() > no_from_top {
            let len = self.data.len();
            self.data[len - no_from_top - 1] = val;
            Ok(())
        } else {
            Err(InstructionResult::StackUnderflow)
        }
    }
}
406
#[cfg(feature = "serde")]
impl<'de> serde::Deserialize<'de> for Stack {
    /// Deserializes a stack, rejecting inputs longer than `STACK_LIMIT` and
    /// restoring the `STACK_LIMIT`-capacity invariant on the buffer.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        // Private mirror of `Stack`'s serialized shape (`{ "data": [...] }`).
        #[derive(serde::Deserialize)]
        struct StackSerde {
            data: Vec<U256>,
        }

        let StackSerde { mut data } = StackSerde::deserialize(deserializer)?;
        let len = data.len();
        if len > STACK_LIMIT {
            return Err(serde::de::Error::custom(std::format!(
                "stack size exceeds limit: {} > {}",
                len,
                STACK_LIMIT
            )));
        }
        // Top the capacity back up so unsafe push paths can rely on it.
        data.reserve(STACK_LIMIT - len);
        Ok(Self { data })
    }
}
430
#[cfg(test)]
mod tests {
    use super::*;

    /// Runs `f` on a fresh stack whose entire capacity was pre-filled with
    /// `U256::MAX` before being logically emptied again. This makes tests
    /// catch writes that fail to fully overwrite stale memory (e.g. missing
    /// zero-padding in `push_slice`).
    fn run(f: impl FnOnce(&mut Stack)) {
        let mut stack = Stack::new();
        // Fill capacity with non-zero values
        unsafe {
            stack.data.set_len(STACK_LIMIT);
            stack.data.fill(U256::MAX);
            stack.data.set_len(0);
        }
        f(&mut stack);
    }

    #[test]
    fn push_slices() {
        // No-op
        run(|stack| {
            stack.push_slice(b"").unwrap();
            assert!(stack.data.is_empty());
        });

        // One word
        run(|stack| {
            stack.push_slice(&[42]).unwrap();
            assert_eq!(stack.data, [U256::from(42)]);
        });

        // A 16-byte (two-limb) value: exercises the partial-word limb path.
        let n = 0x1111_2222_3333_4444_5555_6666_7777_8888_u128;
        run(|stack| {
            stack.push_slice(&n.to_be_bytes()).unwrap();
            assert_eq!(stack.data, [U256::from(n)]);
        });

        // More than one word
        run(|stack| {
            let b = [U256::from(n).to_be_bytes::<32>(); 2].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::from(n); 2]);
        });

        // Full word followed by a single trailing byte.
        run(|stack| {
            let b = [&[0; 32][..], &[42u8]].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::ZERO, U256::from(42)]);
        });

        // Full word followed by a partial (16-byte) word.
        run(|stack| {
            let b = [&[0; 32][..], &n.to_be_bytes()].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::ZERO, U256::from(n)]);
        });

        // Two full zero words followed by a partial word.
        run(|stack| {
            let b = [&[0; 64][..], &n.to_be_bytes()].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::ZERO, U256::ZERO, U256::from(n)]);
        });
    }

    #[test]
    fn stack_clone() {
        // Test cloning an empty stack
        let empty_stack = Stack::new();
        let cloned_empty = empty_stack.clone();
        assert_eq!(empty_stack, cloned_empty);
        assert_eq!(cloned_empty.len(), 0);
        // The clone must preserve the STACK_LIMIT capacity invariant.
        assert_eq!(cloned_empty.data().capacity(), STACK_LIMIT);

        // Test cloning a partially filled stack
        let mut partial_stack = Stack::new();
        for i in 0..10 {
            assert!(partial_stack.push(U256::from(i)));
        }
        let mut cloned_partial = partial_stack.clone();
        assert_eq!(partial_stack, cloned_partial);
        assert_eq!(cloned_partial.len(), 10);
        assert_eq!(cloned_partial.data().capacity(), STACK_LIMIT);

        // Test that modifying the clone doesn't affect the original
        assert!(cloned_partial.push(U256::from(100)));
        assert_ne!(partial_stack, cloned_partial);
        assert_eq!(partial_stack.len(), 10);
        assert_eq!(cloned_partial.len(), 11);

        // Test cloning a full stack
        let mut full_stack = Stack::new();
        for i in 0..STACK_LIMIT {
            assert!(full_stack.push(U256::from(i)));
        }
        let mut cloned_full = full_stack.clone();
        assert_eq!(full_stack, cloned_full);
        assert_eq!(cloned_full.len(), STACK_LIMIT);
        assert_eq!(cloned_full.data().capacity(), STACK_LIMIT);

        // Test push to the full original or cloned stack should return StackOverflow
        assert!(!full_stack.push(U256::from(100)));
        assert!(!cloned_full.push(U256::from(100)));
    }
}
531}