//! EVM interpreter stack implementation (`revm_interpreter::interpreter::stack`).

use super::StackTr;
use crate::InstructionResult;
use core::fmt;
use primitives::{hints_util::cold_path, U256};
use std::vec::Vec;

/// EVM interpreter stack limit.
pub const STACK_LIMIT: usize = 1024;

/// EVM stack with [STACK_LIMIT] capacity of words.
#[derive(Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
pub struct Stack {
    /// The underlying data of the stack.
    ///
    /// Invariant: the `Vec` is allocated with at least `STACK_LIMIT` capacity
    /// (see [`Stack::new`]); the unchecked push/dup/push_slice paths write
    /// through raw pointers and rely on this capacity being present.
    data: Vec<U256>,
}

18impl fmt::Display for Stack {
19    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
20        f.write_str("[")?;
21        for (i, x) in self.data.iter().enumerate() {
22            if i > 0 {
23                f.write_str(", ")?;
24            }
25            write!(f, "{x}")?;
26        }
27        f.write_str("]")
28    }
29}
30
31impl Default for Stack {
32    #[inline]
33    fn default() -> Self {
34        Self::new()
35    }
36}
37
38impl Clone for Stack {
39    fn clone(&self) -> Self {
40        // Use `Self::new()` to ensure the cloned Stack is constructed with at least
41        // STACK_LIMIT capacity, and then copy the data. This preserves the invariant
42        // that Stack has sufficient capacity for operations that rely on it.
43        let mut new_stack = Self::new();
44        new_stack.data.extend_from_slice(&self.data);
45        new_stack
46    }
47}
48
impl StackTr for Stack {
    // NOTE: Each method below is a one-line delegation to the inherent method
    // of the same name. Rust resolves `self.len()` etc. to the inherent impl
    // because inherent methods take precedence over trait methods, so these
    // calls are delegation, not recursion.

    #[inline]
    fn len(&self) -> usize {
        self.len()
    }

    #[inline]
    fn data(&self) -> &[U256] {
        // Coerces the inner `Vec<U256>` to a slice.
        &self.data
    }

    #[inline]
    fn clear(&mut self) {
        // Drops all elements but keeps the STACK_LIMIT capacity allocated.
        self.data.clear();
    }

    #[inline]
    fn popn<const N: usize>(&mut self) -> Option<[U256; N]> {
        self.popn()
    }

    #[inline]
    fn popn_top<const POPN: usize>(&mut self) -> Option<([U256; POPN], &mut U256)> {
        self.popn_top()
    }

    #[inline]
    fn exchange(&mut self, n: usize, m: usize) -> bool {
        self.exchange(n, m)
    }

    #[inline]
    fn dup(&mut self, n: usize) -> bool {
        self.dup(n)
    }

    #[inline]
    fn push(&mut self, value: U256) -> bool {
        self.push(value)
    }

    #[inline]
    fn push_slice(&mut self, slice: &[u8]) -> bool {
        // Delegates to the private bool-returning helper; the inherent public
        // `push_slice` returns `Result` instead.
        self.push_slice_(slice)
    }
}

impl Stack {
    /// Instantiate a new stack with the [default stack limit][STACK_LIMIT].
    #[inline]
    pub fn new() -> Self {
        Self {
            // SAFETY: Expansion functions assume that capacity is `STACK_LIMIT`.
            data: Vec::with_capacity(STACK_LIMIT),
        }
    }

    /// Instantiate a new invalid Stack.
    ///
    /// NOTE: The backing `Vec` has zero capacity, violating the
    /// `STACK_LIMIT`-capacity invariant that `push`/`dup`/`push_slice_` rely
    /// on; this value is a placeholder and must not be used for execution.
    #[inline]
    pub const fn invalid() -> Self {
        Self { data: Vec::new() }
    }

    /// Returns the length of the stack in words.
    #[inline]
    pub const fn len(&self) -> usize {
        self.data.len()
    }

    /// Returns whether the stack is empty.
    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.data.is_empty()
    }

    /// Returns a reference to the underlying data buffer.
    #[inline]
    pub const fn data(&self) -> &Vec<U256> {
        &self.data
    }

    /// Returns a mutable reference to the underlying data buffer.
    ///
    /// NOTE: Callers must not shrink the buffer's capacity below
    /// `STACK_LIMIT`; the unchecked push paths depend on it.
    #[inline]
    pub const fn data_mut(&mut self) -> &mut Vec<U256> {
        &mut self.data
    }

    /// Consumes the stack and returns the underlying data buffer.
    #[inline]
    pub fn into_data(self) -> Vec<U256> {
        self.data
    }

    /// Removes the topmost element from the stack and returns it, or `StackUnderflow` if it is
    /// empty.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn pop(&mut self) -> Result<U256, InstructionResult> {
        self.data.pop().ok_or(InstructionResult::StackUnderflow)
    }

    /// Removes the topmost element from the stack and returns it.
    ///
    /// # Safety
    ///
    /// The caller is responsible for checking the length of the stack.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn pop_unchecked(&mut self) -> U256 {
        // `assume!` asserts in debug builds and hints the optimizer in release,
        // letting it elide the `None` branch of `pop`.
        assume!(!self.is_empty());
        self.data.pop().unwrap_unchecked()
    }

    /// Peeks the top of the stack.
    #[inline]
    pub fn top(&mut self) -> Option<&mut U256> {
        self.data.last_mut()
    }

    /// Peeks the top of the stack.
    ///
    /// # Safety
    ///
    /// The caller is responsible for checking the length of the stack.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn top_unchecked(&mut self) -> &mut U256 {
        assume!(!self.is_empty());
        self.data.last_mut().unwrap_unchecked()
    }

    /// Pops `N` values from the stack.
    ///
    /// Returns `None` (leaving the stack unchanged) if fewer than `N`
    /// elements are present.
    #[inline]
    pub fn popn<const N: usize>(&mut self) -> Option<[U256; N]> {
        if self.len() < N {
            return None;
        }
        // SAFETY: Stack length is checked above.
        Some(unsafe { self.popn_unchecked() })
    }

    /// Pops `N` values from the stack.
    ///
    /// `array::from_fn` fills indices in ascending order, so index 0 of the
    /// result holds what was the top of the stack.
    ///
    /// # Safety
    ///
    /// The caller is responsible for checking the length of the stack.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn popn_unchecked<const N: usize>(&mut self) -> [U256; N] {
        assume!(self.len() >= N);
        core::array::from_fn(|_| unsafe { self.pop_unchecked() })
    }

    /// Pops `N` values from the stack and returns the top of the stack.
    ///
    /// Requires `N + 1` elements: `N` to pop plus one left as the new top.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn popn_top<const N: usize>(&mut self) -> Option<([U256; N], &mut U256)> {
        if self.len() < N + 1 {
            return None;
        }
        // SAFETY: Stack length is checked above.
        Some(unsafe { self.popn_top_unchecked() })
    }

    /// Pops `N` values from the stack and returns the top of the stack.
    ///
    /// # Safety
    ///
    /// The caller is responsible for checking the length of the stack
    /// (at least `N + 1` elements).
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn popn_top_unchecked<const N: usize>(&mut self) -> ([U256; N], &mut U256) {
        unsafe { (self.popn_unchecked(), self.top_unchecked()) }
    }

    /// Push a new value onto the stack.
    ///
    /// If it will exceed the stack limit, returns false and leaves the stack
    /// unchanged.
    #[inline]
    #[must_use]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn push(&mut self, value: U256) -> bool {
        // In debug builds, verify we have sufficient capacity provisioned.
        debug_assert!(self.data.capacity() >= STACK_LIMIT);
        let len = self.len();
        if len == STACK_LIMIT {
            cold_path();
            return false;
        }
        // Write directly past the current end and bump the length, skipping
        // `Vec::push`'s capacity check — the invariant above guarantees room.
        unsafe {
            let end = self.data.as_mut_ptr().add(len);
            core::ptr::write(end, value);
            self.data.set_len(len + 1);
        }
        true
    }

    /// Peek a value at given index for the stack, where the top of
    /// the stack is at index `0`. If the index is too large,
    /// `StackError::Underflow` is returned.
    #[inline]
    pub fn peek(&self, no_from_top: usize) -> Result<U256, InstructionResult> {
        if self.data.len() > no_from_top {
            Ok(self.data[self.data.len() - no_from_top - 1])
        } else {
            cold_path();
            Err(InstructionResult::StackUnderflow)
        }
    }

    /// Duplicates the `N`th value from the top of the stack.
    ///
    /// # Panics
    ///
    /// Panics if `n` is 0.
    #[inline]
    #[must_use]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn dup(&mut self, n: usize) -> bool {
        assume!(n > 0, "attempted to dup 0");
        let len = self.len();
        // Bitwise `|` (not `||`) combines both bounds checks without a second
        // branch: underflow (`len < n`) and overflow (`len + 1 > STACK_LIMIT`).
        if (len < n) | (len + 1 > STACK_LIMIT) {
            cold_path();
            return false;
        }
        // SAFETY: Check for out of bounds is done above and it makes this safe to do.
        unsafe {
            let ptr = self.data.as_mut_ptr().add(len);
            *ptr = *ptr.sub(n);
            self.data.set_len(len + 1);
        }
        true
    }

    /// Swaps the topmost value with the `N`th value from the top.
    ///
    /// # Panics
    ///
    /// Panics if `n` is 0.
    #[inline(always)]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn swap(&mut self, n: usize) -> bool {
        self.exchange(0, n)
    }

    /// Exchange two values on the stack.
    ///
    /// `n` is the first index, and the second index is calculated as `n + m`.
    ///
    /// # Panics
    ///
    /// Panics if `m` is zero.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn exchange(&mut self, n: usize, m: usize) -> bool {
        // `m > 0` guarantees the two indices differ, which is what makes
        // `swap_nonoverlapping` below sound.
        assume!(m > 0, "overlapping exchange");
        let len = self.len();
        let n_m_index = n + m;
        if n_m_index >= len {
            cold_path();
            return false;
        }
        // SAFETY: `n` and `n_m` are checked to be within bounds, and they don't overlap.
        unsafe {
            // Note: `ptr::swap_nonoverlapping` is more efficient than `slice::swap` or `ptr::swap`
            // because it operates under the assumption that the pointers do not overlap,
            // eliminating an intermediate copy,
            // which is a condition we know to be true in this context.
            let top = self.data.as_mut_ptr().add(len - 1);
            core::ptr::swap_nonoverlapping(top.sub(n), top.sub(n_m_index), 1);
        }
        true
    }

    /// Pushes an arbitrary length slice of bytes onto the stack, padding the last word with zeros
    /// if necessary.
    ///
    /// Returns `StackOverflow` if the slice would not fit below `STACK_LIMIT`.
    #[inline]
    pub fn push_slice(&mut self, slice: &[u8]) -> Result<(), InstructionResult> {
        if self.push_slice_(slice) {
            Ok(())
        } else {
            Err(InstructionResult::StackOverflow)
        }
    }

    /// Pushes an arbitrary length slice of bytes onto the stack, padding the last word with zeros
    /// if necessary. Returns `false` on overflow, leaving the stack unchanged.
    #[inline]
    fn push_slice_(&mut self, slice: &[u8]) -> bool {
        if slice.is_empty() {
            cold_path();
            return true;
        }

        // Number of 32-byte words the slice occupies, rounding the tail up.
        let n_words = slice.len().div_ceil(32);
        let new_len = self.len() + n_words;
        if new_len > STACK_LIMIT {
            cold_path();
            return false;
        }

        // In debug builds, ensure underlying capacity is sufficient for the write.
        debug_assert!(self.data.capacity() >= new_len);

        // SAFETY: Length checked above.
        unsafe {
            // View the uninitialized tail as a run of u64 limbs; `i` counts
            // limbs written (4 limbs per 32-byte word).
            let dst = self.data.as_mut_ptr().add(self.len()).cast::<u64>();
            self.data.set_len(new_len);

            let mut i = 0;

            // Write full words
            let words = slice.chunks_exact(32);
            let partial_last_word = words.remainder();
            for word in words {
                // Note: We unroll `U256::from_be_bytes` here to write directly into the buffer,
                // instead of creating a 32 byte array on the stack and then copying it over.
                // `rchunks_exact` walks the big-endian bytes from the end, so the
                // least-significant 8 bytes land in the lowest limb slot.
                for l in word.rchunks_exact(8) {
                    dst.add(i).write(u64::from_be_bytes(l.try_into().unwrap()));
                    i += 1;
                }
            }

            if partial_last_word.is_empty() {
                // Slice length was a multiple of 32; nothing left to pad/zero.
                return true;
            }

            // Write limbs of partial last word
            let limbs = partial_last_word.rchunks_exact(8);
            let partial_last_limb = limbs.remainder();
            for l in limbs {
                dst.add(i).write(u64::from_be_bytes(l.try_into().unwrap()));
                i += 1;
            }

            // Write partial last limb by padding with zeros
            if !partial_last_limb.is_empty() {
                let mut tmp = [0u8; 8];
                // Right-align the leftover bytes (big-endian zero padding).
                tmp[8 - partial_last_limb.len()..].copy_from_slice(partial_last_limb);
                dst.add(i).write(u64::from_be_bytes(tmp));
                i += 1;
            }

            debug_assert_eq!(i.div_ceil(4), n_words, "wrote too much");

            // Zero out upper bytes of last word
            let m = i % 4; // 32 / 8
            if m != 0 {
                // `write_bytes` count is in units of u64, so this zeroes the
                // remaining `4 - m` limbs of the final word.
                dst.add(i).write_bytes(0, 4 - m);
            }
        }

        true
    }

    /// Set a value at given index for the stack, where the top of the
    /// stack is at index `0`. If the index is too large,
    /// `StackError::Underflow` is returned.
    #[inline]
    pub fn set(&mut self, no_from_top: usize, val: U256) -> Result<(), InstructionResult> {
        if self.len() <= no_from_top {
            cold_path();
            return Err(InstructionResult::StackUnderflow);
        }
        let len = self.len();
        self.data[len - no_from_top - 1] = val;
        Ok(())
    }
}

#[cfg(feature = "serde")]
impl<'de> serde::Deserialize<'de> for Stack {
    /// Deserializes a `Stack`, rejecting input longer than `STACK_LIMIT` and
    /// re-establishing the `STACK_LIMIT` capacity invariant on the buffer.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        // Mirror of `Stack`'s shape so the field can be deserialized via
        // derive without exposing it publicly.
        #[derive(serde::Deserialize)]
        struct StackSerde {
            data: Vec<U256>,
        }

        let mut stack = StackSerde::deserialize(deserializer)?;
        // Reject oversized input instead of silently truncating it.
        if stack.data.len() > STACK_LIMIT {
            return Err(serde::de::Error::custom(std::format!(
                "stack size exceeds limit: {} > {}",
                stack.data.len(),
                STACK_LIMIT
            )));
        }
        // `reserve` guarantees capacity >= len + additional = STACK_LIMIT,
        // restoring the invariant the unchecked push paths rely on.
        stack.data.reserve(STACK_LIMIT - stack.data.len());
        Ok(Self { data: stack.data })
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    /// Runs `f` against a fresh stack whose entire spare capacity has first
    /// been filled with `U256::MAX`, so any code path that accidentally reads
    /// uninitialized/stale memory (instead of writing it) is caught by the
    /// assertions rather than seeing convenient zeros.
    fn run(f: impl FnOnce(&mut Stack)) {
        let mut stack = Stack::new();
        // Fill capacity with non-zero values
        unsafe {
            stack.data.set_len(STACK_LIMIT);
            stack.data.fill(U256::MAX);
            stack.data.set_len(0);
        }
        f(&mut stack);
    }

    #[test]
    fn push_slices() {
        // No-op
        run(|stack| {
            stack.push_slice(b"").unwrap();
            assert!(stack.is_empty());
        });

        // One word
        run(|stack| {
            stack.push_slice(&[42]).unwrap();
            assert_eq!(stack.data, [U256::from(42)]);
        });

        // 16-byte value: exercises the partial-word, whole-limb path.
        let n = 0x1111_2222_3333_4444_5555_6666_7777_8888_u128;
        run(|stack| {
            stack.push_slice(&n.to_be_bytes()).unwrap();
            assert_eq!(stack.data, [U256::from(n)]);
        });

        // More than one word
        run(|stack| {
            let b = [U256::from(n).to_be_bytes::<32>(); 2].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::from(n); 2]);
        });

        // Full word followed by a 1-byte partial word (partial-limb path).
        run(|stack| {
            let b = [&[0; 32][..], &[42u8]].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::ZERO, U256::from(42)]);
        });

        // Full word followed by a 16-byte partial word.
        run(|stack| {
            let b = [&[0; 32][..], &n.to_be_bytes()].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::ZERO, U256::from(n)]);
        });

        // Two full words followed by a partial word.
        run(|stack| {
            let b = [&[0; 64][..], &n.to_be_bytes()].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::ZERO, U256::ZERO, U256::from(n)]);
        });
    }

    #[test]
    fn stack_clone() {
        // Test cloning an empty stack
        let empty_stack = Stack::new();
        let cloned_empty = empty_stack.clone();
        assert_eq!(empty_stack, cloned_empty);
        assert_eq!(cloned_empty.len(), 0);
        assert_eq!(cloned_empty.data().capacity(), STACK_LIMIT);

        // Test cloning a partially filled stack
        let mut partial_stack = Stack::new();
        for i in 0..10 {
            assert!(partial_stack.push(U256::from(i)));
        }
        let mut cloned_partial = partial_stack.clone();
        assert_eq!(partial_stack, cloned_partial);
        assert_eq!(cloned_partial.len(), 10);
        assert_eq!(cloned_partial.data().capacity(), STACK_LIMIT);

        // Test that modifying the clone doesn't affect the original
        assert!(cloned_partial.push(U256::from(100)));
        assert_ne!(partial_stack, cloned_partial);
        assert_eq!(partial_stack.len(), 10);
        assert_eq!(cloned_partial.len(), 11);

        // Test cloning a full stack
        let mut full_stack = Stack::new();
        for i in 0..STACK_LIMIT {
            assert!(full_stack.push(U256::from(i)));
        }
        let mut cloned_full = full_stack.clone();
        assert_eq!(full_stack, cloned_full);
        assert_eq!(cloned_full.len(), STACK_LIMIT);
        assert_eq!(cloned_full.data().capacity(), STACK_LIMIT);

        // Test push to the full original or cloned stack should return StackOverflow
        assert!(!full_stack.push(U256::from(100)));
        assert!(!cloned_full.push(U256::from(100)));
    }
}
544}