use crate::InstructionResult;
2use core::{fmt, ptr};
3use primitives::U256;
4use std::vec::Vec;
5
6use super::StackTr;
7
/// EVM interpreter stack limit: the maximum number of words the stack may hold.
pub const STACK_LIMIT: usize = 1024;
10
/// EVM interpreter stack of words ([`U256`] values), limited to [`STACK_LIMIT`] entries.
#[derive(Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
pub struct Stack {
    // Invariant: `data.capacity() == STACK_LIMIT` at all times — `push` relies on
    // this via `assume!` to elide the reallocation branch. Every construction path
    // (`new`, `Clone`, `Deserialize`) must uphold it.
    data: Vec<U256>,
}
18
19impl fmt::Display for Stack {
20 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
21 f.write_str("[")?;
22 for (i, x) in self.data.iter().enumerate() {
23 if i > 0 {
24 f.write_str(", ")?;
25 }
26 write!(f, "{x}")?;
27 }
28 f.write_str("]")
29 }
30}
31
impl Default for Stack {
    #[inline]
    fn default() -> Self {
        // Delegate to `new` so the `capacity == STACK_LIMIT` invariant holds.
        Self::new()
    }
}
38
39impl Clone for Stack {
40 fn clone(&self) -> Self {
41 let mut new_stack = Self::new();
45 new_stack.data.extend_from_slice(&self.data);
46 new_stack
47 }
48}
49
impl StackTr for Stack {
    // Delegates to the inherent `len`.
    fn len(&self) -> usize {
        self.len()
    }

    // Checked wrapper: verifies the length before calling the unchecked
    // inherent `popn`.
    #[inline]
    fn popn<const N: usize>(&mut self) -> Option<[U256; N]> {
        if self.len() < N {
            return None;
        }
        // SAFETY: length checked above, so at least `N` elements exist.
        Some(unsafe { self.popn::<N>() })
    }

    // Checked wrapper: needs `POPN` elements to pop plus one left as the
    // mutable top reference.
    #[inline]
    fn popn_top<const POPN: usize>(&mut self) -> Option<([U256; POPN], &mut U256)> {
        if self.len() < POPN + 1 {
            return None;
        }
        // SAFETY: length checked above, so `POPN` pops still leave a top element.
        Some(unsafe { self.popn_top::<POPN>() })
    }

    // The inherent methods below perform their own bounds checks and report
    // failure via their `bool` return value.
    fn exchange(&mut self, n: usize, m: usize) -> bool {
        self.exchange(n, m)
    }

    fn dup(&mut self, n: usize) -> bool {
        self.dup(n)
    }

    fn push(&mut self, value: U256) -> bool {
        self.push(value)
    }
}
85
impl Stack {
    /// Instantiates a new, empty stack with a pre-allocated capacity of
    /// [`STACK_LIMIT`] words (establishing the capacity invariant `push` relies on).
    #[inline]
    pub fn new() -> Self {
        Self {
            data: Vec::with_capacity(STACK_LIMIT),
        }
    }

    /// Returns the number of words currently on the stack.
    #[inline]
    pub fn len(&self) -> usize {
        self.data.len()
    }

    /// Returns whether the stack is empty.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }

    /// Returns a reference to the underlying data buffer.
    #[inline]
    pub fn data(&self) -> &Vec<U256> {
        &self.data
    }

    /// Returns a mutable reference to the underlying data buffer.
    ///
    /// NOTE(review): callers must not break the `capacity == STACK_LIMIT`
    /// invariant (e.g. by shrinking or growing the buffer).
    #[inline]
    pub fn data_mut(&mut self) -> &mut Vec<U256> {
        &mut self.data
    }

    /// Consumes the stack and returns the underlying data buffer.
    #[inline]
    pub fn into_data(self) -> Vec<U256> {
        self.data
    }

    /// Removes the topmost element, returning `StackUnderflow` if the stack
    /// is empty.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn pop(&mut self) -> Result<U256, InstructionResult> {
        self.data.pop().ok_or(InstructionResult::StackUnderflow)
    }

    /// Removes the topmost element without checking for emptiness.
    ///
    /// # Safety
    ///
    /// The caller must ensure the stack is not empty.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn pop_unsafe(&mut self) -> U256 {
        self.data.pop().unwrap_unchecked()
    }

    /// Returns a mutable reference to the topmost element without checking
    /// for emptiness.
    ///
    /// # Safety
    ///
    /// The caller must ensure the stack is not empty (`len - 1` underflows
    /// otherwise).
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn top_unsafe(&mut self) -> &mut U256 {
        let len = self.data.len();
        self.data.get_unchecked_mut(len - 1)
    }

    /// Pops `N` values from the stack, topmost first in the returned array,
    /// without checking the length.
    ///
    /// # Safety
    ///
    /// The caller must ensure the stack holds at least `N` elements.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn popn<const N: usize>(&mut self) -> [U256; N] {
        // Fast path: nothing to pop; also lets the compiler drop the loop for N == 0.
        if N == 0 {
            return [U256::ZERO; N];
        }
        let mut result = [U256::ZERO; N];
        for v in &mut result {
            *v = self.data.pop().unwrap_unchecked();
        }
        result
    }

    /// Pops `POPN` values and returns them together with a mutable reference
    /// to the new top, without checking the length.
    ///
    /// # Safety
    ///
    /// The caller must ensure the stack holds at least `POPN + 1` elements.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn popn_top<const POPN: usize>(&mut self) -> ([U256; POPN], &mut U256) {
        let result = self.popn::<POPN>();
        let top = self.top_unsafe();
        (result, top)
    }

    /// Pushes a value onto the stack, returning `false` (without modifying
    /// the stack) if it is already at [`STACK_LIMIT`].
    #[inline]
    #[must_use]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn push(&mut self, value: U256) -> bool {
        // Capacity invariant lets the optimizer elide Vec's grow-and-realloc path.
        assume!(self.data.capacity() == STACK_LIMIT);
        if self.data.len() == STACK_LIMIT {
            return false;
        }
        self.data.push(value);
        true
    }

    /// Returns the `no_from_top`-th element from the top (0 = topmost), or
    /// `StackUnderflow` if the stack is not deep enough.
    #[inline]
    pub fn peek(&self, no_from_top: usize) -> Result<U256, InstructionResult> {
        if self.data.len() > no_from_top {
            Ok(self.data[self.data.len() - no_from_top - 1])
        } else {
            Err(InstructionResult::StackUnderflow)
        }
    }

    /// Duplicates the `n`-th value from the top onto the top of the stack
    /// (n = 1 duplicates the topmost value). Returns `false` on underflow or
    /// overflow without modifying the stack.
    #[inline]
    #[must_use]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn dup(&mut self, n: usize) -> bool {
        assume!(n > 0, "attempted to dup 0");
        let len = self.data.len();
        if len < n || len + 1 > STACK_LIMIT {
            false
        } else {
            // SAFETY: `len < STACK_LIMIT == capacity`, so one slot past the end is
            // allocated; `len >= n` makes `ptr.sub(n)` in-bounds, and source and
            // destination cannot overlap for a single element with n > 0.
            unsafe {
                let ptr = self.data.as_mut_ptr().add(len);
                ptr::copy_nonoverlapping(ptr.sub(n), ptr, 1);
                self.data.set_len(len + 1);
            }
            true
        }
    }

    /// Swaps the topmost value with the `n`-th value from the top
    /// (`swap(1)` swaps the top two). Returns `false` on underflow.
    #[inline(always)]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn swap(&mut self, n: usize) -> bool {
        self.exchange(0, n)
    }

    /// Exchanges the `n`-th element from the top with the `n + m`-th element
    /// from the top. `m` must be non-zero (the two slots must differ).
    /// Returns `false` on underflow without modifying the stack.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn exchange(&mut self, n: usize, m: usize) -> bool {
        assume!(m > 0, "overlapping exchange");
        let len = self.data.len();
        let n_m_index = n + m;
        if n_m_index >= len {
            return false;
        }
        // SAFETY: `n_m_index < len` keeps both pointers in-bounds, and `m > 0`
        // guarantees the two single-element regions do not overlap.
        unsafe {
            let top = self.data.as_mut_ptr().add(len - 1);
            core::ptr::swap_nonoverlapping(top.sub(n), top.sub(n_m_index), 1);
        }
        true
    }

    /// Pushes a big-endian byte slice onto the stack as one or more
    /// zero-left-padded 32-byte words (`slice.len().div_ceil(32)` words total).
    /// Returns `StackOverflow` if the words do not fit.
    #[inline]
    pub fn push_slice(&mut self, slice: &[u8]) -> Result<(), InstructionResult> {
        if slice.is_empty() {
            return Ok(());
        }

        let n_words = slice.len().div_ceil(32);
        let new_len = self.data.len() + n_words;
        if new_len > STACK_LIMIT {
            return Err(InstructionResult::StackOverflow);
        }

        // SAFETY: `new_len <= STACK_LIMIT == capacity`, so all written slots are
        // allocated; every limb of every claimed word is fully written below
        // before being observed.
        // NOTE(review): writes U256 limbs directly as u64s (least-significant limb
        // first) — presumably assumes a little-endian host like the rest of revm;
        // confirm against `primitives::U256`'s limb layout.
        unsafe {
            let dst = self.data.as_mut_ptr().add(self.data.len()).cast::<u64>();
            self.data.set_len(new_len);

            let mut i = 0;

            // Full 32-byte words: write their four 8-byte limbs, last limb first
            // (rchunks), so limbs land in little-endian limb order.
            let words = slice.chunks_exact(32);
            let partial_last_word = words.remainder();
            for word in words {
                for l in word.rchunks_exact(8) {
                    dst.add(i).write(u64::from_be_bytes(l.try_into().unwrap()));
                    i += 1;
                }
            }

            if partial_last_word.is_empty() {
                return Ok(());
            }

            // Partial trailing word: full 8-byte limbs first...
            let limbs = partial_last_word.rchunks_exact(8);
            let partial_last_limb = limbs.remainder();
            for l in limbs {
                dst.add(i).write(u64::from_be_bytes(l.try_into().unwrap()));
                i += 1;
            }

            // ...then a final partial limb, left-padded with zeros.
            if !partial_last_limb.is_empty() {
                let mut tmp = [0u8; 8];
                tmp[8 - partial_last_limb.len()..].copy_from_slice(partial_last_limb);
                dst.add(i).write(u64::from_be_bytes(tmp));
                i += 1;
            }

            debug_assert_eq!(i.div_ceil(4), n_words, "wrote too much");

            // Zero the remaining (most-significant) limbs of the last word so no
            // stale memory leaks into the pushed value.
            let m = i % 4; if m != 0 {
                dst.add(i).write_bytes(0, 4 - m);
            }
        }

        Ok(())
    }

    /// Sets the `no_from_top`-th element from the top (0 = topmost) to `val`,
    /// returning `StackUnderflow` if the stack is not deep enough.
    #[inline]
    pub fn set(&mut self, no_from_top: usize, val: U256) -> Result<(), InstructionResult> {
        if self.data.len() > no_from_top {
            let len = self.data.len();
            self.data[len - no_from_top - 1] = val;
            Ok(())
        } else {
            Err(InstructionResult::StackUnderflow)
        }
    }
}
359
#[cfg(feature = "serde")]
impl<'de> serde::Deserialize<'de> for Stack {
    /// Deserializes the stack from a plain sequence of words, rejecting
    /// inputs longer than [`STACK_LIMIT`].
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let mut data = Vec::<U256>::deserialize(deserializer)?;
        let len = data.len();
        if len > STACK_LIMIT {
            return Err(serde::de::Error::custom(std::format!(
                "stack size exceeds limit: {} > {}",
                len,
                STACK_LIMIT
            )));
        }
        // Re-establish the `capacity == STACK_LIMIT` invariant that `push`
        // assumes; the deserialized Vec only guarantees capacity >= len.
        data.reserve(STACK_LIMIT - len);
        Ok(Self { data })
    }
}
378
#[cfg(test)]
mod tests {
    use super::*;

    // Runs `f` on a fresh stack whose entire spare capacity has been poisoned
    // with `U256::MAX`, so any word `push_slice` claims to write but doesn't
    // fully overwrite shows up as a corrupted value in the assertions.
    fn run(f: impl FnOnce(&mut Stack)) {
        let mut stack = Stack::new();
        // SAFETY (test-only): capacity is STACK_LIMIT, so `set_len(STACK_LIMIT)`
        // exposes allocated (if uninitialized) memory, which `fill` then
        // initializes before the length is reset to 0.
        unsafe {
            stack.data.set_len(STACK_LIMIT);
            stack.data.fill(U256::MAX);
            stack.data.set_len(0);
        }
        f(&mut stack);
    }

    #[test]
    fn push_slices() {
        // Empty slice pushes nothing.
        run(|stack| {
            stack.push_slice(b"").unwrap();
            assert_eq!(stack.data, []);
        });

        // Single byte becomes one zero-padded word.
        run(|stack| {
            stack.push_slice(&[42]).unwrap();
            assert_eq!(stack.data, [U256::from(42)]);
        });

        let n = 0x1111_2222_3333_4444_5555_6666_7777_8888_u128;
        // 16 bytes: partial word with two full limbs, zero-padded above.
        run(|stack| {
            stack.push_slice(&n.to_be_bytes()).unwrap();
            assert_eq!(stack.data, [U256::from(n)]);
        });

        // Two exact 32-byte words.
        run(|stack| {
            let b = [U256::from(n).to_be_bytes::<32>(); 2].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::from(n); 2]);
        });

        // One full word followed by a 1-byte partial word.
        run(|stack| {
            let b = [&[0; 32][..], &[42u8]].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::ZERO, U256::from(42)]);
        });

        // One full word followed by a 16-byte partial word.
        run(|stack| {
            let b = [&[0; 32][..], &n.to_be_bytes()].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::ZERO, U256::from(n)]);
        });

        // Two full zero words followed by a partial word.
        run(|stack| {
            let b = [&[0; 64][..], &n.to_be_bytes()].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::ZERO, U256::ZERO, U256::from(n)]);
        });
    }

    #[test]
    fn stack_clone() {
        // Cloning an empty stack preserves emptiness and full capacity.
        let empty_stack = Stack::new();
        let cloned_empty = empty_stack.clone();
        assert_eq!(empty_stack, cloned_empty);
        assert_eq!(cloned_empty.len(), 0);
        assert_eq!(cloned_empty.data().capacity(), STACK_LIMIT);

        // Cloning a partially filled stack copies contents and capacity.
        let mut partial_stack = Stack::new();
        for i in 0..10 {
            assert!(partial_stack.push(U256::from(i)));
        }
        let mut cloned_partial = partial_stack.clone();
        assert_eq!(partial_stack, cloned_partial);
        assert_eq!(cloned_partial.len(), 10);
        assert_eq!(cloned_partial.data().capacity(), STACK_LIMIT);

        // The clone is independent: mutating it does not affect the original.
        assert!(cloned_partial.push(U256::from(100)));
        assert_ne!(partial_stack, cloned_partial);
        assert_eq!(partial_stack.len(), 10);
        assert_eq!(cloned_partial.len(), 11);

        // Cloning a full stack keeps it full at exactly STACK_LIMIT capacity.
        let mut full_stack = Stack::new();
        for i in 0..STACK_LIMIT {
            assert!(full_stack.push(U256::from(i)));
        }
        let mut cloned_full = full_stack.clone();
        assert_eq!(full_stack, cloned_full);
        assert_eq!(cloned_full.len(), STACK_LIMIT);
        assert_eq!(cloned_full.data().capacity(), STACK_LIMIT);

        // Both the full original and its clone reject further pushes.
        assert!(!full_stack.push(U256::from(100)));
        assert!(!cloned_full.push(U256::from(100)));
    }
}
479}