revm_interpreter/interpreter/shared_memory.rs

use super::MemoryTr;
use crate::InstructionResult;
use context_interface::cfg::GasParams;
use core::{
    cell::{Ref, RefCell, RefMut},
    cmp::min,
    fmt,
    ops::Range,
};
use primitives::{hex, B256, U256};
use std::{rc::Rc, vec::Vec};

trait RefcellExt<T> {
    fn dbg_borrow(&self) -> Ref<'_, T>;
    fn dbg_borrow_mut(&self) -> RefMut<'_, T>;
}

impl<T> RefcellExt<T> for RefCell<T> {
    #[inline]
    fn dbg_borrow(&self) -> Ref<'_, T> {
        match self.try_borrow() {
            Ok(b) => b,
            Err(e) => debug_unreachable!("{e}"),
        }
    }

    #[inline]
    fn dbg_borrow_mut(&self) -> RefMut<'_, T> {
        match self.try_borrow_mut() {
            Ok(b) => b,
            Err(e) => debug_unreachable!("{e}"),
        }
    }
}

/// A sequential memory shared between calls, which uses
/// a `Vec` for internal representation.
/// A [SharedMemory] instance should always be obtained using
/// the `new` static method to ensure memory safety.
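///
/// # Examples
///
/// A minimal usage sketch (fenced as `ignore`; assumes `SharedMemory` is in
/// scope):
///
/// ```ignore
/// let mut mem = SharedMemory::new();
/// mem.resize(32);                 // grow the current context to 32 bytes
/// mem.set_byte(0, 0x42);
/// assert_eq!(mem.get_byte(0), 0x42);
/// ```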
#[derive(Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct SharedMemory {
    /// The underlying buffer.
    buffer: Option<Rc<RefCell<Vec<u8>>>>,
    /// Memory checkpoint for the current context.
    /// Invariant: always within the bounds of `buffer`.
    my_checkpoint: usize,
    /// Child checkpoint that the buffer is truncated to when the child context is freed.
    child_checkpoint: Option<usize>,
    /// Memory limit. See [`Cfg`](context_interface::Cfg).
    #[cfg(feature = "memory_limit")]
    memory_limit: u64,
}

impl fmt::Debug for SharedMemory {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SharedMemory")
            .field("current_len", &self.len())
            .field("context_memory", &hex::encode(&*self.context_memory()))
            .finish_non_exhaustive()
    }
}

impl Default for SharedMemory {
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

impl MemoryTr for SharedMemory {
    fn set_data(&mut self, memory_offset: usize, data_offset: usize, len: usize, data: &[u8]) {
        self.set_data(memory_offset, data_offset, len, data);
    }

    fn set(&mut self, memory_offset: usize, data: &[u8]) {
        self.set(memory_offset, data);
    }

    fn size(&self) -> usize {
        self.len()
    }

    fn copy(&mut self, destination: usize, source: usize, len: usize) {
        self.copy(destination, source, len);
    }

    fn slice(&self, range: Range<usize>) -> Ref<'_, [u8]> {
        self.slice_range(range)
    }

    fn local_memory_offset(&self) -> usize {
        self.my_checkpoint
    }

    fn set_data_from_global(
        &mut self,
        memory_offset: usize,
        data_offset: usize,
        len: usize,
        data_range: Range<usize>,
    ) {
        self.global_to_local_set_data(memory_offset, data_offset, len, data_range);
    }

    /// Returns a byte slice of the memory region at the given offset.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds access in debug builds only.
    ///
    /// # Safety
    ///
    /// In release builds, calling this method with an out-of-bounds range triggers undefined
    /// behavior. Callers must ensure that the range is within the bounds of the buffer.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    fn global_slice(&self, range: Range<usize>) -> Ref<'_, [u8]> {
        let buffer = self.buffer_ref();
        Ref::map(buffer, |b| match b.get(range.clone()) {
            Some(slice) => slice,
            None => debug_unreachable!("slice OOB: {range:?}; len: {}", self.len()),
        })
    }

    fn resize(&mut self, new_size: usize) -> bool {
        self.resize(new_size);
        true
    }

    /// Returns `true` if growing the current context memory to `offset + len`
    /// would make the shared buffer length exceed the `memory_limit`.
    #[cfg(feature = "memory_limit")]
    #[inline]
    fn limit_reached(&self, offset: usize, len: usize) -> bool {
        self.my_checkpoint
            .saturating_add(offset)
            .saturating_add(len) as u64
            > self.memory_limit
    }
}

impl SharedMemory {
    /// Creates a new memory instance that can be shared between calls.
    ///
    /// The default initial capacity is 4KiB.
    #[inline]
    pub fn new() -> Self {
        Self::with_capacity(4 * 1024) // from evmone
    }

    /// Creates a new invalid memory instance.
    #[inline]
    pub fn invalid() -> Self {
        Self {
            buffer: None,
            my_checkpoint: 0,
            child_checkpoint: None,
            #[cfg(feature = "memory_limit")]
            memory_limit: 0,
        }
    }

    /// Creates a new memory instance with a given shared buffer.
    pub fn new_with_buffer(buffer: Rc<RefCell<Vec<u8>>>) -> Self {
        Self {
            buffer: Some(buffer),
            my_checkpoint: 0,
            child_checkpoint: None,
            #[cfg(feature = "memory_limit")]
            memory_limit: u64::MAX,
        }
    }

    /// Creates a new memory instance that can be shared between calls with the given `capacity`.
    #[inline]
    pub fn with_capacity(capacity: usize) -> Self {
        Self {
            buffer: Some(Rc::new(RefCell::new(Vec::with_capacity(capacity)))),
            my_checkpoint: 0,
            child_checkpoint: None,
            #[cfg(feature = "memory_limit")]
            memory_limit: u64::MAX,
        }
    }

    /// Creates a new memory instance that can be shared between calls,
    /// with `memory_limit` as upper bound for allocation size.
    ///
    /// The default initial capacity is 4KiB.
    #[cfg(feature = "memory_limit")]
    #[inline]
    pub fn new_with_memory_limit(memory_limit: u64) -> Self {
        Self {
            memory_limit,
            ..Self::new()
        }
    }

    /// Sets the memory limit in bytes.
    #[inline]
    pub fn set_memory_limit(&mut self, limit: u64) {
        #[cfg(feature = "memory_limit")]
        {
            self.memory_limit = limit;
        }
        // Silence the unused-variable lint when the `memory_limit` feature is disabled.
        let _ = limit;
    }

    #[inline]
    fn buffer(&self) -> &Rc<RefCell<Vec<u8>>> {
        debug_assert!(self.buffer.is_some(), "cannot use SharedMemory::invalid");
        // SAFETY: `buffer` is only `None` for instances created by `invalid`,
        // which must not be used for memory operations.
        unsafe { self.buffer.as_ref().unwrap_unchecked() }
    }

    #[inline]
    fn buffer_ref(&self) -> Ref<'_, Vec<u8>> {
        self.buffer().dbg_borrow()
    }

    #[inline]
    fn buffer_ref_mut(&self) -> RefMut<'_, Vec<u8>> {
        self.buffer().dbg_borrow_mut()
    }

    /// Prepares the shared memory for a new child context.
    ///
    /// # Panics
    ///
    /// Panics if this function was already called without freeing child context.
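    ///
    /// # Examples
    ///
    /// A sketch of the parent/child checkpoint flow (fenced as `ignore`;
    /// assumes `SharedMemory` is in scope):
    ///
    /// ```ignore
    /// let mut parent = SharedMemory::new();
    /// parent.resize(32);                      // parent context owns bytes 0..32
    /// let mut child = parent.new_child_context();
    /// assert_eq!(child.len(), 0);             // child starts with an empty view
    /// child.resize(64);                       // grows the shared buffer to 96 bytes
    /// parent.free_child_context();            // truncates back to the checkpoint
    /// assert_eq!(parent.len(), 32);
    /// ```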
    #[inline]
    pub fn new_child_context(&mut self) -> SharedMemory {
        if self.child_checkpoint.is_some() {
            panic!("new_child_context was already called without freeing child context");
        }
        let new_checkpoint = self.full_len();
        self.child_checkpoint = Some(new_checkpoint);
        SharedMemory {
            buffer: Some(self.buffer().clone()),
            my_checkpoint: new_checkpoint,
            // The fresh child context has no child of its own yet.
            child_checkpoint: None,
            #[cfg(feature = "memory_limit")]
            memory_limit: self.memory_limit,
        }
    }

    /// Prepares the shared memory for returning from a child context. Does nothing if there is no child context.
    #[inline]
    pub fn free_child_context(&mut self) {
        let Some(child_checkpoint) = self.child_checkpoint.take() else {
            return;
        };
        // SAFETY: `child_checkpoint` never exceeds the current buffer length, and
        // shrinking the length of a `Vec<u8>` cannot expose uninitialized data.
        unsafe {
            self.buffer_ref_mut().set_len(child_checkpoint);
        }
    }

    /// Returns the length of the current memory range.
    #[inline]
    pub fn len(&self) -> usize {
        self.full_len() - self.my_checkpoint
    }

    fn full_len(&self) -> usize {
        self.buffer_ref().len()
    }

    /// Returns `true` if the current memory range is empty.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Resizes the memory in-place so that `len` is equal to `new_size`.
    #[inline]
    pub fn resize(&mut self, new_size: usize) {
        self.buffer()
            .dbg_borrow_mut()
            .resize(self.my_checkpoint + new_size, 0);
    }

    /// Returns a byte slice of the memory region at the given offset.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn slice_len(&self, offset: usize, size: usize) -> Ref<'_, [u8]> {
        self.slice_range(offset..offset + size)
    }

    /// Returns a byte slice of the memory region at the given offset.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds access in debug builds only.
    ///
    /// # Safety
    ///
    /// In release builds, calling this method with an out-of-bounds range triggers undefined
    /// behavior. Callers must ensure that the range is within the bounds of the memory (i.e.,
    /// `range.end <= self.len()`).
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn slice_range(&self, range: Range<usize>) -> Ref<'_, [u8]> {
        let buffer = self.buffer_ref();
        Ref::map(buffer, |b| {
            match b.get(range.start + self.my_checkpoint..range.end + self.my_checkpoint) {
                Some(slice) => slice,
                None => debug_unreachable!("slice OOB: {range:?}; len: {}", self.len()),
            }
        })
    }

    /// Returns a byte slice of the memory region at the given offset.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds access in debug builds only.
    ///
    /// # Safety
    ///
    /// In release builds, calling this method with an out-of-bounds range triggers undefined
    /// behavior. Callers must ensure that the range is within the bounds of the buffer.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn global_slice_range(&self, range: Range<usize>) -> Ref<'_, [u8]> {
        let buffer = self.buffer_ref();
        Ref::map(buffer, |b| match b.get(range.clone()) {
            Some(slice) => slice,
            None => debug_unreachable!("slice OOB: {range:?}; len: {}", self.len()),
        })
    }

    /// Returns a byte slice of the memory region at the given offset.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds access in debug builds only.
    ///
    /// # Safety
    ///
    /// In release builds, calling this method with out-of-bounds parameters triggers undefined
    /// behavior. Callers must ensure that `offset + size` does not exceed the length of the
    /// memory.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn slice_mut(&mut self, offset: usize, size: usize) -> RefMut<'_, [u8]> {
        let buffer = self.buffer_ref_mut();
        RefMut::map(buffer, |b| {
            match b.get_mut(self.my_checkpoint + offset..self.my_checkpoint + offset + size) {
                Some(slice) => slice,
                None => debug_unreachable!("slice OOB: {offset}..{}", offset + size),
            }
        })
    }

    /// Returns the byte at the given offset.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds.
    #[inline]
    pub fn get_byte(&self, offset: usize) -> u8 {
        self.slice_len(offset, 1)[0]
    }

    /// Returns a 32-byte slice of the memory region at the given offset.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds.
    #[inline]
    pub fn get_word(&self, offset: usize) -> B256 {
        (*self.slice_len(offset, 32)).try_into().unwrap()
    }

    /// Returns a U256 of the memory region at the given offset.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds.
    #[inline]
    pub fn get_u256(&self, offset: usize) -> U256 {
        self.get_word(offset).into()
    }

    /// Sets the `byte` at the given `offset`.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn set_byte(&mut self, offset: usize, byte: u8) {
        self.set(offset, &[byte]);
    }

    /// Sets the given 32-byte `value` to the memory region at the given `offset`.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn set_word(&mut self, offset: usize, value: &B256) {
        self.set(offset, &value[..]);
    }

    /// Sets the given U256 `value` to the memory region at the given `offset`.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds.
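    ///
    /// # Examples
    ///
    /// A word-sized round trip (fenced as `ignore`; assumes `SharedMemory` and
    /// `U256` are in scope):
    ///
    /// ```ignore
    /// let mut mem = SharedMemory::new();
    /// mem.resize(32);
    /// mem.set_u256(0, U256::from(42));
    /// assert_eq!(mem.get_u256(0), U256::from(42));
    /// ```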
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn set_u256(&mut self, offset: usize, value: U256) {
        self.set(offset, &value.to_be_bytes::<32>());
    }

    /// Set memory region at given `offset`.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn set(&mut self, offset: usize, value: &[u8]) {
        if !value.is_empty() {
            self.slice_mut(offset, value.len()).copy_from_slice(value);
        }
    }

    /// Sets memory from `data`. The memory-side range `memory_offset..memory_offset + len`
    /// is expected to be correct, while `data`/`data_offset`/`len` are bounds-checked and
    /// any part that is not copied is zeroed.
    ///
    /// # Panics
    ///
    /// Panics if memory is out of bounds.
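    ///
    /// # Examples
    ///
    /// Zero-filling when the source runs out of data (fenced as `ignore`;
    /// assumes `SharedMemory` is in scope):
    ///
    /// ```ignore
    /// let mut mem = SharedMemory::new();
    /// mem.resize(32);
    /// // Request 4 bytes starting at data offset 2; only 2 are available,
    /// // so the last 2 destination bytes are zeroed.
    /// mem.set_data(0, 2, 4, &[1, 2, 3, 4]);
    /// assert_eq!(&*mem.slice_len(0, 4), &[3, 4, 0, 0]);
    /// ```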
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn set_data(&mut self, memory_offset: usize, data_offset: usize, len: usize, data: &[u8]) {
        let mut dst = self.context_memory_mut();
        // SAFETY: `memory_offset + len` is expected to be within the current
        // context memory; the data side is bounds-checked by `set_data`.
        unsafe { set_data(dst.as_mut(), data, memory_offset, data_offset, len) };
    }

    /// Set data from global memory to local memory. If global range is smaller than len, zeroes the rest.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn global_to_local_set_data(
        &mut self,
        memory_offset: usize,
        data_offset: usize,
        len: usize,
        data_range: Range<usize>,
    ) {
        let mut buffer = self.buffer_ref_mut();
        // Split into the global part below our checkpoint (source) and the
        // current context memory (destination).
        let (src, dst) = buffer.split_at_mut(self.my_checkpoint);
        let src = if data_range.is_empty() {
            &mut []
        } else {
            src.get_mut(data_range).unwrap()
        };
        // SAFETY: `memory_offset + len` is expected to be within the current
        // context memory; the source side is bounds-checked by `set_data`.
        unsafe { set_data(dst, src, memory_offset, data_offset, len) };
    }

    /// Copies elements from one part of the memory to another part of itself.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn copy(&mut self, dst: usize, src: usize, len: usize) {
        self.context_memory_mut().copy_within(src..src + len, dst);
    }

    /// Returns a reference to the memory of the current context, the active memory.
    ///
    /// # Panics
    ///
    /// Panics if the checkpoint is invalid in debug builds only.
    ///
    /// # Safety
    ///
    /// In release builds, calling this method with an invalid checkpoint triggers undefined
    /// behavior. The checkpoint must be within the bounds of the buffer.
    #[inline]
    pub fn context_memory(&self) -> Ref<'_, [u8]> {
        let buffer = self.buffer_ref();
        Ref::map(buffer, |b| match b.get(self.my_checkpoint..) {
            Some(slice) => slice,
            None => debug_unreachable!("Context memory should be always valid"),
        })
    }

    /// Returns a mutable reference to the memory of the current context.
    ///
    /// # Panics
    ///
    /// Panics if the checkpoint is invalid in debug builds only.
    ///
    /// # Safety
    ///
    /// In release builds, calling this method with an invalid checkpoint triggers undefined
    /// behavior. The checkpoint must be within the bounds of the buffer.
    #[inline]
    pub fn context_memory_mut(&mut self) -> RefMut<'_, [u8]> {
        let buffer = self.buffer_ref_mut();
        RefMut::map(buffer, |b| match b.get_mut(self.my_checkpoint..) {
            Some(slice) => slice,
            None => debug_unreachable!("Context memory should be always valid"),
        })
    }
}

/// Copies data from `src` to `dst`, taking the offsets and `len` into account.
///
/// If `src` does not have enough data, it zeroes the rest of `dst` that is not copied.
///
/// # Safety
///
/// The caller must ensure that `dst_offset + len` is within the bounds of `dst`.
/// The source side is bounds-checked: `src_offset` may point past the end of `src`,
/// in which case the whole destination range is zeroed.
unsafe fn set_data(dst: &mut [u8], src: &[u8], dst_offset: usize, src_offset: usize, len: usize) {
    if len == 0 {
        return;
    }
    if src_offset >= src.len() {
        // Nullify all memory slots
        dst.get_mut(dst_offset..dst_offset + len).unwrap().fill(0);
        return;
    }
    let src_end = min(src_offset + len, src.len());
    let src_len = src_end - src_offset;
    debug_assert!(src_offset < src.len() && src_end <= src.len());
    // SAFETY: `src_offset < src.len()` and `src_end <= src.len()` were just established.
    let data = unsafe { src.get_unchecked(src_offset..src_end) };
    // SAFETY: `dst_offset + src_len <= dst_offset + len`, which is in bounds per the
    // function's safety contract.
    unsafe {
        dst.get_unchecked_mut(dst_offset..dst_offset + src_len)
            .copy_from_slice(data)
    };

    // Nullify rest of memory slots
    // SAFETY: `dst_offset + len` is in bounds per the function's safety contract.
    unsafe {
        dst.get_unchecked_mut(dst_offset + src_len..dst_offset + len)
            .fill(0)
    };
}

/// Returns the number of 32-byte words needed to hold the given number of bytes,
/// i.e. it rounds the byte length up to the nearest word.
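///
/// # Examples
///
/// A quick illustration of the rounding (fenced as `ignore`; assumes
/// `num_words` is in scope):
///
/// ```ignore
/// assert_eq!(num_words(0), 0);
/// assert_eq!(num_words(31), 1); // a partial word still occupies a full word
/// assert_eq!(num_words(32), 1);
/// assert_eq!(num_words(33), 2);
/// ```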
#[inline]
pub const fn num_words(len: usize) -> usize {
    len.div_ceil(32)
}

/// Performs an EVM memory resize, charging gas for the expansion if the new size
/// exceeds the currently allocated words.
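///
/// # Examples
///
/// A sketch of how an instruction handler might call this (fenced as `ignore`;
/// `gas`, `memory`, and `gas_table` stand in for the surrounding interpreter state):
///
/// ```ignore
/// // Charge for expansion (if any) before an MSTORE-like 32-byte write.
/// resize_memory(&mut gas, &mut memory, &gas_table, offset, 32)?;
/// memory.set(offset, &value);
/// ```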
#[inline]
pub fn resize_memory<Memory: MemoryTr>(
    gas: &mut crate::Gas,
    memory: &mut Memory,
    gas_table: &GasParams,
    offset: usize,
    len: usize,
) -> Result<(), InstructionResult> {
    #[cfg(feature = "memory_limit")]
    if memory.limit_reached(offset, len) {
        return Err(InstructionResult::MemoryLimitOOG);
    }

    let new_num_words = num_words(offset.saturating_add(len));
    if new_num_words > gas.memory().words_num {
        return resize_memory_cold(gas, memory, gas_table, new_num_words);
    }

    Ok(())
}

#[cold]
#[inline(never)]
fn resize_memory_cold<Memory: MemoryTr>(
    gas: &mut crate::Gas,
    memory: &mut Memory,
    gas_table: &GasParams,
    new_num_words: usize,
) -> Result<(), InstructionResult> {
    let cost = gas_table.memory_cost(new_num_words);
    // SAFETY: `resize_memory` only calls this when `new_num_words` is strictly
    // greater than the current word count.
    let cost = unsafe {
        gas.memory_mut()
            .set_words_num(new_num_words, cost)
            .unwrap_unchecked()
    };

    if !gas.record_cost(cost) {
        return Err(InstructionResult::MemoryOOG);
    }
    memory.resize(new_num_words * 32);
    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_num_words() {
        assert_eq!(num_words(0), 0);
        assert_eq!(num_words(1), 1);
        assert_eq!(num_words(31), 1);
        assert_eq!(num_words(32), 1);
        assert_eq!(num_words(33), 2);
        assert_eq!(num_words(63), 2);
        assert_eq!(num_words(64), 2);
        assert_eq!(num_words(65), 3);
        assert_eq!(num_words(usize::MAX - 31), usize::MAX / 32);
        assert_eq!(num_words(usize::MAX - 30), (usize::MAX / 32) + 1);
        assert_eq!(num_words(usize::MAX), (usize::MAX / 32) + 1);
    }

    #[test]
    fn new_free_child_context() {
        let mut sm1 = SharedMemory::new();

        assert_eq!(sm1.buffer_ref().len(), 0);
        assert_eq!(sm1.my_checkpoint, 0);

        unsafe { sm1.buffer_ref_mut().set_len(32) };
        assert_eq!(sm1.len(), 32);
        let mut sm2 = sm1.new_child_context();

        assert_eq!(sm2.buffer_ref().len(), 32);
        assert_eq!(sm2.my_checkpoint, 32);
        assert_eq!(sm2.len(), 0);

        unsafe { sm2.buffer_ref_mut().set_len(96) };
        assert_eq!(sm2.len(), 64);
        let mut sm3 = sm2.new_child_context();

        assert_eq!(sm3.buffer_ref().len(), 96);
        assert_eq!(sm3.my_checkpoint, 96);
        assert_eq!(sm3.len(), 0);

        unsafe { sm3.buffer_ref_mut().set_len(128) };
        let sm4 = sm3.new_child_context();
        assert_eq!(sm4.buffer_ref().len(), 128);
        assert_eq!(sm4.my_checkpoint, 128);
        assert_eq!(sm4.len(), 0);

        // Free contexts
        drop(sm4);
        sm3.free_child_context();
        assert_eq!(sm3.buffer_ref().len(), 128);
        assert_eq!(sm3.my_checkpoint, 96);
        assert_eq!(sm3.len(), 32);

        sm2.free_child_context();
        assert_eq!(sm2.buffer_ref().len(), 96);
        assert_eq!(sm2.my_checkpoint, 32);
        assert_eq!(sm2.len(), 64);

        sm1.free_child_context();
        assert_eq!(sm1.buffer_ref().len(), 32);
        assert_eq!(sm1.my_checkpoint, 0);
        assert_eq!(sm1.len(), 32);
    }

    #[test]
    fn resize() {
        let mut sm1 = SharedMemory::new();
        sm1.resize(32);
        assert_eq!(sm1.buffer_ref().len(), 32);
        assert_eq!(sm1.len(), 32);
        assert_eq!(sm1.buffer_ref().get(0..32), Some(&[0_u8; 32] as &[u8]));

        let mut sm2 = sm1.new_child_context();
        sm2.resize(96);
        assert_eq!(sm2.buffer_ref().len(), 128);
        assert_eq!(sm2.len(), 96);
        assert_eq!(sm2.buffer_ref().get(32..128), Some(&[0_u8; 96] as &[u8]));

        sm1.free_child_context();
        assert_eq!(sm1.buffer_ref().len(), 32);
        assert_eq!(sm1.len(), 32);
        assert_eq!(sm1.buffer_ref().get(0..32), Some(&[0_u8; 32] as &[u8]));
    }
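
    // Sanity check for `copy`: it delegates to `copy_within`, which has
    // memmove semantics, so overlapping source and destination ranges are fine.
    #[test]
    fn copy_within_context() {
        let mut sm = SharedMemory::new();
        sm.resize(64);
        sm.set(0, &[1, 2, 3, 4]);
        sm.copy(2, 0, 4);
        assert_eq!(&*sm.slice_len(0, 6), &[1, 2, 1, 2, 3, 4]);
    }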
}