revm_interpreter/interpreter/ext_bytecode.rs

use core::ops::Deref;

use bytecode::{
    eof::TypesSection,
    utils::{read_i16, read_u16},
    Bytecode,
};
use primitives::{Bytes, B256};

use super::{EofCodeInfo, EofContainer, EofData, Immediates, Jumps, LegacyBytecode};

#[cfg(feature = "serde")]
mod serde;

#[derive(Debug)]
pub struct ExtBytecode {
    /// The wrapped bytecode.
    base: Bytecode,
    /// Cached hash of the bytecode; `None` until supplied or regenerated.
    bytecode_hash: Option<B256>,
    /// Raw pointer into the bytes of `base`. It stays valid when the struct moves
    /// because `Bytes` keeps its data in a stable heap allocation.
    instruction_pointer: *const u8,
}

impl Deref for ExtBytecode {
    type Target = Bytecode;

    fn deref(&self) -> &Self::Target {
        &self.base
    }
}

impl ExtBytecode {
    /// Creates a new extended bytecode and sets the instruction pointer to the start of the bytecode.
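    ///
    /// A minimal usage sketch (fenced as `ignore`; assumes `Bytecode::new_raw`
    /// and `Bytes` as also used in the tests below, with the `Jumps` trait in
    /// scope for `pc`):
    ///
    /// ```ignore
    /// let ext = ExtBytecode::new(Bytecode::new_raw(Bytes::from(&[0x60, 0x00][..])));
    /// assert_eq!(ext.pc(), 0); // the pointer starts at the first byte
    /// ```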
    pub fn new(base: Bytecode) -> Self {
        let instruction_pointer = base.bytecode_ptr();
        Self {
            base,
            instruction_pointer,
            bytecode_hash: None,
        }
    }

    /// Creates a new `ExtBytecode` with the given hash.
    pub fn new_with_hash(base: Bytecode, hash: B256) -> Self {
        let instruction_pointer = base.bytecode_ptr();
        Self {
            base,
            instruction_pointer,
            bytecode_hash: Some(hash),
        }
    }

    /// Regenerates the bytecode hash and caches it.
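    ///
    /// Sketch of the lazy-hash flow (fenced as `ignore`): the hash is `None`
    /// after [`ExtBytecode::new`] until it is regenerated or supplied up front.
    ///
    /// ```ignore
    /// let mut ext = ExtBytecode::new(bytecode);
    /// assert_eq!(ext.hash(), None);  // nothing computed yet
    /// let h = ext.regenerate_hash(); // computes and caches the hash
    /// assert_eq!(ext.hash(), Some(h));
    /// ```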
    pub fn regenerate_hash(&mut self) -> B256 {
        let hash = self.base.hash_slow();
        self.bytecode_hash = Some(hash);
        hash
    }

    /// Returns the cached bytecode hash, if any.
    pub fn hash(&self) -> Option<B256> {
        self.bytecode_hash
    }
}

impl Jumps for ExtBytecode {
    #[inline]
    fn relative_jump(&mut self, offset: isize) {
        // SAFETY: The caller must ensure the resulting pointer stays within the bytecode.
        self.instruction_pointer = unsafe { self.instruction_pointer.offset(offset) };
    }

    #[inline]
    fn absolute_jump(&mut self, offset: usize) {
        // SAFETY: The caller must ensure `offset` is within the bytecode.
        self.instruction_pointer = unsafe { self.base.bytes_ref().as_ptr().add(offset) };
    }

    #[inline]
    fn is_valid_legacy_jump(&mut self, offset: usize) -> bool {
        self.base
            .legacy_jump_table()
            .expect("bytecode is legacy")
            .is_valid(offset)
    }

    #[inline]
    fn opcode(&self) -> u8 {
        // SAFETY: `instruction_pointer` always points into the bytecode.
        unsafe { *self.instruction_pointer }
    }

    #[inline]
    fn pc(&self) -> usize {
        // SAFETY: `instruction_pointer` is always at an offset from the start of the bytes.
        // In practice this holds unless a caller modifies the `instruction_pointer` field manually.
        unsafe {
            self.instruction_pointer
                .offset_from(self.base.bytes_ref().as_ptr()) as usize
        }
    }
}

impl Immediates for ExtBytecode {
    #[inline]
    fn read_i16(&self) -> i16 {
        // SAFETY: `instruction_pointer` points into the bytecode with enough bytes remaining.
        unsafe { read_i16(self.instruction_pointer) }
    }

    #[inline]
    fn read_u16(&self) -> u16 {
        unsafe { read_u16(self.instruction_pointer) }
    }

    #[inline]
    fn read_i8(&self) -> i8 {
        unsafe { *self.instruction_pointer as i8 }
    }

    #[inline]
    fn read_u8(&self) -> u8 {
        unsafe { *self.instruction_pointer }
    }

    #[inline]
    fn read_slice(&self, len: usize) -> &[u8] {
        // SAFETY: `len` bytes starting at `instruction_pointer` must lie within the bytecode.
        unsafe { core::slice::from_raw_parts(self.instruction_pointer, len) }
    }

    #[inline]
    fn read_offset_i16(&self, offset: isize) -> i16 {
        // Read the immediate at `offset` bytes from the current instruction pointer.
        unsafe { read_i16(self.instruction_pointer.offset(offset)) }
    }

    #[inline]
    fn read_offset_u16(&self, offset: isize) -> u16 {
        // Read the immediate at `offset` bytes from the current instruction pointer.
        unsafe { read_u16(self.instruction_pointer.offset(offset)) }
    }
}

impl EofCodeInfo for ExtBytecode {
    fn code_section_info(&self, idx: usize) -> Option<&TypesSection> {
        self.base
            .eof()
            .and_then(|eof| eof.body.types_section.get(idx))
    }

    fn code_section_pc(&self, idx: usize) -> Option<usize> {
        self.base
            .eof()
            .and_then(|eof| eof.body.eof_code_section_start(idx))
    }
}

impl EofData for ExtBytecode {
    fn data(&self) -> &[u8] {
        self.base.eof().expect("eof").data()
    }

    fn data_slice(&self, offset: usize, len: usize) -> &[u8] {
        self.base.eof().expect("eof").data_slice(offset, len)
    }

    fn data_size(&self) -> usize {
        self.base.eof().expect("eof").header.data_size as usize
    }
}

impl EofContainer for ExtBytecode {
    fn eof_container(&self, index: usize) -> Option<&Bytes> {
        self.base
            .eof()
            .and_then(|eof| eof.body.container_section.get(index))
    }
}

impl LegacyBytecode for ExtBytecode {
    fn bytecode_len(&self) -> usize {
        // Inform the optimizer that the bytecode cannot be EOF to remove a bounds check.
        assume!(!self.base.is_eof());
        self.base.len()
    }

    fn bytecode_slice(&self) -> &[u8] {
        // Inform the optimizer that the bytecode cannot be EOF to remove a bounds check.
        assume!(!self.base.is_eof());
        self.base.original_byte_slice()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use primitives::Bytes;

    #[test]
    fn test_with_hash_constructor() {
        let bytecode = Bytecode::new_raw(Bytes::from(&[0x60, 0x00][..]));
        let hash = bytecode.hash_slow();
        let ext_bytecode = ExtBytecode::new_with_hash(bytecode.clone(), hash);
        assert_eq!(ext_bytecode.bytecode_hash, Some(hash));
    }
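
    // Illustrative sketches exercising the `Jumps` and `Immediates`
    // implementations above. The byte patterns are arbitrary example programs;
    // `read_u16`/`read_offset_u16` are assumed to decode big-endian immediates,
    // matching EVM immediate encoding.
    #[test]
    fn test_jumps_and_pc() {
        // PUSH1 0x01; analysis of raw legacy bytecode pads it so reads stay in bounds.
        let bytecode = Bytecode::new_raw(Bytes::from(&[0x60, 0x01][..]));
        let mut ext = ExtBytecode::new(bytecode);

        assert_eq!(ext.pc(), 0);
        assert_eq!(ext.opcode(), 0x60);

        // Move one byte forward, then jump back to the start.
        ext.relative_jump(1);
        assert_eq!(ext.pc(), 1);
        ext.absolute_jump(0);
        assert_eq!(ext.pc(), 0);
    }

    #[test]
    fn test_immediate_reads() {
        let bytecode = Bytecode::new_raw(Bytes::from(&[0x12, 0x34][..]));
        let ext = ExtBytecode::new(bytecode);

        assert_eq!(ext.read_u8(), 0x12);
        assert_eq!(ext.read_u16(), 0x1234);
        assert_eq!(ext.read_slice(2), &[0x12, 0x34][..]);
        // Reading at a zero offset matches the plain read.
        assert_eq!(ext.read_offset_u16(0), ext.read_u16());
    }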
}