revm_bytecode/legacy/jump_map.rs

use bitvec::vec::BitVec;
use core::{
    cmp::Ordering,
    hash::{Hash, Hasher},
};
use primitives::{hex, OnceLock};
use std::{fmt::Debug, sync::Arc};

/// A table of valid jump destinations, one bit per byte of bytecode: a set bit
/// marks a valid `JUMPDEST`.
#[derive(Clone, Eq)]
pub struct JumpTable {
    /// Cached raw pointer into the bit vector's backing bytes, used for fast lookup.
    table_ptr: *const u8,
    /// Length of the table in bits.
    len: usize,
    /// The owning bit vector; the `Arc` keeps `table_ptr` valid and makes clones cheap.
    table: Arc<BitVec<u8>>,
}

// SAFETY: `table_ptr` points into the buffer owned by `table`, which is never
// mutated after construction and lives as long as this struct.
unsafe impl Send for JumpTable {}
unsafe impl Sync for JumpTable {}

// Equality, hashing, and ordering delegate to the underlying bit vector;
// `table_ptr` and `len` are derived from it and carry no extra state.
impl PartialEq for JumpTable {
    fn eq(&self, other: &Self) -> bool {
        self.table.eq(&other.table)
    }
}

impl Hash for JumpTable {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.table.hash(state);
    }
}

impl PartialOrd for JumpTable {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for JumpTable {
    fn cmp(&self, other: &Self) -> Ordering {
        self.table.cmp(&other.table)
    }
}

// Only the bit vector is serialized; `table_ptr` and `len` are rebuilt via
// `Self::new` on deserialization.
#[cfg(feature = "serde")]
impl serde::Serialize for JumpTable {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.table.serialize(serializer)
    }
}

#[cfg(feature = "serde")]
impl<'de> serde::Deserialize<'de> for JumpTable {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let bitvec = BitVec::deserialize(deserializer)?;
        Ok(Self::new(bitvec))
    }
}

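// A minimal round-trip sketch, assuming a JSON format via a `serde_json`
// dev-dependency (an assumption, not confirmed by this file):
//
//     let table = JumpTable::from_slice(&[0x0D], 8);
//     let json = serde_json::to_string(&table).unwrap();
//     let back: JumpTable = serde_json::from_str(&json).unwrap();
//     assert_eq!(table, back);
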
impl Debug for JumpTable {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("JumpTable")
            .field("map", &hex::encode(self.table.as_raw_slice()))
            .finish()
    }
}

impl Default for JumpTable {
    #[inline]
    fn default() -> Self {
        // All callers share one lazily initialized empty table.
        static DEFAULT: OnceLock<JumpTable> = OnceLock::new();
        DEFAULT.get_or_init(|| Self::new(BitVec::default())).clone()
    }
}

impl JumpTable {
    /// Creates a new jump table from the given bit vector, caching a raw
    /// pointer to its backing storage for fast lookups.
    pub fn new(jumps: BitVec<u8>) -> Self {
        let table = Arc::new(jumps);
        let table_ptr = table.as_raw_slice().as_ptr();
        let len = table.len();

        Self {
            table,
            table_ptr,
            len,
        }
    }

    /// Returns the raw bytes backing the table.
    #[inline]
    pub fn as_slice(&self) -> &[u8] {
        self.table.as_raw_slice()
    }

    /// Returns the length of the table in bits.
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

    /// Returns `true` if the table contains no bits.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Constructs a jump table from raw bytes and a bit length.
    ///
    /// Only the first `bit_len` bits are meaningful.
    ///
    /// # Panics
    ///
    /// Panics if `slice` holds fewer than `bit_len` bits.
    #[inline]
    pub fn from_slice(slice: &[u8], bit_len: usize) -> Self {
        const BYTE_LEN: usize = 8;
        assert!(
            slice.len() * BYTE_LEN >= bit_len,
            "slice bit length {} is less than bit_len {}",
            slice.len() * BYTE_LEN,
            bit_len
        );
        let mut bitvec = BitVec::from_slice(slice);
        // SAFETY: the assertion above guarantees the backing storage holds at
        // least `bit_len` bits.
        unsafe { bitvec.set_len(bit_len) };
        Self::new(bitvec)
    }

    /// Checks whether `pc` is a valid jump destination.
    ///
    /// `pc >> 3` selects the byte and `pc & 7` the bit within it, read through
    /// the cached pointer to avoid dereferencing the `Arc` on the hot path.
    #[inline]
    pub fn is_valid(&self, pc: usize) -> bool {
        // SAFETY: the `pc < self.len` bounds check guarantees the byte at
        // `pc >> 3` lies within the table's backing storage.
        pc < self.len && unsafe { *self.table_ptr.add(pc >> 3) & (1 << (pc & 7)) != 0 }
    }
}

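// Illustrative usage sketch (hypothetical, not part of this module): building
// a table from bytecode. `0x5b` is the JUMPDEST opcode; a real analysis pass
// must also skip PUSH immediate data instead of scanning every byte.
//
//     let bytecode: &[u8] = &[0x5b, 0x00, 0x5b];
//     let mut bits: BitVec<u8> = BitVec::repeat(false, bytecode.len());
//     for (i, byte) in bytecode.iter().enumerate() {
//         if *byte == 0x5b {
//             bits.set(i, true);
//         }
//     }
//     let table = JumpTable::new(bits);
//     assert!(table.is_valid(0) && !table.is_valid(1) && table.is_valid(2));
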
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    #[should_panic(expected = "slice bit length 8 is less than bit_len 10")]
    fn test_jump_table_from_slice_panic() {
        let slice = &[0x00];
        let _ = JumpTable::from_slice(slice, 10);
    }

    #[test]
    fn test_jump_table_from_slice() {
        let slice = &[0x00];
        let jumptable = JumpTable::from_slice(slice, 3);
        assert_eq!(jumptable.len, 3);
    }

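    // A hedged sketch, not from the original suite: rebuilding a table from
    // `as_slice()` and `len()` yields an equal table, since equality compares
    // only the underlying bit vector. 0b0000_0101 sets bits 0 and 2.
    #[test]
    fn test_round_trip_through_slice() {
        let original = JumpTable::from_slice(&[0b0000_0101], 8);
        let rebuilt = JumpTable::from_slice(original.as_slice(), original.len());
        assert_eq!(original, rebuilt);
        assert!(rebuilt.is_valid(0));
        assert!(!rebuilt.is_valid(1));
        assert!(rebuilt.is_valid(2));
    }
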
    #[test]
    fn test_is_valid() {
        let jump_table = JumpTable::from_slice(&[0x0D, 0x06], 13);

        assert_eq!(jump_table.len, 13);

        assert!(jump_table.is_valid(0));
        assert!(!jump_table.is_valid(1));
        assert!(jump_table.is_valid(2));
        assert!(jump_table.is_valid(3));
        assert!(!jump_table.is_valid(4));
        assert!(!jump_table.is_valid(5));
        assert!(!jump_table.is_valid(6));
        assert!(!jump_table.is_valid(7));
        assert!(!jump_table.is_valid(8));
        assert!(jump_table.is_valid(9));
        assert!(jump_table.is_valid(10));
        assert!(!jump_table.is_valid(11));
        assert!(!jump_table.is_valid(12));
    }
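
    // A sketch of the indexing arithmetic behind `is_valid` (illustrative
    // constants): `pc >> 3` selects the byte and `pc & 7` the bit within it.
    // 0x06 = 0b0000_0110 sets bits 1 and 2 of byte 1, i.e. pcs 9 and 10.
    #[test]
    fn test_bit_indexing_sketch() {
        let table = JumpTable::from_slice(&[0x00, 0x06], 16);
        let pc = 10usize;
        assert_eq!(pc >> 3, 1); // byte index into the raw slice
        assert_eq!(pc & 7, 2); // bit position within that byte
        assert!(!table.is_valid(8));
        assert!(table.is_valid(9));
        assert!(table.is_valid(10));
        assert!(!table.is_valid(11));
    }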
}

#[cfg(test)]
mod bench_is_valid {
    use super::*;
    use std::time::Instant;

    const ITERATIONS: usize = 1_000_000;
    const TEST_SIZE: usize = 10_000;

    /// Builds a `TEST_SIZE`-bit table with every third bit set.
    fn create_test_table() -> BitVec<u8> {
        let mut bitvec = BitVec::from_vec(vec![0u8; TEST_SIZE.div_ceil(8)]);
        bitvec.resize(TEST_SIZE, false);
        for i in (0..TEST_SIZE).step_by(3) {
            bitvec.set(i, true);
        }
        bitvec
    }


    /// Baseline that dereferences the `Arc` on every lookup, for comparison
    /// against the cached-pointer implementation.
    #[derive(Clone)]
    pub(super) struct JumpTableWithArcDeref(pub Arc<BitVec<u8>>);

    impl JumpTableWithArcDeref {
        #[inline]
        pub(super) fn is_valid(&self, pc: usize) -> bool {
            // SAFETY: `pc` is bounds-checked against the bit vector's length.
            pc < self.0.len() && unsafe { *self.0.get_unchecked(pc) }
        }
    }

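    // These benches print timings; a typical invocation (assuming a standard
    // cargo test setup) is:
    //
    //     cargo test --release bench_ -- --nocapture
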
    fn benchmark_implementation<F>(name: &str, table: &F, test_fn: impl Fn(&F, usize) -> bool)
    where
        F: Clone,
    {
        // Warm up caches and the branch predictor before timing.
        for i in 0..10_000 {
            std::hint::black_box(test_fn(table, i % TEST_SIZE));
        }

        let start = Instant::now();
        let mut count = 0;

        for i in 0..ITERATIONS {
            if test_fn(table, i % TEST_SIZE) {
                count += 1;
            }
        }

        let duration = start.elapsed();
        let ns_per_op = duration.as_nanos() as f64 / ITERATIONS as f64;
        let ops_per_sec = ITERATIONS as f64 / duration.as_secs_f64();

        println!("{name} Performance:");
        println!(" Time per op: {ns_per_op:.2} ns");
        println!(" Ops per sec: {ops_per_sec:.0}");
        println!(" True count: {count}");
        println!();

        std::hint::black_box(count);
    }

    #[test]
    fn bench_is_valid() {
        println!("JumpTable is_valid() Benchmark Comparison");
        println!("=========================================");

        let bitvec = create_test_table();

        // Implementation under test: reads through the cached raw pointer.
        let cached_table = JumpTable::new(bitvec.clone());
        benchmark_implementation("JumpTable (Cached Pointer)", &cached_table, |table, pc| {
            table.is_valid(pc)
        });

        // Baseline: dereferences the `Arc` on every lookup.
        let arc_table = JumpTableWithArcDeref(Arc::new(bitvec));
        benchmark_implementation("JumpTableWithArcDeref (Arc)", &arc_table, |table, pc| {
            table.is_valid(pc)
        });

        println!("Benchmark completed successfully!");
    }

    #[test]
    fn bench_different_access_patterns() {
        let bitvec = create_test_table();
        let cached_table = JumpTable::new(bitvec.clone());
        let arc_table = JumpTableWithArcDeref(Arc::new(bitvec));

        println!("Access Pattern Comparison");
        println!("========================");

        // Sequential access, cached pointer.
        let start = Instant::now();
        for i in 0..ITERATIONS {
            std::hint::black_box(cached_table.is_valid(i % TEST_SIZE));
        }
        let cached_sequential = start.elapsed();

        // Sequential access, Arc deref.
        let start = Instant::now();
        for i in 0..ITERATIONS {
            std::hint::black_box(arc_table.is_valid(i % TEST_SIZE));
        }
        let arc_sequential = start.elapsed();

        // Strided access (stride 17), cached pointer.
        let start = Instant::now();
        for i in 0..ITERATIONS {
            std::hint::black_box(cached_table.is_valid((i * 17) % TEST_SIZE));
        }
        let cached_random = start.elapsed();

        // Strided access (stride 17), Arc deref.
        let start = Instant::now();
        for i in 0..ITERATIONS {
            std::hint::black_box(arc_table.is_valid((i * 17) % TEST_SIZE));
        }
        let arc_random = start.elapsed();

        println!("Sequential Access:");
        println!(
            " Cached: {:.2} ns/op",
            cached_sequential.as_nanos() as f64 / ITERATIONS as f64
        );
        println!(
            " Arc: {:.2} ns/op",
            arc_sequential.as_nanos() as f64 / ITERATIONS as f64
        );
        println!(
            " Speedup: {:.1}x",
            arc_sequential.as_nanos() as f64 / cached_sequential.as_nanos() as f64
        );

        println!();
        println!("Random Access:");
        println!(
            " Cached: {:.2} ns/op",
            cached_random.as_nanos() as f64 / ITERATIONS as f64
        );
        println!(
            " Arc: {:.2} ns/op",
            arc_random.as_nanos() as f64 / ITERATIONS as f64
        );
        println!(
            " Speedup: {:.1}x",
            arc_random.as_nanos() as f64 / cached_random.as_nanos() as f64
        );
    }
}