revm_bytecode/legacy/jump_map.rs

use bitvec::vec::BitVec;
use core::hash::{Hash, Hasher};
use once_cell::race::OnceBox;
use primitives::hex;
use std::{fmt::Debug, sync::Arc};
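
/// A table of valid `JUMP` destinations, one bit per byte of bytecode.
///
/// Cloning is cheap: the bit vector is shared behind an [`Arc`], and its raw
/// pointer and bit length are cached so [`Self::is_valid`] can test a bit
/// without going through the [`Arc`] on the hot path.
///
/// # Example
///
/// A sketch of the intended usage; bits are stored in `Lsb0` order, so the
/// byte `0b0000_0101` marks positions 0 and 2 as valid:
///
/// ```ignore
/// let table = JumpTable::from_slice(&[0b0000_0101], 3);
/// assert!(table.is_valid(0));
/// assert!(!table.is_valid(1));
/// assert!(table.is_valid(2));
/// ```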
#[derive(Clone, Eq, Ord, PartialOrd)]
pub struct JumpTable {
    /// Shared, immutable bit vector backing the table.
    table: Arc<BitVec<u8>>,
    /// Cached pointer to the raw bytes of `table`, used for fast lookups.
    table_ptr: *const u8,
    /// Cached bit length of `table`.
    len: usize,
}

// `table_ptr` is derived from `table`, so equality and hashing only need to
// consider the bit vector and the cached length.
impl PartialEq for JumpTable {
    fn eq(&self, other: &Self) -> bool {
        self.table.eq(&other.table) && self.len.eq(&other.len)
    }
}

impl Hash for JumpTable {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.table.hash(state);
        self.len.hash(state);
    }
}

#[cfg(feature = "serde")]
impl serde::Serialize for JumpTable {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.table.serialize(serializer)
    }
}

#[cfg(feature = "serde")]
impl<'de> serde::Deserialize<'de> for JumpTable {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let bitvec = BitVec::deserialize(deserializer)?;
        Ok(Self::new(bitvec))
    }
}

// SAFETY: `table_ptr` points into the allocation owned by `table`, which the
// `Arc` keeps alive for as long as `self` and which is never mutated, so the
// cached pointer may be sent and shared across threads.
unsafe impl Send for JumpTable {}
unsafe impl Sync for JumpTable {}

impl Debug for JumpTable {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("JumpTable")
            .field("map", &hex::encode(self.table.as_raw_slice()))
            .finish()
    }
}

impl Default for JumpTable {
    #[inline]
    fn default() -> Self {
        // All defaulted tables share one static empty instance instead of
        // allocating a fresh bit vector each time.
        static DEFAULT: OnceBox<JumpTable> = OnceBox::new();
        DEFAULT
            .get_or_init(|| Self::new(BitVec::default()).into())
            .clone()
    }
}

impl JumpTable {
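    /// Creates a new jump table from the given bit vector, caching its raw
    /// pointer and bit length for fast validity checks.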
    pub fn new(jumps: BitVec<u8>) -> Self {
        let table = Arc::new(jumps);
        let table_ptr = table.as_raw_slice().as_ptr();
        let len = table.len();

        Self {
            table,
            table_ptr,
            len,
        }
    }

    #[inline]
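    /// Returns the raw bytes backing the table.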
    pub fn as_slice(&self) -> &[u8] {
        self.table.as_raw_slice()
    }

    #[inline]
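    /// Returns the length of the table in bits.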
    pub fn len(&self) -> usize {
        self.len
    }

    #[inline]
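    /// Returns `true` if the table contains no bits.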
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    #[inline]
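    /// Constructs a jump table from a raw byte slice, taking the first
    /// `bit_len` bits; bit `i` marks bytecode position `i` as a valid jump
    /// destination.
    ///
    /// # Panics
    ///
    /// Panics if `bit_len` exceeds the number of bits in `slice`
    /// (`slice.len() * 8`).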
    pub fn from_slice(slice: &[u8], bit_len: usize) -> Self {
        const BYTE_LEN: usize = 8;
        assert!(
            slice.len() * BYTE_LEN >= bit_len,
            "slice bit length {} is less than bit_len {}",
            slice.len() * BYTE_LEN,
            bit_len
        );
        let mut bitvec = BitVec::from_slice(slice);
        // SAFETY: the assertion above guarantees that `bit_len` does not
        // exceed the bit capacity of the slice-backed vector.
        unsafe { bitvec.set_len(bit_len) };

        let table = Arc::new(bitvec);
        let table_ptr = table.as_raw_slice().as_ptr();
        let len = table.len();

        Self {
            table,
            table_ptr,
            len,
        }
    }

    #[inline]
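    /// Checks whether `pc` is a valid jump destination.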
    pub fn is_valid(&self, pc: usize) -> bool {
        // SAFETY: `pc < self.len` guarantees the byte at index `pc / 8` is in
        // bounds of the backing slice; bits are stored in `Lsb0` order.
        pc < self.len && unsafe { *self.table_ptr.add(pc >> 3) & (1 << (pc & 7)) != 0 }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    #[should_panic(expected = "slice bit length 8 is less than bit_len 10")]
    fn test_jump_table_from_slice_panic() {
        let slice = &[0x00];
        let _ = JumpTable::from_slice(slice, 10);
    }

    #[test]
    fn test_jump_table_from_slice() {
        let slice = &[0x00];
        let jumptable = JumpTable::from_slice(slice, 3);
        assert_eq!(jumptable.len, 3);
    }

    #[test]
    fn test_is_valid() {
        // In Lsb0 order, 0x0D = 0b0000_1101 sets bits 0, 2 and 3, and
        // 0x06 = 0b0000_0110 sets bits 1 and 2 of the second byte, i.e.
        // positions 9 and 10.
        let jump_table = JumpTable::from_slice(&[0x0D, 0x06], 13);

        assert_eq!(jump_table.len, 13);

        assert!(jump_table.is_valid(0));
        assert!(!jump_table.is_valid(1));
        assert!(jump_table.is_valid(2));
        assert!(jump_table.is_valid(3));
        assert!(!jump_table.is_valid(4));
        assert!(!jump_table.is_valid(5));
        assert!(!jump_table.is_valid(6));
        assert!(!jump_table.is_valid(7));
        assert!(!jump_table.is_valid(8));
        assert!(jump_table.is_valid(9));
        assert!(jump_table.is_valid(10));
        assert!(!jump_table.is_valid(11));
        assert!(!jump_table.is_valid(12));
    }
}

#[cfg(test)]
mod bench_is_valid {
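    //! Ad-hoc comparison of the cached-pointer lookup against a plain
    //! `Arc`-dereferencing baseline. The `bench_*` tests print their numbers,
    //! so run them in release mode with output capturing disabled.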
    use super::*;
    use std::time::Instant;

    const ITERATIONS: usize = 1_000_000;
    const TEST_SIZE: usize = 10_000;

    /// Builds a `TEST_SIZE`-bit table with every third bit set.
    fn create_test_table() -> BitVec<u8> {
        let mut bitvec = BitVec::from_vec(vec![0u8; TEST_SIZE.div_ceil(8)]);
        bitvec.resize(TEST_SIZE, false);
        for i in (0..TEST_SIZE).step_by(3) {
            bitvec.set(i, true);
        }
        bitvec
    }

    #[derive(Clone)]
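    /// Baseline implementation that dereferences the `Arc` on every lookup.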
    pub(super) struct JumpTableWithArcDeref(pub Arc<BitVec<u8>>);

    impl JumpTableWithArcDeref {
        #[inline]
        pub(super) fn is_valid(&self, pc: usize) -> bool {
            // SAFETY: `pc` is checked against the length before the unchecked
            // bit access.
            pc < self.0.len() && unsafe { *self.0.get_unchecked(pc) }
        }
    }

    fn benchmark_implementation<F>(name: &str, table: &F, test_fn: impl Fn(&F, usize) -> bool)
    where
        F: Clone,
    {
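        // Warm up caches and the branch predictor before timing.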
        for i in 0..10_000 {
            std::hint::black_box(test_fn(table, i % TEST_SIZE));
        }

        let start = Instant::now();
        let mut count = 0;

        for i in 0..ITERATIONS {
            if test_fn(table, i % TEST_SIZE) {
                count += 1;
            }
        }

        let duration = start.elapsed();
        let ns_per_op = duration.as_nanos() as f64 / ITERATIONS as f64;
        let ops_per_sec = ITERATIONS as f64 / duration.as_secs_f64();

        println!("{name} Performance:");
        println!(" Time per op: {ns_per_op:.2} ns");
        println!(" Ops per sec: {ops_per_sec:.0}");
        println!(" True count: {count}");
        println!();

        // Keep `count` observable so the timed loop is not optimized away.
        std::hint::black_box(count);
    }

    #[test]
    fn bench_is_valid() {
        println!("JumpTable is_valid() Benchmark Comparison");
        println!("=========================================");

        let bitvec = create_test_table();

        // Measure the cached-pointer implementation.
        let cached_table = JumpTable::new(bitvec.clone());
        benchmark_implementation("JumpTable (Cached Pointer)", &cached_table, |table, pc| {
            table.is_valid(pc)
        });

        // Measure the Arc-dereferencing baseline.
        let arc_table = JumpTableWithArcDeref(Arc::new(bitvec));
        benchmark_implementation("JumpTableWithArcDeref (Arc)", &arc_table, |table, pc| {
            table.is_valid(pc)
        });

        println!("Benchmark completed successfully!");
    }

    #[test]
    fn bench_different_access_patterns() {
        let bitvec = create_test_table();
        let cached_table = JumpTable::new(bitvec.clone());
        let arc_table = JumpTableWithArcDeref(Arc::new(bitvec));

        println!("Access Pattern Comparison");
        println!("========================");

        // Sequential access over the table.
        let start = Instant::now();
        for i in 0..ITERATIONS {
            std::hint::black_box(cached_table.is_valid(i % TEST_SIZE));
        }
        let cached_sequential = start.elapsed();

        let start = Instant::now();
        for i in 0..ITERATIONS {
            std::hint::black_box(arc_table.is_valid(i % TEST_SIZE));
        }
        let arc_sequential = start.elapsed();

        // Strided access (stride 17) to break up the sequential pattern.
        let start = Instant::now();
        for i in 0..ITERATIONS {
            std::hint::black_box(cached_table.is_valid((i * 17) % TEST_SIZE));
        }
        let cached_random = start.elapsed();

        let start = Instant::now();
        for i in 0..ITERATIONS {
            std::hint::black_box(arc_table.is_valid((i * 17) % TEST_SIZE));
        }
        let arc_random = start.elapsed();

        println!("Sequential Access:");
        println!(
            " Cached: {:.2} ns/op",
            cached_sequential.as_nanos() as f64 / ITERATIONS as f64
        );
        println!(
            " Arc: {:.2} ns/op",
            arc_sequential.as_nanos() as f64 / ITERATIONS as f64
        );
        println!(
            " Speedup: {:.1}x",
            arc_sequential.as_nanos() as f64 / cached_sequential.as_nanos() as f64
        );

        println!();
        println!("Random Access:");
        println!(
            " Cached: {:.2} ns/op",
            cached_random.as_nanos() as f64 / ITERATIONS as f64
        );
        println!(
            " Arc: {:.2} ns/op",
            arc_random.as_nanos() as f64 / ITERATIONS as f64
        );
        println!(
            " Speedup: {:.1}x",
            arc_random.as_nanos() as f64 / cached_random.as_nanos() as f64
        );
    }
}