Barretenberg — the ZK-SNARK library at the core of Aztec.
bytecode_trace.cpp — Doxygen-generated source listing.
(Interactive-viewer residue — "Loading…", "Searching…", "No Matches",
"Go to the documentation of this file." — removed; the listing below keeps
the original Doxygen line-number prefixes, and some source lines are
missing from the capture.)
2
3#include <cstddef>
4#include <cstdint>
5#include <vector>
6
19
21
22namespace bb::avm2::tracegen {
23
// Builds the bytecode decomposition sub-trace (bc_decomposition_* columns).
// Per the Doxygen index, the signature is:
//   process_decomposition(const simulation::EventEmitterInterface<
//       simulation::BytecodeDecompositionEvent>::Container& events,
//       TraceContainer& trace)
// For each event's bytecode it emits one row per byte carrying a 37-byte
// sliding window, and every 31 bytes a "packed field" row holding the
// 31-byte chunk as a single field element.
// NOTE(review): this is a Doxygen scrape — original line-number prefixes
// remain and the signature's opening line (original line 38) is missing.
39 TraceContainer& trace)
40{
41 using C = Column;
42 // Since next_packed_pc - pc is always in the range [0, 31), we can precompute the inverses:
43 std::vector<FF> next_packed_pc_min_pc_inverses = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
44 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 };
45 FF::batch_invert(next_packed_pc_min_pc_inverses);
46
47 // We start from row 1 because we need a row of zeroes for the shifts.
48 uint32_t row = 1;
49
50 for (const auto& event : events) {
51 const auto& bytecode = *event.bytecode;
52 const auto id = event.bytecode_id;
// Out-of-range reads return 0 so the sliding window can run past the end.
53 auto bytecode_at = [&bytecode](size_t i) -> uint8_t { return i < bytecode.size() ? bytecode[i] : 0; };
54 const uint32_t bytecode_len = static_cast<uint32_t>(bytecode.size());
55
// One trace row per byte of bytecode; `i` doubles as the program counter.
56 for (uint32_t i = 0; i < bytecode_len; i++) {
57 const uint32_t remaining = bytecode_len - i;
58 const uint32_t bytes_to_read = std::min(remaining, DECOMPOSE_WINDOW_SIZE);
59 const bool is_last = remaining == 1;
60 const bool is_windows_eq_remaining = remaining == DECOMPOSE_WINDOW_SIZE;
61
62 // Check that we still expect the max public bytecode in bytes to fit within 24 bits (i.e. <= 0xffffff).
63 static_assert(MAX_PACKED_PUBLIC_BYTECODE_SIZE_IN_FIELDS * 31 <= 0xffffff);
64
65 // We set the decomposition in bytes, and other values.
66 trace.set(row + i,
67 { {
68 { C::bc_decomposition_sel, 1 },
69 { C::bc_decomposition_id, id },
70 { C::bc_decomposition_pc, i },
71 { C::bc_decomposition_start, i == 0 ? 1 : 0 },
72 { C::bc_decomposition_last_of_contract, is_last ? 1 : 0 },
73 { C::bc_decomposition_bytes_remaining, remaining },
74 { C::bc_decomposition_bytes_to_read, bytes_to_read },
75 { C::bc_decomposition_sel_windows_gt_remaining, DECOMPOSE_WINDOW_SIZE > remaining ? 1 : 0 },
76 { C::bc_decomposition_sel_windows_eq_remaining, is_windows_eq_remaining ? 1 : 0 },
77 // Inverses will be calculated in batch later.
// The *_inv columns are seeded with the value to invert (or 0 when the
// value is 0) and inverted in one batch by invert_columns() below.
78 { C::bc_decomposition_bytes_rem_inv, remaining },
79 { C::bc_decomposition_bytes_rem_min_one_inv, is_last ? 0 : FF(remaining - 1) },
80 { C::bc_decomposition_windows_min_remaining_inv,
81 is_windows_eq_remaining ? 0 : FF(DECOMPOSE_WINDOW_SIZE) - FF(remaining) },
82 // Sliding window.
83 { C::bc_decomposition_bytes, bytecode_at(i) },
84 { C::bc_decomposition_bytes_pc_plus_1, bytecode_at(i + 1) },
85 { C::bc_decomposition_bytes_pc_plus_2, bytecode_at(i + 2) },
86 { C::bc_decomposition_bytes_pc_plus_3, bytecode_at(i + 3) },
87 { C::bc_decomposition_bytes_pc_plus_4, bytecode_at(i + 4) },
88 { C::bc_decomposition_bytes_pc_plus_5, bytecode_at(i + 5) },
89 { C::bc_decomposition_bytes_pc_plus_6, bytecode_at(i + 6) },
90 { C::bc_decomposition_bytes_pc_plus_7, bytecode_at(i + 7) },
91 { C::bc_decomposition_bytes_pc_plus_8, bytecode_at(i + 8) },
92 { C::bc_decomposition_bytes_pc_plus_9, bytecode_at(i + 9) },
93 { C::bc_decomposition_bytes_pc_plus_10, bytecode_at(i + 10) },
94 { C::bc_decomposition_bytes_pc_plus_11, bytecode_at(i + 11) },
95 { C::bc_decomposition_bytes_pc_plus_12, bytecode_at(i + 12) },
96 { C::bc_decomposition_bytes_pc_plus_13, bytecode_at(i + 13) },
97 { C::bc_decomposition_bytes_pc_plus_14, bytecode_at(i + 14) },
98 { C::bc_decomposition_bytes_pc_plus_15, bytecode_at(i + 15) },
99 { C::bc_decomposition_bytes_pc_plus_16, bytecode_at(i + 16) },
100 { C::bc_decomposition_bytes_pc_plus_17, bytecode_at(i + 17) },
101 { C::bc_decomposition_bytes_pc_plus_18, bytecode_at(i + 18) },
102 { C::bc_decomposition_bytes_pc_plus_19, bytecode_at(i + 19) },
103 { C::bc_decomposition_bytes_pc_plus_20, bytecode_at(i + 20) },
104 { C::bc_decomposition_bytes_pc_plus_21, bytecode_at(i + 21) },
105 { C::bc_decomposition_bytes_pc_plus_22, bytecode_at(i + 22) },
106 { C::bc_decomposition_bytes_pc_plus_23, bytecode_at(i + 23) },
107 { C::bc_decomposition_bytes_pc_plus_24, bytecode_at(i + 24) },
108 { C::bc_decomposition_bytes_pc_plus_25, bytecode_at(i + 25) },
109 { C::bc_decomposition_bytes_pc_plus_26, bytecode_at(i + 26) },
110 { C::bc_decomposition_bytes_pc_plus_27, bytecode_at(i + 27) },
111 { C::bc_decomposition_bytes_pc_plus_28, bytecode_at(i + 28) },
112 { C::bc_decomposition_bytes_pc_plus_29, bytecode_at(i + 29) },
113 { C::bc_decomposition_bytes_pc_plus_30, bytecode_at(i + 30) },
114 { C::bc_decomposition_bytes_pc_plus_31, bytecode_at(i + 31) },
115 { C::bc_decomposition_bytes_pc_plus_32, bytecode_at(i + 32) },
116 { C::bc_decomposition_bytes_pc_plus_33, bytecode_at(i + 33) },
117 { C::bc_decomposition_bytes_pc_plus_34, bytecode_at(i + 34) },
118 { C::bc_decomposition_bytes_pc_plus_35, bytecode_at(i + 35) },
119 { C::bc_decomposition_bytes_pc_plus_36, bytecode_at(i + 36) },
120 } });
121 }
122
123 // We set the packed field every 31 bytes.
// Reads a 31-byte big-endian chunk starting at byte i as a field element.
124 auto bytecode_field_at = [&](size_t i) -> FF {
125 // We need to read uint256_ts because reading FFs messes up the order of the bytes.
126 uint256_t as_int = 0;
127 if (bytecode_len - i >= 32) {
128 // If we have more than 31 bytes remaining, we read 32 bytes directly from the bytecode
129 // vector starting at byte i:
130 as_int = from_buffer<uint256_t>(bytecode, i);
131 } else {
132 // Otherwise, we pad the final bytes with zeros to 32:
// NOTE(review): `ssize_t` is POSIX, not standard C++ — presumably fine
// on the supported toolchains, but std::ptrdiff_t would be portable.
133 std::vector<uint8_t> tail(bytecode.begin() + static_cast<ssize_t>(i), bytecode.end());
134 tail.resize(32, 0);
135 as_int = from_buffer<uint256_t>(tail, 0);
136 }
137 // We shift to form a 31 byte int:
138 return as_int >> 8;
139 };
140 for (uint32_t i = 0; i < bytecode_len; i += 31) {
141 // Set the packed field and related columns. Note that the multipermutation columns (sel_packed_read)
142 // are set separately by the MultiPermutationBuilder.
143 trace.set(row + i,
144 { {
145 { C::bc_decomposition_sel_packed, 1 },
146 { C::bc_decomposition_packed_field, bytecode_field_at(i) },
147 { C::bc_decomposition_next_packed_pc, i },
148 { C::bc_decomposition_next_packed_pc_min_pc_inv, 0 },
149 } });
150 // At each row until the next packed field, set the next pc and inverse required for the zero check
151 // (#[PC_IS_PACKED]):
152 for (uint32_t j = i + 1; j < std::min(bytecode_len, i + 31); j++) {
153 trace.set(
154 row + j,
155 { {
156 { C::bc_decomposition_next_packed_pc, i + 31 },
// next_packed_pc - pc = (i + 31) - j is in [1, 30] here, so the
// precomputed inverse table covers every case.
157 { C::bc_decomposition_next_packed_pc_min_pc_inv, next_packed_pc_min_pc_inverses[i + 31 - j] },
158 } });
159 }
160 }
161
162 // We advance to the next bytecode.
163 row += bytecode_len;
164 }
165
166 // Batch invert the columns.
167 trace.invert_columns({ { C::bc_decomposition_bytes_rem_inv,
168 C::bc_decomposition_bytes_rem_min_one_inv,
169 C::bc_decomposition_windows_min_remaining_inv } });
170}
171
171
// Builds the bytecode hashing sub-trace (bc_hashing_* columns).
// Per the Doxygen index, the signature is:
//   process_hashing(const simulation::EventEmitterInterface<
//       simulation::BytecodeHashingEvent>::Container& events,
//       TraceContainer& trace)
// Each bytecode is hashed with Poseidon2 over its packed fields, prefixed by
// a length|separator field; the trace consumes the fields three per row.
// NOTE(review): Doxygen scrape — original line-number prefixes remain and
// the signature lines (original lines 172-173) are missing.
174{
175 using C = Column;
// Row 0 is left as zeroes (shift row), matching the other sub-traces.
176 uint32_t row = 1;
177
178 for (const auto& event : events) {
179 const auto id = event.bytecode_id;
180 // Note that bytecode fields from the BytecodeHashingEvent do not contain the prepended field length | separator
// The first hashed field encodes the bytecode length (and separator); the
// event's packed fields follow it.
181 std::vector<FF> fields = { simulation::compute_public_bytecode_first_field(event.bytecode_length) };
182 fields.reserve(1 + event.bytecode_fields.size());
183 fields.insert(fields.end(), event.bytecode_fields.begin(), event.bytecode_fields.end());
// Out-of-range reads return 0: the tail of the last row is zero-padded.
184 auto bytecode_field_at = [&fields](size_t i) -> FF { return i < fields.size() ? fields[i] : 0; };
185 FF output_hash = Poseidon2::hash(fields);
// Pad the field count up to a multiple of 3 (one Poseidon2 round per 3 fields).
186 auto padding_amount = (3 - (fields.size() % 3)) % 3;
187 auto num_rounds = (fields.size() + padding_amount) / 3;
188 uint32_t pc_index = 0;
189 for (uint32_t i = 0; i < fields.size(); i += 3) {
190 bool start_of_bytecode = i == 0;
191 bool end_of_bytecode = i + 3 >= fields.size();
192 // When we start the bytecode, we want to look up field 1 at pc = 0 in the decomposition trace, since we
193 // force field 0 to be the separator:
194 uint32_t pc_index_1 = start_of_bytecode ? 0 : pc_index + 31;
195 trace.set(row,
196 { { { C::bc_hashing_sel, 1 },
197 { C::bc_hashing_start, start_of_bytecode },
198 { C::bc_hashing_sel_not_start, !start_of_bytecode },
199 { C::bc_hashing_latch, end_of_bytecode },
200 { C::bc_hashing_bytecode_id, id },
201 { C::bc_hashing_size_in_bytes,
202 event.bytecode_length }, // Note: only needs to be constrained at start
203 { C::bc_hashing_input_len, fields.size() },
204 { C::bc_hashing_rounds_rem, num_rounds },
205 { C::bc_hashing_pc_index, pc_index },
206 { C::bc_hashing_pc_index_1, pc_index_1 },
207 { C::bc_hashing_pc_index_2, pc_index_1 + 31 },
208 { C::bc_hashing_packed_fields_0, bytecode_field_at(i) },
209 { C::bc_hashing_packed_fields_1, bytecode_field_at(i + 1) },
210 { C::bc_hashing_packed_fields_2, bytecode_field_at(i + 2) },
// Padding selectors: a slot is "not padding" unless it lies past the
// real fields on the final row.
211 { C::bc_hashing_sel_not_padding_1, end_of_bytecode && padding_amount == 2 ? 0 : 1 },
212 { C::bc_hashing_sel_not_padding_2, end_of_bytecode && padding_amount > 0 ? 0 : 1 },
213 { C::bc_hashing_output_hash, output_hash } } });
214 if (end_of_bytecode) {
215 // Below sets the pc at which the final field starts. We only use/constrain it at latch == 1.
216 // Note: It can't just be pc_index + 31 * padding_amount because we 'skip' 31 bytes at start == 1 to
217 // force the first field to be the separator.
218 FF pc_at_final_field =
219 padding_amount == 2
220 // Two padding fields => we are currently at the final field:
221 ? pc_index
222 // One padding field => the final field starts at pc_index_1
223 // No padding fields => the final field starts at pc_index_2 (= pc_index_1 + 31):
224 : pc_index_1 + (31 * (1 - padding_amount));
225 trace.set(row,
226 { {
227 { C::bc_hashing_pc_at_final_field, pc_at_final_field },
228 } });
229 }
// Advance: the next row's pc_index follows the two fields consumed after
// pc_index_1 (31 bytes each).
230 pc_index = pc_index_1 + 62;
231 row++;
232 num_rounds--;
233 }
234 }
235}
236
236
// Builds the bytecode retrieval sub-trace (bc_retrieval_* columns), one row
// per BytecodeRetrievalEvent. Per the Doxygen index, the signature is:
//   process_retrieval(const simulation::EventEmitterInterface<
//       simulation::BytecodeRetrievalEvent>::Container& events,
//       TraceContainer& trace)
// NOTE(review): Doxygen scrape — original line-number prefixes remain and
// several source lines (261, 264, 297) are missing from the capture; see the
// notes below.
252 TraceContainer& trace)
253{
254 using C = Column;
255
// Unlike the decomposition/hashing sub-traces, this one starts at row 0.
256 uint32_t row = 0;
257 for (const auto& event : events) {
258 // Since the maximum is (currently) 21 and we prove incrementation of next_available_leaf_index
259 // at each row, the use of uint64 should be safe and never underflow.
260 uint64_t remaining_bytecodes = MAX_PUBLIC_CALLS_TO_UNIQUE_CONTRACT_CLASS_IDS +
// NOTE(review): original line 261 is missing from this scrape — the full
// expression presumably also involves AVM_RETRIEVED_BYTECODES_TREE_INITIAL_SIZE
// and subtracts the leaf index; confirm against the repository source.
262 event.retrieved_bytecodes_snapshot_before.next_available_leaf_index;
263 bool error = event.error.has_value();
// NOTE(review): original line 264 — the statement that opens the scope
// closed by the brace at line 267 (presumably an `if` guarding this
// assertion) — is missing from the scrape. The single '&' in the condition
// also looks suspicious (bitwise-and of bools vs. a logical check);
// confirm both against the repository source before relying on this.
265 BB_ASSERT(event.is_new_class == true & remaining_bytecodes == 0,
266 "TOO_MANY_BYTECODES error incorrectly set for bytecode retrieval");
267 }
268 trace.set(
269 row,
270 { {
271 { C::bc_retrieval_sel, 1 },
272 { C::bc_retrieval_bytecode_id, event.bytecode_id },
273 { C::bc_retrieval_address, event.address },
274
275 // Contract instance members (for lookup into contract_instance_retrieval)
276 { C::bc_retrieval_current_class_id, event.current_class_id },
277
278 // Contract class members (for lookup into class_id_derivation)
279 { C::bc_retrieval_artifact_hash, event.contract_class.artifact_hash },
280 { C::bc_retrieval_private_functions_root, event.contract_class.private_functions_root },
281
282 // Tree context (for lookup into contract_instance_retrieval)
283 { C::bc_retrieval_public_data_tree_root, event.public_data_tree_root },
284 { C::bc_retrieval_nullifier_tree_root, event.nullifier_root },
285
286 // Retrieved bytecodes tree context (for lookup into indexed_tree_check)
287 { C::bc_retrieval_retrieved_bytecodes_tree_height, AVM_RETRIEVED_BYTECODES_TREE_HEIGHT },
288 { C::bc_retrieval_prev_retrieved_bytecodes_tree_root, event.retrieved_bytecodes_snapshot_before.root },
289 { C::bc_retrieval_prev_retrieved_bytecodes_tree_size,
290 event.retrieved_bytecodes_snapshot_before.next_available_leaf_index },
291 { C::bc_retrieval_next_retrieved_bytecodes_tree_root, event.retrieved_bytecodes_snapshot_after.root },
292 { C::bc_retrieval_next_retrieved_bytecodes_tree_size,
293 event.retrieved_bytecodes_snapshot_after.next_available_leaf_index },
294
295 // Instance existence determined by shared contract instance retrieval
// NOTE(review): the value expression for this column (original line 297)
// is missing from the scrape.
296 { C::bc_retrieval_instance_exists,
298
299 // Error handling
300 { C::bc_retrieval_error, error ? 1 : 0 },
301 { C::bc_retrieval_is_new_class, event.is_new_class },
302 { C::bc_retrieval_should_retrieve, error ? 0 : 1 },
303 // Too many bytecodes handling
304 { C::bc_retrieval_no_remaining_bytecodes, remaining_bytecodes == 0 ? 1 : 0 },
305 { C::bc_retrieval_remaining_bytecodes_inv, remaining_bytecodes }, // Will be inverted in batch later.
306 } });
307 row++;
308 }
309
310 // Batch invert the columns.
311 trace.invert_columns({ { C::bc_retrieval_remaining_bytecodes_inv } });
312}
313
313
// Builds the instruction fetching sub-trace (instr_fetching_* columns), one
// row per InstructionFetchingEvent. Per the Doxygen index, the signature is:
//   process_instruction_fetching(const simulation::EventEmitterInterface<
//       simulation::InstructionFetchingEvent>::Container& events,
//       TraceContainer& trace)
// NOTE(review): Doxygen scrape — original line-number prefixes remain and
// original lines 313-315 and 319-323 are missing; the latter presumably
// bring the bare error names (PC_OUT_OF_RANGE, OPCODE_OUT_OF_RANGE, ...)
// into scope. Confirm against the repository source.
316 TraceContainer& trace)
317{
318 using C = Column;
324
325 // We start from row 1 because we need a row of zeroes for the shifts.
326 uint32_t row = 1;
327
328 for (const auto& event : events) {
329 const auto bytecode_id = event.bytecode_id;
330 const auto bytecode_size = event.bytecode->size();
331 // To match column PARSING_ERROR_EXCEPT_TAG_ERROR:
332 const bool parsing_error_non_tag = event.error == PC_OUT_OF_RANGE || event.error == OPCODE_OUT_OF_RANGE ||
333 event.error == INSTRUCTION_OUT_OF_RANGE;
334
// Operands are zeroed whenever a non-tag parsing error occurred, or when the
// instruction has fewer operands than the requested index.
335 auto get_operand = [&](size_t i) -> FF {
336 return i < event.instruction.operands.size() && !parsing_error_non_tag
337 ? static_cast<FF>(event.instruction.operands[i])
338 : 0;
339 };
// Out-of-range reads return 0 so the 37-byte window can run past the end.
340 auto bytecode_at = [&](size_t i) -> uint8_t { return i < bytecode_size ? (*event.bytecode)[i] : 0; };
341
342 const uint8_t wire_opcode = bytecode_at(event.pc);
343 const bool wire_opcode_in_range =
344 event.error != PC_OUT_OF_RANGE && wire_opcode < static_cast<uint8_t>(WireOpCode::LAST_OPCODE_SENTINEL);
345
// Defaults used when the wire opcode is out of range (spec lookup skipped).
346 uint32_t size_in_bytes = 0;
347 ExecutionOpCode exec_opcode = static_cast<ExecutionOpCode>(0);
348 std::array<uint8_t, NUM_OP_DC_SELECTORS> op_dc_selectors{};
349 uint8_t has_tag = 0;
350 uint8_t tag_is_op2 = 0;
351 uint8_t tag_value = 0;
352
353 if (wire_opcode_in_range) {
354 const auto& wire_instr_spec = get_wire_instruction_spec().at(static_cast<WireOpCode>(wire_opcode));
355 size_in_bytes = wire_instr_spec.size_in_bytes;
356 exec_opcode = wire_instr_spec.exec_opcode;
357 op_dc_selectors = wire_instr_spec.op_dc_selectors;
358
359 if (wire_instr_spec.tag_operand_idx.has_value()) {
360 const auto tag_value_idx = wire_instr_spec.tag_operand_idx.value();
361 BB_ASSERT((tag_value_idx == 2 || tag_value_idx == 3),
362 "Current constraints support only tag for operand index equal to 2 or 3");
363 has_tag = 1;
364
365 if (tag_value_idx == 2) {
366 tag_is_op2 = 1;
367 tag_value = static_cast<uint8_t>(get_operand(1)); // in instruction.operands, op2 has index 1
368 } else {
369 tag_value = static_cast<uint8_t>(get_operand(2));
370 }
371 }
372 }
373
374 const uint32_t bytes_remaining =
375 event.error == PC_OUT_OF_RANGE ? 0 : static_cast<uint32_t>(bytecode_size - event.pc);
376 const uint32_t bytes_to_read = std::min(bytes_remaining, DECOMPOSE_WINDOW_SIZE);
377
// |bytes_to_read - size_in_bytes|, minus 1 on the "instruction too large"
// side — a non-negative witness for the range constraint either way.
378 uint32_t instr_abs_diff = 0;
379 if (size_in_bytes <= bytes_to_read) {
380 instr_abs_diff = bytes_to_read - size_in_bytes;
381 } else {
382 instr_abs_diff = size_in_bytes - bytes_to_read - 1;
383 }
384
// Non-negative witness for pc vs. bytecode size (minus 1 when pc is in
// range, since then bytecode_size > pc).
385 uint32_t bytecode_size_u32 = static_cast<uint32_t>(bytecode_size);
386 uint32_t pc_abs_diff =
387 bytecode_size_u32 > event.pc ? bytecode_size_u32 - event.pc - 1 : event.pc - bytecode_size_u32;
388
389 trace.set(row,
390 { {
391 { C::instr_fetching_sel, 1 },
392 { C::instr_fetching_bytecode_id, bytecode_id },
393 { C::instr_fetching_pc, event.pc },
394 // indirect + operands.
395 { C::instr_fetching_addressing_mode, event.instruction.addressing_mode },
396 { C::instr_fetching_op1, get_operand(0) },
397 { C::instr_fetching_op2, get_operand(1) },
398 { C::instr_fetching_op3, get_operand(2) },
399 { C::instr_fetching_op4, get_operand(3) },
400 { C::instr_fetching_op5, get_operand(4) },
401 { C::instr_fetching_op6, get_operand(5) },
402 { C::instr_fetching_op7, get_operand(6) },
403 // Single bytes.
404 { C::instr_fetching_bd0, wire_opcode },
405 { C::instr_fetching_bd1, bytecode_at(event.pc + 1) },
406 { C::instr_fetching_bd2, bytecode_at(event.pc + 2) },
407 { C::instr_fetching_bd3, bytecode_at(event.pc + 3) },
408 { C::instr_fetching_bd4, bytecode_at(event.pc + 4) },
409 { C::instr_fetching_bd5, bytecode_at(event.pc + 5) },
410 { C::instr_fetching_bd6, bytecode_at(event.pc + 6) },
411 { C::instr_fetching_bd7, bytecode_at(event.pc + 7) },
412 { C::instr_fetching_bd8, bytecode_at(event.pc + 8) },
413 { C::instr_fetching_bd9, bytecode_at(event.pc + 9) },
414 { C::instr_fetching_bd10, bytecode_at(event.pc + 10) },
415 { C::instr_fetching_bd11, bytecode_at(event.pc + 11) },
416 { C::instr_fetching_bd12, bytecode_at(event.pc + 12) },
417 { C::instr_fetching_bd13, bytecode_at(event.pc + 13) },
418 { C::instr_fetching_bd14, bytecode_at(event.pc + 14) },
419 { C::instr_fetching_bd15, bytecode_at(event.pc + 15) },
420 { C::instr_fetching_bd16, bytecode_at(event.pc + 16) },
421 { C::instr_fetching_bd17, bytecode_at(event.pc + 17) },
422 { C::instr_fetching_bd18, bytecode_at(event.pc + 18) },
423 { C::instr_fetching_bd19, bytecode_at(event.pc + 19) },
424 { C::instr_fetching_bd20, bytecode_at(event.pc + 20) },
425 { C::instr_fetching_bd21, bytecode_at(event.pc + 21) },
426 { C::instr_fetching_bd22, bytecode_at(event.pc + 22) },
427 { C::instr_fetching_bd23, bytecode_at(event.pc + 23) },
428 { C::instr_fetching_bd24, bytecode_at(event.pc + 24) },
429 { C::instr_fetching_bd25, bytecode_at(event.pc + 25) },
430 { C::instr_fetching_bd26, bytecode_at(event.pc + 26) },
431 { C::instr_fetching_bd27, bytecode_at(event.pc + 27) },
432 { C::instr_fetching_bd28, bytecode_at(event.pc + 28) },
433 { C::instr_fetching_bd29, bytecode_at(event.pc + 29) },
434 { C::instr_fetching_bd30, bytecode_at(event.pc + 30) },
435 { C::instr_fetching_bd31, bytecode_at(event.pc + 31) },
436 { C::instr_fetching_bd32, bytecode_at(event.pc + 32) },
437 { C::instr_fetching_bd33, bytecode_at(event.pc + 33) },
438 { C::instr_fetching_bd34, bytecode_at(event.pc + 34) },
439 { C::instr_fetching_bd35, bytecode_at(event.pc + 35) },
440 { C::instr_fetching_bd36, bytecode_at(event.pc + 36) },
441
442 // From instruction table.
443 { C::instr_fetching_exec_opcode, static_cast<uint32_t>(exec_opcode) },
444 { C::instr_fetching_instr_size, size_in_bytes },
445 { C::instr_fetching_sel_has_tag, has_tag },
446 { C::instr_fetching_sel_tag_is_op2, tag_is_op2 },
447
448 // Fill operand decomposition selectors
449 { C::instr_fetching_sel_op_dc_0, op_dc_selectors.at(0) },
450 { C::instr_fetching_sel_op_dc_1, op_dc_selectors.at(1) },
451 { C::instr_fetching_sel_op_dc_2, op_dc_selectors.at(2) },
452 { C::instr_fetching_sel_op_dc_3, op_dc_selectors.at(3) },
453 { C::instr_fetching_sel_op_dc_4, op_dc_selectors.at(4) },
454 { C::instr_fetching_sel_op_dc_5, op_dc_selectors.at(5) },
455 { C::instr_fetching_sel_op_dc_6, op_dc_selectors.at(6) },
456 { C::instr_fetching_sel_op_dc_7, op_dc_selectors.at(7) },
457 { C::instr_fetching_sel_op_dc_8, op_dc_selectors.at(8) },
458 { C::instr_fetching_sel_op_dc_9, op_dc_selectors.at(9) },
459 { C::instr_fetching_sel_op_dc_10, op_dc_selectors.at(10) },
460 { C::instr_fetching_sel_op_dc_11, op_dc_selectors.at(11) },
461 { C::instr_fetching_sel_op_dc_12, op_dc_selectors.at(12) },
462 { C::instr_fetching_sel_op_dc_13, op_dc_selectors.at(13) },
463 { C::instr_fetching_sel_op_dc_14, op_dc_selectors.at(14) },
464 { C::instr_fetching_sel_op_dc_15, op_dc_selectors.at(15) },
465 { C::instr_fetching_sel_op_dc_16, op_dc_selectors.at(16) },
466
467 // Parsing errors
468 { C::instr_fetching_pc_out_of_range, event.error == PC_OUT_OF_RANGE ? 1 : 0 },
469 { C::instr_fetching_opcode_out_of_range, event.error == OPCODE_OUT_OF_RANGE ? 1 : 0 },
470 { C::instr_fetching_instr_out_of_range, event.error == INSTRUCTION_OUT_OF_RANGE ? 1 : 0 },
471 { C::instr_fetching_tag_out_of_range, event.error == TAG_OUT_OF_RANGE ? 1 : 0 },
472 { C::instr_fetching_sel_parsing_err, event.error.has_value() ? 1 : 0 },
473
474 // selector for lookups
475 { C::instr_fetching_sel_pc_in_range, event.error != PC_OUT_OF_RANGE ? 1 : 0 },
476
477 { C::instr_fetching_bytecode_size, bytecode_size },
478 { C::instr_fetching_bytes_to_read, bytes_to_read },
479 { C::instr_fetching_instr_abs_diff, instr_abs_diff },
480 { C::instr_fetching_pc_abs_diff, pc_abs_diff },
481 { C::instr_fetching_pc_size_in_bits,
482 AVM_PC_SIZE_IN_BITS }, // Remove when we support constants in lookups
483 { C::instr_fetching_tag_value, tag_value },
484 } });
485 row++;
486 }
487}
488
488
// Static registration of the lookup/permutation interactions for this trace.
// NOTE(review): Doxygen scrape — the declaration opener (original lines
// 489-490, presumably `const InteractionDefinition <Builder>::interactions =
// InteractionDefinition()`) and roughly every other `.add<...>` line of the
// chain (492, 494, 497, 499, 502-504, 508, 510, 512) are missing from the
// capture; the trailing Doxygen index (perm_bc_hashing_*,
// lookup_bc_retrieval_retrieved_bytecodes_insertion_settings, etc.) names the
// absent entries. Confirm against the repository source.
491 // Bytecode Hashing
493 .add<lookup_bc_hashing_check_final_bytes_remaining_settings, InteractionType::LookupSequential>()
495 // Bytecode Retrieval
496 .add<lookup_bc_retrieval_contract_instance_retrieval_settings, InteractionType::LookupSequential>()
498 .add<lookup_bc_retrieval_is_new_class_check_settings, InteractionType::LookupSequential>()
500 // Bytecode Decomposition
501 .add<lookup_bc_decomposition_bytes_are_bytes_settings, InteractionType::LookupIntoIndexedByRow>()
505 perm_bc_hashing_get_packed_field_2_settings>(Column::bc_decomposition_sel_packed)
506 // Instruction Fetching
507 .add<lookup_instr_fetching_bytes_from_bc_dec_settings, InteractionType::LookupGeneric>()
509 .add<lookup_instr_fetching_wire_instruction_info_settings, InteractionType::LookupIntoIndexedByRow>()
511 .add<lookup_instr_fetching_tag_value_validation_settings, InteractionType::LookupIntoIndexedByRow>()
513
514} // namespace bb::avm2::tracegen
#define BB_ASSERT(expression,...)
Definition assert.hpp:70
std::shared_ptr< Napi::ThreadSafeFunction > bytecode
(NOTE: mis-resolved Doxygen cross-reference — the `bytecode` referenced in this
file is an event's bytecode byte vector, not a Napi thread-safe function.)
#define AVM_RETRIEVED_BYTECODES_TREE_INITIAL_SIZE
#define MAX_PUBLIC_CALLS_TO_UNIQUE_CONTRACT_CLASS_IDS
#define AVM_PC_SIZE_IN_BITS
#define MAX_PACKED_PUBLIC_BYTECODE_SIZE_IN_FIELDS
#define AVM_RETRIEVED_BYTECODES_TREE_HEIGHT
void process_retrieval(const simulation::EventEmitterInterface< simulation::BytecodeRetrievalEvent >::Container &events, TraceContainer &trace)
Process bytecode retrieval events and populate the relevant columns in the trace. Corresponds to bc_r...
static const InteractionDefinition interactions
void process_decomposition(const simulation::EventEmitterInterface< simulation::BytecodeDecompositionEvent >::Container &events, TraceContainer &trace)
Process bytecode decomposition events and populate the relevant columns in the trace....
void process_hashing(const simulation::EventEmitterInterface< simulation::BytecodeHashingEvent >::Container &events, TraceContainer &trace)
void process_instruction_fetching(const simulation::EventEmitterInterface< simulation::InstructionFetchingEvent >::Container &events, TraceContainer &trace)
InteractionDefinition & add(auto &&... args)
Native Poseidon2 hash function implementation.
Definition poseidon2.hpp:22
static FF hash(const std::vector< FF > &input)
Hashes a vector of field elements.
TestTraceContainer trace
FF compute_public_bytecode_first_field(size_t bytecode_size)
permutation_settings< perm_bc_hashing_bytecode_length_bytes_settings_ > perm_bc_hashing_bytecode_length_bytes_settings
lookup_settings< lookup_bc_retrieval_retrieved_bytecodes_insertion_settings_ > lookup_bc_retrieval_retrieved_bytecodes_insertion_settings
lookup_settings< lookup_instr_fetching_bytecode_size_from_bc_dec_settings_ > lookup_instr_fetching_bytecode_size_from_bc_dec_settings
lookup_settings< lookup_bc_hashing_poseidon2_hash_settings_ > lookup_bc_hashing_poseidon2_hash_settings
AvmFlavorSettings::FF FF
Definition field.hpp:10
const std::unordered_map< WireOpCode, WireInstructionSpec > & get_wire_instruction_spec()
constexpr uint32_t DECOMPOSE_WINDOW_SIZE
permutation_settings< perm_bc_hashing_get_packed_field_1_settings_ > perm_bc_hashing_get_packed_field_1_settings
permutation_settings< perm_bc_hashing_get_packed_field_0_settings_ > perm_bc_hashing_get_packed_field_0_settings
lookup_settings< lookup_instr_fetching_instr_abs_diff_positive_settings_ > lookup_instr_fetching_instr_abs_diff_positive_settings
lookup_settings< lookup_instr_fetching_pc_abs_diff_positive_settings_ > lookup_instr_fetching_pc_abs_diff_positive_settings
lookup_settings< lookup_bc_retrieval_class_id_derivation_settings_ > lookup_bc_retrieval_class_id_derivation_settings
simulation::PublicDataTreeReadWriteEvent event
static void batch_invert(C &coeffs) noexcept
Batch invert a collection of field elements using Montgomery's trick.