Barretenberg
The ZK-SNARK library at the core of Aztec
Loading...
Searching...
No Matches
execution_trace.cpp
Go to the documentation of this file.
2
#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <numeric>
#include <ranges>
#include <stdexcept>
9
37
42
43namespace bb::avm2::tracegen {
44namespace {
45
46constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_COLUMNS = {
47 C::execution_op_0_, C::execution_op_1_, C::execution_op_2_, C::execution_op_3_,
48 C::execution_op_4_, C::execution_op_5_, C::execution_op_6_,
49};
50constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_IS_ADDRESS_COLUMNS = {
51 C::execution_sel_op_is_address_0_, C::execution_sel_op_is_address_1_, C::execution_sel_op_is_address_2_,
52 C::execution_sel_op_is_address_3_, C::execution_sel_op_is_address_4_, C::execution_sel_op_is_address_5_,
53 C::execution_sel_op_is_address_6_,
54};
55constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_AFTER_RELATIVE_COLUMNS = {
56 C::execution_op_after_relative_0_, C::execution_op_after_relative_1_, C::execution_op_after_relative_2_,
57 C::execution_op_after_relative_3_, C::execution_op_after_relative_4_, C::execution_op_after_relative_5_,
58 C::execution_op_after_relative_6_,
59};
60constexpr std::array<C, AVM_MAX_OPERANDS> RESOLVED_OPERAND_COLUMNS = {
61 C::execution_rop_0_, C::execution_rop_1_, C::execution_rop_2_, C::execution_rop_3_,
62 C::execution_rop_4_, C::execution_rop_5_, C::execution_rop_6_,
63};
64constexpr std::array<C, AVM_MAX_OPERANDS> RESOLVED_OPERAND_TAG_COLUMNS = {
65 C::execution_rop_tag_0_, C::execution_rop_tag_1_, C::execution_rop_tag_2_, C::execution_rop_tag_3_,
66 C::execution_rop_tag_4_, C::execution_rop_tag_5_, C::execution_rop_tag_6_,
67};
68constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_SHOULD_APPLY_INDIRECTION_COLUMNS = {
69 C::execution_sel_should_apply_indirection_0_, C::execution_sel_should_apply_indirection_1_,
70 C::execution_sel_should_apply_indirection_2_, C::execution_sel_should_apply_indirection_3_,
71 C::execution_sel_should_apply_indirection_4_, C::execution_sel_should_apply_indirection_5_,
72 C::execution_sel_should_apply_indirection_6_,
73};
74constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_RELATIVE_OVERFLOW_COLUMNS = {
75 C::execution_sel_relative_overflow_0_, C::execution_sel_relative_overflow_1_, C::execution_sel_relative_overflow_2_,
76 C::execution_sel_relative_overflow_3_, C::execution_sel_relative_overflow_4_, C::execution_sel_relative_overflow_5_,
77 C::execution_sel_relative_overflow_6_,
78};
79constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_IS_RELATIVE_VALID_BASE_COLUMNS = {
80 C::execution_sel_op_do_overflow_check_0_, C::execution_sel_op_do_overflow_check_1_,
81 C::execution_sel_op_do_overflow_check_2_, C::execution_sel_op_do_overflow_check_3_,
82 C::execution_sel_op_do_overflow_check_4_, C::execution_sel_op_do_overflow_check_5_,
83 C::execution_sel_op_do_overflow_check_6_,
84};
85constexpr size_t TOTAL_INDIRECT_BITS = 16;
86static_assert(static_cast<size_t>(AVM_MAX_OPERANDS) * 2 <= TOTAL_INDIRECT_BITS);
87constexpr std::array<C, TOTAL_INDIRECT_BITS / 2> OPERAND_IS_RELATIVE_WIRE_COLUMNS = {
88 C::execution_sel_op_is_relative_wire_0_, C::execution_sel_op_is_relative_wire_1_,
89 C::execution_sel_op_is_relative_wire_2_, C::execution_sel_op_is_relative_wire_3_,
90 C::execution_sel_op_is_relative_wire_4_, C::execution_sel_op_is_relative_wire_5_,
91 C::execution_sel_op_is_relative_wire_6_, C::execution_sel_op_is_relative_wire_7_,
92
93};
94constexpr std::array<C, TOTAL_INDIRECT_BITS / 2> OPERAND_IS_INDIRECT_WIRE_COLUMNS = {
95 C::execution_sel_op_is_indirect_wire_0_, C::execution_sel_op_is_indirect_wire_1_,
96 C::execution_sel_op_is_indirect_wire_2_, C::execution_sel_op_is_indirect_wire_3_,
97 C::execution_sel_op_is_indirect_wire_4_, C::execution_sel_op_is_indirect_wire_5_,
98 C::execution_sel_op_is_indirect_wire_6_, C::execution_sel_op_is_indirect_wire_7_,
99};
100
101constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_COLUMNS = {
102 C::execution_register_0_, C::execution_register_1_, C::execution_register_2_,
103 C::execution_register_3_, C::execution_register_4_, C::execution_register_5_,
104};
105constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_MEM_TAG_COLUMNS = {
106 C::execution_mem_tag_reg_0_, C::execution_mem_tag_reg_1_, C::execution_mem_tag_reg_2_,
107 C::execution_mem_tag_reg_3_, C::execution_mem_tag_reg_4_, C::execution_mem_tag_reg_5_,
108};
109constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_IS_WRITE_COLUMNS = {
110 C::execution_rw_reg_0_, C::execution_rw_reg_1_, C::execution_rw_reg_2_,
111 C::execution_rw_reg_3_, C::execution_rw_reg_4_, C::execution_rw_reg_5_,
112};
113constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_MEM_OP_COLUMNS = {
114 C::execution_sel_mem_op_reg_0_, C::execution_sel_mem_op_reg_1_, C::execution_sel_mem_op_reg_2_,
115 C::execution_sel_mem_op_reg_3_, C::execution_sel_mem_op_reg_4_, C::execution_sel_mem_op_reg_5_,
116};
117constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_EXPECTED_TAG_COLUMNS = {
118 C::execution_expected_tag_reg_0_, C::execution_expected_tag_reg_1_, C::execution_expected_tag_reg_2_,
119 C::execution_expected_tag_reg_3_, C::execution_expected_tag_reg_4_, C::execution_expected_tag_reg_5_,
120};
121constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_TAG_CHECK_COLUMNS = {
122 C::execution_sel_tag_check_reg_0_, C::execution_sel_tag_check_reg_1_, C::execution_sel_tag_check_reg_2_,
123 C::execution_sel_tag_check_reg_3_, C::execution_sel_tag_check_reg_4_, C::execution_sel_tag_check_reg_5_,
124};
125constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_OP_REG_EFFECTIVE_COLUMNS = {
126 C::execution_sel_op_reg_effective_0_, C::execution_sel_op_reg_effective_1_, C::execution_sel_op_reg_effective_2_,
127 C::execution_sel_op_reg_effective_3_, C::execution_sel_op_reg_effective_4_, C::execution_sel_op_reg_effective_5_,
128};
129
137C get_execution_opcode_selector(ExecutionOpCode exec_opcode)
138{
139 switch (exec_opcode) {
141 return C::execution_sel_execute_get_env_var;
143 return C::execution_sel_execute_mov;
145 return C::execution_sel_execute_jump;
147 return C::execution_sel_execute_jumpi;
149 return C::execution_sel_execute_call;
151 return C::execution_sel_execute_static_call;
153 return C::execution_sel_execute_internal_call;
155 return C::execution_sel_execute_internal_return;
157 return C::execution_sel_execute_return;
159 return C::execution_sel_execute_revert;
161 return C::execution_sel_execute_success_copy;
163 return C::execution_sel_execute_returndata_size;
165 return C::execution_sel_execute_debug_log;
167 return C::execution_sel_execute_sload;
169 return C::execution_sel_execute_sstore;
171 return C::execution_sel_execute_notehash_exists;
173 return C::execution_sel_execute_emit_notehash;
175 return C::execution_sel_execute_l1_to_l2_message_exists;
177 return C::execution_sel_execute_nullifier_exists;
179 return C::execution_sel_execute_emit_nullifier;
181 return C::execution_sel_execute_send_l2_to_l1_msg;
182 default:
183 throw std::runtime_error("Execution opcode does not have a corresponding selector");
184 }
185}
186
190struct FailingContexts {
191 bool app_logic_failure = false;
192 bool teardown_failure = false;
195 unordered_flat_set<uint32_t> does_context_fail;
196};
197
209FailingContexts preprocess_for_discard(
211{
212 FailingContexts dying_info;
213
214 // We use `after_context_event` to retrieve parent_id, context_id, and phase to be consistent with
215 // how these values are populated in the trace (see ExecutionTraceBuilder::process()). These values
216 // should not change during the life-cycle of an execution event though and before_context_event
217 // would lead to the same results.
218
219 // Preprocessing pass 1: find the events that exit the app logic and teardown phases
220 for (const auto& ex_event : ex_events) {
221 bool is_exit = ex_event.is_exit();
222 bool is_top_level = ex_event.after_context_event.parent_id == 0;
223
224 if (is_exit && is_top_level) {
225 if (ex_event.after_context_event.phase == TransactionPhase::APP_LOGIC) {
226 dying_info.app_logic_failure = ex_event.is_failure();
227 dying_info.app_logic_exit_context_id = ex_event.after_context_event.id;
228 } else if (ex_event.after_context_event.phase == TransactionPhase::TEARDOWN) {
229 dying_info.teardown_failure = ex_event.is_failure();
230 dying_info.teardown_exit_context_id = ex_event.after_context_event.id;
231 break; // Teardown is the last phase we care about
232 }
233 }
234 }
235
236 // Preprocessing pass 2: find all contexts that fail and mark them
237 for (const auto& ex_event : ex_events) {
238 if (ex_event.is_failure()) {
239 dying_info.does_context_fail.insert(ex_event.after_context_event.id);
240 }
241 }
242
243 return dying_info;
244}
245
253bool is_phase_discarded(TransactionPhase phase, const FailingContexts& failures)
254{
255 // Note that app logic also gets discarded if teardown failures
256 return (phase == TransactionPhase::APP_LOGIC && (failures.app_logic_failure || failures.teardown_failure)) ||
257 (phase == TransactionPhase::TEARDOWN && failures.teardown_failure);
258}
259
267uint32_t dying_context_for_phase(TransactionPhase phase, const FailingContexts& failures)
268{
270 "Execution events must have app logic or teardown phase");
271
272 switch (phase) {
274 if (failures.app_logic_failure) {
275 return failures.app_logic_exit_context_id;
276 }
277
278 // Note that app logic also gets discarded if teardown failures
279 if (failures.teardown_failure) {
280 return failures.teardown_exit_context_id;
281 }
282
283 return 0;
284 }
286 return failures.teardown_failure ? failures.teardown_exit_context_id : 0;
287 default:
288 __builtin_unreachable(); // tell the compiler "we never reach here"
289 }
290}
291
292} // namespace
293
296{
297 uint32_t row = 1; // We start from row 1 because this trace contains shifted columns.
298
299 // Preprocess events to determine which contexts will fail
300 const FailingContexts failures = preprocess_for_discard(ex_events);
301
302 // Some variables updated per loop iteration to track
303 // whether or not the upcoming row should "discard" [side effects].
304 uint32_t dying_context_id = 0;
305 // dying_context_id captures whether we discard or not. Namely, discard == 1 <=> dying_context_id != 0
306 // is a circuit invariant. For this reason, we use a lambda to preserve the invariant.
307 auto is_discarding = [&dying_context_id]() { return dying_context_id != 0; };
308 bool is_first_event_in_enqueued_call = true;
309 bool prev_row_was_enter_call = false;
310
311 for (const auto& ex_event : ex_events) {
312 // Check if this is the first event in an enqueued call and whether
313 // the phase should be discarded
314 if (!is_discarding() && is_first_event_in_enqueued_call &&
315 is_phase_discarded(ex_event.after_context_event.phase, failures)) {
316 dying_context_id = dying_context_for_phase(ex_event.after_context_event.phase, failures);
317 }
318
319 const bool has_parent = ex_event.after_context_event.parent_id != 0;
320
321 /**************************************************************************************************
322 * Setup.
323 **************************************************************************************************/
324
325 trace.set(
326 row,
327 { {
328 { C::execution_sel, 1 },
329 { C::execution_clk, row },
330 // Selectors that indicate "dispatch" from tx trace
331 // Note: Enqueued Call End is determined during the opcode execution temporality group
332 { C::execution_enqueued_call_start, is_first_event_in_enqueued_call ? 1 : 0 },
333 // Context
334 { C::execution_context_id, ex_event.after_context_event.id },
335 { C::execution_parent_id, ex_event.after_context_event.parent_id },
336 // Warning: pc in after_context_event is the pc of the next instruction, not the current instruction.
337 { C::execution_pc, ex_event.before_context_event.pc },
338 { C::execution_msg_sender, ex_event.after_context_event.msg_sender },
339 { C::execution_contract_address, ex_event.after_context_event.contract_addr },
340 { C::execution_transaction_fee, ex_event.after_context_event.transaction_fee },
341 { C::execution_is_static, ex_event.after_context_event.is_static },
342 { C::execution_parent_calldata_addr, ex_event.after_context_event.parent_cd_addr },
343 { C::execution_parent_calldata_size, ex_event.after_context_event.parent_cd_size },
344 { C::execution_last_child_returndata_addr, ex_event.after_context_event.last_child_rd_addr },
345 { C::execution_last_child_returndata_size, ex_event.after_context_event.last_child_rd_size },
346 { C::execution_last_child_success, ex_event.after_context_event.last_child_success },
347 { C::execution_last_child_id, ex_event.after_context_event.last_child_id },
348 { C::execution_l2_gas_limit, ex_event.after_context_event.gas_limit.l2_gas },
349 { C::execution_da_gas_limit, ex_event.after_context_event.gas_limit.da_gas },
350 { C::execution_l2_gas_used, ex_event.after_context_event.gas_used.l2_gas },
351 { C::execution_da_gas_used, ex_event.after_context_event.gas_used.da_gas },
352 { C::execution_parent_l2_gas_limit, ex_event.after_context_event.parent_gas_limit.l2_gas },
353 { C::execution_parent_da_gas_limit, ex_event.after_context_event.parent_gas_limit.da_gas },
354 { C::execution_parent_l2_gas_used, ex_event.after_context_event.parent_gas_used.l2_gas },
355 { C::execution_parent_da_gas_used, ex_event.after_context_event.parent_gas_used.da_gas },
356 { C::execution_next_context_id, ex_event.next_context_id },
357 // Context - gas.
358 { C::execution_prev_l2_gas_used, ex_event.before_context_event.gas_used.l2_gas },
359 { C::execution_prev_da_gas_used, ex_event.before_context_event.gas_used.da_gas },
360 // Context - tree states
361 // Context - tree states - Written public data slots tree
362 { C::execution_prev_written_public_data_slots_tree_root,
363 ex_event.before_context_event.written_public_data_slots_tree_snapshot.root },
364 { C::execution_prev_written_public_data_slots_tree_size,
365 ex_event.before_context_event.written_public_data_slots_tree_snapshot.next_available_leaf_index },
366 { C::execution_written_public_data_slots_tree_root,
367 ex_event.after_context_event.written_public_data_slots_tree_snapshot.root },
368 { C::execution_written_public_data_slots_tree_size,
369 ex_event.after_context_event.written_public_data_slots_tree_snapshot.next_available_leaf_index },
370 // Context - tree states - Nullifier tree
371 { C::execution_prev_nullifier_tree_root,
372 ex_event.before_context_event.tree_states.nullifier_tree.tree.root },
373 { C::execution_prev_nullifier_tree_size,
374 ex_event.before_context_event.tree_states.nullifier_tree.tree.next_available_leaf_index },
375 { C::execution_prev_num_nullifiers_emitted,
376 ex_event.before_context_event.tree_states.nullifier_tree.counter },
377 { C::execution_nullifier_tree_root, ex_event.after_context_event.tree_states.nullifier_tree.tree.root },
378 { C::execution_nullifier_tree_size,
379 ex_event.after_context_event.tree_states.nullifier_tree.tree.next_available_leaf_index },
380 { C::execution_num_nullifiers_emitted,
381 ex_event.after_context_event.tree_states.nullifier_tree.counter },
382 // Context - tree states - Public data tree
383 { C::execution_prev_public_data_tree_root,
384 ex_event.before_context_event.tree_states.public_data_tree.tree.root },
385 { C::execution_prev_public_data_tree_size,
386 ex_event.before_context_event.tree_states.public_data_tree.tree.next_available_leaf_index },
387 { C::execution_public_data_tree_root,
388 ex_event.after_context_event.tree_states.public_data_tree.tree.root },
389 { C::execution_public_data_tree_size,
390 ex_event.after_context_event.tree_states.public_data_tree.tree.next_available_leaf_index },
391 // Context - tree states - Note hash tree
392 { C::execution_prev_note_hash_tree_root,
393 ex_event.before_context_event.tree_states.note_hash_tree.tree.root },
394 { C::execution_prev_note_hash_tree_size,
395 ex_event.before_context_event.tree_states.note_hash_tree.tree.next_available_leaf_index },
396 { C::execution_prev_num_note_hashes_emitted,
397 ex_event.before_context_event.tree_states.note_hash_tree.counter },
398 { C::execution_note_hash_tree_root, ex_event.after_context_event.tree_states.note_hash_tree.tree.root },
399 { C::execution_note_hash_tree_size,
400 ex_event.after_context_event.tree_states.note_hash_tree.tree.next_available_leaf_index },
401 { C::execution_num_note_hashes_emitted,
402 ex_event.after_context_event.tree_states.note_hash_tree.counter },
403 // Context - tree states - L1 to L2 message tree
404 { C::execution_l1_l2_tree_root,
405 ex_event.after_context_event.tree_states.l1_to_l2_message_tree.tree.root },
406 // Context - tree states - Retrieved bytecodes tree
407 { C::execution_prev_retrieved_bytecodes_tree_root,
408 ex_event.before_context_event.retrieved_bytecodes_tree_snapshot.root },
409 { C::execution_prev_retrieved_bytecodes_tree_size,
410 ex_event.before_context_event.retrieved_bytecodes_tree_snapshot.next_available_leaf_index },
411 { C::execution_retrieved_bytecodes_tree_root,
412 ex_event.after_context_event.retrieved_bytecodes_tree_snapshot.root },
413 { C::execution_retrieved_bytecodes_tree_size,
414 ex_event.after_context_event.retrieved_bytecodes_tree_snapshot.next_available_leaf_index },
415 // Context - side effects
416 { C::execution_prev_num_public_log_fields, ex_event.before_context_event.numPublicLogFields },
417 { C::execution_num_public_log_fields, ex_event.after_context_event.numPublicLogFields },
418 { C::execution_prev_num_l2_to_l1_messages, ex_event.before_context_event.numL2ToL1Messages },
419 { C::execution_num_l2_to_l1_messages, ex_event.after_context_event.numL2ToL1Messages },
420 // Helpers for identifying parent context
421 { C::execution_has_parent_ctx, has_parent ? 1 : 0 },
422 { C::execution_is_parent_id_inv, ex_event.after_context_event.parent_id }, // Will be inverted in batch.
423 } });
424
425 // Internal stack
426 // Important: It is crucial to use `before_context_event` to populate the internal call stack columns because
427 // these values are mutated by the internal call and return opcodes and therefore
428 // `after_context_event` would populate incorrect values.
429 const auto& internal_call_return_id = ex_event.before_context_event.internal_call_return_id;
430 trace.set(row,
431 { {
432 { C::execution_internal_call_id, ex_event.before_context_event.internal_call_id },
433 { C::execution_internal_call_return_id, internal_call_return_id },
434 { C::execution_next_internal_call_id, ex_event.before_context_event.next_internal_call_id },
435 } });
436
437 /**************************************************************************************************
438 * Temporality group 1: Bytecode retrieval.
439 **************************************************************************************************/
440
441 const bool bytecode_retrieval_failed = ex_event.error == ExecutionError::BYTECODE_RETRIEVAL;
442 const bool sel_first_row_in_context = prev_row_was_enter_call || is_first_event_in_enqueued_call;
443 trace.set(row,
444 { {
445 { C::execution_sel_first_row_in_context, sel_first_row_in_context ? 1 : 0 },
446 { C::execution_sel_bytecode_retrieval_failure, bytecode_retrieval_failed ? 1 : 0 },
447 { C::execution_sel_bytecode_retrieval_success, !bytecode_retrieval_failed ? 1 : 0 },
448 { C::execution_bytecode_id, ex_event.after_context_event.bytecode_id },
449 } });
450
451 /**************************************************************************************************
452 * Temporality group 2: Instruction fetching. Mapping from wire to execution and addressing.
453 **************************************************************************************************/
454
455 // This will only have a value if instruction fetching succeeded.
457 const bool error_in_instruction_fetching = ex_event.error == ExecutionError::INSTRUCTION_FETCHING;
458 const bool instruction_fetching_success = !bytecode_retrieval_failed && !error_in_instruction_fetching;
459 trace.set(C::execution_sel_instruction_fetching_failure, row, error_in_instruction_fetching ? 1 : 0);
460
461 if (instruction_fetching_success) {
462 exec_opcode = ex_event.wire_instruction.get_exec_opcode();
463 process_instr_fetching(ex_event.wire_instruction, trace, row);
464
465 // If we fetched an instruction successfully, we can set the next PC.
466 // In circuit, we enforce next_pc to be pc + instr_length, but in simulation,
467 // we set next_pc (as member of the context) to be the real pc of the next instruction
468 // which is different for JUMP, JUMPI, INTERNALCALL, and INTERNALRETURN.
469 // Therefore, we must not use after_context_event.pc (which is simulation next_pc) to set
470 // C::execution_next_pc.
471 trace.set(row,
472 { {
473 { C::execution_next_pc,
474 static_cast<uint32_t>(ex_event.before_context_event.pc +
475 ex_event.wire_instruction.size_in_bytes()) },
476 } });
477
478 // Along this function we need to set the info we get from the #[EXEC_SPEC_READ] lookup.
479 process_execution_spec(ex_event, trace, row);
480
481 process_addressing(ex_event.addressing_event, ex_event.wire_instruction, trace, row);
482 }
483
484 const bool addressing_failed = ex_event.error == ExecutionError::ADDRESSING;
485
486 /**************************************************************************************************
487 * Temporality group 3: Registers read.
488 **************************************************************************************************/
489
490 // Note that if addressing did not fail, register reading will not fail.
492 std::ranges::fill(registers, MemoryValue::from_tag(static_cast<MemoryTag>(0), 0));
493 const bool should_process_registers = instruction_fetching_success && !addressing_failed;
494 const bool register_processing_failed = ex_event.error == ExecutionError::REGISTER_READ;
495 if (should_process_registers) {
497 *exec_opcode, ex_event.inputs, ex_event.output, registers, register_processing_failed, trace, row);
498 }
499
500 /**************************************************************************************************
501 * Temporality group 4: Gas (both base and dynamic).
502 **************************************************************************************************/
503
504 const bool should_check_gas = should_process_registers && !register_processing_failed;
505 if (should_check_gas) {
506 process_gas(ex_event.gas_event, *exec_opcode, trace, row);
507
508 // To_Radix Dynamic Gas Factor related selectors.
509 // We need the register information to compute dynamic gas factor and process_gas() does not have
510 // access to it and nor should it.
511 if (*exec_opcode == ExecutionOpCode::TORADIXBE) {
512 uint32_t radix = ex_event.inputs[1].as<uint32_t>(); // Safe since already tag checked
513 uint32_t num_limbs = ex_event.inputs[2].as<uint32_t>(); // Safe since already tag checked
514 uint32_t num_p_limbs = radix > 256 ? 32 : static_cast<uint32_t>(get_p_limbs_per_radix_size(radix));
515 trace.set(row,
516 { {
517 // To Radix BE Dynamic Gas
518 { C::execution_two_five_six, 256 },
519 { C::execution_sel_radix_gt_256, radix > 256 ? 1 : 0 },
520 { C::execution_sel_lookup_num_p_limbs, radix <= 256 ? 1 : 0 },
521 { C::execution_num_p_limbs, num_p_limbs },
522 { C::execution_sel_use_num_limbs, num_limbs > num_p_limbs ? 1 : 0 },
523 // Don't set dyn gas factor here since already set in process_gas
524 } });
525 } else if (*exec_opcode == ExecutionOpCode::SSTORE) {
526 trace.set(row,
527 { {
528 // SSTORE Dynamic Gas
529 { C::execution_written_slots_tree_height, AVM_WRITTEN_PUBLIC_DATA_SLOTS_TREE_HEIGHT },
530 { C::execution_written_slots_tree_siloing_separator, DOM_SEP__PUBLIC_LEAF_SLOT },
531 } });
532 }
533 }
534
535 const bool oog = ex_event.error == ExecutionError::GAS;
536 /**************************************************************************************************
537 * Temporality group 5: Opcode execution.
538 **************************************************************************************************/
539
540 const bool should_execute_opcode = should_check_gas && !oog;
541
542 // These booleans are used after of the "opcode code execution" block but need
543 // to be set as part of the "opcode code execution" block.
544 bool sel_enter_call = false;
545 bool sel_exit_call = false;
546 bool should_execute_revert = false;
547
548 const bool opcode_execution_failed = ex_event.error == ExecutionError::OPCODE_EXECUTION;
549 if (should_execute_opcode) {
550 // At this point we can assume instruction fetching succeeded, so this should never fail.
551 const auto& dispatch_to_subtrace = get_subtrace_info_map().at(*exec_opcode);
552 trace.set(row,
553 { {
554 { C::execution_sel_should_execute_opcode, 1 },
555 { C::execution_sel_opcode_error, opcode_execution_failed ? 1 : 0 },
556 { get_subtrace_selector(dispatch_to_subtrace.subtrace_selector), 1 },
557 } });
558
559 // Execution Trace opcodes - separating for clarity
560 if (dispatch_to_subtrace.subtrace_selector == SubtraceSel::EXECUTION) {
561 trace.set(get_execution_opcode_selector(*exec_opcode), row, 1);
562 }
563
564 // Execution trace opcodes specific logic.
565 // Note that the opcode selectors were set above. (e.g., sel_execute_call, sel_execute_static_call, ..).
566 if (*exec_opcode == ExecutionOpCode::CALL || *exec_opcode == ExecutionOpCode::STATICCALL) {
567 sel_enter_call = true;
568
569 const Gas gas_left = ex_event.after_context_event.gas_limit - ex_event.after_context_event.gas_used;
570
571 uint32_t allocated_l2_gas = registers[0].as<uint32_t>();
572 bool is_l2_gas_left_gt_allocated = gas_left.l2_gas > allocated_l2_gas;
573
574 uint32_t allocated_da_gas = registers[1].as<uint32_t>();
575 bool is_da_gas_left_gt_allocated = gas_left.da_gas > allocated_da_gas;
576
577 trace.set(row,
578 { {
579 { C::execution_sel_enter_call, 1 },
580 { C::execution_l2_gas_left, gas_left.l2_gas },
581 { C::execution_da_gas_left, gas_left.da_gas },
582 { C::execution_is_l2_gas_left_gt_allocated, is_l2_gas_left_gt_allocated ? 1 : 0 },
583 { C::execution_is_da_gas_left_gt_allocated, is_da_gas_left_gt_allocated ? 1 : 0 },
584 } });
585 } else if (*exec_opcode == ExecutionOpCode::RETURN) {
586 sel_exit_call = true;
587 trace.set(row,
588 { {
589 { C::execution_nested_return, has_parent ? 1 : 0 },
590 } });
591 } else if (*exec_opcode == ExecutionOpCode::REVERT) {
592 sel_exit_call = true;
593 should_execute_revert = true;
594 } else if (exec_opcode == ExecutionOpCode::GETENVVAR) {
595 BB_ASSERT_EQ(ex_event.addressing_event.resolution_info.size(),
596 static_cast<size_t>(2),
597 "GETENVVAR should have exactly two resolved operands (envvar enum and output)");
598 // rop[1] is the envvar enum
599 Operand envvar_enum = ex_event.addressing_event.resolution_info[1].resolved_operand;
600 process_get_env_var_opcode(envvar_enum, ex_event.output, trace, row);
601 } else if (*exec_opcode == ExecutionOpCode::INTERNALRETURN) {
602 if (!opcode_execution_failed) {
603 // If we have an opcode error, we don't need to compute the inverse (see internal_call.pil)
604 trace.set(C::execution_internal_call_return_id_inv,
605 row,
606 internal_call_return_id); // Will be inverted in batch later.
607 trace.set(C::execution_sel_read_unwind_call_stack, row, 1);
608 }
609 } else if (*exec_opcode == ExecutionOpCode::SSTORE) {
610 // Equivalent to PIL's (MAX + INITIAL_SIZE - prev_written_public_data_slots_tree_size)
611 // since prev_size = counter + 1 and INITIAL_SIZE = 1.
612 uint32_t remaining_data_writes = MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX -
613 ex_event.before_context_event.tree_states.public_data_tree.counter;
614
615 trace.set(row,
616 { {
617 { C::execution_max_data_writes_reached, remaining_data_writes == 0 },
618 { C::execution_remaining_data_writes_inv,
619 remaining_data_writes }, // Will be inverted in batch later.
620 { C::execution_sel_write_public_data, !opcode_execution_failed },
621 { C::execution_written_slots_tree_height, AVM_WRITTEN_PUBLIC_DATA_SLOTS_TREE_HEIGHT },
622 { C::execution_written_slots_tree_siloing_separator, DOM_SEP__PUBLIC_LEAF_SLOT },
623 } });
624 } else if (*exec_opcode == ExecutionOpCode::NOTEHASHEXISTS) {
625 uint64_t leaf_index = registers[1].as<uint64_t>();
626 uint64_t note_hash_tree_leaf_count = NOTE_HASH_TREE_LEAF_COUNT;
627 bool note_hash_leaf_in_range = leaf_index < note_hash_tree_leaf_count;
628
629 trace.set(row,
630 { {
631 { C::execution_note_hash_leaf_in_range, note_hash_leaf_in_range },
632 { C::execution_note_hash_tree_leaf_count, FF(note_hash_tree_leaf_count) },
633 } });
634 } else if (*exec_opcode == ExecutionOpCode::EMITNOTEHASH) {
635 uint32_t remaining_note_hashes =
636 MAX_NOTE_HASHES_PER_TX - ex_event.before_context_event.tree_states.note_hash_tree.counter;
637
638 trace.set(row,
639 { {
640 { C::execution_sel_reached_max_note_hashes, remaining_note_hashes == 0 },
641 { C::execution_remaining_note_hashes_inv,
642 remaining_note_hashes }, // Will be inverted in batch later.
643 { C::execution_sel_write_note_hash, !opcode_execution_failed },
644 } });
645 } else if (*exec_opcode == ExecutionOpCode::L1TOL2MSGEXISTS) {
646 uint64_t leaf_index = registers[1].as<uint64_t>();
647 uint64_t l1_to_l2_msg_tree_leaf_count = L1_TO_L2_MSG_TREE_LEAF_COUNT;
648 bool l1_to_l2_msg_leaf_in_range = leaf_index < l1_to_l2_msg_tree_leaf_count;
649
650 trace.set(row,
651 { {
652 { C::execution_l1_to_l2_msg_leaf_in_range, l1_to_l2_msg_leaf_in_range },
653 { C::execution_l1_to_l2_msg_tree_leaf_count, FF(l1_to_l2_msg_tree_leaf_count) },
654 } });
655 } else if (exec_opcode == ExecutionOpCode::NULLIFIEREXISTS) {
656 trace.set(row,
657 { {
658 { C::execution_nullifier_tree_height, NULLIFIER_TREE_HEIGHT },
659 } });
660 } else if (*exec_opcode == ExecutionOpCode::EMITNULLIFIER) {
661 uint32_t remaining_nullifiers =
662 MAX_NULLIFIERS_PER_TX - ex_event.before_context_event.tree_states.nullifier_tree.counter;
663
664 trace.set(row,
665 { { { C::execution_sel_reached_max_nullifiers, remaining_nullifiers == 0 },
                      { C::execution_remaining_nullifiers_inv,
                        remaining_nullifiers }, // Will be inverted in batch later.
                      { C::execution_sel_write_nullifier,
                        // Only write while the per-tx nullifier quota remains and the context is not static.
                        remaining_nullifiers != 0 && !ex_event.before_context_event.is_static },
                      { C::execution_nullifier_pi_offset,
                        // NOTE(review): a public-inputs base-row addend (cf.
                        // AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_NULLIFIERS_ROW_IDX) appears to have been
                        // dropped by this extract — confirm against the repository source.
                        ex_event.before_context_event.tree_states.nullifier_tree.counter },
                      { C::execution_nullifier_tree_height, NULLIFIER_TREE_HEIGHT },
                      { C::execution_nullifier_siloing_separator, DOM_SEP__SILOED_NULLIFIER } } });
        } else if (*exec_opcode == ExecutionOpCode::SENDL2TOL1MSG) {
            // Remaining quota of L2->L1 messages for this transaction.
            uint32_t remaining_l2_to_l1_msgs =
                MAX_L2_TO_L1_MSGS_PER_TX - ex_event.before_context_event.numL2ToL1Messages;

            FF recipient = registers[0].as<FF>();
            // The recipient must fit into an Ethereum address; larger values are flagged as an error.
            bool sel_too_large_recipient_error =
                static_cast<uint256_t>(recipient) > static_cast<uint256_t>(MAX_ETH_ADDRESS_VALUE);

            trace.set(row,
                      { { { C::execution_sel_l2_to_l1_msg_limit_error, remaining_l2_to_l1_msgs == 0 },
                          { C::execution_remaining_l2_to_l1_msgs_inv,
                            remaining_l2_to_l1_msgs }, // Will be inverted in batch later.
                          { C::execution_max_eth_address_value, FF(MAX_ETH_ADDRESS_VALUE) },
                          { C::execution_sel_too_large_recipient_error, sel_too_large_recipient_error },
                          { C::execution_sel_write_l2_to_l1_msg, !opcode_execution_failed && !is_discarding() },
                          {
                              C::execution_public_inputs_index,
                              // NOTE(review): a public-inputs base-row addend (cf.
                              // AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_L2_TO_L1_MSGS_ROW_IDX) appears to
                              // have been dropped by this extract — confirm against the repository source.
                              ex_event.before_context_event.numL2ToL1Messages,
                          } } });
        }
    }

    /**************************************************************************************************
     * Temporality group 6: Register write.
     **************************************************************************************************/

    // Register writes only happen when the opcode actually executed and did not fail.
    const bool should_process_register_write = should_execute_opcode && !opcode_execution_failed;
    if (should_process_register_write) {
        process_registers_write(*exec_opcode, trace, row);
    }

    /**************************************************************************************************
     * Discarding and error related selectors.
     **************************************************************************************************/

    const bool is_dying_context = ex_event.after_context_event.id == dying_context_id;
    // Need to generate the item below for checking "is dying context" in circuit
    // No need to condition by `!is_dying_context` as batch inversion skips 0.
    const FF dying_context_diff = FF(ex_event.after_context_event.id) - FF(dying_context_id);

    // This is here instead of guarded by `should_execute_opcode` because is_err is a higher level error
    // than just an opcode error (i.e., it is on if there are any errors in any temporality group).
    const bool is_err = ex_event.error != ExecutionError::NONE;
    sel_exit_call = sel_exit_call || is_err; // sel_execute_revert || sel_execute_return || sel_error
    const bool is_failure = should_execute_revert || is_err;
    // An enqueued (top-level) call ends when we exit a call with no parent context.
    const bool enqueued_call_end = sel_exit_call && !has_parent;
    const bool nested_failure = is_failure && has_parent;

    trace.set(row,
              { {
                  { C::execution_sel_exit_call, sel_exit_call ? 1 : 0 },
                  { C::execution_nested_failure, nested_failure ? 1 : 0 },
                  { C::execution_sel_error, is_err ? 1 : 0 },
                  { C::execution_sel_failure, is_failure ? 1 : 0 },
                  { C::execution_discard, is_discarding() ? 1 : 0 },
                  { C::execution_dying_context_id, dying_context_id },
                  { C::execution_dying_context_id_inv, dying_context_id }, // Will be inverted in batch.
                  { C::execution_is_dying_context, is_dying_context ? 1 : 0 },
                  { C::execution_dying_context_diff_inv, dying_context_diff }, // Will be inverted in batch.
                  { C::execution_enqueued_call_end, enqueued_call_end ? 1 : 0 },
              } });

    // Trace-generation is done for this event.
    // Now, use this event to determine whether we should set/reset the discard flag for the NEXT event.
    // Note: is_failure implies discard is true.
    const bool event_kills_dying_context = is_failure && is_dying_context;

    if (event_kills_dying_context) {
        // Set/unset discard flag if the current event is the one that kills the dying context
        dying_context_id = 0;
    } else if (sel_enter_call && !is_discarding() &&
               failures.does_context_fail.contains(ex_event.next_context_id)) {
        // If making a nested call, and discard isn't already high...
        // if the nested context being entered eventually dies, we set which context is dying (implicitly raise
        // discard flag). NOTE: If a [STATIC]CALL instruction _itself_ errors, we don't set the discard flag
        // because we aren't actually entering a new context. This is already captured by `sel_enter_call`
        // boolean which is set to true only during opcode execution temporality group which cannot
        // fail for CALL/STATICALL.
        dying_context_id = ex_event.next_context_id;
    }
    // Otherwise, we aren't entering or exiting a dying context,
    // so just propagate discard and dying context.
    // Implicit: dying_context_id = dying_context_id; discard = discard;

    // If an enqueued call just exited, next event (if any) is the first in an enqueued call.
    // Update flag for next iteration.
    is_first_event_in_enqueued_call = !has_parent && sel_exit_call;

    // Track this bool for use determining whether the next row is the first in a context
    prev_row_was_enter_call = sel_enter_call;

    row++;
    }

    // Batch invert the columns.
    // NOTE(review): the statement that performs the batch inversion (a line dropped by
    // this extract) is not visible here — confirm against the repository source.
}
773
// Populates the instruction-fetching columns for one execution row: success selector,
// execution opcode, addressing mode, instruction length, and all operand columns
// (padded with zero operands up to AVM_MAX_OPERANDS).
// NOTE(review): the first line of this definition (function name and first parameter)
// was dropped by this extract; the declaration is
//   void process_instr_fetching(const simulation::Instruction& instruction,
//                               TraceContainer& trace, uint32_t row).
                                                TraceContainer& trace,
                                                uint32_t row)
{
    trace.set(row,
              { {
                  { C::execution_sel_instruction_fetching_success, 1 },
                  { C::execution_ex_opcode, static_cast<uint8_t>(instruction.get_exec_opcode()) },
                  { C::execution_addressing_mode, instruction.addressing_mode },
                  { C::execution_instr_length, instruction.size_in_bytes() },
              } });

    // At this point we can assume instruction fetching succeeded.
    auto operands = instruction.operands;
    BB_ASSERT_LTE(operands.size(), static_cast<size_t>(AVM_MAX_OPERANDS), "Operands size is out of range");
    // Pad with zero-valued FF operands so every operand column gets written.
    operands.resize(AVM_MAX_OPERANDS, Operand::from<FF>(0));

    for (size_t i = 0; i < AVM_MAX_OPERANDS; i++) {
        trace.set(OPERAND_COLUMNS[i], row, operands.at(i));
    }
}
795
// Populates the static per-opcode specification columns for one row: gas cost
// components, per-register read/write/tag-check info, operand is-address flags,
// and the subtrace dispatch identifiers.
// NOTE(review): the first line of this definition was dropped by this extract; the
// declaration is
//   void process_execution_spec(const simulation::ExecutionEvent& ex_event,
//                               TraceContainer& trace, uint32_t row).
                                                 TraceContainer& trace,
                                                 uint32_t row)
{
    // At this point we can assume instruction fetching succeeded, so this should never fail.
    ExecutionOpCode exec_opcode = ex_event.wire_instruction.get_exec_opcode();
    const auto& exec_spec = get_exec_instruction_spec().at(exec_opcode);
    const auto& gas_cost = exec_spec.gas_cost;

    // Gas.
    trace.set(row,
              { {
                  { C::execution_opcode_gas, gas_cost.opcode_gas },
                  { C::execution_base_da_gas, gas_cost.base_da },
                  { C::execution_dynamic_l2_gas, gas_cost.dyn_l2 },
                  { C::execution_dynamic_da_gas, gas_cost.dyn_da },
              } });

    // Per-register specification: write flag, active (memory-op) flag and tag-check info.
    const auto& register_info = exec_spec.register_info;
    for (size_t i = 0; i < AVM_MAX_REGISTERS; i++) {
        trace.set(row,
                  { {
                      { REGISTER_IS_WRITE_COLUMNS[i], register_info.is_write(i) ? 1 : 0 },
                      { REGISTER_MEM_OP_COLUMNS[i], register_info.is_active(i) ? 1 : 0 },
                      { REGISTER_EXPECTED_TAG_COLUMNS[i],
                        // Expected tag is only meaningful when a tag check is required; 0 otherwise.
                        register_info.need_tag_check(i) ? static_cast<uint32_t>(*(register_info.expected_tag(i))) : 0 },
                      { REGISTER_TAG_CHECK_COLUMNS[i], register_info.need_tag_check(i) ? 1 : 0 },
                  } });
    }

    // Set is_address columns
    const auto& num_addresses = exec_spec.num_addresses;
    for (size_t i = 0; i < num_addresses; i++) {
        trace.set(OPERAND_IS_ADDRESS_COLUMNS[i], row, 1);
    }

    // At this point we can assume instruction fetching succeeded, so this should never fail.
    const auto& dispatch_to_subtrace = get_subtrace_info_map().at(exec_opcode);
    trace.set(row,
              { {
                  { C::execution_subtrace_id, get_subtrace_id(dispatch_to_subtrace.subtrace_selector) },
                  { C::execution_subtrace_operation_id, dispatch_to_subtrace.subtrace_operation_id },
                  { C::execution_dyn_gas_id, exec_spec.dyn_gas_id },
              } });
}
841
// Populates the gas columns for one row: out-of-gas flags, addressing gas, dynamic
// gas factors, cumulative gas totals, and (when applicable) the opcode-specific
// dynamic gas selector.
// NOTE(review): the first line of this definition was dropped by this extract; the
// declaration is
//   void process_gas(const simulation::GasEvent& gas_event, ExecutionOpCode exec_opcode,
//                    TraceContainer& trace, uint32_t row).
                                      ExecutionOpCode exec_opcode,
                                      TraceContainer& trace,
                                      uint32_t row)
{
    // Out of gas if either the L2 or the DA limit was exceeded.
    bool oog = gas_event.oog_l2 || gas_event.oog_da;
    trace.set(row,
              { {
                  { C::execution_sel_should_check_gas, 1 },
                  { C::execution_out_of_gas_l2, gas_event.oog_l2 ? 1 : 0 },
                  { C::execution_out_of_gas_da, gas_event.oog_da ? 1 : 0 },
                  { C::execution_sel_out_of_gas, oog ? 1 : 0 },
                  // Addressing gas.
                  { C::execution_addressing_gas, gas_event.addressing_gas },
                  // Dynamic gas.
                  { C::execution_dynamic_l2_gas_factor, gas_event.dynamic_gas_factor.l2_gas },
                  { C::execution_dynamic_da_gas_factor, gas_event.dynamic_gas_factor.da_gas },
                  // Derived cumulative gas used.
                  { C::execution_total_gas_l2, gas_event.total_gas_used_l2 },
                  { C::execution_total_gas_da, gas_event.total_gas_used_da },
              } });

    // A non-zero dyn_gas_id selects the opcode's dedicated dynamic-gas column.
    const auto& exec_spec = get_exec_instruction_spec().at(exec_opcode);
    if (exec_spec.dyn_gas_id != 0) {
        trace.set(get_dyn_gas_selector(exec_spec.dyn_gas_id), row, 1);
    }
}
869
// Populates the addressing-resolution columns for one row: wire relative/indirect
// bits, post-relative values, resolved operands and tags, base-address validity, and
// the batched error / tag-difference accumulators consumed by the addressing relations.
// NOTE(review): the first lines of this definition were dropped by this extract; the
// declaration is
//   void process_addressing(const simulation::AddressingEvent& addr_event,
//                           const simulation::Instruction& instruction,
//                           TraceContainer& trace, uint32_t row).
                                             TraceContainer& trace,
                                             uint32_t row)
{
    // At this point we can assume instruction fetching succeeded, so this should never fail.
    ExecutionOpCode exec_opcode = instruction.get_exec_opcode();
    const ExecInstructionSpec& ex_spec = get_exec_instruction_spec().at(exec_opcode);

    auto resolution_info_vec = addr_event.resolution_info;
    // NOTE(review): the macro invocation opening this assertion (likely `BB_ASSERT_LTE(`)
    // was dropped by this extract — confirm against the repository source.
        resolution_info_vec.size(), static_cast<size_t>(AVM_MAX_OPERANDS), "Resolution info size is out of range");
    // Pad with default values for the missing operands.
    resolution_info_vec.resize(AVM_MAX_OPERANDS,
                               {
                                   // This is the default we want: both tag and value 0.
                                   .after_relative = FF::zero(),
                                   .resolved_operand = Operand::from_tag(static_cast<ValueTag>(0), 0),
                                   .error = std::nullopt,
                               });

    std::array<bool, AVM_MAX_OPERANDS> should_apply_indirection{};
    // NOTE(review): declarations of `relative_oob`, `is_relative` and `is_indirect`
    // (all used below) were dropped by this extract — confirm against the repository source.
    std::array<bool, AVM_MAX_OPERANDS> is_relative_effective{};
    std::array<bool, AVM_MAX_OPERANDS> is_indirect_effective{};
    std::array<FF, AVM_MAX_OPERANDS> after_relative{};
    std::array<FF, AVM_MAX_OPERANDS> resolved_operand{};
    std::array<uint8_t, AVM_MAX_OPERANDS> resolved_operand_tag{};
    uint8_t num_relative_operands = 0;

    // The error about the base address being invalid is stored in every resolution_info member when it happens.
    bool base_address_invalid = resolution_info_vec[0].error.has_value() &&
                                *resolution_info_vec[0].error == AddressingEventError::BASE_ADDRESS_INVALID;
    bool do_base_check = false; // Whether we need to retrieve the base address,
                                // i.e., at least one operand is relative.

    // Gather operand information.
    for (size_t i = 0; i < AVM_MAX_OPERANDS; i++) {
        const auto& resolution_info = resolution_info_vec[i];
        bool op_is_address = i < ex_spec.num_addresses;
        relative_oob[i] = resolution_info.error.has_value() &&
                          *resolution_info.error == AddressingEventError::RELATIVE_COMPUTATION_OOB;
        is_relative[i] = is_operand_relative(instruction.addressing_mode, i);
        is_indirect[i] = is_operand_indirect(instruction.addressing_mode, i);
        // Relative/indirect wire bits only take effect on operands that are actually addresses.
        is_relative_effective[i] = op_is_address && is_relative[i];
        is_indirect_effective[i] = op_is_address && is_indirect[i];
        should_apply_indirection[i] = is_indirect_effective[i] && !relative_oob[i] && !base_address_invalid;
        resolved_operand_tag[i] = static_cast<uint8_t>(resolution_info.resolved_operand.get_tag());
        after_relative[i] = resolution_info.after_relative;
        resolved_operand[i] = resolution_info.resolved_operand;
        if (is_relative_effective[i]) {
            do_base_check = true;
            num_relative_operands++;
        }
    }

    BB_ASSERT(do_base_check || !base_address_invalid, "Base address is invalid but we are not checking it.");

    // Set the operand columns.
    for (size_t i = 0; i < AVM_MAX_OPERANDS; i++) {
        trace.set(row,
                  { {
                      { OPERAND_IS_RELATIVE_WIRE_COLUMNS[i], is_relative[i] ? 1 : 0 },
                      { OPERAND_IS_INDIRECT_WIRE_COLUMNS[i], is_indirect[i] ? 1 : 0 },
                      { OPERAND_RELATIVE_OVERFLOW_COLUMNS[i], relative_oob[i] ? 1 : 0 },
                      { OPERAND_AFTER_RELATIVE_COLUMNS[i], after_relative[i] },
                      { OPERAND_SHOULD_APPLY_INDIRECTION_COLUMNS[i], should_apply_indirection[i] ? 1 : 0 },
                      { OPERAND_IS_RELATIVE_VALID_BASE_COLUMNS[i],
                        (is_relative_effective[i] && !base_address_invalid) ? 1 : 0 },
                      { RESOLVED_OPERAND_COLUMNS[i], resolved_operand[i] },
                      { RESOLVED_OPERAND_TAG_COLUMNS[i], resolved_operand_tag[i] },
                  } });
    }

    // We need to compute relative and indirect over the whole 16 bits of the indirect flag.
    // See comment in PIL file about indirect upper bits.
    for (size_t i = AVM_MAX_OPERANDS; i < TOTAL_INDIRECT_BITS / 2; i++) {
        bool is_relative = is_operand_relative(instruction.addressing_mode, i);
        bool is_indirect = is_operand_indirect(instruction.addressing_mode, i);
        trace.set(row,
                  { {
                      { OPERAND_IS_RELATIVE_WIRE_COLUMNS[i], is_relative ? 1 : 0 },
                      { OPERAND_IS_INDIRECT_WIRE_COLUMNS[i], is_indirect ? 1 : 0 },
                  } });
    }

    // Inverse of following difference is required when base address is invalid.
    FF base_address_tag_diff = base_address_invalid ? FF(static_cast<uint8_t>(addr_event.base_address.get_tag())) -
                                                          FF(static_cast<uint8_t>(MemoryTag::U32))
                                                    : 0;

    // Tag check after indirection.
    bool some_final_check_failed = std::ranges::any_of(addr_event.resolution_info, [](const auto& info) {
        return info.error.has_value() && *info.error == AddressingEventError::INVALID_ADDRESS_AFTER_INDIRECTION;
    });
    // Pack the (tag - U32) differences 3 bits per operand; any non-zero limb witnesses a failure.
    FF batched_tags_diff = 0;
    if (some_final_check_failed) {
        FF power_of_2 = 1;
        for (size_t i = 0; i < AVM_MAX_OPERANDS; ++i) {
            if (should_apply_indirection[i]) {
                batched_tags_diff += power_of_2 * (FF(resolved_operand_tag[i]) - FF(MEM_TAG_U32));
            }
            power_of_2 *= 8; // 2^3
        }
    }

    // Collect addressing errors. See PIL file for reference.
    bool addressing_failed =
        std::ranges::any_of(addr_event.resolution_info, [](const auto& info) { return info.error.has_value(); });
    FF addressing_error_collection =
        addressing_failed
            ? FF(
                  // Base address invalid.
                  (base_address_invalid ? 1 : 0) +
                  // Relative overflow.
                  std::accumulate(addr_event.resolution_info.begin(),
                                  addr_event.resolution_info.end(),
                                  static_cast<uint32_t>(0),
                                  [](uint32_t acc, const auto& info) {
                                      return acc +
                                             (info.error.has_value() &&
                                                      *info.error == AddressingEventError::RELATIVE_COMPUTATION_OOB
                                                  ? 1
                                                  : 0);
                                  }) +
                  // Some invalid address after indirection.
                  (some_final_check_failed ? 1 : 0))
            : 0;

    trace.set(
        row,
        { {
            { C::execution_sel_addressing_error, addressing_failed ? 1 : 0 },
            { C::execution_addressing_error_collection_inv, addressing_error_collection }, // Will be inverted in batch.
            { C::execution_base_address_val, addr_event.base_address.as_ff() },
            { C::execution_base_address_tag, static_cast<uint8_t>(addr_event.base_address.get_tag()) },
            { C::execution_base_address_tag_diff_inv, base_address_tag_diff }, // Will be inverted in batch.
            { C::execution_batched_tags_diff_inv, batched_tags_diff },         // Will be inverted in batch.
            { C::execution_sel_some_final_check_failed, some_final_check_failed ? 1 : 0 },
            { C::execution_sel_base_address_failure, base_address_invalid ? 1 : 0 },
            { C::execution_num_relative_operands_inv, num_relative_operands }, // Will be inverted in batch later.
            { C::execution_sel_do_base_check, do_base_check ? 1 : 0 },
            { C::execution_highest_address, AVM_HIGHEST_MEM_ADDRESS },
        } });
}
1017
// Batch-inverts every column that was populated with raw (not-yet-inverted) values
// during trace generation; invert_columns performs a single batched field inversion
// instead of one inversion per cell, and skips zero entries.
// NOTE(review): the signature line of this definition was dropped by this extract —
// confirm the function name against the repository source.
{
    trace.invert_columns({ {
        // Registers.
        C::execution_batched_tags_diff_inv_reg,
        // Context.
        C::execution_is_parent_id_inv,
        C::execution_internal_call_return_id_inv,
        // Trees.
        C::execution_remaining_data_writes_inv,
        C::execution_remaining_note_hashes_inv,
        C::execution_remaining_nullifiers_inv,
        // SendL2ToL1Msg. NOTE(review): the original comment said "L1ToL2MsgExists", but
        // this column is populated in the SENDL2TOL1MSG branch — confirm the label.
        C::execution_remaining_l2_to_l1_msgs_inv,
        // Discard.
        C::execution_dying_context_id_inv,
        C::execution_dying_context_diff_inv,
        // Addressing.
        C::execution_addressing_error_collection_inv,
        C::execution_batched_tags_diff_inv,
        C::execution_base_address_tag_diff_inv,
        C::execution_num_relative_operands_inv,
    } });
}
1042
// Fills the register columns for one row: routes opcode inputs into the read
// registers and the opcode output into the write registers, records value/tag
// columns, marks effective reads, and builds the batched tag-difference witness
// used to prove a register tag-check failure.
// NOTE(review): the first lines of this definition (function name and the
// exec_opcode, inputs and registers parameters) were dropped by this extract; the
// declaration is
//   void process_registers(ExecutionOpCode exec_opcode, const std::vector<MemoryValue>& inputs,
//                          const MemoryValue& output, std::span<MemoryValue> registers,
//                          bool register_processing_failed, TraceContainer& trace, uint32_t row).
                                            const MemoryValue& output,
                                            bool register_processing_failed,
                                            TraceContainer& trace,
                                            uint32_t row)
{
    BB_ASSERT_EQ(registers.size(), static_cast<size_t>(AVM_MAX_REGISTERS), "Registers size is out of range");
    // At this point we can assume instruction fetching succeeded, so this should never fail.
    const auto& register_info = get_exec_instruction_spec().at(exec_opcode).register_info;

    // Registers. We set all of them here, even the write ones. This is fine because
    // if an error occured before the register write group, simulation would pass the default
    // value-tag (0, 0). Furthermore, the permutation of the memory write would not be activated.
    size_t input_counter = 0;
    for (uint8_t i = 0; i < AVM_MAX_REGISTERS; ++i) {
        if (register_info.is_active(i)) {
            if (register_info.is_write(i)) {
                // If this is a write operation, we need to get the value from the output.
                registers[i] = output;
            } else {
                // If this is a read operation, we need to get the value from the input.

                // Register specifications must be consistent with the number of inputs.
                BB_ASSERT(inputs.size() > input_counter, "Not enough inputs for register read");

                registers[i] = inputs.at(input_counter);
                input_counter++;
            }
        }
    }

    for (size_t i = 0; i < AVM_MAX_REGISTERS; i++) {
        trace.set(REGISTER_COLUMNS[i], row, registers[i]);
        trace.set(REGISTER_MEM_TAG_COLUMNS[i], row, static_cast<uint8_t>(registers[i].get_tag()));
        // This one is special because it sets the reads (but not the writes).
        // If we got here, sel_should_read_registers=1.
        if (register_info.is_active(i) && !register_info.is_write(i)) {
            trace.set(REGISTER_OP_REG_EFFECTIVE_COLUMNS[i], row, 1);
        }
    }

    // On a tag-check failure, pack the (actual - expected) tag differences 3 bits per
    // register so a single non-zero witness proves the error.
    FF batched_tags_diff_reg = 0;
    if (register_processing_failed) {
        FF power_of_2 = 1;
        for (size_t i = 0; i < AVM_MAX_REGISTERS; ++i) {
            if (register_info.need_tag_check(i)) {
                batched_tags_diff_reg += power_of_2 * (FF(static_cast<uint8_t>(registers[i].get_tag())) -
                                                       FF(static_cast<uint8_t>(*register_info.expected_tag(i))));
            }
            power_of_2 *= 8; // 2^3
        }
    }

    trace.set(row,
              { {
                  { C::execution_sel_should_read_registers, 1 },
                  { C::execution_batched_tags_diff_inv_reg, batched_tags_diff_reg }, // Will be inverted in batch.
                  { C::execution_sel_register_read_error, register_processing_failed ? 1 : 0 },
              } });
}
1105
// Marks the effective-write register columns for one row (the write counterpart of
// the read marking done in process_registers).
// NOTE(review): the signature line was dropped by this extract; the declaration is
//   void process_registers_write(ExecutionOpCode exec_opcode, TraceContainer& trace, uint32_t row).
{
    const auto& register_info = get_exec_instruction_spec().at(exec_opcode).register_info;
    trace.set(C::execution_sel_should_write_registers, row, 1);

    for (size_t i = 0; i < AVM_MAX_REGISTERS; i++) {
        // This one is special because it sets the writes.
        // If we got here, sel_should_write_registers=1.
        if (register_info.is_active(i) && register_info.is_write(i)) {
            trace.set(REGISTER_OP_REG_EFFECTIVE_COLUMNS[i], row, 1);
        }
    }
}
1119
// Populates the GETENVVAR-specific columns: looks up the precomputed spec for the
// requested environment variable and sets the per-variable selectors, the
// public-inputs lookup coordinates, and the output register tag.
// NOTE(review): the first line of this definition was dropped by this extract; the
// declaration is
//   void process_get_env_var_opcode(simulation::Operand envvar_enum, MemoryValue output,
//                                   TraceContainer& trace, uint32_t row).
                                                     MemoryValue output,
                                                     TraceContainer& trace,
                                                     uint32_t row)
{
    BB_ASSERT_EQ(envvar_enum.get_tag(), ValueTag::U8, "Envvar enum tag is not U8");
    const auto& envvar_spec = GetEnvVarSpec::get_table(envvar_enum.as<uint8_t>());

    trace.set(row,
              { {
                  { C::execution_sel_execute_get_env_var, 1 },
                  { C::execution_sel_envvar_pi_lookup_col0, envvar_spec.envvar_pi_lookup_col0 ? 1 : 0 },
                  { C::execution_sel_envvar_pi_lookup_col1, envvar_spec.envvar_pi_lookup_col1 ? 1 : 0 },
                  { C::execution_envvar_pi_row_idx, envvar_spec.envvar_pi_row_idx },
                  { C::execution_is_address, envvar_spec.is_address ? 1 : 0 },
                  { C::execution_is_sender, envvar_spec.is_sender ? 1 : 0 },
                  { C::execution_is_transactionfee, envvar_spec.is_transactionfee ? 1 : 0 },
                  { C::execution_is_isstaticcall, envvar_spec.is_isstaticcall ? 1 : 0 },
                  { C::execution_is_l2gasleft, envvar_spec.is_l2gasleft ? 1 : 0 },
                  { C::execution_is_dagasleft, envvar_spec.is_dagasleft ? 1 : 0 },
                  { C::execution_value_from_pi,
                    // Only meaningful when the value comes from public inputs; 0 otherwise.
                    envvar_spec.envvar_pi_lookup_col0 || envvar_spec.envvar_pi_lookup_col1 ? output.as_ff() : 0 },
                  { C::execution_mem_tag_reg_0_, envvar_spec.out_tag },
              } });
}
1145
// Static registration of the lookup/permutation interactions between the execution
// trace and the other sub-traces (one `.add<>` per interaction).
// NOTE(review): the opening of this definition (the `interactions = InteractionDefinition()`
// line), a number of interleaved `.add<...>` entries (this extract dropped every other
// link-heavy line — e.g. several section headers below have no entry under them), and the
// terminating `;` are not visible here — confirm the full chain against the repository source.
    // Execution specification (precomputed)
    // Bytecode retrieval
    .add<lookup_execution_bytecode_retrieval_result_settings, InteractionType::LookupGeneric>()
    // Instruction fetching
    .add<lookup_execution_instruction_fetching_body_settings, InteractionType::LookupGeneric>()
    // Addressing
    .add<lookup_addressing_relative_overflow_result_1_settings, InteractionType::LookupGeneric>(C::gt_sel)
    .add<lookup_addressing_relative_overflow_result_3_settings, InteractionType::LookupGeneric>(C::gt_sel)
    .add<lookup_addressing_relative_overflow_result_5_settings, InteractionType::LookupGeneric>(C::gt_sel)
    // Internal Call Stack
    .add<perm_internal_call_push_call_stack_settings, InteractionType::Permutation>()
    // Gas
    .add<lookup_gas_addressing_gas_read_settings, InteractionType::LookupIntoIndexedByRow>()
    .add<lookup_gas_is_out_of_gas_da_settings, InteractionType::LookupGeneric>(C::gt_sel)
    // Gas - ToRadix BE
    .add<lookup_execution_check_radix_gt_256_settings, InteractionType::LookupGeneric>(C::gt_sel)
    .add<lookup_execution_get_max_limbs_settings, InteractionType::LookupGeneric>(C::gt_sel)
    // Dynamic Gas - SStore
    // Context Stack
    .add<perm_context_ctx_stack_call_settings, InteractionType::Permutation>()
    .add<lookup_context_ctx_stack_return_settings, InteractionType::LookupGeneric>()
    // External Call
    .add<lookup_external_call_is_da_gas_left_gt_allocated_settings, InteractionType::LookupGeneric>(C::gt_sel)
    // GetEnvVar opcode
    .add<lookup_get_env_var_read_from_public_inputs_col0_settings, InteractionType::LookupIntoIndexedByRow>()
    // Sload opcode (cannot be sequential as public data tree check trace is sorted in tracegen)
    .add<lookup_sload_storage_read_settings, InteractionType::LookupGeneric>()
    // Sstore opcode
    // NoteHashExists
    .add<lookup_notehash_exists_note_hash_read_settings, InteractionType::LookupSequential>()
    // NullifierExists opcode
    .add<lookup_nullifier_exists_nullifier_exists_check_settings, InteractionType::LookupSequential>()
    // EmitNullifier
    // EmitNoteHash
    .add<lookup_emit_notehash_notehash_tree_write_settings, InteractionType::LookupSequential>()
    // L1ToL2MsgExists
    // NOTE(review): the line below is the orphaned continuation of an `.add<...>(`
    // whose opening line was dropped by this extract.
        C::gt_sel)
    .add<lookup_l1_to_l2_message_exists_l1_to_l2_msg_read_settings, InteractionType::LookupSequential>()
    // SendL2ToL1Msg
    .add<lookup_send_l2_to_l1_msg_write_l2_to_l1_msg_settings, InteractionType::LookupIntoIndexedByRow>()
    // Dispatching to other sub-traces
    .add<lookup_execution_dispatch_to_bitwise_settings, InteractionType::LookupGeneric>()
    .add<perm_execution_dispatch_to_rd_copy_settings, InteractionType::Permutation>()
    .add<lookup_execution_dispatch_to_set_settings, InteractionType::LookupGeneric>()
    .add<perm_execution_dispatch_to_emit_public_log_settings, InteractionType::Permutation>()
    .add<perm_execution_dispatch_to_sha256_compression_settings, InteractionType::Permutation>()
    .add<perm_execution_dispatch_to_ecc_add_settings, InteractionType::Permutation>()
1223} // namespace bb::avm2::tracegen
#define BB_ASSERT(expression,...)
Definition assert.hpp:70
#define BB_ASSERT_EQ(actual, expected,...)
Definition assert.hpp:83
#define BB_ASSERT_LTE(left, right,...)
Definition assert.hpp:158
#define MEM_TAG_U32
#define AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_L2_TO_L1_MSGS_ROW_IDX
#define AVM_MAX_OPERANDS
#define AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_NULLIFIERS_ROW_IDX
#define NOTE_HASH_TREE_LEAF_COUNT
#define AVM_WRITTEN_PUBLIC_DATA_SLOTS_TREE_HEIGHT
#define MAX_ETH_ADDRESS_VALUE
#define L1_TO_L2_MSG_TREE_LEAF_COUNT
#define DOM_SEP__SILOED_NULLIFIER
#define AVM_MAX_REGISTERS
#define NULLIFIER_TREE_HEIGHT
#define MAX_L2_TO_L1_MSGS_PER_TX
#define DOM_SEP__PUBLIC_LEAF_SLOT
#define MAX_NOTE_HASHES_PER_TX
#define MAX_NULLIFIERS_PER_TX
#define AVM_HIGHEST_MEM_ADDRESS
#define MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX
static TaggedValue from_tag(ValueTag tag, FF value)
ValueTag get_tag() const
void process_execution_spec(const simulation::ExecutionEvent &ex_event, TraceContainer &trace, uint32_t row)
void process_instr_fetching(const simulation::Instruction &instruction, TraceContainer &trace, uint32_t row)
static const InteractionDefinition interactions
void process_registers(ExecutionOpCode exec_opcode, const std::vector< MemoryValue > &inputs, const MemoryValue &output, std::span< MemoryValue > registers, bool register_processing_failed, TraceContainer &trace, uint32_t row)
void process_get_env_var_opcode(simulation::Operand envvar_enum, MemoryValue output, TraceContainer &trace, uint32_t row)
void process_registers_write(ExecutionOpCode exec_opcode, TraceContainer &trace, uint32_t row)
void process_gas(const simulation::GasEvent &gas_event, ExecutionOpCode exec_opcode, TraceContainer &trace, uint32_t row)
void process(const simulation::EventEmitterInterface< simulation::ExecutionEvent >::Container &ex_events, TraceContainer &trace)
void process_addressing(const simulation::AddressingEvent &addr_event, const simulation::Instruction &instruction, TraceContainer &trace, uint32_t row)
static Table get_table(uint8_t envvar)
InteractionDefinition & add(auto &&... args)
#define info(...)
Definition log.hpp:93
TestTraceContainer trace
bool app_logic_failure
uint32_t app_logic_exit_context_id
bool teardown_failure
unordered_flat_set< uint32_t > does_context_fail
uint32_t teardown_exit_context_id
GasEvent gas_event
Instruction instruction
AvmProvingInputs inputs
Column get_dyn_gas_selector(uint32_t dyn_gas_id)
Get the column selector for a given dynamic gas ID.
const std::unordered_map< ExecutionOpCode, SubtraceInfo > & get_subtrace_info_map()
Column get_subtrace_selector(SubtraceSel subtrace_sel)
Get the column selector for a given subtrace selector.
FF get_subtrace_id(SubtraceSel subtrace_sel)
Get the subtrace ID for a given subtrace enum.
lookup_settings< lookup_send_l2_to_l1_msg_recipient_check_settings_ > lookup_send_l2_to_l1_msg_recipient_check_settings
lookup_settings< lookup_get_env_var_read_from_public_inputs_col1_settings_ > lookup_get_env_var_read_from_public_inputs_col1_settings
permutation_settings< perm_execution_dispatch_to_poseidon2_perm_settings_ > perm_execution_dispatch_to_poseidon2_perm_settings
permutation_settings< perm_execution_dispatch_to_get_contract_instance_settings_ > perm_execution_dispatch_to_get_contract_instance_settings
lookup_settings< lookup_execution_check_written_storage_slot_settings_ > lookup_execution_check_written_storage_slot_settings
lookup_settings< lookup_addressing_relative_overflow_result_2_settings_ > lookup_addressing_relative_overflow_result_2_settings
lookup_settings< lookup_addressing_relative_overflow_result_4_settings_ > lookup_addressing_relative_overflow_result_4_settings
lookup_settings< lookup_execution_dyn_l2_factor_bitwise_settings_ > lookup_execution_dyn_l2_factor_bitwise_settings
lookup_settings< lookup_execution_dispatch_to_alu_settings_ > lookup_execution_dispatch_to_alu_settings
lookup_settings< lookup_external_call_is_l2_gas_left_gt_allocated_settings_ > lookup_external_call_is_l2_gas_left_gt_allocated_settings
bool is_operand_relative(uint16_t indirect_flag, size_t operand_index)
Checks if the operand at the given index is relative.
lookup_settings< lookup_emit_nullifier_write_nullifier_settings_ > lookup_emit_nullifier_write_nullifier_settings
size_t get_p_limbs_per_radix_size(size_t radix)
Gets the number of limbs that the modulus, p, decomposes into for a given radix.
Definition to_radix.cpp:75
lookup_settings< lookup_gas_is_out_of_gas_l2_settings_ > lookup_gas_is_out_of_gas_l2_settings
AvmFlavorSettings::FF FF
Definition field.hpp:10
permutation_settings< perm_execution_dispatch_to_cd_copy_settings_ > perm_execution_dispatch_to_cd_copy_settings
lookup_settings< lookup_execution_dispatch_to_cast_settings_ > lookup_execution_dispatch_to_cast_settings
lookup_settings< lookup_context_ctx_stack_rollback_settings_ > lookup_context_ctx_stack_rollback_settings
permutation_settings< perm_execution_dispatch_to_keccakf1600_settings_ > perm_execution_dispatch_to_keccakf1600_settings
bool is_operand_indirect(uint16_t indirect_flag, size_t operand_index)
Checks if the operand at the given index is indirect.
lookup_settings< lookup_execution_get_p_limbs_settings_ > lookup_execution_get_p_limbs_settings
const std::unordered_map< ExecutionOpCode, ExecInstructionSpec > & get_exec_instruction_spec()
lookup_settings< lookup_internal_call_unwind_call_stack_settings_ > lookup_internal_call_unwind_call_stack_settings
lookup_settings< lookup_execution_exec_spec_read_settings_ > lookup_execution_exec_spec_read_settings
lookup_settings< lookup_get_env_var_precomputed_info_settings_ > lookup_get_env_var_precomputed_info_settings
lookup_settings< lookup_addressing_relative_overflow_result_0_settings_ > lookup_addressing_relative_overflow_result_0_settings
lookup_settings< lookup_l1_to_l2_message_exists_l1_to_l2_msg_leaf_index_in_range_settings_ > lookup_l1_to_l2_message_exists_l1_to_l2_msg_leaf_index_in_range_settings
lookup_settings< lookup_addressing_relative_overflow_result_6_settings_ > lookup_addressing_relative_overflow_result_6_settings
lookup_settings< lookup_execution_instruction_fetching_result_settings_ > lookup_execution_instruction_fetching_result_settings
lookup_settings< lookup_notehash_exists_note_hash_leaf_index_in_range_settings_ > lookup_notehash_exists_note_hash_leaf_index_in_range_settings
permutation_settings< perm_execution_dispatch_to_to_radix_settings_ > perm_execution_dispatch_to_to_radix_settings
lookup_settings< lookup_sstore_record_written_storage_slot_settings_ > lookup_sstore_record_written_storage_slot_settings
constexpr decltype(auto) get(::tuplet::tuple< T... > &&t) noexcept
Definition tuple.hpp:13
std::vector< OperandResolutionInfo > resolution_info
ExecutionOpCode get_exec_opcode() const