Barretenberg
The ZK-SNARK library at the core of Aztec
Loading...
Searching...
No Matches
sha256.cpp
Go to the documentation of this file.
1// === AUDIT STATUS ===
2// internal: { status: Complete, auditors: [Luke], commit: dd03c4a23ab067274b4964cacb36d1545f73fb14}
3// external_1: { status: not started, auditors: [], commit: }
4// external_2: { status: not started, auditors: [], commit: }
5// =====================
6
24#include "sha256.hpp"
25
31
32using namespace bb;
33
34namespace bb::stdlib {
35using namespace bb::plookup;
36
// Converts a 32-bit field element into sparse limb form for the SHA-256 message
// schedule (per the cross-reference index below: SHA256<Builder>::convert_witness,
// defined at sha256.cpp:46).
// NOTE(review): this is a Doxygen listing extraction, not compilable source.
// Original lines 46 (function signature), 48, 53 and 59 (presumably the
// brace-initializer openers for the two limb arrays filled below — likely
// result.sparse_limbs and a second member; confirm against the real sha256.cpp)
// are missing from this view.
45template <typename Builder>
47{
49
// A single plookup read yields accumulator columns C2 and C3, consumed below.
50 sparse_witness_limbs result(input);
51 const auto lookup = plookup_read<Builder>::get_lookup_accumulators(MultiTableId::SHA256_WITNESS_INPUT, input);
52
// Four C2-column accumulator slices (the initializer opener was on missing line 53).
54 lookup[ColumnIdx::C2][0],
55 lookup[ColumnIdx::C2][1],
56 lookup[ColumnIdx::C2][2],
57 lookup[ColumnIdx::C2][3],
58 };
// Four C3-column accumulator slices (the initializer opener was on missing line 59).
60 lookup[ColumnIdx::C3][0],
61 lookup[ColumnIdx::C3][1],
62 lookup[ColumnIdx::C3][2],
63 lookup[ColumnIdx::C3][3],
64 };
// Mark the limbs as populated so extend_witness can skip lazy conversion.
65 result.has_sparse_limbs = true;
66
67 return result;
68}
69
// Extends the 16-word message block to the 64-word schedule W[0..63] per the
// SHA-256 specification: W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16]
// (see the loop comment at original line 146). Defined at sha256.cpp:83 per the
// index below.
// NOTE(review): Doxygen extraction dropped original lines 83 (signature), 85,
// 89 (declaration of w_sparse), 114 and 123 (openers/declarations for the
// `left` and `right` rotated-limb arrays — also the likely source of
// left_multipliers / right_multipliers), 144 (declaration of xor_result, the
// normalizing lookup read), and 169-174 (presumably a comment block).
// Names used-but-undeclared below come from those missing lines; recover them
// from the real sha256.cpp before relying on this text.
82template <typename Builder>
84{
86
87 Builder* ctx = w_in[0].get_context();
88
90
91 // Populate initial 16 words from input (sparse form computed lazily as needed)
92 for (size_t i = 0; i < 16; ++i) {
93 w_sparse[i] = SHA256<Builder>::sparse_witness_limbs(w_in[i]);
94 // Extract builder context from inputs
95 if ((ctx == nullptr) && w_in[i].get_context()) {
96 ctx = w_in[i].get_context();
97 }
98 }
99
100 // Compute extended words W[16..63]
101 for (size_t i = 16; i < 64; ++i) {
102 auto& w_left = w_sparse[i - 15];
103 auto& w_right = w_sparse[i - 2];
104
// Lazily convert operands to sparse limb form on first use (references above
// write the converted form back into w_sparse).
105 if (!w_left.has_sparse_limbs) {
106 w_left = convert_witness(w_left.normal);
107 }
108 if (!w_right.has_sparse_limbs) {
109 w_right = convert_witness(w_right.normal);
110 }
111
112 // Compute the (partially) rotated sparse limbs for σ₀
113 // Note: remaining contributions accounted for via w_left.rotated_limb_corrections
115 w_left.sparse_limbs[0] * left_multipliers[0],
116 w_left.sparse_limbs[1] * left_multipliers[1],
117 w_left.sparse_limbs[2] * left_multipliers[2],
118 w_left.sparse_limbs[3] * left_multipliers[3],
119 };
120
121 // Compute the (partially) rotated sparse limbs for σ₁
122 // Note: remaining contributions accounted for via w_right.rotated_limb_corrections
124 w_right.sparse_limbs[0] * right_multipliers[0],
125 w_right.sparse_limbs[1] * right_multipliers[1],
126 w_right.sparse_limbs[2] * right_multipliers[2],
127 w_right.sparse_limbs[3] * right_multipliers[3],
128 };
129
130 // Compute σ₀(w[i-15]) in sparse form where σ₀(x) = (x >>> 7) ⊕ (x >>> 18) ⊕ (x >> 3).
131 // Each sparse digit holds the sum of contributions from the three rotation/shift operations (digit value in
132 // {0,1,2,3}). The fr(4) scaling positions σ₀'s contribution in the upper 2 bits of each 4-bit digit slot: when
133 // combined with σ₁ (unscaled, in lower 2 bits), each digit becomes 4*σ₀_digit + σ₁_digit ∈ [0,15].
134 const field_pt left_xor_sparse =
135 left[0].add_two(left[1], left[2]).add_two(left[3], w_left.rotated_limb_corrections[1]) * fr(4);
136
137 // Compute σ₀(w[i-15]) + σ₁(w[i-2]) in sparse form where σ₁(x) = (x >>> 17) ⊕ (x >>> 19) ⊕ (x >> 10).
138 const field_pt xor_result_sparse = right[0]
139 .add_two(right[1], right[2])
140 .add_two(right[3], w_right.rotated_limb_corrections[2])
141 .add_two(w_right.rotated_limb_corrections[3], left_xor_sparse);
142
143 // Normalize the sparse representation via a lookup to obtain the genuine result σ₀ + σ₁
// (the lookup read defining `xor_result` was on missing line 144 — presumably a
// SHA256_WITNESS_OUTPUT table read; confirm against the real source.)
145
146 // Compute W[i] = σ₁(W[i-2]) + W[i-7] + σ₀(W[i-15]) + W[i-16]
147 field_pt w_out_raw = xor_result.add_two(w_sparse[i - 16].normal, w_sparse[i - 7].normal);
148
149 // Natively compute value reduced to 32 bits per SHA-256 spec
150 const uint64_t w_out_modded = static_cast<uint32_t>(w_out_raw.get_value());
151 field_pt w_out;
152 if (w_out_raw.is_constant()) {
// Constant input: no witness or constraints needed, embed the reduced value.
153 w_out = field_pt(ctx, fr(w_out_modded));
154 } else {
155 // Establish w_out as the 32-bit reduction of w_out_raw via w_out_raw = w_out + divisor*2^32
156 w_out = witness_t<Builder>(ctx, fr(w_out_modded));
157 static constexpr fr inv_pow_two = fr(2).pow(32).invert();
158
159 field_pt w_out_raw_inv_pow_two = w_out_raw * inv_pow_two;
160 field_pt w_out_inv_pow_two = w_out * inv_pow_two;
161 field_pt divisor = w_out_raw_inv_pow_two - w_out_inv_pow_two;
162 // Sum of four 32-bit values: divisor ≤ 3.
163 divisor.create_range_constraint(/*num_bits=*/2);
164 }
165
166 w_sparse[i] = sparse_witness_limbs(w_out);
167 }
168
// NOTE(review): the rationale for constraining only W[62] and W[63] was
// presumably in the comment block on missing lines 169-174.
175 w_sparse[62].normal.create_range_constraint(32);
176 w_sparse[63].normal.create_range_constraint(32);
177
// Collapse the sparse schedule back to plain 32-bit field elements for callers.
178 std::array<field_pt, 64> w_extended;
179 for (size_t i = 0; i < 64; ++i) {
180 w_extended[i] = w_sparse[i].normal;
181 }
182 return w_extended;
183}
184
// NOTE(review): the bodies of map_into_choose_sparse_form (sha256.cpp:196-204)
// and map_into_maj_sparse_form (sha256.cpp:216-224) were dropped entirely by
// the Doxygen listing extraction; only the template headers and the trailing
// closing-line numbers (204, 224) remain. Per the cross-reference index below,
// they convert a field element into sparse form for the Choose and Majority
// functions respectively — recover the bodies from the real sha256.cpp.
195template <typename Builder>
204
215template <typename Builder>
224
// Computes Σ₁(e) + Ch(e,f,g) for a SHA-256 compression round via sparse-form
// plookup arithmetic (sha256.cpp:243 per the index below). Also refreshes
// e.sparse as a side effect (assignment at original line 253).
// NOTE(review): Doxygen extraction dropped original lines 243 (signature), 245,
// and 249 — the declaration of `lookup`, presumably a SHA256_CH_INPUT
// accumulator read on e.normal; confirm against the real sha256.cpp.
242template <typename Builder>
244{
246 // Separates rotation contributions (0-3) from Choose encoding (0-6) in each base-28 digit
247 constexpr fr SPARSE_MULT = fr(7);
248
250 const auto rotation_coefficients = sha256_tables::get_choose_rotation_multipliers();
251
// C3 accumulates the rotation wraps; C2 holds sparse slices of e.
252 field_pt rotation_result = lookup[ColumnIdx::C3][0];
253 e.sparse = lookup[ColumnIdx::C2][0];
254 field_pt sparse_L2 = lookup[ColumnIdx::C2][2];
255
256 // Compute e + 7*Σ₁(e) in sparse form
257 field_pt xor_result = (rotation_result * SPARSE_MULT)
258 .add_two(e.sparse * (rotation_coefficients[0] * SPARSE_MULT + fr(1)),
259 sparse_L2 * (rotation_coefficients[2] * SPARSE_MULT));
260
261 // Add 2f + 3g to get e + 7*Σ₁(e) + 2f + 3g (each digit in 0..27)
262 field_pt choose_result_sparse = xor_result.add_two(f.sparse + f.sparse, g.sparse + g.sparse + g.sparse);
263
264 // Normalize via lookup: each digit maps to Σ₁(e)_i + Ch(e,f,g)_i
265 field_pt choose_result = plookup_read<Builder>::read_from_1_to_2_table(SHA256_CH_OUTPUT, choose_result_sparse);
266
267 return choose_result;
268}
269
// Computes Σ₀(a) + Maj(a,b,c) for a SHA-256 compression round via sparse-form
// plookup arithmetic (sha256.cpp:288 per the index below). Also refreshes
// a.sparse as a side effect (assignment at original line 299).
// NOTE(review): Doxygen extraction dropped original lines 288 (signature), 290,
// and 294 — the declaration of `lookup`, presumably a SHA256_MAJ_INPUT
// accumulator read on a.normal; confirm against the real sha256.cpp.
287template <typename Builder>
289{
291 // Separates rotation contributions (0-3) from Majority encoding (0-3) in each base-16 digit
292 constexpr fr SPARSE_MULT = fr(4);
293
295 const auto rotation_coefficients = sha256_tables::get_majority_rotation_multipliers();
296
297 // first row of 3rd column gives accumulating sum of "non-trivial" wraps
298 field_pt rotation_result = lookup[ColumnIdx::C3][0];
299 a.sparse = lookup[ColumnIdx::C2][0];
300 field_pt sparse_L1_acc = lookup[ColumnIdx::C2][1];
301
302 // Compute a + 4*Σ₀(a) in sparse form
303 field_pt xor_result = (rotation_result * SPARSE_MULT)
304 .add_two(a.sparse * (rotation_coefficients[0] * SPARSE_MULT + fr(1)),
305 sparse_L1_acc * (rotation_coefficients[1] * SPARSE_MULT));
306
307 // Add b + c to get a + 4*Σ₀(a) + b + c (each digit in 0..15)
308 field_pt majority_result_sparse = xor_result.add_two(b.sparse, c.sparse);
309
310 // Normalize via lookup: each digit maps to Σ₀(a)_i + Maj(a,b,c)_i
311 field_pt majority_result = plookup_read<Builder>::read_from_1_to_2_table(SHA256_MAJ_OUTPUT, majority_result_sparse);
312
313 return majority_result;
314}
315
// Applies the SHA-256 compression function to one 512-bit message block:
// takes the 8-word chaining state h_init and 16 message words, runs 64 rounds,
// and returns h_init + round-state per the SHA-256 spec (sha256.cpp:333 per
// the index below).
// NOTE(review): Doxygen extraction dropped original lines 333 (first line of
// the signature — only its continuation survives below), 336, 343-348 and
// 360-371 (presumably comment blocks), 395 (presumably
// e.normal.create_range_constraint(32), given the comment at 391-393 covers
// both `a` and `e`), and 398-399 — including the declaration of `output`
// (std::array of 8 field elements per the index signature). Confirm all of
// these against the real sha256.cpp.
332template <typename Builder>
334 const std::array<field_t<Builder>, 16>& input)
335{
337
338 // Constrain inputs not implicitly constrained via lookups (h_init[3], h_init[7], input[0]).
339 // Other inputs are lookup-constrained in sparse form conversions or message extension.
340 h_init[3].create_range_constraint(32);
341 h_init[7].create_range_constraint(32);
342 input[0].create_range_constraint(32);
343
// Initialize the eight working variables a..h from the chaining state; b, c, f, g
// are pre-converted to the sparse forms their round functions consume.
349 sparse_value a = sparse_value(h_init[0]); // delay conversion to maj sparse form
350 auto b = map_into_maj_sparse_form(h_init[1]);
351 auto c = map_into_maj_sparse_form(h_init[2]);
352 sparse_value d = sparse_value(h_init[3]);
353 sparse_value e = sparse_value(h_init[4]); // delay conversion to choose sparse form
354 auto f = map_into_choose_sparse_form(h_init[5]);
355 auto g = map_into_choose_sparse_form(h_init[6]);
356 sparse_value h = sparse_value(h_init[7]);
357
358 // Extend the 16-word message block to 64 words per SHA-256 specification
359 const std::array<field_t<Builder>, 64> w = extend_witness(input);
360
// 64 compression rounds; round constants K[i] come from `round_constants`
// (declared outside this view — presumably the FIPS 180-4 constants; confirm).
372 for (size_t i = 0; i < 64; ++i) {
373 auto ch = choose_with_sigma1(e, f, g); // ch = Σ1(e) + Ch(e,f,g) ≤ 2*(2^32-1)
374 auto maj = majority_with_sigma0(a, b, c); // maj = Σ0(a) + Maj(a,b,c) ≤ 2*(2^32-1)
375
376 // T1 = ch + h + K[i] + W[i] ≤ 5*(2^32-1)
377 auto T1 = ch.add_two(h.normal, w[i] + fr(round_constants[i]));
378
// Rotate the working variables, folding T1 into the new e and a.
379 h = g;
380 g = f;
381 f = e;
382 e.normal =
383 hash_utils::add_normalize_unsafe(d.normal, T1, /*overflow_bits=*/3); // d + T1: 6 × 32-bit, overflow ≤ 5
384 d = c;
385 c = b;
386 b = a;
387 a.normal =
388 hash_utils::add_normalize_unsafe(T1, maj, /*overflow_bits=*/3); // T1 + Σ0+Maj: 7 × 32-bit, overflow ≤ 6
389 }
390
391 // Apply range constraints to `a` and `e` which are the only outputs of the previous loop not already
392 // lookup-constrained via sparse form conversion. Although not strictly necessary, this simplifies the analysis that
393 // the output of compression is fully constrained at minimal cost.
394 a.normal.create_range_constraint(32);
// (the matching constraint on e.normal was presumably on missing line 395.)
396
397 // Add round results into previous block output.
398 // Overflow bits = 1 since each summand is constrained to 32 bits.
400 output[0] = hash_utils::add_normalize_unsafe(a.normal, h_init[0], /*overflow_bits=*/1);
401 output[1] = hash_utils::add_normalize_unsafe(b.normal, h_init[1], /*overflow_bits=*/1);
402 output[2] = hash_utils::add_normalize_unsafe(c.normal, h_init[2], /*overflow_bits=*/1);
403 output[3] = hash_utils::add_normalize_unsafe(d.normal, h_init[3], /*overflow_bits=*/1);
404 output[4] = hash_utils::add_normalize_unsafe(e.normal, h_init[4], /*overflow_bits=*/1);
405 output[5] = hash_utils::add_normalize_unsafe(f.normal, h_init[5], /*overflow_bits=*/1);
406 output[6] = hash_utils::add_normalize_unsafe(g.normal, h_init[6], /*overflow_bits=*/1);
407 output[7] = hash_utils::add_normalize_unsafe(h.normal, h_init[7], /*overflow_bits=*/1);
408
409 // The final add_normalize outputs are not consumed by lookup tables, so they must be explicitly range-constrained.
410 // (Within the compression loop, lookup tables provide implicit 32-bit constraints on add_normalize outputs.)
411 for (const auto& val : output) {
412 val.create_range_constraint(32);
413 }
414
415 return output;
416}
417
// Explicit template instantiation for the Mega circuit builder.
// NOTE(review): original line 418 is missing from this listing — likely a
// second instantiation (e.g. for UltraCircuitBuilder, which the index's
// field_pt alias references); confirm against the real sha256.cpp.
419template class SHA256<bb::MegaCircuitBuilder>;
420
421} // namespace bb::stdlib
static sparse_value map_into_maj_sparse_form(const field_ct &input)
Convert a field element to sparse form for use in the Majority function.
Definition sha256.cpp:216
static std::array< field_ct, 64 > extend_witness(const std::array< field_ct, 16 > &w_in)
Extend the 16-word message block to 64 words per SHA-256 specification.
Definition sha256.cpp:83
static field_ct choose_with_sigma1(sparse_value &e, const sparse_value &f, const sparse_value &g)
Compute Σ₁(e) + Ch(e,f,g) for SHA-256 compression rounds.
Definition sha256.cpp:243
static sparse_witness_limbs convert_witness(const field_ct &input)
Convert a 32-bit value to sparse limbs form for message schedule extension.
Definition sha256.cpp:46
static field_ct majority_with_sigma0(sparse_value &a, const sparse_value &b, const sparse_value &c)
Compute Σ₀(a) + Maj(a,b,c) for SHA-256 compression rounds.
Definition sha256.cpp:288
static sparse_value map_into_choose_sparse_form(const field_ct &input)
Convert a field element to sparse form for use in the Choose function.
Definition sha256.cpp:196
static std::array< field_ct, 8 > sha256_block(const std::array< field_ct, 8 > &h_init, const std::array< field_ct, 16 > &input)
Apply the SHA-256 compression function to a single 512-bit message block.
Definition sha256.cpp:333
void create_range_constraint(size_t num_bits, std::string const &msg="field_t::range_constraint") const
Let x = *this.normalize(), constrain x.v < 2^{num_bits}.
Definition field.cpp:919
bb::fr get_value() const
Given a := *this, compute its value given by a.v * a.mul + a.add.
Definition field.cpp:836
bool is_constant() const
Definition field.hpp:441
field_t add_two(const field_t &add_b, const field_t &add_c) const
Efficiently compute (this + a + b) using big_mul gate.
Definition field.cpp:583
static plookup::ReadData< field_pt > get_lookup_accumulators(const plookup::MultiTableId id, const field_pt &key_a, const field_pt &key_b=0, const bool is_2_to_1_lookup=false)
Definition plookup.cpp:19
static field_pt read_from_1_to_2_table(const plookup::MultiTableId id, const field_pt &key_a)
Definition plookup.cpp:89
FF a
FF b
stdlib::field_t< UltraCircuitBuilder > field_pt
std::array< bb::fr, 3 > get_choose_rotation_multipliers()
Returns multipliers for computing Σ₁(e) rotations in choose_with_sigma1.
Definition sha256.hpp:409
std::array< bb::fr, 3 > get_majority_rotation_multipliers()
Returns multipliers for computing Σ₀(a) rotations in majority_with_sigma0.
Definition sha256.hpp:392
@ SHA256_CH_INPUT
Definition types.hpp:92
@ SHA256_MAJ_OUTPUT
Definition types.hpp:95
@ SHA256_CH_OUTPUT
Definition types.hpp:93
@ SHA256_WITNESS_OUTPUT
Definition types.hpp:97
@ SHA256_MAJ_INPUT
Definition types.hpp:94
void g(field_t< Builder > state[BLAKE_STATE_SIZE], size_t a, size_t b, size_t c, size_t d, field_t< Builder > x, field_t< Builder > y)
field_t< Builder > add_normalize_unsafe(const field_t< Builder > &a, const field_t< Builder > &b, size_t overflow_bits)
Compute (a + b) mod 2^32 with circuit constraints.
Entry point for Barretenberg command-line interface.
Definition api.hpp:5
field< Bn254FrParams > fr
Definition fr.hpp:155
constexpr decltype(auto) get(::tuplet::tuple< T... > &&t) noexcept
Definition tuple.hpp:13
Plookup tables for SHA-256 using sparse form representation.
BB_INLINE constexpr field pow(const uint256_t &exponent) const noexcept
constexpr field invert() const noexcept
std::array< field_ct, 4 > rotated_limb_corrections
Definition sha256.hpp:116
std::array< field_ct, 4 > sparse_limbs
Definition sha256.hpp:114