// Fragment of get_staggered_wnaf_fragment_value<wnaf_size> (named at its call site
// further down this file). NOTE(review): lines are non-contiguous in the original
// file; code is left byte-identical and only comments are added.
17 const uint64_t stagger,
// Guard: stagger must stay below 32 so the (1 << stagger) shifts below are valid.
21 BB_ASSERT_LT(stagger, 32ULL,
"biggroup_nafs: stagger value ≥ 32");
// The raw fragment must fit in the low `stagger` bits of the scalar.
29 BB_ASSERT_LT(fragment_u64, (1ULL << stagger),
"biggroup_nafs: fragment value ≥ 2^{stagger}");
32 int fragment =
static_cast<int>(fragment_u64);
// Fold the incoming wnaf skew into the fragment: subtract 2^stagger when the
// scalar is treated as positive, add it when negative.
39 if (!is_negative && wnaf_skew) {
40 fragment -= (1 << stagger);
44 if (is_negative && wnaf_skew) {
45 fragment += (1 << stagger);
// WNAF digits must be odd: an even raw fragment forces an output skew
// (the adjustment statements for lines 53/55 are not visible in this fragment).
51 bool output_skew = (fragment_u64 & 1) == 0;
52 if (!is_negative && output_skew) {
54 }
else if (is_negative && output_skew) {
// Map the signed (odd) fragment into the offset window representation used by the
// circuit: entries are shifted by wnaf_window_size = 2^{wnaf_size-1} so they are
// non-negative.
59 const int signed_wnaf_value = (fragment / 2);
60 constexpr int wnaf_window_size = (1ULL << (wnaf_size - 1));
61 uint64_t output_fragment = 0;
// NOTE(review): the branch conditions selecting between these two assignments are
// not visible here — presumably the sign of `fragment`; confirm in the full source.
63 output_fragment =
static_cast<uint64_t
>(wnaf_window_size + signed_wnaf_value - 1);
65 output_fragment =
static_cast<uint64_t
>(wnaf_window_size + signed_wnaf_value);
// Fragment: builds in-circuit offset WNAF entries from the raw wnaf_values words.
// NOTE(review): lines are non-contiguous in the original; comments only, code
// byte-identical. The enclosing function's name is not visible in this fragment.
74 C*
builder,
const uint64_t* wnaf_values,
bool is_negative,
size_t rounds,
const bool range_constrain_wnaf)
// All entries are offset by 2^{wnaf_size-1} so they are stored as non-negative values.
76 constexpr uint64_t wnaf_window_size = (1ULL << (wnaf_size - 1));
79 for (
size_t i = 0; i < rounds; ++i) {
// Each wnaf_values word packs the digit sign in bit 31 and the magnitude in the
// low 31 bits.
81 const bool predicate = (wnaf_values[i] >> 31U) & 1U;
82 const uint64_t wnaf_magnitude = (wnaf_values[i] & 0x7fffffffU);
87 uint64_t offset_wnaf_entry = 0;
// Effective digit sign is predicate XOR is_negative: same-sign digits map to
// window + magnitude, opposite-sign digits to window - magnitude - 1.
88 if ((!predicate && !is_negative) || (predicate && is_negative)) {
89 offset_wnaf_entry = wnaf_window_size + wnaf_magnitude;
91 offset_wnaf_entry = wnaf_window_size - wnaf_magnitude - 1;
// Optional in-circuit range constraint on the entry (constraint call not visible
// in this fragment).
97 if (range_constrain_wnaf) {
100 wnaf_entries.emplace_back(wnaf_entry);
// Fragment of reconstruct_bigfield_from_wnaf<wnaf_size> (named at its call site
// further down this file): recombines offset WNAF digits, skews, and the stagger
// fragment back into a bigfield element. NOTE(review): non-contiguous lines;
// comments only, code byte-identical.
112 const size_t stagger,
// Accumulate digits most-significant first (wnaf[rounds - 1 - i]).
117 for (
size_t i = 0; i < rounds; ++i) {
118 field_ct entry = wnaf[rounds - 1 - i];
120 accumulator.emplace_back(entry);
// Fold in the low `stagger` bits' fragment.
126 sum += (stagger_fragment);
130 Fr reconstructed_positive_part =
// Doubling step (shift-left by one bit) — context lines around it are not visible.
134 reconstructed_positive_part =
135 (reconstructed_positive_part + reconstructed_positive_part)
// Every stored digit carries a constant offset (entries were shifted to be
// non-negative); compute the aggregate constant that must be subtracted to recover
// the true signed value: sum over rounds of (2*window - 1) * 2^{i*wnaf_size}.
140 constexpr uint64_t wnaf_window_size = (1ULL << (wnaf_size - 1));
141 uint256_t negative_constant_wnaf_offset(0);
142 for (
size_t i = 0; i < rounds; ++i) {
143 negative_constant_wnaf_offset +=
uint256_t((wnaf_window_size * 2) - 1) * (
uint256_t(1) << (i * wnaf_size));
// Shift the aggregate offset past the stagger bits, then add the stagger
// fragment's own offset contribution.
147 negative_constant_wnaf_offset = negative_constant_wnaf_offset << stagger;
151 negative_constant_wnaf_offset += ((1ULL << wnaf_size) - 1ULL);
155 Fr reconstructed_negative_part =
// Final value = offset-laden positive accumulation minus the constant offset part.
159 Fr reconstructed = reconstructed_positive_part - reconstructed_negative_part;
161 return reconstructed;
// Fragment: computes the staggered WNAF representation of a (half-)scalar and
// returns the reconstructed value plus the wnaf struct. NOTE(review):
// non-contiguous lines; comments only, code byte-identical.
171 const bool range_constrain_wnaf,
175 constexpr size_t num_rounds = ((num_bits + wnaf_size - 1) / wnaf_size);
// Split off the low `stagger` bits; they become a separately-handled fragment.
178 const uint64_t stagger_mask = (1ULL << stagger) - 1;
181 const uint64_t stagger_scalar = scalar.
data[0] & stagger_mask;
184 bool skew_without_stagger =
false;
// Drop the stagger bits before running the fixed-WNAF decomposition.
186 k_u256 = k_u256 >> stagger;
189 &k_u256.data[0], &wnaf_values[0], skew_without_stagger, 0);
192 &k_u256.data[0], &wnaf_values[0], skew_without_stagger, 0);
196 const size_t num_rounds_excluding_stagger_bits = ((num_bits + wnaf_size - 1 - stagger) / wnaf_size);
// Derive the stagger fragment's offset value and the resulting output skew.
199 const auto [first_fragment, skew] =
200 get_staggered_wnaf_fragment_value<wnaf_size>(stagger_scalar, stagger, is_negative, skew_without_stagger);
205 builder, &wnaf_values[0], is_negative, num_rounds_excluding_stagger_bits, range_constrain_wnaf);
// In-circuit constraint: at most one of positive_skew / negative_skew may be set.
212 bool_ct both_skews_cannot_be_one = !(positive_skew & negative_skew);
214 bool_ct(
builder,
true),
"biggroup_nafs: both positive and negative skews cannot be set at the same time");
221 if (range_constrain_wnaf) {
// Recombine the circuit representation into an Fr for the caller's equality check.
226 Fr reconstructed = reconstruct_bigfield_from_wnaf<wnaf_size>(
227 builder, wnaf, positive_skew, negative_skew, stagger_fragment, stagger, num_rounds_excluding_stagger_bits);
// Returned aggregate: skews, the stagger fragment, and whether a fragment exists
// (only when stagger > 0).
230 .positive_skew = positive_skew,
231 .negative_skew = negative_skew,
232 .least_significant_wnaf_fragment = stagger_fragment,
233 .has_wnaf_fragment = (stagger > 0) };
// Fragment: endomorphism-split WNAF — decomposes `scalar` into two 129-bit halves
// (klo, khi), computes a staggered WNAF for each, and constrains the recombination
// khi * (-lambda) + klo to equal the input. NOTE(review): non-contiguous lines;
// comments only, code byte-identical.
328 const Fr& scalar,
const bool range_constrain_wnaf)
// Each endomorphism half fits in 129 bits.
358 constexpr size_t num_bits = 129;
368 bool klo_negative =
false;
369 bool khi_negative =
false;
// Staggered WNAF for each half. NOTE(review): the trailing true/false flag differs
// per half — its meaning is not visible in this fragment; confirm in full source.
386 const auto [klo_reconstructed, klo_out] =
388 builder, klo, lo_stagger, klo_negative, range_constrain_wnaf,
true);
390 const auto [khi_reconstructed, khi_out] =
392 builder, khi, hi_stagger, khi_negative, range_constrain_wnaf,
false);
// reconstructed = khi * minus_lambda + klo (madd = multiply-add).
397 Fr reconstructed_scalar = khi_reconstructed.madd(minus_lambda, { klo_reconstructed });
// Constant scalars: reduce eagerly so assert_equal compares canonical values.
403 if (scalar.is_constant()) {
404 reconstructed_scalar.self_reduce();
408 scalar.assert_equal(reconstructed_scalar,
"biggroup_nafs: reconstructed scalar does not match reduced input");
410 return { .klo = klo_out, .khi = khi_out };
// Fragment: binary NAF computation — converts `scalar` into a vector of in-circuit
// NAF bits plus a trailing skew entry, then constrains the recombination to equal
// the scalar. NOTE(review): non-contiguous lines; comments only, code byte-identical.
421 uint256_t scalar_multiplier = scalar_multiplier_512.
lo;
// Round count: full modulus bit length unless max_num_bits caps it (zero scalars
// also take the full-length path).
425 const size_t num_rounds = (max_num_bits == 0 || scalar_multiplier == 0) ?
Fr::modulus.
get_msb() + 1 : max_num_bits;
428 if (scalar_multiplier == 0) {
// NAF requires an odd scalar: record the skew (LSB == 0) and make the multiplier odd.
444 const bool skew_value = !scalar_multiplier.
get_bit(0);
445 scalar_multiplier +=
uint256_t(
static_cast<uint64_t
>(skew_value));
// The skew entry lives at index num_rounds; tag it with the input's origin tag.
449 naf_entries[num_rounds].set_origin_tag(scalar.get_origin_tag());
451 for (
size_t i = 0; i < num_rounds - 1; ++i) {
455 const bool next_entry = scalar_multiplier.
get_bit(i + 1);
459 naf_entries[num_rounds - i - 1].set_origin_tag(scalar.get_origin_tag());
465 naf_entries[0].set_origin_tag(scalar.get_origin_tag());
// Non-composite Fr: accumulate the NAF bits directly in-field, most-significant
// first, then subtract the skew bit and check equality with the input.
468 if constexpr (!Fr::is_composite) {
469 std::vector<Fr> accumulators;
470 for (
size_t i = 0; i < num_rounds; ++i) {
472 Fr entry(naf_entries[num_rounds - i - 1]);
476 accumulators.emplace_back(entry);
478 accumulators.emplace_back(-
Fr(naf_entries[num_rounds]));
479 Fr accumulator_result = Fr::accumulate(accumulators);
480 scalar.assert_equal(accumulator_result);
// Composite Fr: reconstruct in two limb-sized halves. Per bit, the negative
// accumulator gains the bit value and the positive accumulator gains (1 - bit),
// each doubling first (shift-and-add).
482 const auto reconstruct_half_naf = [](
bool_ct* nafs,
const size_t half_round_length) {
485 for (
size_t i = 0; i < half_round_length; ++i) {
486 negative_accumulator = negative_accumulator + negative_accumulator +
field_ct(nafs[i]);
487 positive_accumulator = positive_accumulator + positive_accumulator +
field_ct(1) -
field_ct(nafs[i]);
489 return std::make_pair(positive_accumulator, negative_accumulator);
// Split at 2 * NUM_LIMB_BITS when the NAF is longer than the low half can hold;
// otherwise everything fits in the low accumulators.
495 if (num_rounds > Fr::NUM_LIMB_BITS * 2) {
496 const size_t midpoint = num_rounds - (Fr::NUM_LIMB_BITS * 2);
497 hi_accumulators = reconstruct_half_naf(&naf_entries[0], midpoint);
498 lo_accumulators = reconstruct_half_naf(&naf_entries[midpoint], num_rounds - midpoint);
505 lo_accumulators = reconstruct_half_naf(&naf_entries[0], num_rounds);
// Fold the skew into the low negative accumulator; on overflow (has_overflow) the
// low half is zeroed out and the carry moves into the high half instead.
512 field_ct lo_neg_with_skew = lo_accumulators.second +
field_ct(naf_entries[num_rounds]);
521 lo_accumulators.second = lo_neg_with_skew *
field_ct(!has_overflow);
522 hi_accumulators.second = hi_accumulators.second +
field_ct(has_overflow);
// Assemble the two halves into bigfield values and check positive - negative
// equals the input scalar (constants reduced first for canonical comparison).
524 Fr reconstructed_positive =
Fr(lo_accumulators.first, hi_accumulators.first);
525 Fr reconstructed_negative =
Fr(lo_accumulators.second, hi_accumulators.second);
526 Fr accumulator = reconstructed_positive - reconstructed_negative;
532 if (scalar.is_constant()) {
533 accumulator.self_reduce();
537 accumulator.assert_equal(scalar);
// Propagate the input scalar's origin tag to every NAF entry.
541 const auto original_tag = scalar.get_origin_tag();
542 for (
auto& naf_entry : naf_entries) {
543 naf_entry.set_origin_tag(original_tag);
void fixed_wnaf(const uint64_t *scalar, uint64_t *wnaf, bool &skew_map, const uint64_t point_index, const uint64_t num_points, const size_t wnaf_bits) noexcept
Computes the fixed-length windowed non-adjacent form (WNAF) representation of a scalar, writing the WNAF digits for one point into `wnaf` and its skew flag into `skew_map`, for use in windowed scalar multiplication.