Barretenberg
The ZK-SNARK library at the core of Aztec
Loading...
Searching...
No Matches
scalar_multiplication.cpp
Go to the documentation of this file.
1// === AUDIT STATUS ===
2// internal: { status: Planned, auditors: [Sergei], commit: }
3// external_1: { status: not started, auditors: [], commit: }
4// external_2: { status: not started, auditors: [], commit: }
5// =====================
9
10#include "./process_buckets.hpp"
18
21
23
24// Naive double-and-add fallback for small inputs (< PIPPENGER_THRESHOLD points).
25template <typename Curve> typename Curve::Element small_mul(const typename MSM<Curve>::MSMData& msm_data) noexcept
26{
27 const auto& scalars = msm_data.scalars;
28 const auto& points = msm_data.points;
29 const auto& scalar_indices = msm_data.scalar_indices;
30 const size_t range = scalar_indices.size();
31
32 typename Curve::Element r = Curve::Group::point_at_infinity;
33 for (size_t i = 0; i < range; ++i) {
34 typename Curve::Element f = points[scalar_indices[i]];
35 r += f * scalars[scalar_indices[i]].to_montgomery_form();
36 }
37 return r;
38}
39
/**
 * @brief Convert scalars out of Montgomery form and collect the indices of all nonzero scalars.
 *
 * Zero scalars contribute nothing to an MSM, so downstream stages iterate only over
 * `nonzero_scalar_indices`. Scalars are mutated in place (converted out of Montgomery form);
 * the caller is responsible for converting them back once the MSM completes.
 */
template <typename Curve>
                                                                     std::vector<uint32_t>& nonzero_scalar_indices) noexcept
{

    // Pass 1: Each thread converts from Montgomery and collects nonzero indices into its own vector
    parallel_for([&](const ThreadChunk& chunk) {
        BB_ASSERT_EQ(chunk.total_threads, thread_indices.size());
        auto range = chunk.range(scalars.size());
        if (range.empty()) {
            return;
        }
        std::vector<uint32_t>& thread_scalar_indices = thread_indices[chunk.thread_index];
        // Worst case: every scalar in this chunk is nonzero.
        thread_scalar_indices.reserve(range.size());
        for (size_t i : range) {
            BB_ASSERT_DEBUG(i < scalars.size());
            auto& scalar = scalars[i];
            // In-place conversion out of Montgomery form; restored by the caller after the MSM.
            scalar.self_from_montgomery_form_reduced();

            // Zero scalars are skipped entirely; they cannot affect the result.
            if (!scalar.is_zero()) {
                thread_scalar_indices.push_back(static_cast<uint32_t>(i));
            }
        }
    });

    // Total number of nonzero scalars found across all threads.
    size_t num_entries = 0;
    for (const auto& indices : thread_indices) {
        num_entries += indices.size();
    }
    nonzero_scalar_indices.resize(num_entries);

    // Pass 2: Copy each thread's indices to the output vector (no branching)
    parallel_for([&](const ThreadChunk& chunk) {
        BB_ASSERT_EQ(chunk.total_threads, thread_indices.size());
        // Output offset for this thread = combined size of all preceding threads' vectors.
        size_t offset = 0;
        for (size_t i = 0; i < chunk.thread_index; ++i) {
            offset += thread_indices[i].size();
        }
        for (size_t i = offset; i < offset + thread_indices[chunk.thread_index].size(); ++i) {
            nonzero_scalar_indices[i] = thread_indices[chunk.thread_index][i - offset];
        }
    });
}
84
/**
 * @brief Distribute multiple MSMs across threads with balanced point counts.
 *
 * Work is measured in nonzero scalar indices. The combined work of all MSMs is divided
 * into roughly equal per-thread shares; a single MSM may be split across several threads,
 * each receiving an MSMWorkUnit describing its sub-range.
 *
 * @param scalars One scalar span per MSM. As a side effect, scalars are converted out of
 *        Montgomery form by transform_scalar_and_get_nonzero_scalar_indices.
 * @param msm_scalar_indices Output: per-MSM indices of the nonzero scalars.
 * @return One vector of MSMWorkUnit per thread (size == get_num_cpus()).
 */
template <typename Curve>
    std::span<std::span<ScalarField>> scalars, std::vector<std::vector<uint32_t>>& msm_scalar_indices) noexcept
{
    const size_t num_msms = scalars.size();
    msm_scalar_indices.resize(num_msms);
    for (size_t i = 0; i < num_msms; ++i) {
        transform_scalar_and_get_nonzero_scalar_indices(scalars[i], msm_scalar_indices[i]);
    }

    // Total work = number of nonzero scalars summed over all MSMs.
    size_t total_work = 0;
    for (const auto& indices : msm_scalar_indices) {
        total_work += indices.size();
    }

    const size_t num_threads = get_num_cpus();
    std::vector<ThreadWorkUnits> work_units(num_threads);

    const size_t work_per_thread = numeric::ceil_div(total_work, num_threads);
    // The last thread takes whatever remains after (num_threads - 1) full shares.
    const size_t work_of_last_thread = total_work - (work_per_thread * (num_threads - 1));

    // Only use a single work unit if we don't have enough work for every thread
    if (num_threads > total_work) {
        for (size_t i = 0; i < num_msms; ++i) {
            work_units[0].push_back(MSMWorkUnit{
                .batch_msm_index = i,
                .start_index = 0,
                .size = msm_scalar_indices[i].size(),
            });
        }
        return work_units;
    }

    // Greedily pack each MSM into threads; an MSM that exceeds the current thread's
    // remaining budget is split into multiple work units.
    size_t thread_accumulated_work = 0;
    size_t current_thread_idx = 0;
    for (size_t i = 0; i < num_msms; ++i) {
        size_t msm_work_remaining = msm_scalar_indices[i].size();
        const size_t initial_msm_work = msm_work_remaining;

        while (msm_work_remaining > 0) {
            BB_ASSERT_LT(current_thread_idx, work_units.size());

            const size_t total_thread_work =
                (current_thread_idx == num_threads - 1) ? work_of_last_thread : work_per_thread;
            const size_t available_thread_work = total_thread_work - thread_accumulated_work;
            const size_t work_to_assign = std::min(available_thread_work, msm_work_remaining);

            work_units[current_thread_idx].push_back(MSMWorkUnit{
                .batch_msm_index = i,
                .start_index = initial_msm_work - msm_work_remaining,
                .size = work_to_assign,
            });

            thread_accumulated_work += work_to_assign;
            msm_work_remaining -= work_to_assign;

            // Move to next thread if current thread is full
            if (thread_accumulated_work >= total_thread_work) {
                current_thread_idx++;
                thread_accumulated_work = 0;
            }
        }
    }
    return work_units;
}
150
166template <typename Curve>
167uint32_t MSM<Curve>::get_scalar_slice(const typename Curve::ScalarField& scalar,
168 size_t round,
169 size_t slice_size) noexcept
170{
171 constexpr size_t LIMB_BITS = 64;
172
173 size_t hi_bit = NUM_BITS_IN_FIELD - (round * slice_size);
174 size_t lo_bit = (hi_bit < slice_size) ? 0 : hi_bit - slice_size;
175
176 BB_ASSERT_DEBUG(lo_bit < hi_bit);
177 BB_ASSERT_DEBUG(hi_bit <= NUM_BITS_IN_FIELD); // Ensures hi_bit < 256, so end_limb <= 3
178
179 size_t start_limb = lo_bit / LIMB_BITS;
180 size_t end_limb = hi_bit / LIMB_BITS;
181 size_t lo_slice_offset = lo_bit & (LIMB_BITS - 1);
182 size_t actual_slice_size = hi_bit - lo_bit;
183 size_t lo_slice_bits =
184 (LIMB_BITS - lo_slice_offset < actual_slice_size) ? (LIMB_BITS - lo_slice_offset) : actual_slice_size;
185 size_t hi_slice_bits = actual_slice_size - lo_slice_bits;
186
187 uint64_t lo_slice = (scalar.data[start_limb] >> lo_slice_offset) & ((1ULL << lo_slice_bits) - 1);
188 uint64_t hi_slice = (start_limb != end_limb) ? (scalar.data[end_limb] & ((1ULL << hi_slice_bits) - 1)) : 0;
189
190 return static_cast<uint32_t>(lo_slice | (hi_slice << lo_slice_bits));
191}
192
193template <typename Curve> uint32_t MSM<Curve>::get_optimal_log_num_buckets(const size_t num_points) noexcept
194{
195 // Cost model: total_cost = num_rounds * (num_points + num_buckets * BUCKET_ACCUMULATION_COST)
196 auto compute_cost = [&](uint32_t bits) {
197 size_t rounds = numeric::ceil_div(NUM_BITS_IN_FIELD, static_cast<size_t>(bits));
198 size_t buckets = size_t{ 1 } << bits;
199 return rounds * (num_points + buckets * BUCKET_ACCUMULATION_COST);
200 };
201
202 uint32_t best_bits = 1;
203 size_t best_cost = compute_cost(1);
204 for (uint32_t bits = 2; bits < MAX_SLICE_BITS; ++bits) {
205 size_t cost = compute_cost(bits);
206 if (cost < best_cost) {
207 best_cost = cost;
208 best_bits = bits;
209 }
210 }
211 return best_bits;
212}
213
214template <typename Curve> bool MSM<Curve>::use_affine_trick(const size_t num_points, const size_t num_buckets) noexcept
215{
216 if (num_points < AFFINE_TRICK_THRESHOLD) {
217 return false;
218 }
219
220 // Affine trick requires log(N) modular inversions per Pippenger round.
221 // It saves num_points * AFFINE_TRICK_SAVINGS_PER_OP field muls, plus
222 // num_buckets * JACOBIAN_Z_NOT_ONE_PENALTY field muls (buckets have Z=1 with affine trick)
223
224 // Cost of modular inversion via exponentiation:
225 // - NUM_BITS_IN_FIELD squarings
226 // - (NUM_BITS_IN_FIELD + 3) / 4 multiplications (4-bit windows)
227 // - INVERSION_TABLE_COST multiplications for lookup table
228 constexpr size_t COST_OF_INVERSION = NUM_BITS_IN_FIELD + ((NUM_BITS_IN_FIELD + 3) / 4) + INVERSION_TABLE_COST;
229
230 double log2_num_points = log2(static_cast<double>(num_points));
231 size_t savings_per_round = (num_points * AFFINE_TRICK_SAVINGS_PER_OP) + (num_buckets * JACOBIAN_Z_NOT_ONE_PENALTY);
232 double inversion_cost_per_round = log2_num_points * static_cast<double>(COST_OF_INVERSION);
233
234 return static_cast<double>(savings_per_round) > inversion_cost_per_round;
235}
236
/**
 * @brief Batch-add independent affine point pairs using Montgomery's batch inversion trick.
 *
 * Delegates to batch_affine_add_interleaved, which performs the additions with a single
 * shared inversion. Results are written back into the top half of `points` (see the
 * consumers in batch_accumulate_points_into_buckets).
 *
 * @param points In/out buffer of point pairs to add.
 * @param num_points Number of points in the buffer (pairs = num_points / 2).
 * @param scratch_space Field-element scratch buffer used for the batched inversion.
 */
template <typename Curve>
                                   const size_t num_points,
                                   typename Curve::BaseField* scratch_space) noexcept
{
    using AffineElement = typename Curve::AffineElement;
    using BaseField = typename Curve::BaseField;

    // Pippenger-specific interleaved batch add with direct prefetch and no aliasing overhead.
    // The generic batch_affine_add_impl suffers from aliasing (lhs_base == rhs_base) causing
    // the compiler to reload lhs coordinates after writing output. This version avoids that.
    bb::group_elements::batch_affine_add_interleaved<AffineElement, BaseField>(points, num_points, scratch_space);
}
250
/**
 * @brief Pippenger using Jacobian bucket accumulation (safe: handles doubling and infinity).
 *
 * Scalars in `msm_data` are assumed to already be out of Montgomery form. Each round slices
 * `bits_per_slice` bits off every scalar (most significant bits first), accumulates points
 * into the corresponding buckets, reduces the buckets, and folds the result into the running
 * total via repeated doubling.
 *
 * @param msm_data Scalars, points, and nonzero scalar indices for this MSM.
 * @return The MSM result as a Jacobian element.
 */
template <typename Curve>
{
    const size_t size = msm_data.scalar_indices.size();
    const uint32_t bits_per_slice = get_optimal_log_num_buckets(size);
    const size_t num_buckets = size_t{ 1 } << bits_per_slice;
    const uint32_t num_rounds = static_cast<uint32_t>((NUM_BITS_IN_FIELD + bits_per_slice - 1) / bits_per_slice);
    // Width of the final round's slice when the field width is not a multiple of bits_per_slice.
    const uint32_t remainder = NUM_BITS_IN_FIELD % bits_per_slice;

    JacobianBucketAccumulators bucket_data(num_buckets);
    Element msm_result = Curve::Group::point_at_infinity;

    for (uint32_t round = 0; round < num_rounds; ++round) {
        // Populate buckets using Jacobian accumulation
        for (size_t i = 0; i < size; ++i) {
            uint32_t idx = msm_data.scalar_indices[i];
            uint32_t bucket = get_scalar_slice(msm_data.scalars[idx], round, bits_per_slice);
            // Bucket 0 contributes nothing (slice value zero), so it is skipped.
            if (bucket > 0) {
                // Track which buckets are initialized so we can assign on first touch
                // instead of adding to an uninitialized bucket.
                if (bucket_data.bucket_exists.get(bucket)) {
                    bucket_data.buckets[bucket] += msm_data.points[idx];
                } else {
                    bucket_data.buckets[bucket] = msm_data.points[idx];
                    bucket_data.bucket_exists.set(bucket, true);
                }
            }
        }

        // Reduce buckets and accumulate into result
        Element bucket_result = accumulate_buckets(bucket_data);
        bucket_data.bucket_exists.clear();

        // Shift the running total left by this round's slice width before adding.
        uint32_t num_doublings = (round == num_rounds - 1 && remainder != 0) ? remainder : bits_per_slice;
        for (uint32_t i = 0; i < num_doublings; ++i) {
            msm_result.self_dbl();
        }
        msm_result += bucket_result;
    }
    return msm_result;
}
290
/**
 * @brief Pippenger using affine buckets with batched inversion (fast path, no edge-case handling).
 *
 * Falls back to the Jacobian variant when the affine trick's modeled savings do not cover its
 * inversion overhead. Each round builds a (point index, bucket index) schedule, sorts it by
 * bucket, batch-accumulates points into affine buckets, then folds the reduced buckets into
 * the running result.
 *
 * @param msm_data Scalars (out of Montgomery form), points, scalar indices, and a
 *        point-schedule buffer sized to the number of nonzero scalars.
 * @return The MSM result as a Jacobian element.
 */
template <typename Curve>
{
    const size_t num_points = msm_data.scalar_indices.size();
    const uint32_t bits_per_slice = get_optimal_log_num_buckets(num_points);
    const size_t num_buckets = size_t{ 1 } << bits_per_slice;

    // Not profitable? Use the safe Jacobian implementation instead.
    if (!use_affine_trick(num_points, num_buckets)) {
        return jacobian_pippenger_with_transformed_scalars(msm_data);
    }

    const uint32_t num_rounds = static_cast<uint32_t>((NUM_BITS_IN_FIELD + bits_per_slice - 1) / bits_per_slice);
    // Width of the final round's slice when the field width is not a multiple of bits_per_slice.
    const uint32_t remainder = NUM_BITS_IN_FIELD % bits_per_slice;

    // Per-call allocation for WASM compatibility (thread_local causes issues in WASM)
    AffineAdditionData affine_data;
    BucketAccumulators bucket_data(num_buckets);

    Element msm_result = Curve::Group::point_at_infinity;

    for (uint32_t round = 0; round < num_rounds; ++round) {
        // Build point schedule for this round
        {
            for (size_t i = 0; i < num_points; ++i) {
                uint32_t idx = msm_data.scalar_indices[i];
                uint32_t bucket_idx = get_scalar_slice(msm_data.scalars[idx], round, bits_per_slice);
                // Packed entry: (point_index << 32) | bucket_index.
                msm_data.point_schedule[i] = PointScheduleEntry::create(idx, bucket_idx).data;
            }
        }

        // Sort by bucket and count zero-bucket entries
        size_t num_zero_bucket_entries =
            sort_point_schedule_and_count_zero_buckets(&msm_data.point_schedule[0], num_points, bits_per_slice);
        // Zero-bucket entries sort to the front and are excluded from accumulation.
        size_t round_size = num_points - num_zero_bucket_entries;

        // Accumulate points into buckets
        Element bucket_result = Curve::Group::point_at_infinity;
        if (round_size > 0) {
            std::span<uint64_t> schedule(&msm_data.point_schedule[num_zero_bucket_entries], round_size);
            batch_accumulate_points_into_buckets(schedule, msm_data.points, affine_data, bucket_data);
            bucket_result = accumulate_buckets(bucket_data);
            bucket_data.bucket_exists.clear();
        }

        // Combine into running result: shift left by this round's slice width, then add.
        uint32_t num_doublings = (round == num_rounds - 1 && remainder != 0) ? remainder : bits_per_slice;
        for (uint32_t i = 0; i < num_doublings; ++i) {
            msm_result.self_dbl();
        }
        msm_result += bucket_result;
    }

    return msm_result;
}
345
/**
 * @brief Process a bucket-sorted point schedule into bucket accumulators using batched
 *        affine additions.
 *
 * Pairs of consecutive schedule entries are staged into scratch space, added with a single
 * batched inversion (add_affine_points), and the addition outputs are recirculated back
 * through the same pairing logic until every point has been folded into its bucket.
 *
 * @param point_schedule Bucket-sorted packed (point index, bucket index) entries.
 * @param points The MSM input points referenced by the schedule.
 * @param affine_data Scratch space for staged additions and batch inversion.
 * @param bucket_data Affine bucket accumulators receiving the results.
 */
template <typename Curve>
                                                   MSM<Curve>::BucketAccumulators& bucket_data) noexcept
{

    if (point_schedule.empty()) {
        return;
    }

    size_t point_it = 0;
    size_t scratch_it = 0;
    const size_t num_points = point_schedule.size();
    // Stop prefetching near the end of the schedule to avoid reading past it.
    const size_t prefetch_max = (num_points >= PREFETCH_LOOKAHEAD) ? (num_points - PREFETCH_LOOKAHEAD) : 0;
    const size_t last_index = num_points - 1;

    // Iterative loop - continues until all points processed and no work remains in scratch space
    while (point_it < num_points || scratch_it != 0) {
        // Step 1: Fill scratch space with up to BATCH_SIZE/2 independent additions
        while (((scratch_it + 1) < AffineAdditionData::BATCH_SIZE) && (point_it < last_index)) {
            // Prefetch points we'll need soon (every PREFETCH_INTERVAL iterations)
            if ((point_it < prefetch_max) && ((point_it & PREFETCH_INTERVAL_MASK) == 0)) {
                for (size_t i = PREFETCH_LOOKAHEAD / 2; i < PREFETCH_LOOKAHEAD; ++i) {
                    PointScheduleEntry entry{ point_schedule[point_it + i] };
                    __builtin_prefetch(&points[entry.point_index()]);
                }
            }

            PointScheduleEntry lhs{ point_schedule[point_it] };
            PointScheduleEntry rhs{ point_schedule[point_it + 1] };

            // NOTE(review): process_bucket_pair is expected to advance scratch_it/point_it
            // via the reference parameters — confirm against its definition.
            process_bucket_pair(lhs.bucket_index(),
                                rhs.bucket_index(),
                                &points[lhs.point_index()],
                                &points[rhs.point_index()],
                                affine_data,
                                bucket_data,
                                scratch_it,
                                point_it);
        }

        // Handle the last point (odd count case) - separate to avoid bounds check on point_schedule[point_it + 1]
        if (point_it == last_index) {
            PointScheduleEntry last{ point_schedule[point_it] };
            process_single_point(
                last.bucket_index(), &points[last.point_index()], affine_data, bucket_data, scratch_it, point_it);
        }

        // Compute independent additions using Montgomery's batch inversion trick
        size_t num_points_to_add = scratch_it;
        if (num_points_to_add >= 2) {
            add_affine_points(
                affine_data.points_to_add.data(), num_points_to_add, affine_data.inversion_scratch_space.data());
        }

        // add_affine_points stores results in the top-half of scratch space
        AffineElement* affine_output = affine_data.points_to_add.data() + (num_points_to_add / 2);

        // Recirculate addition outputs back into scratch space or bucket accumulators
        size_t new_scratch_it = 0;
        size_t output_it = 0;
        size_t num_outputs = num_points_to_add / 2;

        while ((num_outputs > 1) && (output_it + 1 < num_outputs)) {
            uint32_t lhs_bucket = affine_data.addition_result_bucket_destinations[output_it];
            uint32_t rhs_bucket = affine_data.addition_result_bucket_destinations[output_it + 1];

            process_bucket_pair(lhs_bucket,
                                rhs_bucket,
                                &affine_output[output_it],
                                &affine_output[output_it + 1],
                                affine_data,
                                bucket_data,
                                new_scratch_it,
                                output_it);
        }

        // Handle the last output (odd count case)
        if (num_outputs > 0 && output_it == num_outputs - 1) {
            uint32_t bucket = affine_data.addition_result_bucket_destinations[output_it];
            process_single_point(
                bucket, &affine_output[output_it], affine_data, bucket_data, new_scratch_it, output_it);
        }

        // Continue with recirculated points
        scratch_it = new_scratch_it;
    }
}
435
/**
 * @brief Compute multiple MSMs in parallel with balanced work distribution across threads.
 *
 * Scalars are converted out of Montgomery form for the computation and converted back
 * before returning, so they are unchanged from the caller's perspective.
 *
 * @param points One point span per MSM.
 * @param scalars One scalar span per MSM (must match points.size()).
 * @param handle_edge_cases If true, use the safe Jacobian Pippenger (tolerates repeated
 *        points / infinity); otherwise use the faster affine variant.
 * @return One affine MSM result per input MSM.
 */
template <typename Curve>
                               std::span<std::span<ScalarField>> scalars,
                               bool handle_edge_cases) noexcept
{
    BB_ASSERT_EQ(points.size(), scalars.size());
    const size_t num_msms = points.size();

    std::vector<std::vector<uint32_t>> msm_scalar_indices;
    std::vector<ThreadWorkUnits> thread_work_units = get_work_units(scalars, msm_scalar_indices);
    const size_t num_cpus = get_num_cpus();
    // Each thread records (partial result, msm index) pairs for later aggregation.
    std::vector<std::vector<std::pair<Element, size_t>>> thread_msm_results(num_cpus);
    BB_ASSERT_EQ(thread_work_units.size(), num_cpus);

    // Select Pippenger implementation once (hoisting branch outside hot loop)
    // Jacobian: safe, handles edge cases | Affine: faster, assumes linearly independent points
    auto pippenger_impl =
        handle_edge_cases ? jacobian_pippenger_with_transformed_scalars : affine_pippenger_with_transformed_scalars;

    // Once we have our work units, each thread can independently evaluate its assigned msms
    parallel_for(num_cpus, [&](size_t thread_idx) {
        if (!thread_work_units[thread_idx].empty()) {
            const std::vector<MSMWorkUnit>& msms = thread_work_units[thread_idx];
            std::vector<std::pair<Element, size_t>>& msm_results = thread_msm_results[thread_idx];
            msm_results.reserve(msms.size());

            // Point schedule buffer for this thread - avoids per-work-unit heap allocation
            std::vector<uint64_t> point_schedule_buffer;

            for (const MSMWorkUnit& msm : msms) {
                point_schedule_buffer.resize(msm.size);
                MSMData msm_data =
                    MSMData::from_work_unit(scalars, points, msm_scalar_indices, point_schedule_buffer, msm);
                // Tiny inputs skip Pippenger entirely and use naive double-and-add.
                Element msm_result =
                    (msm.size < PIPPENGER_THRESHOLD) ? small_mul<Curve>(msm_data) : pippenger_impl(msm_data);

                msm_results.emplace_back(msm_result, msm.batch_msm_index);
            }
        }
    });

    // Accumulate results. This part needs to be single threaded, but amount of work done here should be small
    // TODO(@zac-williamson) check this? E.g. if we are doing a 2^16 MSM with 256 threads this single-threaded part
    // will be painful.
    std::vector<Element> results(num_msms, Curve::Group::point_at_infinity);
    for (const auto& single_thread_msm_results : thread_msm_results) {
        for (const auto& [element, index] : single_thread_msm_results) {
            results[index] += element;
        }
    }
    Element::batch_normalize(results.data(), num_msms);

    // Convert scalars back TO Montgomery form so they remain unchanged from caller's perspective
    for (auto& scalar_span : scalars) {
        parallel_for_range(scalar_span.size(), [&](size_t start, size_t end) {
            for (size_t i = start; i < end; ++i) {
                scalar_span[i].self_to_montgomery_form();
            }
        });
    }

    return std::vector<AffineElement>(results.begin(), results.end());
}
500
/**
 * @brief Compute a single MSM by wrapping it as a size-1 batch and delegating to
 *        batch_multi_scalar_mul.
 *
 * Returns the point at infinity for an empty scalar span. Scalars are unchanged from the
 * caller's perspective (converted out of and back into Montgomery form internally).
 */
template <typename Curve>
                         bool handle_edge_cases) noexcept
{
    if (scalars.size() == 0) {
        return Curve::Group::affine_point_at_infinity;
    }
    const size_t num_scalars = scalars.size();
    // The point span must cover the scalar polynomial's offset range.
    BB_ASSERT_GTE(points.size(), scalars.start_index + num_scalars);

    // const_cast is safe: we convert from Montgomery, compute, then convert back.
    // Scalars are unchanged from the caller's perspective.
    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
    ScalarField* scalar_ptr = const_cast<ScalarField*>(&scalars[scalars.start_index]);
    std::span<ScalarField> scalar_span(scalar_ptr, num_scalars);

    // Wrap into a size-1 batch and delegate to the general method that properly handles multi-threading
    std::array<std::span<const AffineElement>, 1> points_batch{ points.subspan(scalars.start_index) };
    std::array<std::span<ScalarField>, 1> scalars_batch{ scalar_span };

    auto results = batch_multi_scalar_mul(std::span(points_batch), std::span(scalars_batch), handle_edge_cases);
    return results[0];
}
525
/**
 * @brief Free-function MSM wrapper; forwards directly to MSM<Curve>::msm.
 */
template <typename Curve>
                                [[maybe_unused]] bool handle_edge_cases) noexcept
{
    return MSM<Curve>::msm(points, scalars, handle_edge_cases);
}
533
534template <typename Curve>
540
543 bool handle_edge_cases = true) noexcept;
544
545template curve::Grumpkin::Element pippenger_unsafe<curve::Grumpkin>(
546 PolynomialSpan<const curve::Grumpkin::ScalarField> scalars, std::span<const curve::Grumpkin::AffineElement> points);
547
548template curve::BN254::Element pippenger<curve::BN254>(PolynomialSpan<const curve::BN254::ScalarField> scalars,
549 std::span<const curve::BN254::AffineElement> points,
550 bool handle_edge_cases = true);
551
552template curve::BN254::Element pippenger_unsafe<curve::BN254>(PolynomialSpan<const curve::BN254::ScalarField> scalars,
553 std::span<const curve::BN254::AffineElement> points);
554
555} // namespace bb::scalar_multiplication
556
557template class bb::scalar_multiplication::MSM<bb::curve::Grumpkin>;
558template class bb::scalar_multiplication::MSM<bb::curve::BN254>;
#define BB_ASSERT_GTE(left, right,...)
Definition assert.hpp:128
#define BB_ASSERT_DEBUG(expression,...)
Definition assert.hpp:55
#define BB_ASSERT_EQ(actual, expected,...)
Definition assert.hpp:83
#define BB_ASSERT_LT(left, right,...)
Definition assert.hpp:143
BB_INLINE bool get(size_t index) const noexcept
Definition bitvector.hpp:42
BB_INLINE void set(size_t index, bool value) noexcept
Definition bitvector.hpp:28
void clear()
Definition bitvector.hpp:50
typename Group::element Element
Definition grumpkin.hpp:64
typename Group::affine_element AffineElement
Definition grumpkin.hpp:65
typename Curve::BaseField BaseField
static bool use_affine_trick(size_t num_points, size_t num_buckets) noexcept
Decide if batch inversion saves work vs Jacobian additions.
static Element jacobian_pippenger_with_transformed_scalars(MSMData &msm_data) noexcept
Pippenger using Jacobian buckets (handles edge cases: doubling, infinity)
static uint32_t get_scalar_slice(const ScalarField &scalar, size_t round, size_t slice_size) noexcept
Extract c-bit slice from scalar for bucket index computation.
static Element affine_pippenger_with_transformed_scalars(MSMData &msm_data) noexcept
Pippenger using affine buckets with batch inversion (faster, no edge case handling)
static void add_affine_points(AffineElement *points, const size_t num_points, typename Curve::BaseField *scratch_space) noexcept
Batch add n/2 independent point pairs using Montgomery's trick.
static std::vector< ThreadWorkUnits > get_work_units(std::span< std::span< ScalarField > > scalars, std::vector< std::vector< uint32_t > > &msm_scalar_indices) noexcept
Distribute multiple MSMs across threads with balanced point counts.
static uint32_t get_optimal_log_num_buckets(size_t num_points) noexcept
Compute optimal bits per slice by minimizing cost over c in [1, MAX_SLICE_BITS)
static std::vector< AffineElement > batch_multi_scalar_mul(std::span< std::span< const AffineElement > > points, std::span< std::span< ScalarField > > scalars, bool handle_edge_cases=true) noexcept
Compute multiple MSMs in parallel with work balancing.
static void batch_accumulate_points_into_buckets(std::span< const uint64_t > point_schedule, std::span< const AffineElement > points, AffineAdditionData &affine_data, BucketAccumulators &bucket_data) noexcept
Process sorted point schedule into bucket accumulators using batched affine additions.
typename Curve::ScalarField ScalarField
typename Curve::AffineElement AffineElement
static void transform_scalar_and_get_nonzero_scalar_indices(std::span< ScalarField > scalars, std::vector< uint32_t > &nonzero_scalar_indices) noexcept
Convert scalars from Montgomery form and collect indices of nonzero scalars.
bb::curve::BN254::Element Element
ssize_t offset
Definition engine.cpp:52
constexpr T ceil_div(const T &numerator, const T &denominator)
Computes the ceiling of the division of two integral types.
Definition general.hpp:23
Curve::Element small_mul(const typename MSM< Curve >::MSMData &msm_data) noexcept
Curve::Element pippenger(PolynomialSpan< const typename Curve::ScalarField > scalars, std::span< const typename Curve::AffineElement > points, bool handle_edge_cases) noexcept
Safe MSM wrapper (defaults to handle_edge_cases=true)
size_t sort_point_schedule_and_count_zero_buckets(uint64_t *point_schedule, const size_t num_entries, const uint32_t bucket_index_bits) noexcept
Sort point schedule by bucket index and count zero-bucket entries.
Curve::Element pippenger_unsafe(PolynomialSpan< const typename Curve::ScalarField > scalars, std::span< const typename Curve::AffineElement > points) noexcept
Fast MSM wrapper for linearly independent points (no edge case handling)
Entry point for Barretenberg command-line interface.
Definition api.hpp:5
size_t get_num_cpus()
Definition thread.cpp:33
void parallel_for(size_t num_iterations, const std::function< void(size_t)> &func)
Definition thread.cpp:111
void parallel_for_range(size_t num_points, const std::function< void(size_t, size_t)> &func, size_t no_multhreading_if_less_or_equal)
Split a loop into several loops running in parallel.
Definition thread.cpp:141
STL namespace.
constexpr decltype(auto) get(::tuplet::tuple< T... > &&t) noexcept
Definition tuple.hpp:13
size_t total_threads
Definition thread.hpp:151
size_t thread_index
Definition thread.hpp:150
auto range(size_t size, size_t offset=0) const
Definition thread.hpp:152
Scratch space for batched affine point additions (one per thread)
Affine bucket accumulators for the fast affine-trick Pippenger variant.
Jacobian bucket accumulators for the safe Pippenger variant.
Container for MSM input data passed between algorithm stages.
MSMWorkUnit describes an MSM that may be part of a larger MSM.
Packed point schedule entry: (point_index << 32) | bucket_index.