// small_mul (fragment): schoolbook MSM used when the input is below the
// Pippenger threshold — accumulates f * scalar for each nonzero-scalar index.
// NOTE(review): this extraction is missing the enclosing signature and
// original lines 31-32/34; `r` and `f` are introduced in the missing lines
// (presumably f is built from points[scalar_indices[i]] — confirm against the
// full source).
27 const auto& scalars = msm_data.scalars;
28 const auto& points = msm_data.points;
29 const auto& scalar_indices = msm_data.scalar_indices;
30 const size_t range = scalar_indices.size();
33 for (
size_t i = 0; i < range; ++i) {
// Scalars were taken out of Montgomery form earlier in the pipeline; convert
// back before multiplying.
35 r += f * scalars[scalar_indices[i]].to_montgomery_form();
40template <
typename Curve>
// transform_scalar_and_get_nonzero_scalar_indices (fragment): converts each
// scalar out of Montgomery form in-place and collects, per worker thread, the
// indices of the nonzero scalars; the per-thread vectors are then flattened
// into `nonzero_scalar_indices`. NOTE(review): the parallel-for wrapper and
// several interior lines are missing from this extraction.
42 std::vector<uint32_t>& nonzero_scalar_indices)
noexcept
// Each worker processes a contiguous chunk of the scalar span.
49 auto range = chunk.
range(scalars.size());
53 std::vector<uint32_t>& thread_scalar_indices = thread_indices[chunk.
thread_index];
// Upper bound: at most every scalar in the chunk is nonzero.
54 thread_scalar_indices.reserve(range.size());
55 for (
size_t i : range) {
57 auto& scalar = scalars[i];
// In-place conversion out of Montgomery form (reduced representation).
58 scalar.self_from_montgomery_form_reduced();
// Zero scalars contribute nothing to the MSM, so only keep nonzero indices.
60 if (!scalar.is_zero()) {
61 thread_scalar_indices.push_back(
static_cast<uint32_t
>(i));
// Flatten the per-thread index vectors into the single output vector.
66 size_t num_entries = 0;
67 for (
const auto& indices : thread_indices) {
68 num_entries += indices.size();
70 nonzero_scalar_indices.resize(num_entries);
// Running offset of each thread's slice inside the flattened output
// (copy loop lives in the missing lines 71-76).
77 offset += thread_indices[i].size();
85template <
typename Curve>
// get_work_units (fragment): distributes multiple MSMs across threads so that
// every thread receives approximately total_work / num_threads nonzero-scalar
// entries; a single MSM may be split across several threads as MSMWorkUnits.
// NOTE(review): several interior lines (work_per_thread computation, work_units
// allocation, parts of the small-input branch) are missing from this
// extraction.
87 std::span<
std::span<ScalarField>> scalars, std::vector<std::vector<uint32_t>>& msm_scalar_indices)
noexcept
89 const size_t num_msms = scalars.size();
90 msm_scalar_indices.resize(num_msms);
// First pass: normalize scalars and collect the nonzero indices per MSM.
91 for (
size_t i = 0; i < num_msms; ++i) {
92 transform_scalar_and_get_nonzero_scalar_indices(scalars[i], msm_scalar_indices[i]);
// Total work = total number of nonzero scalar entries across all MSMs.
95 size_t total_work = 0;
96 for (
const auto& indices : msm_scalar_indices) {
97 total_work += indices.size();
// Last thread absorbs the rounding remainder of the even split.
104 const size_t work_of_last_thread = total_work - (work_per_thread * (num_threads - 1));
// Degenerate case: fewer entries than threads — presumably one work unit per
// MSM (confirm against missing lines 109-117).
107 if (num_threads > total_work) {
108 for (
size_t i = 0; i < num_msms; ++i) {
112 .size = msm_scalar_indices[i].size(),
// Greedy assignment: walk the MSMs, carving off as much of each as fits in
// the current thread's remaining budget, spilling the rest to later threads.
118 size_t thread_accumulated_work = 0;
119 size_t current_thread_idx = 0;
120 for (
size_t i = 0; i < num_msms; ++i) {
121 size_t msm_work_remaining = msm_scalar_indices[i].size();
122 const size_t initial_msm_work = msm_work_remaining;
124 while (msm_work_remaining > 0) {
127 const size_t total_thread_work =
128 (current_thread_idx == num_threads - 1) ? work_of_last_thread : work_per_thread;
129 const size_t available_thread_work = total_thread_work - thread_accumulated_work;
130 const size_t work_to_assign = std::min(available_thread_work, msm_work_remaining);
132 work_units[current_thread_idx].push_back(
MSMWorkUnit{
// start_index is the offset already consumed within this MSM.
134 .start_index = initial_msm_work - msm_work_remaining,
135 .size = work_to_assign,
138 thread_accumulated_work += work_to_assign;
139 msm_work_remaining -= work_to_assign;
// Current thread is full — advance to the next one.
142 if (thread_accumulated_work >= total_thread_work) {
143 current_thread_idx++;
144 thread_accumulated_work = 0;
166template <
typename Curve>
// get_scalar_slice (fragment): extracts the `slice_size`-bit window of the
// scalar for the given Pippenger round, reading MSB-first (round 0 = topmost
// slice). NOTE(review): the signature line is missing from this extraction;
// the Doxygen index shows it as
//   static uint32_t get_scalar_slice(const ScalarField& scalar, size_t round,
169 size_t slice_size)
noexcept
171 constexpr size_t LIMB_BITS = 64;
// Bit range [lo_bit, hi_bit) of this round's slice; the final round may be
// narrower than slice_size when NUM_BITS_IN_FIELD % slice_size != 0.
173 size_t hi_bit = NUM_BITS_IN_FIELD - (round * slice_size);
174 size_t lo_bit = (hi_bit < slice_size) ? 0 : hi_bit - slice_size;
// The slice may straddle two 64-bit limbs: split it into a low part (from
// start_limb) and a high part (from end_limb).
179 size_t start_limb = lo_bit / LIMB_BITS;
180 size_t end_limb = hi_bit / LIMB_BITS;
181 size_t lo_slice_offset = lo_bit & (LIMB_BITS - 1);
182 size_t actual_slice_size = hi_bit - lo_bit;
183 size_t lo_slice_bits =
184 (LIMB_BITS - lo_slice_offset < actual_slice_size) ? (LIMB_BITS - lo_slice_offset) : actual_slice_size;
185 size_t hi_slice_bits = actual_slice_size - lo_slice_bits;
// NOTE(review): assumes slice_size < 64 so neither shift below is by 64 bits
// (which would be UB) — presumably guaranteed by get_optimal_log_num_buckets'
// MAX_SLICE_BITS bound; confirm.
187 uint64_t lo_slice = (scalar.data[start_limb] >> lo_slice_offset) & ((1ULL << lo_slice_bits) - 1);
188 uint64_t hi_slice = (start_limb != end_limb) ? (scalar.data[end_limb] & ((1ULL << hi_slice_bits) - 1)) : 0;
190 return static_cast<uint32_t
>(lo_slice | (hi_slice << lo_slice_bits));
// get_optimal_log_num_buckets (fragment): picks bits-per-slice `c` minimizing
// rounds * (num_points + 2^c * BUCKET_ACCUMULATION_COST) over c in
// [1, MAX_SLICE_BITS). NOTE(review): `rounds` is presumably recomputed from
// `bits` in the missing line 197, and the best_cost/best_bits update plus the
// return are in missing lines — confirm against the full source.
196 auto compute_cost = [&](uint32_t bits) {
198 size_t buckets =
size_t{ 1 } << bits;
// Cost model: per round, every point is added once plus a sweep over buckets.
199 return rounds * (num_points + buckets * BUCKET_ACCUMULATION_COST);
202 uint32_t best_bits = 1;
203 size_t best_cost = compute_cost(1);
204 for (uint32_t bits = 2; bits < MAX_SLICE_BITS; ++bits) {
205 size_t cost = compute_cost(bits);
206 if (cost < best_cost) {
// use_affine_trick (fragment): decides whether batched-affine addition with
// Montgomery batch inversion beats plain Jacobian bucket accumulation.
// NOTE(review): lines 217-227 (early-return for small inputs and constant
// definitions) are missing from this extraction.
216 if (num_points < AFFINE_TRICK_THRESHOLD) {
// Field inversion cost estimate: square-and-multiply bits plus windowed-table
// overhead.
228 constexpr size_t COST_OF_INVERSION = NUM_BITS_IN_FIELD + ((NUM_BITS_IN_FIELD + 3) / 4) + INVERSION_TABLE_COST;
230 double log2_num_points = log2(
static_cast<double>(num_points));
// Savings: cheaper affine adds per point plus avoiding Z != 1 Jacobian
// penalties per bucket.
231 size_t savings_per_round = (num_points * AFFINE_TRICK_SAVINGS_PER_OP) + (num_buckets * JACOBIAN_Z_NOT_ONE_PENALTY);
// Batch inversion amortizes one inversion over ~num_points elements, hence
// the log factor.
232 double inversion_cost_per_round = log2_num_points *
static_cast<double>(COST_OF_INVERSION);
234 return static_cast<double>(savings_per_round) > inversion_cost_per_round;
237template <
typename Curve>
// add_affine_points (fragment): batch-adds num_points/2 independent affine
// point pairs using Montgomery's batch-inversion trick; `scratch_space` holds
// the intermediate field elements. Thin wrapper over the shared group helper.
239 const size_t num_points,
248 bb::group_elements::batch_affine_add_interleaved<AffineElement, BaseField>(points, num_points, scratch_space);
251template <
typename Curve>
// jacobian_pippenger_with_transformed_scalars (fragment): safe Pippenger
// variant using Jacobian bucket accumulators — handles doubling/infinity edge
// cases at the cost of slower group additions. Scalars are assumed already
// out of Montgomery form with zero entries filtered (see
// transform_scalar_and_get_nonzero_scalar_indices).
254 const size_t size = msm_data.scalar_indices.size();
255 const uint32_t bits_per_slice = get_optimal_log_num_buckets(size);
256 const size_t num_buckets =
size_t{ 1 } << bits_per_slice;
// ceil(NUM_BITS_IN_FIELD / bits_per_slice) rounds; the last round handles the
// `remainder` leftover bits.
257 const uint32_t num_rounds =
static_cast<uint32_t
>((NUM_BITS_IN_FIELD + bits_per_slice - 1) / bits_per_slice);
258 const uint32_t remainder = NUM_BITS_IN_FIELD % bits_per_slice;
261 Element msm_result = Curve::Group::point_at_infinity;
263 for (uint32_t round = 0; round < num_rounds; ++round) {
// Scatter each point into the bucket selected by this round's scalar slice.
265 for (
size_t i = 0; i < size; ++i) {
266 uint32_t idx = msm_data.scalar_indices[i];
267 uint32_t bucket = get_scalar_slice(msm_data.scalars[idx], round, bits_per_slice);
// NOTE(review): the branch condition distinguishing += vs = (presumably a
// bucket-empty check, missing lines 268-269/271) is not visible here.
270 bucket_data.
buckets[bucket] += msm_data.points[idx];
272 bucket_data.
buckets[bucket] = msm_data.points[idx];
279 Element bucket_result = accumulate_buckets(bucket_data);
// Shift the running result left by this round's slice width (fewer doublings
// on the final, possibly-narrower round).
282 uint32_t num_doublings = (round == num_rounds - 1 && remainder != 0) ? remainder : bits_per_slice;
283 for (uint32_t i = 0; i < num_doublings; ++i) {
284 msm_result.self_dbl();
286 msm_result += bucket_result;
291template <
typename Curve>
// affine_pippenger_with_transformed_scalars (fragment): fast Pippenger variant
// using affine buckets with batched inversion. Falls back to the safe
// Jacobian variant when the cost model says the affine trick does not pay off.
294 const size_t num_points = msm_data.scalar_indices.size();
295 const uint32_t bits_per_slice = get_optimal_log_num_buckets(num_points);
296 const size_t num_buckets =
size_t{ 1 } << bits_per_slice;
298 if (!use_affine_trick(num_points, num_buckets)) {
299 return jacobian_pippenger_with_transformed_scalars(msm_data);
302 const uint32_t num_rounds =
static_cast<uint32_t
>((NUM_BITS_IN_FIELD + bits_per_slice - 1) / bits_per_slice);
303 const uint32_t remainder = NUM_BITS_IN_FIELD % bits_per_slice;
309 Element msm_result = Curve::Group::point_at_infinity;
311 for (uint32_t round = 0; round < num_rounds; ++round) {
// Build the packed point schedule: (point_index << 32) | bucket_index.
314 for (
size_t i = 0; i < num_points; ++i) {
315 uint32_t idx = msm_data.scalar_indices[i];
316 uint32_t bucket_idx = get_scalar_slice(msm_data.scalars[idx], round, bits_per_slice);
317 msm_data.point_schedule[i] = PointScheduleEntry::create(idx, bucket_idx).data;
// Sort by bucket and drop zero-bucket entries (they contribute nothing);
// the sort call itself is in missing line 323.
322 size_t num_zero_bucket_entries =
324 size_t round_size = num_points - num_zero_bucket_entries;
327 Element bucket_result = Curve::Group::point_at_infinity;
328 if (round_size > 0) {
// Only the non-zero-bucket tail of the schedule is processed.
329 std::span<uint64_t> schedule(&msm_data.point_schedule[num_zero_bucket_entries], round_size);
330 batch_accumulate_points_into_buckets(schedule, msm_data.points, affine_data, bucket_data);
331 bucket_result = accumulate_buckets(bucket_data);
// Shift the running result left by this round's slice width.
336 uint32_t num_doublings = (round == num_rounds - 1 && remainder != 0) ? remainder : bits_per_slice;
337 for (uint32_t i = 0; i < num_doublings; ++i) {
338 msm_result.self_dbl();
340 msm_result += bucket_result;
346template <
typename Curve>
// batch_accumulate_points_into_buckets (fragment): walks the bucket-sorted
// point schedule, pairing consecutive entries that target the same bucket so
// they can be added with one batched affine addition pass; unpaired entries go
// through process_single_point. Results of each batch are themselves fed back
// as new inputs (tree reduction) until the schedule is exhausted.
// NOTE(review): many interior lines are missing; point_it's declaration,
// `entry`/`lhs`/`rhs`/`last` bindings, and several loop increments are not
// visible in this extraction.
353 if (point_schedule.empty()) {
358 size_t scratch_it = 0;
359 const size_t num_points = point_schedule.size();
// Stop prefetching near the end so lookahead never reads past the schedule.
360 const size_t prefetch_max = (num_points >= PREFETCH_LOOKAHEAD) ? (num_points - PREFETCH_LOOKAHEAD) : 0;
361 const size_t last_index = num_points - 1;
364 while (point_it < num_points || scratch_it != 0) {
// Fill the affine-addition scratch buffer up to BATCH_SIZE entries.
366 while (((scratch_it + 1) < AffineAdditionData::BATCH_SIZE) && (point_it < last_index)) {
// Periodically prefetch upcoming points to hide memory latency.
368 if ((point_it < prefetch_max) && ((point_it & PREFETCH_INTERVAL_MASK) == 0)) {
369 for (
size_t i = PREFETCH_LOOKAHEAD / 2; i < PREFETCH_LOOKAHEAD; ++i) {
371 __builtin_prefetch(&points[entry.point_index()]);
378 process_bucket_pair(lhs.bucket_index(),
380 &points[lhs.point_index()],
381 &points[rhs.point_index()],
// Odd tail entry: cannot be paired, handled individually.
389 if (point_it == last_index) {
391 process_single_point(
392 last.bucket_index(), &points[last.point_index()], affine_data, bucket_data, scratch_it, point_it);
// Batch-add the collected pairs (Montgomery's trick); outputs land in the
// second half of points_to_add.
396 size_t num_points_to_add = scratch_it;
397 if (num_points_to_add >= 2) {
399 affine_data.points_to_add.data(), num_points_to_add, affine_data.inversion_scratch_space.data());
403 AffineElement* affine_output = affine_data.points_to_add.data() + (num_points_to_add / 2);
// Re-pair the batch outputs by destination bucket for the next iteration.
406 size_t new_scratch_it = 0;
407 size_t output_it = 0;
408 size_t num_outputs = num_points_to_add / 2;
410 while ((num_outputs > 1) && (output_it + 1 < num_outputs)) {
411 uint32_t lhs_bucket = affine_data.addition_result_bucket_destinations[output_it];
412 uint32_t rhs_bucket = affine_data.addition_result_bucket_destinations[output_it + 1];
414 process_bucket_pair(lhs_bucket,
416 &affine_output[output_it],
417 &affine_output[output_it + 1],
// Odd leftover output handled individually.
425 if (num_outputs > 0 && output_it == num_outputs - 1) {
426 uint32_t bucket = affine_data.addition_result_bucket_destinations[output_it];
427 process_single_point(
428 bucket, &affine_output[output_it], affine_data, bucket_data, new_scratch_it, output_it);
432 scratch_it = new_scratch_it;
436template <
typename Curve>
// batch_multi_scalar_mul (fragment): top-level entry computing several MSMs in
// parallel with balanced work units. Picks the safe (Jacobian) or fast
// (affine-trick) Pippenger per the handle_edge_cases flag, accumulates
// per-thread partial results, then restores scalars to Montgomery form.
// NOTE(review): the parallel-for wrappers and several declarations
// (thread_work_units, msms, msm_data, msm_result, offset bookkeeping) live in
// lines missing from this extraction.
440 bool handle_edge_cases)
noexcept
443 const size_t num_msms = points.size();
// Edge-case-safe callers pay for Jacobian buckets; otherwise use the faster
// affine-trick variant.
453 auto pippenger_impl =
454 handle_edge_cases ? jacobian_pippenger_with_transformed_scalars : affine_pippenger_with_transformed_scalars;
458 if (!thread_work_units[thread_idx].empty()) {
461 msm_results.reserve(msms.size());
// Reused per-thread buffer for the packed point schedule.
464 std::vector<uint64_t> point_schedule_buffer;
467 point_schedule_buffer.resize(msm.size);
469 MSMData::from_work_unit(scalars, points, msm_scalar_indices, point_schedule_buffer, msm);
// Tiny work units skip Pippenger entirely.
471 (msm.size < PIPPENGER_THRESHOLD) ? small_mul<Curve>(msm_data) : pippenger_impl(msm_data);
473 msm_results.emplace_back(msm_result, msm.batch_msm_index);
// Reduce: sum the partial results each thread produced for the same MSM index.
481 std::vector<Element> results(num_msms, Curve::Group::point_at_infinity);
482 for (
const auto& single_thread_msm_results : thread_msm_results) {
483 for (
const auto& [element,
index] : single_thread_msm_results) {
484 results[
index] += element;
487 Element::batch_normalize(results.data(), num_msms);
// Scalars were converted out of Montgomery form in-place earlier; restore
// them so the caller's data is unchanged on return.
490 for (
auto& scalar_span : scalars) {
492 for (size_t i = start; i < end; ++i) {
493 scalar_span[i].self_to_montgomery_form();
498 return std::vector<AffineElement>(results.begin(), results.end());
501template <
typename Curve>
// pippenger (fragment): single-MSM convenience wrapper over
// batch_multi_scalar_mul. Returns the point at infinity for empty input.
// NOTE(review): the construction of points_batch/scalars_batch (lines 511-521)
// and the final return are missing from this extraction.
504 bool handle_edge_cases)
noexcept
506 if (scalars.size() == 0) {
507 return Curve::Group::affine_point_at_infinity;
509 const size_t num_scalars = scalars.size();
// PolynomialSpan carries a start_index offset into the points array; the
// points span must cover it.
510 BB_ASSERT_GTE(points.size(), scalars.start_index + num_scalars);
522 auto results = batch_multi_scalar_mul(std::span(points_batch), std::span(scalars_batch), handle_edge_cases);
526template <
typename Curve>
// pippenger_unsafe (fragment): fast-path wrapper that per the Doxygen index
// skips edge-case handling (callers guarantee linearly independent points);
// the handle_edge_cases parameter is accepted but deliberately unused.
529 [[maybe_unused]]
bool handle_edge_cases)
noexcept
534template <
typename Curve>
// Explicit template instantiations (fragment): pippenger / pippenger_unsafe
// for the Grumpkin and BN254 curves, plus the MSM class itself, so the
// templates are compiled once into this translation unit. NOTE(review):
// several declaration lines are missing from this extraction.
543 bool handle_edge_cases =
true) noexcept;
545template curve::Grumpkin::
Element pippenger_unsafe<curve::Grumpkin>(
546 PolynomialSpan<const curve::Grumpkin::ScalarField> scalars,
std::span<const curve::Grumpkin::AffineElement> points);
549 std::span<const curve::BN254::AffineElement> points,
550 bool handle_edge_cases = true);
552template curve::BN254::
Element pippenger_unsafe<curve::BN254>(
PolynomialSpan<const curve::BN254::ScalarField> scalars,
553 std::span<const curve::BN254::AffineElement> points);
557template class
bb::scalar_multiplication::
MSM<
bb::curve::Grumpkin>;
558template class
bb::scalar_multiplication::
MSM<
bb::curve::BN254>;
#define BB_ASSERT_GTE(left, right,...)
#define BB_ASSERT_DEBUG(expression,...)
#define BB_ASSERT_EQ(actual, expected,...)
#define BB_ASSERT_LT(left, right,...)
BB_INLINE bool get(size_t index) const noexcept
BB_INLINE void set(size_t index, bool value) noexcept
typename Group::element Element
typename Group::affine_element AffineElement
typename Curve::BaseField BaseField
static bool use_affine_trick(size_t num_points, size_t num_buckets) noexcept
Decide if batch inversion saves work vs Jacobian additions.
static Element jacobian_pippenger_with_transformed_scalars(MSMData &msm_data) noexcept
Pippenger using Jacobian buckets (handles edge cases: doubling, infinity)
static uint32_t get_scalar_slice(const ScalarField &scalar, size_t round, size_t slice_size) noexcept
Extract c-bit slice from scalar for bucket index computation.
static Element affine_pippenger_with_transformed_scalars(MSMData &msm_data) noexcept
Pippenger using affine buckets with batch inversion (faster, no edge case handling)
static void add_affine_points(AffineElement *points, const size_t num_points, typename Curve::BaseField *scratch_space) noexcept
Batch add n/2 independent point pairs using Montgomery's trick.
static std::vector< ThreadWorkUnits > get_work_units(std::span< std::span< ScalarField > > scalars, std::vector< std::vector< uint32_t > > &msm_scalar_indices) noexcept
Distribute multiple MSMs across threads with balanced point counts.
typename Curve::Element Element
static uint32_t get_optimal_log_num_buckets(size_t num_points) noexcept
Compute optimal bits per slice by minimizing cost over c in [1, MAX_SLICE_BITS)
static std::vector< AffineElement > batch_multi_scalar_mul(std::span< std::span< const AffineElement > > points, std::span< std::span< ScalarField > > scalars, bool handle_edge_cases=true) noexcept
Compute multiple MSMs in parallel with work balancing.
static void batch_accumulate_points_into_buckets(std::span< const uint64_t > point_schedule, std::span< const AffineElement > points, AffineAdditionData &affine_data, BucketAccumulators &bucket_data) noexcept
Process sorted point schedule into bucket accumulators using batched affine additions.
typename Curve::ScalarField ScalarField
typename Curve::AffineElement AffineElement
static void transform_scalar_and_get_nonzero_scalar_indices(std::span< ScalarField > scalars, std::vector< uint32_t > &nonzero_scalar_indices) noexcept
Convert scalars from Montgomery form and collect indices of nonzero scalars.
bb::curve::BN254::Element Element
constexpr T ceil_div(const T &numerator, const T &denominator)
Computes the ceiling of the division of two integral types.
Curve::Element small_mul(const typename MSM< Curve >::MSMData &msm_data) noexcept
Curve::Element pippenger(PolynomialSpan< const typename Curve::ScalarField > scalars, std::span< const typename Curve::AffineElement > points, bool handle_edge_cases) noexcept
Safe MSM wrapper (defaults to handle_edge_cases=true)
size_t sort_point_schedule_and_count_zero_buckets(uint64_t *point_schedule, const size_t num_entries, const uint32_t bucket_index_bits) noexcept
Sort point schedule by bucket index and count zero-bucket entries.
Curve::Element pippenger_unsafe(PolynomialSpan< const typename Curve::ScalarField > scalars, std::span< const typename Curve::AffineElement > points) noexcept
Fast MSM wrapper for linearly independent points (no edge case handling)
Entry point for Barretenberg command-line interface.
void parallel_for(size_t num_iterations, const std::function< void(size_t)> &func)
void parallel_for_range(size_t num_points, const std::function< void(size_t, size_t)> &func, size_t no_multhreading_if_less_or_equal)
Split a loop into several loops running in parallel.
constexpr decltype(auto) get(::tuplet::tuple< T... > &&t) noexcept
auto range(size_t size, size_t offset=0) const
Scratch space for batched affine point additions (one per thread)
Affine bucket accumulators for the fast affine-trick Pippenger variant.
Jacobian bucket accumulators for the safe Pippenger variant.
std::vector< Element > buckets
Container for MSM input data passed between algorithm stages.
MSMWorkUnit describes an MSM that may be part of a larger MSM.
Packed point schedule entry: (point_index << 32) | bucket_index.