// Run the test suite with the experimental R1CS API enabled:
//     cargo test --features "yoloproofs"
impl Default for PedersenGens {
fn default() -> Self {
PedersenGens {
B: RISTRETTO_BASEPOINT_POINT,
B_blinding: RistrettoPoint::hash_from_bytes::<Sha3_512>(
RISTRETTO_BASEPOINT_COMPRESSED.as_bytes(),
),
}
}
}
/// Represents a pair of base points for Pedersen commitments.
///
/// The Bulletproofs implementation and API is designed to support
/// pluggable bases for Pedersen commitments, so that the choice of
/// bases is not hard-coded.
///
/// The default generators are:
///
/// * `B`: the `ristretto255` basepoint;
/// * `B_blinding`: the result of `ristretto255` SHA3-512
/// hash-to-group on input `B_bytes`.
#[derive(Copy, Clone)]
pub struct PedersenGens {
/// Base for the committed value
pub B: RistrettoPoint,
/// Base for the blinding factor
pub B_blinding: RistrettoPoint,
}
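// A minimal usage sketch (helper name assumed; `commit` is the method used
// later in this excerpt): with the default bases, a Pedersen commitment to
// `value` under blinding factor `blinding` is
//
//     commit(value, blinding) = value * B + blinding * B_blinding.
//
// Because `B_blinding` is obtained by hashing, its discrete log with respect
// to `B` is unknown, which is what makes the commitment binding.
fn pedersen_commit_sketch(value: Scalar, blinding: Scalar) -> RistrettoPoint {
    let pc_gens = PedersenGens::default();
    // Equivalent to `pc_gens.commit(value, blinding)`.
    value * pc_gens.B + blinding * pc_gens.B_blinding
}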
/// The `BulletproofGens` struct contains all the generators needed
/// for aggregating up to `m` range proofs of up to `n` bits each.
///
/// # Extensible Generator Generation
///
/// Instead of constructing a single vector of size `m*n`, as
/// described in the Bulletproofs paper, we construct each party's
/// generators separately.
///
/// To construct an arbitrary-length chain of generators, we apply
/// SHAKE256 to a domain separator label, and feed each 64 bytes of
/// XOF output into the `ristretto255` hash-to-group function.
/// Each of the `m` parties uses a distinct domain separation
/// label for its generators, and proving and verification
/// use the first `n` elements of the arbitrary-length chain.
///
/// This means that the aggregation size (number of
/// parties) is orthogonal to the rangeproof size (number of bits),
/// and allows using the same `BulletproofGens` object for different
/// proving parameters.
///
/// This construction is also forward-compatible with constraint
/// system proofs, which use a much larger slice of the generator
/// chain, and even forward-compatible to multiparty aggregation of
/// constraint system proofs, since the generators are namespaced by
/// their party index.
#[derive(Clone)]
pub struct BulletproofGens {
/// The maximum number of usable generators for each party.
pub gens_capacity: usize,
/// Number of values or parties
pub party_capacity: usize,
/// Precomputed \\(\mathbf G\\) generators for each party.
G_vec: Vec<Vec<RistrettoPoint>>,
/// Precomputed \\(\mathbf H\\) generators for each party.
H_vec: Vec<Vec<RistrettoPoint>>,
}
impl BulletproofGens {
/// Create a new `BulletproofGens` object.
///
/// # Inputs
///
/// * `gens_capacity` is the number of generators to precompute
/// for each party. For rangeproofs, it is sufficient to pass
/// `64`, the maximum bitsize of the rangeproofs. For circuit
/// proofs, the capacity must be greater than the number of
/// multipliers, rounded up to the next power of two.
///
/// * `party_capacity` is the maximum number of parties that can
/// produce an aggregated proof.
pub fn new(gens_capacity: usize, party_capacity: usize) -> Self {
let mut gens = BulletproofGens {
gens_capacity: 0,
party_capacity,
G_vec: (0..party_capacity).map(|_| Vec::new()).collect(),
H_vec: (0..party_capacity).map(|_| Vec::new()).collect(),
};
gens.increase_capacity(gens_capacity);
gens
}
/// Returns the `j`-th share of the generators, with an appropriate
/// slice of the vectors `G` and `H` for the `j`-th range proof.
pub fn share(&self, j: usize) -> BulletproofGensShare {
BulletproofGensShare {
gens: &self,
share: j,
}
}
/// Increases the generators' capacity to the amount specified.
/// If less than or equal to the current capacity, does nothing.
pub fn increase_capacity(&mut self, new_capacity: usize) {
use byteorder::{ByteOrder, LittleEndian};
if self.gens_capacity >= new_capacity {
return;
}
for i in 0..self.party_capacity {
let party_index = i as u32;
let mut label = [b'G', 0, 0, 0, 0];
LittleEndian::write_u32(&mut label[1..5], party_index);
self.G_vec[i].extend(
&mut GeneratorsChain::new(&label)
.fast_forward(self.gens_capacity)
.take(new_capacity - self.gens_capacity),
);
label[0] = b'H';
self.H_vec[i].extend(
&mut GeneratorsChain::new(&label)
.fast_forward(self.gens_capacity)
.take(new_capacity - self.gens_capacity),
);
}
self.gens_capacity = new_capacity;
}
/// Return an iterator over the aggregation of the parties' G generators with given size `n`.
pub(crate) fn G(&self, n: usize, m: usize) -> impl Iterator<Item = &RistrettoPoint> {
AggregatedGensIter {
n,
m,
array: &self.G_vec,
party_idx: 0,
gen_idx: 0,
}
}
/// Return an iterator over the aggregation of the parties' H generators with given size `n`.
pub(crate) fn H(&self, n: usize, m: usize) -> impl Iterator<Item = &RistrettoPoint> {
AggregatedGensIter {
n,
m,
array: &self.H_vec,
party_idx: 0,
gen_idx: 0,
}
}
}
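// A small test sketch (names assumed, not from the original source) of the
// properties described above: the chain is deterministic, so growing the
// capacity only appends new generators, and the aggregated iterator `G(n, m)`
// is expected to yield party 0's first `n` generators, then party 1's, and so on.
#[cfg(test)]
mod gens_sketch {
    use super::*;

    #[test]
    fn increasing_capacity_preserves_existing_generators() {
        let mut gens = BulletproofGens::new(16, 2);
        let before: Vec<RistrettoPoint> = gens.share(0).G(16).cloned().collect();
        gens.increase_capacity(32);
        let after: Vec<RistrettoPoint> = gens.share(0).G(16).cloned().collect();
        assert_eq!(before, after);
    }

    #[test]
    fn aggregated_iterator_is_party_major() {
        let gens = BulletproofGens::new(4, 2);
        let aggregated: Vec<RistrettoPoint> = gens.G(4, 2).cloned().collect();
        // Party 0's first 4 generators followed by party 1's first 4.
        let per_party: Vec<RistrettoPoint> = gens
            .share(0)
            .G(4)
            .chain(gens.share(1).G(4))
            .cloned()
            .collect();
        assert_eq!(aggregated, per_party);
    }
}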
/// The `GeneratorsChain` creates an arbitrarily long sequence of
/// orthogonal generators. The sequence can be deterministically
/// produced starting from an arbitrary point.
struct GeneratorsChain {
reader: Sha3XofReader,
}
impl GeneratorsChain {
/// Creates a chain of generators, determined by the hash of `label`.
fn new(label: &[u8]) -> Self {
let mut shake = Shake256::default();
shake.input(b"GeneratorsChain");
shake.input(label);
GeneratorsChain {
reader: shake.xof_result(),
}
}
/// Advances the reader n times, squeezing and discarding
/// the result.
fn fast_forward(mut self, n: usize) -> Self {
for _ in 0..n {
let mut buf = [0u8; 64];
self.reader.read(&mut buf);
}
self
}
}
impl Default for GeneratorsChain {
fn default() -> Self {
Self::new(&[])
}
}
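// A sketch of the iteration step (assumed; the `Iterator` impl is not shown
// above): each call squeezes 64 bytes of SHAKE256 output and maps them to a
// group element with the `ristretto255` hash-to-group function, as described
// in the `BulletproofGens` documentation.
impl Iterator for GeneratorsChain {
    type Item = RistrettoPoint;

    fn next(&mut self) -> Option<Self::Item> {
        let mut uniform_bytes = [0u8; 64];
        self.reader.read(&mut uniform_bytes);
        Some(RistrettoPoint::from_uniform_bytes(&uniform_bytes))
    }
}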
/// A [`ConstraintSystem`] implementation for use by the prover.
///
/// The prover commits high-level variables and their blinding factors `(v, v_blinding)`,
/// allocates low-level variables and creates constraints in terms of these
/// high-level variables and low-level variables.
///
/// When all constraints are added, the proving code calls `prove`
/// which consumes the `Prover` instance, samples random challenges
/// that instantiate the randomized constraints, and creates a complete proof.
pub struct Prover<'t, 'g> {
transcript: &'t mut Transcript,
pc_gens: &'g PedersenGens,
/// The constraints accumulated so far.
constraints: Vec<LinearCombination>,
/// Stores assignments to the "left" of multiplication gates
a_L: Vec<Scalar>,
/// Stores assignments to the "right" of multiplication gates
a_R: Vec<Scalar>,
/// Stores assignments to the "output" of multiplication gates
a_O: Vec<Scalar>,
/// High-level witness data (value openings to V commitments)
v: Vec<Scalar>,
/// High-level witness data (blinding openings to V commitments)
v_blinding: Vec<Scalar>,
/// This list holds closures that will be called in the second phase of the protocol,
/// when non-randomized variables are committed.
deferred_constraints: Vec<Box<dyn Fn(&mut RandomizingProver<'t, 'g>) -> Result<(), R1CSError>>>,
/// Index of a pending multiplier that's not fully assigned yet.
pending_multiplier: Option<usize>,
}
impl<'t, 'g> Prover<'t, 'g> {
/// Construct an empty constraint system with specified external
/// input variables.
///
/// # Inputs
///
/// The `bp_gens` and `pc_gens` are generators for Bulletproofs
/// and for the Pedersen commitments, respectively. The
/// [`BulletproofGens`] should have `gens_capacity` greater than
/// the number of multiplication constraints that will eventually
/// be added into the constraint system.
///
/// The `transcript` parameter is a Merlin proof transcript. The
/// `Prover` holds onto the `&mut Transcript` until it consumes
/// itself during [`Prover::prove`], releasing its borrow of the
/// transcript. This ensures that the transcript cannot be
/// altered except by the `Prover` before proving is complete.
///
/// # Returns
///
/// Returns a new `Prover` instance.
pub fn new(pc_gens: &'g PedersenGens, transcript: &'t mut Transcript) -> Self {
transcript.r1cs_domain_sep();
Prover {
pc_gens,
transcript,
v: Vec::new(),
v_blinding: Vec::new(),
constraints: Vec::new(),
a_L: Vec::new(),
a_R: Vec::new(),
a_O: Vec::new(),
deferred_constraints: Vec::new(),
pending_multiplier: None,
}
}
/// Creates a commitment to a high-level variable and adds it to the transcript.
///
/// # Inputs
///
/// The `v` and `v_blinding` parameters are openings to the
/// commitment to the external variable for the constraint
/// system. Passing the opening (the value together with the
/// blinding factor) makes it possible to reference pre-existing
/// commitments in the constraint system. All external variables
/// must be passed up-front, so that challenges produced by
/// [`ConstraintSystem::challenge_scalar`] are bound to the
/// external variables.
///
/// # Returns
///
/// Returns a pair of a Pedersen commitment (as a compressed Ristretto point),
/// and a [`Variable`] corresponding to it, which can be used to form constraints.
pub fn commit(&mut self, v: Scalar, v_blinding: Scalar) -> (CompressedRistretto, Variable) {
let i = self.v.len();
self.v.push(v);
self.v_blinding.push(v_blinding);
// Add the commitment to the transcript.
let V = self.pc_gens.commit(v, v_blinding).compress();
self.transcript.commit_point(b"V", &V);
(V, Variable::Committed(i))
}
}
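// A minimal usage sketch (setup names assumed, not from the original source):
// the prover is constructed from the Pedersen generators and a Merlin
// transcript, and each high-level value is committed before any challenges
// are derived from the transcript.
fn prover_commit_sketch() {
    use rand::thread_rng;

    let pc_gens = PedersenGens::default();
    let mut transcript = Transcript::new(b"ProverCommitSketch");
    let mut prover = Prover::new(&pc_gens, &mut transcript);

    // Commit to v = 5 with a fresh random blinding factor.
    let (commitment, var) = prover.commit(Scalar::from(5u64), Scalar::random(&mut thread_rng()));

    // `commitment` is published alongside the proof; `var` is used to build
    // constraints over the committed value.
    let _ = (commitment, var);
}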
/// Represents a variable in a constraint system.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum Variable {
/// Represents an external input specified by a commitment.
Committed(usize),
/// Represents the left input of a multiplication gate.
MultiplierLeft(usize),
/// Represents the right input of a multiplication gate.
MultiplierRight(usize),
/// Represents the output of a multiplication gate.
MultiplierOutput(usize),
/// Represents the constant 1.
One(),
}
fn constrain(&mut self, lc: LinearCombination) {
// TODO: check that the linear combinations are valid
// (e.g. that variables are valid, that the linear combination evals to 0 for prover, etc).
self.constraints.push(lc);
}
impl From<Variable> for LinearCombination {
fn from(v: Variable) -> LinearCombination {
LinearCombination {
terms: vec![(v, Scalar::one())],
}
}
}
/// Represents a linear combination of
/// [`Variables`](::r1cs::Variable). Each term is represented by a
/// `(Variable, Scalar)` pair.
#[derive(Clone, Debug, PartialEq)]
pub struct LinearCombination {
pub(super) terms: Vec<(Variable, Scalar)>,
}
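// A small sketch (helper name assumed): the constraint `v = 1` can be written
// as the linear combination `1*v + (-1)*One`, which should evaluate to zero
// and can then be passed to `constrain` above. The struct is built directly
// here since `terms` is visible within this module.
fn unit_constraint_sketch(v: Variable) -> LinearCombination {
    LinearCombination {
        terms: vec![(v, Scalar::one()), (Variable::One(), -Scalar::one())],
    }
}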
/// Consume this `ConstraintSystem` to produce a proof.
pub fn prove(mut self, bp_gens: &BulletproofGens) -> Result<R1CSProof, R1CSError> {
use std::iter;
use util;
// Commit a length _suffix_ for the number of high-level variables.
// We cannot do this in advance because the user can commit variables one-by-one,
// but this suffix provides safe disambiguation because each variable
// is prefixed with a separate label.
self.transcript.commit_u64(b"m", self.v.len() as u64);
// Create a `TranscriptRng` from the high-level witness data
//
// The prover wants to rekey the RNG with its witness data.
//
// This consists of the high level witness data (the v's and
// v_blinding's), as well as the low-level witness data (a_L,
// a_R, a_O). Since the low-level data should (hopefully) be
// determined by the high-level data, it doesn't give any
// extra entropy for reseeding the RNG.
//
// Since the v_blindings should be random scalars (in order to
// protect the v's in the commitments), we don't gain much by
// committing the v's as well as the v_blinding's.
let mut rng = {
let mut builder = self.transcript.build_rng();
// Commit the blinding factors for the input wires
for v_b in &self.v_blinding {
builder = builder.commit_witness_bytes(b"v_blinding", v_b.as_bytes());
}
use rand::thread_rng;
builder.finalize(&mut thread_rng())
};
// Commit to the first-phase low-level witness variables.
let n1 = self.a_L.len();
if bp_gens.gens_capacity < n1 {
return Err(R1CSError::InvalidGeneratorsLength);
}
// We are performing a single-party circuit proof, so party index is 0.
let gens = bp_gens.share(0);
let i_blinding1 = Scalar::random(&mut rng);
let o_blinding1 = Scalar::random(&mut rng);
let s_blinding1 = Scalar::random(&mut rng);
let mut s_L1: Vec<Scalar> = (0..n1).map(|_| Scalar::random(&mut rng)).collect();
let mut s_R1: Vec<Scalar> = (0..n1).map(|_| Scalar::random(&mut rng)).collect();
// A_I = <a_L, G> + <a_R, H> + i_blinding * B_blinding
let A_I1 = RistrettoPoint::multiscalar_mul(
iter::once(&i_blinding1)
.chain(self.a_L.iter())
.chain(self.a_R.iter()),
iter::once(&self.pc_gens.B_blinding)
.chain(gens.G(n1))
.chain(gens.H(n1)),
)
.compress();
// A_O = <a_O, G> + o_blinding * B_blinding
let A_O1 = RistrettoPoint::multiscalar_mul(
iter::once(&o_blinding1).chain(self.a_O.iter()),
iter::once(&self.pc_gens.B_blinding).chain(gens.G(n1)),
)
.compress();
// S = <s_L, G> + <s_R, H> + s_blinding * B_blinding
let S1 = RistrettoPoint::multiscalar_mul(
iter::once(&s_blinding1)
.chain(s_L1.iter())
.chain(s_R1.iter()),
iter::once(&self.pc_gens.B_blinding)
.chain(gens.G(n1))
.chain(gens.H(n1)),
)
.compress();
self.transcript.commit_point(b"A_I1", &A_I1);
self.transcript.commit_point(b"A_O1", &A_O1);
self.transcript.commit_point(b"S1", &S1);
// Process the remaining constraints.
self = self.create_randomized_constraints()?;
// Pad zeros to the next power of two (or do that implicitly when creating vectors)
// If the number of multiplications is not 0 or a power of 2, then pad the circuit.
let n = self.a_L.len();
let n2 = n - n1;
let padded_n = self.a_L.len().next_power_of_two();
let pad = padded_n - n;
if bp_gens.gens_capacity < padded_n {
return Err(R1CSError::InvalidGeneratorsLength);
}
// Commit to the second-phase low-level witness variables
let has_2nd_phase_commitments = n2 > 0;
let (i_blinding2, o_blinding2, s_blinding2) = if has_2nd_phase_commitments {
(
Scalar::random(&mut rng),
Scalar::random(&mut rng),
Scalar::random(&mut rng),
)
} else {
(Scalar::zero(), Scalar::zero(), Scalar::zero())
};
let mut s_L2: Vec<Scalar> = (0..n2).map(|_| Scalar::random(&mut rng)).collect();
let mut s_R2: Vec<Scalar> = (0..n2).map(|_| Scalar::random(&mut rng)).collect();
let (A_I2, A_O2, S2) = if has_2nd_phase_commitments {
(
// A_I = <a_L, G> + <a_R, H> + i_blinding * B_blinding
RistrettoPoint::multiscalar_mul(
iter::once(&i_blinding2)
.chain(self.a_L.iter().skip(n1))
.chain(self.a_R.iter().skip(n1)),
iter::once(&self.pc_gens.B_blinding)
.chain(gens.G(n).skip(n1))
.chain(gens.H(n).skip(n1)),
)
.compress(),
// A_O = <a_O, G> + o_blinding * B_blinding
RistrettoPoint::multiscalar_mul(
iter::once(&o_blinding2).chain(self.a_O.iter().skip(n1)),
iter::once(&self.pc_gens.B_blinding).chain(gens.G(n).skip(n1)),
)
.compress(),
// S = <s_L, G> + <s_R, H> + s_blinding * B_blinding
RistrettoPoint::multiscalar_mul(
iter::once(&s_blinding2)
.chain(s_L2.iter())
.chain(s_R2.iter()),
iter::once(&self.pc_gens.B_blinding)
.chain(gens.G(n).skip(n1))
.chain(gens.H(n).skip(n1)),
)
.compress(),
)
} else {
// Since we are using zero blinding factors and
// there are no variables to commit,
// the commitments _must_ be identity points,
// so we can hardcode them saving 3 mults+compressions.
(
CompressedRistretto::identity(),
CompressedRistretto::identity(),
CompressedRistretto::identity(),
)
};
self.transcript.commit_point(b"A_I2", &A_I2);
self.transcript.commit_point(b"A_O2", &A_O2);
self.transcript.commit_point(b"S2", &S2);
// 4. Compute blinded vector polynomials l(x) and r(x)
let y = self.transcript.challenge_scalar(b"y");
let z = self.transcript.challenge_scalar(b"z");
let (wL, wR, wO, wV) = self.flattened_constraints(&z);
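// A sketch of what `flattened_constraints` is assumed to produce: the Q linear
// constraints are collapsed into per-wire weight vectors using powers of z,
//   wL = z * z^Q * W_L,   wR = z * z^Q * W_R,
//   wO = z * z^Q * W_O,   wV = z * z^Q * W_V,
// which is the form the l(x)/r(x) coefficients below are written in.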
let mut l_poly = util::VecPoly3::zero(n);
let mut r_poly = util::VecPoly3::zero(n);
let mut exp_y = Scalar::one(); // y^n starting at n=0
let y_inv = y.invert();
let exp_y_inv = util::exp_iter(y_inv).take(padded_n).collect::<Vec<_>>();
let sLsR = s_L1
.iter()
.chain(s_L2.iter())
.zip(s_R1.iter().chain(s_R2.iter()));
for (i, (sl, sr)) in sLsR.enumerate() {
// l_poly.0 = 0
// l_poly.1 = a_L + y^-n * (z * z^Q * W_R)
l_poly.1[i] = self.a_L[i] + exp_y_inv[i] * wR[i];
// l_poly.2 = a_O
l_poly.2[i] = self.a_O[i];
// l_poly.3 = s_L
l_poly.3[i] = *sl;
// r_poly.0 = (z * z^Q * W_O) - y^n
r_poly.0[i] = wO[i] - exp_y;
// r_poly.1 = y^n * a_R + (z * z^Q * W_L)
r_poly.1[i] = exp_y * self.a_R[i] + wL[i];
// r_poly.2 = 0
// r_poly.3 = y^n * s_R
r_poly.3[i] = exp_y * sr;
exp_y = exp_y * y; // y^i -> y^(i+1)
}
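// t(x) = <l(x), r(x)> is a degree-6 polynomial in x. Its x^2 coefficient t_2
// encodes the committed statement, so no T_2 commitment is sent below: the
// verifier reconstructs that term from the V commitments, and its blinding is
// assembled from v_blinding when t_2_blinding is computed further down.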
let t_poly = util::VecPoly3::special_inner_product(&l_poly, &r_poly);
let t_1_blinding = Scalar::random(&mut rng);
let t_3_blinding = Scalar::random(&mut rng);
let t_4_blinding = Scalar::random(&mut rng);
let t_5_blinding = Scalar::random(&mut rng);
let t_6_blinding = Scalar::random(&mut rng);
let T_1 = self.pc_gens.commit(t_poly.t1, t_1_blinding).compress();
let T_3 = self.pc_gens.commit(t_poly.t3, t_3_blinding).compress();
let T_4 = self.pc_gens.commit(t_poly.t4, t_4_blinding).compress();
let T_5 = self.pc_gens.commit(t_poly.t5, t_5_blinding).compress();
let T_6 = self.pc_gens.commit(t_poly.t6, t_6_blinding).compress();
self.transcript.commit_point(b"T_1", &T_1);
self.transcript.commit_point(b"T_3", &T_3);
self.transcript.commit_point(b"T_4", &T_4);
self.transcript.commit_point(b"T_5", &T_5);
self.transcript.commit_point(b"T_6", &T_6);
let u = self.transcript.challenge_scalar(b"u");
let x = self.transcript.challenge_scalar(b"x");
// t_2_blinding = <z*z^Q, W_V * v_blinding>
// in the t_x_blinding calculations, line 76.
let t_2_blinding = wV
.iter()
.zip(self.v_blinding.iter())
.map(|(c, v_blinding)| c * v_blinding)
.sum();
let t_blinding_poly = util::Poly6 {
t1: t_1_blinding,
t2: t_2_blinding,
t3: t_3_blinding,
t4: t_4_blinding,
t5: t_5_blinding,
t6: t_6_blinding,
};
let t_x = t_poly.eval(x);
let t_x_blinding = t_blinding_poly.eval(x);
let mut l_vec = l_poly.eval(x);
l_vec.append(&mut vec![Scalar::zero(); pad]);
let mut r_vec = r_poly.eval(x);
r_vec.append(&mut vec![Scalar::zero(); pad]);
// XXX this should refer to the notes to explain why this is correct
for i in n..padded_n {
r_vec[i] = -exp_y;
exp_y = exp_y * y; // y^i -> y^(i+1)
}
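// A sketch of why the padding is consistent: in the padded region the l
// entries are zero and there are no constraint weights, so r_vec[i] = -y^i is
// exactly the -y^n term from r_poly.0, and the extra entries contribute
// nothing to t(x) = <l(x), r(x)>.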
let i_blinding = i_blinding1 + u * i_blinding2;
let o_blinding = o_blinding1 + u * o_blinding2;
let s_blinding = s_blinding1 + u * s_blinding2;
let e_blinding = x * (i_blinding + x * (o_blinding + x * s_blinding));
self.transcript.commit_scalar(b"t_x", &t_x);
self.transcript
.commit_scalar(b"t_x_blinding", &t_x_blinding);
self.transcript.commit_scalar(b"e_blinding", &e_blinding);
// Get a challenge value to combine statements for the IPP
let w = self.transcript.challenge_scalar(b"w");
let Q = w * self.pc_gens.B;
let G_factors = iter::repeat(Scalar::one())
.take(n1)
.chain(iter::repeat(u).take(n2 + pad))
.collect::<Vec<_>>();
let H_factors = exp_y_inv
.into_iter()
.zip(G_factors.iter())
.map(|(y, u_or_1)| y * u_or_1)
.collect::<Vec<_>>();
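// The inner-product argument below is run against rescaled generators: the
// first n1 G's keep factor 1, the second-phase and padding G's are scaled by
// u, and each H_i is additionally scaled by y^{-i}. That is what G_factors
// and H_factors express; the verifier is expected to apply the same scaling.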
let ipp_proof = InnerProductProof::create(
self.transcript,
&Q,
&G_factors,
&H_factors,
gens.G(padded_n).cloned().collect(),
gens.H(padded_n).cloned().collect(),
l_vec,
r_vec,
);
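// The inner-product proof folds l_vec and r_vec down to two final scalars plus
// 2 * log2(padded_n) compressed points, which is what keeps the proof size
// logarithmic in the (padded) circuit size.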
// We do not yet have a ClearOnDrop wrapper for Vec<Scalar>.
// When PR 202 [1] is merged, we can simply wrap s_L and s_R at the point of creation.
// [1] https://github.com/dalek-cryptography/curve25519-dalek/pull/202
for scalar in s_L1
.iter_mut()
.chain(s_L2.iter_mut())
.chain(s_R1.iter_mut())
.chain(s_R2.iter_mut())
{
scalar.clear();
}
Ok(R1CSProof {
A_I1,
A_O1,
S1,
A_I2,
A_O2,
S2,
T_1,
T_3,
T_4,
T_5,
T_6,
t_x,
t_x_blinding,
e_blinding,
ipp_proof,
})
}