bitcoin/src/scalar_4x64_impl.h
/**********************************************************************
 * Copyright (c) 2013, 2014 Pieter Wuille                             *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/
#ifndef _SECP256K1_SCALAR_REPR_IMPL_H_
#define _SECP256K1_SCALAR_REPR_IMPL_H_
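/* This file implements the scalar type as integers modulo the secp256k1 group
 * order n, stored in four 64-bit limbs: a scalar s is represented by d[0..3]
 * with s = d[0] + 2^64*d[1] + 2^128*d[2] + 2^192*d[3], kept reduced mod n. */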
/* Limbs of the secp256k1 order. */
#define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL)
#define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL)
#define SECP256K1_N_2 ((uint64_t)0xFFFFFFFFFFFFFFFEULL)
#define SECP256K1_N_3 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
/* Limbs of 2^256 minus the secp256k1 order. */
#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
#define SECP256K1_N_C_1 (~SECP256K1_N_1)
#define SECP256K1_N_C_2 (1)
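/* Writing N_C = 2^256 - n, we have 2^256 == N_C (mod n), so adding N_C modulo
 * 2^256 is equivalent to subtracting n. N_C fits in 129 bits (limb 2 is 1 and
 * limb 3 is 0), which the reduction code below exploits. */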
/* Limbs of half the secp256k1 order. */
#define SECP256K1_N_H_0 ((uint64_t)0xDFE92F46681B20A0ULL)
#define SECP256K1_N_H_1 ((uint64_t)0x5D576E7357A4501DULL)
#define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
#define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL)
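/* N_H is floor(n/2); secp256k1_scalar_is_high compares against it, e.g. when
 * normalizing signatures to the lower-S form. */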
SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) {
r->d[0] = 0;
r->d[1] = 0;
r->d[2] = 0;
r->d[3] = 0;
}
SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) {
r->d[0] = v;
r->d[1] = 0;
r->d[2] = 0;
r->d[3] = 0;
}
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6);
return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
}
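/** Like secp256k1_scalar_get_bits, but supports windows that cross a 64-bit
 *  limb boundary by combining bits from two adjacent limbs. For example,
 *  offset=60, count=8 combines the top 4 bits of d[0] with the low 4 bits of
 *  d[1]. */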
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
VERIFY_CHECK(count < 32);
VERIFY_CHECK(offset + count <= 256);
if ((offset + count - 1) >> 6 == offset >> 6) {
return secp256k1_scalar_get_bits(a, offset, count);
} else {
VERIFY_CHECK((offset >> 6) + 1 < 4);
return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & ((((uint64_t)1) << count) - 1);
}
}
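/** Check whether the scalar is >= n, without branching on its value: limbs are
 *  compared from most to least significant, 'no' latches once some limb is
 *  strictly below the corresponding limb of n (the result is then decided),
 *  and 'yes' latches once some limb is strictly above it. */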
SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) {
int yes = 0;
int no = 0;
no |= (a->d[3] < SECP256K1_N_3); /* No need for a > check. */
no |= (a->d[2] < SECP256K1_N_2);
yes |= (a->d[2] > SECP256K1_N_2) & ~no;
no |= (a->d[1] < SECP256K1_N_1);
yes |= (a->d[1] > SECP256K1_N_1) & ~no;
yes |= (a->d[0] >= SECP256K1_N_0) & ~no;
return yes;
}
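/** Conditionally subtract n: when overflow is 1, add N_C = 2^256 - n and drop
 *  the carry out of bit 256, which equals subtracting n modulo 2^256. */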
SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigned int overflow) {
uint128_t t;
VERIFY_CHECK(overflow <= 1);
t = (uint128_t)r->d[0] + overflow * SECP256K1_N_C_0;
r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
t += (uint128_t)r->d[1] + overflow * SECP256K1_N_C_1;
r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
t += (uint128_t)r->d[2] + overflow * SECP256K1_N_C_2;
r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
t += (uint128_t)r->d[3];
r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL;
return overflow;
}
static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
int overflow;
uint128_t t = (uint128_t)a->d[0] + b->d[0];
r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
t += (uint128_t)a->d[1] + b->d[1];
r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
t += (uint128_t)a->d[2] + b->d[2];
r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
t += (uint128_t)a->d[3] + b->d[3];
r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
overflow = t + secp256k1_scalar_check_overflow(r);
VERIFY_CHECK(overflow == 0 || overflow == 1);
secp256k1_scalar_reduce(r, overflow);
return overflow;
}
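/** Conditionally add 2^bit to the scalar, in constant time: when flag is 0,
 *  bit is offset by 0x100 so that (bit >> 6) exceeds 3, none of the per-limb
 *  equality tests match, and zero is added to every limb. */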
static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
uint128_t t;
VERIFY_CHECK(bit < 256);
bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 6) > 3 makes this a noop */
t = (uint128_t)r->d[0] + (((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F));
r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
t += (uint128_t)r->d[1] + (((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F));
r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
t += (uint128_t)r->d[2] + (((uint64_t)((bit >> 6) == 2)) << (bit & 0x3F));
r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
t += (uint128_t)r->d[3] + (((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F));
r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL;
#ifdef VERIFY
VERIFY_CHECK((t >> 64) == 0);
VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
#endif
}
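/** Set the scalar from 32 big-endian bytes, reducing modulo n; if overflow is
 *  non-NULL it receives 1 when the input was >= n. Illustrative use, where
 *  b32 is a hypothetical 32-byte buffer:
 *
 *      int over;
 *      secp256k1_scalar s;
 *      secp256k1_scalar_set_b32(&s, b32, &over);
 *      if (over) {
 *          ... the input was out of range and has been reduced ...
 *      }
 */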
static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
int over;
r->d[0] = (uint64_t)b32[31] | (uint64_t)b32[30] << 8 | (uint64_t)b32[29] << 16 | (uint64_t)b32[28] << 24 | (uint64_t)b32[27] << 32 | (uint64_t)b32[26] << 40 | (uint64_t)b32[25] << 48 | (uint64_t)b32[24] << 56;
r->d[1] = (uint64_t)b32[23] | (uint64_t)b32[22] << 8 | (uint64_t)b32[21] << 16 | (uint64_t)b32[20] << 24 | (uint64_t)b32[19] << 32 | (uint64_t)b32[18] << 40 | (uint64_t)b32[17] << 48 | (uint64_t)b32[16] << 56;
r->d[2] = (uint64_t)b32[15] | (uint64_t)b32[14] << 8 | (uint64_t)b32[13] << 16 | (uint64_t)b32[12] << 24 | (uint64_t)b32[11] << 32 | (uint64_t)b32[10] << 40 | (uint64_t)b32[9] << 48 | (uint64_t)b32[8] << 56;
r->d[3] = (uint64_t)b32[7] | (uint64_t)b32[6] << 8 | (uint64_t)b32[5] << 16 | (uint64_t)b32[4] << 24 | (uint64_t)b32[3] << 32 | (uint64_t)b32[2] << 40 | (uint64_t)b32[1] << 48 | (uint64_t)b32[0] << 56;
over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r));
if (overflow) {
*overflow = over;
}
}
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
bin[0] = a->d[3] >> 56; bin[1] = a->d[3] >> 48; bin[2] = a->d[3] >> 40; bin[3] = a->d[3] >> 32; bin[4] = a->d[3] >> 24; bin[5] = a->d[3] >> 16; bin[6] = a->d[3] >> 8; bin[7] = a->d[3];
bin[8] = a->d[2] >> 56; bin[9] = a->d[2] >> 48; bin[10] = a->d[2] >> 40; bin[11] = a->d[2] >> 32; bin[12] = a->d[2] >> 24; bin[13] = a->d[2] >> 16; bin[14] = a->d[2] >> 8; bin[15] = a->d[2];
bin[16] = a->d[1] >> 56; bin[17] = a->d[1] >> 48; bin[18] = a->d[1] >> 40; bin[19] = a->d[1] >> 32; bin[20] = a->d[1] >> 24; bin[21] = a->d[1] >> 16; bin[22] = a->d[1] >> 8; bin[23] = a->d[1];
bin[24] = a->d[0] >> 56; bin[25] = a->d[0] >> 48; bin[26] = a->d[0] >> 40; bin[27] = a->d[0] >> 32; bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0];
}
SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
}
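/** Compute r = -a mod n via two's complement: ~a + n + 1 = 2^256 + (n - a), so
 *  summing the complemented limbs with the limbs of n plus 1 and dropping the
 *  carry at 2^256 yields n - a. The 'nonzero' mask forces the result to 0 when
 *  a is 0, since n itself is not a reduced scalar. */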
static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
uint128_t t = (uint128_t)(~a->d[0]) + SECP256K1_N_0 + 1;
r->d[0] = t & nonzero; t >>= 64;
t += (uint128_t)(~a->d[1]) + SECP256K1_N_1;
r->d[1] = t & nonzero; t >>= 64;
t += (uint128_t)(~a->d[2]) + SECP256K1_N_2;
r->d[2] = t & nonzero; t >>= 64;
t += (uint128_t)(~a->d[3]) + SECP256K1_N_3;
r->d[3] = t & nonzero;
}
SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
}
static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
int yes = 0;
int no = 0;
no |= (a->d[3] < SECP256K1_N_H_3);
yes |= (a->d[3] > SECP256K1_N_H_3) & ~no;
no |= (a->d[2] < SECP256K1_N_H_2) & ~yes; /* No need for a > check. */
no |= (a->d[1] < SECP256K1_N_H_1) & ~yes;
yes |= (a->d[1] > SECP256K1_N_H_1) & ~no;
yes |= (a->d[0] > SECP256K1_N_H_0) & ~no;
return yes;
}
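/** Conditionally negate the scalar in place, using the same two's-complement
 *  construction as secp256k1_scalar_negate but with every term masked so that
 *  flag = 0 leaves r unchanged. Returns -1 if r was negated, 1 otherwise. */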
static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
/* If flag = 0, mask = 00...00 and this is a no-op;
 * if flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate. */
uint64_t mask = !flag - 1;
uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1;
uint128_t t = (uint128_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask);
r->d[0] = t & nonzero; t >>= 64;
t += (uint128_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask);
r->d[1] = t & nonzero; t >>= 64;
t += (uint128_t)(r->d[2] ^ mask) + (SECP256K1_N_2 & mask);
r->d[2] = t & nonzero; t >>= 64;
t += (uint128_t)(r->d[3] ^ mask) + (SECP256K1_N_3 & mask);
r->d[3] = t & nonzero;
return 2 * (mask == 0) - 1;
}
/* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */
/** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
#define muladd(a,b) { \
uint64_t tl, th; \
{ \
uint128_t t = (uint128_t)a * b; \
th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \
tl = t; \
} \
c0 += tl; /* overflow is handled on the next line */ \
th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \
c1 += th; /* overflow is handled on the next line */ \
c2 += (c1 < th) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \
VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
}
/** Add a*b to the number defined by (c0,c1). c1 must never overflow. */
#define muladd_fast(a,b) { \
uint64_t tl, th; \
{ \
uint128_t t = (uint128_t)a * b; \
th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \
tl = t; \
} \
c0 += tl; /* overflow is handled on the next line */ \
th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \
c1 += th; /* never overflows by contract (verified in the next line) */ \
VERIFY_CHECK(c1 >= th); \
}
/** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
#define muladd2(a,b) { \
uint64_t tl, th, th2, tl2; \
{ \
uint128_t t = (uint128_t)a * b; \
th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \
tl = t; \
} \
th2 = th + th; /* at most 0xFFFFFFFFFFFFFFFE (in case th was 0x7FFFFFFFFFFFFFFF) */ \
c2 += (th2 < th) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \
VERIFY_CHECK((th2 >= th) || (c2 != 0)); \
tl2 = tl + tl; /* at most 0xFFFFFFFFFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFFFFFFFFFF) */ \
th2 += (tl2 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \
c0 += tl2; /* overflow is handled on the next line */ \
th2 += (c0 < tl2) ? 1 : 0; /* second overflow is handled on the next line */ \
c2 += (c0 < tl2) & (th2 == 0); /* never overflows by contract (verified in the next line) */ \
VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \
c1 += th2; /* overflow is handled on the next line */ \
c2 += (c1 < th2) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \
VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \
}
/** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */
#define sumadd(a) { \
unsigned int over; \
c0 += (a); /* overflow is handled on the next line */ \
over = (c0 < (a)) ? 1 : 0; \
c1 += over; /* overflow is handled on the next line */ \
c2 += (c1 < over) ? 1 : 0; /* never overflows by contract */ \
}
/** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
#define sumadd_fast(a) { \
c0 += (a); /* overflow is handled on the next line */ \
c1 += (c0 < (a)) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \
VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
VERIFY_CHECK(c2 == 0); \
}
/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. */
#define extract(n) { \
(n) = c0; \
c0 = c1; \
c1 = c2; \
c2 = 0; \
}
/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. c2 is required to be zero. */
#define extract_fast(n) { \
(n) = c0; \
c0 = c1; \
c1 = 0; \
VERIFY_CHECK(c2 == 0); \
}
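/* The macros above implement an accumulator (c0,c1,c2) holding the value
 * c0 + 2^64*c1 + 2^128*c2. A sketch of the calling convention, where x, y, z,
 * w and out are illustrative placeholders:
 *
 *     c0 = 0; c1 = 0; c2 = 0;
 *     muladd_fast(x, y);    (c0,c1)    += x*y  (c1 must not overflow)
 *     muladd(z, w);         (c0,c1,c2) += z*w
 *     extract(out);         out = c0; shift the accumulator right 64 bits
 *
 * secp256k1_scalar_reduce_512 below reduces a 512-bit input l modulo n by
 * repeatedly folding the high limbs h back into the low ones as h * N_C,
 * using 2^256 == N_C (mod n): first 512 -> 385 bits, then 385 -> 258, then
 * 258 -> 256 plus a carry, followed by one conditional subtraction of n. */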
static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) {
#ifdef USE_ASM_X86_64
/* Reduce 512 bits into 385. */
uint64_t m0, m1, m2, m3, m4, m5, m6;
uint64_t p0, p1, p2, p3, p4;
uint64_t c;
__asm__ __volatile__(
/* Preload. */
"movq 32(%%rsi), %%r11\n"
"movq 40(%%rsi), %%r12\n"
"movq 48(%%rsi), %%r13\n"
"movq 56(%%rsi), %%r14\n"
/* Initialize r8,r9,r10 */
"movq 0(%%rsi), %%r8\n"
"movq $0, %%r9\n"
"movq $0, %%r10\n"
/* (r8,r9) += n0 * c0 */
"movq %8, %%rax\n"
"mulq %%r11\n"
"addq %%rax, %%r8\n"
"adcq %%rdx, %%r9\n"
/* extract m0 */
"movq %%r8, %q0\n"
"movq $0, %%r8\n"
/* (r9,r10) += l1 */
"addq 8(%%rsi), %%r9\n"
"adcq $0, %%r10\n"
/* (r9,r10,r8) += n1 * c0 */
"movq %8, %%rax\n"
"mulq %%r12\n"
"addq %%rax, %%r9\n"
"adcq %%rdx, %%r10\n"
"adcq $0, %%r8\n"
/* (r9,r10,r8) += n0 * c1 */
"movq %9, %%rax\n"
"mulq %%r11\n"
"addq %%rax, %%r9\n"
"adcq %%rdx, %%r10\n"
"adcq $0, %%r8\n"
/* extract m1 */
"movq %%r9, %q1\n"
"movq $0, %%r9\n"
/* (r10,r8,r9) += l2 */
"addq 16(%%rsi), %%r10\n"
"adcq $0, %%r8\n"
"adcq $0, %%r9\n"
/* (r10,r8,r9) += n2 * c0 */
"movq %8, %%rax\n"
"mulq %%r13\n"
"addq %%rax, %%r10\n"
"adcq %%rdx, %%r8\n"
"adcq $0, %%r9\n"
/* (r10,r8,r9) += n1 * c1 */
"movq %9, %%rax\n"
"mulq %%r12\n"
"addq %%rax, %%r10\n"
"adcq %%rdx, %%r8\n"
"adcq $0, %%r9\n"
/* (r10,r8,r9) += n0 */
"addq %%r11, %%r10\n"
"adcq $0, %%r8\n"
"adcq $0, %%r9\n"
/* extract m2 */
"movq %%r10, %q2\n"
"movq $0, %%r10\n"
/* (r8,r9,r10) += l3 */
"addq 24(%%rsi), %%r8\n"
"adcq $0, %%r9\n"
"adcq $0, %%r10\n"
/* (r8,r9,r10) += n3 * c0 */
"movq %8, %%rax\n"
"mulq %%r14\n"
"addq %%rax, %%r8\n"
"adcq %%rdx, %%r9\n"
"adcq $0, %%r10\n"
/* (r8,r9,r10) += n2 * c1 */
"movq %9, %%rax\n"
"mulq %%r13\n"
"addq %%rax, %%r8\n"
"adcq %%rdx, %%r9\n"
"adcq $0, %%r10\n"
/* (r8,r9,r10) += n1 */
"addq %%r12, %%r8\n"
"adcq $0, %%r9\n"
"adcq $0, %%r10\n"
/* extract m3 */
"movq %%r8, %q3\n"
"movq $0, %%r8\n"
/* (r9,r10,r8) += n3 * c1 */
"movq %9, %%rax\n"
"mulq %%r14\n"
"addq %%rax, %%r9\n"
"adcq %%rdx, %%r10\n"
"adcq $0, %%r8\n"
/* (r9,r10,r8) += n2 */
"addq %%r13, %%r9\n"
"adcq $0, %%r10\n"
"adcq $0, %%r8\n"
/* extract m4 */
"movq %%r9, %q4\n"
/* (r10,r8) += n3 */
"addq %%r14, %%r10\n"
"adcq $0, %%r8\n"
/* extract m5 */
"movq %%r10, %q5\n"
/* extract m6 */
"movq %%r8, %q6\n"
: "=g"(m0), "=g"(m1), "=g"(m2), "=g"(m3), "=g"(m4), "=g"(m5), "=g"(m6)
: "S"(l), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
: "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc");
/* Reduce 385 bits into 258. */
__asm__ __volatile__(
/* Preload */
"movq %q9, %%r11\n"
"movq %q10, %%r12\n"
"movq %q11, %%r13\n"
/* Initialize (r8,r9,r10) */
"movq %q5, %%r8\n"
"movq $0, %%r9\n"
"movq $0, %%r10\n"
/* (r8,r9) += m4 * c0 */
"movq %12, %%rax\n"
"mulq %%r11\n"
"addq %%rax, %%r8\n"
"adcq %%rdx, %%r9\n"
/* extract p0 */
"movq %%r8, %q0\n"
"movq $0, %%r8\n"
/* (r9,r10) += m1 */
"addq %q6, %%r9\n"
"adcq $0, %%r10\n"
/* (r9,r10,r8) += m5 * c0 */
"movq %12, %%rax\n"
"mulq %%r12\n"
"addq %%rax, %%r9\n"
"adcq %%rdx, %%r10\n"
"adcq $0, %%r8\n"
/* (r9,r10,r8) += m4 * c1 */
"movq %13, %%rax\n"
"mulq %%r11\n"
"addq %%rax, %%r9\n"
"adcq %%rdx, %%r10\n"
"adcq $0, %%r8\n"
/* extract p1 */
"movq %%r9, %q1\n"
"movq $0, %%r9\n"
/* (r10,r8,r9) += m2 */
"addq %q7, %%r10\n"
"adcq $0, %%r8\n"
"adcq $0, %%r9\n"
/* (r10,r8,r9) += m6 * c0 */
"movq %12, %%rax\n"
"mulq %%r13\n"
"addq %%rax, %%r10\n"
"adcq %%rdx, %%r8\n"
"adcq $0, %%r9\n"
/* (r10,r8,r9) += m5 * c1 */
"movq %13, %%rax\n"
"mulq %%r12\n"
"addq %%rax, %%r10\n"
"adcq %%rdx, %%r8\n"
"adcq $0, %%r9\n"
/* (r10,r8,r9) += m4 */
"addq %%r11, %%r10\n"
"adcq $0, %%r8\n"
"adcq $0, %%r9\n"
/* extract p2 */
"movq %%r10, %q2\n"
/* (r8,r9) += m3 */
"addq %q8, %%r8\n"
"adcq $0, %%r9\n"
/* (r8,r9) += m6 * c1 */
"movq %13, %%rax\n"
"mulq %%r13\n"
"addq %%rax, %%r8\n"
"adcq %%rdx, %%r9\n"
/* (r8,r9) += m5 */
"addq %%r12, %%r8\n"
"adcq $0, %%r9\n"
/* extract p3 */
"movq %%r8, %q3\n"
/* (r9) += m6 */
"addq %%r13, %%r9\n"
/* extract p4 */
"movq %%r9, %q4\n"
: "=&g"(p0), "=&g"(p1), "=&g"(p2), "=g"(p3), "=g"(p4)
: "g"(m0), "g"(m1), "g"(m2), "g"(m3), "g"(m4), "g"(m5), "g"(m6), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
: "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "cc");
/* Reduce 258 bits into 256. */
__asm__ __volatile__(
/* Preload */
"movq %q5, %%r10\n"
/* (rax,rdx) = p4 * c0 */
"movq %7, %%rax\n"
"mulq %%r10\n"
/* (rax,rdx) += p0 */
"addq %q1, %%rax\n"
"adcq $0, %%rdx\n"
/* extract r0 */
"movq %%rax, 0(%q6)\n"
/* Move to (r8,r9) */
"movq %%rdx, %%r8\n"
"movq $0, %%r9\n"
/* (r8,r9) += p1 */
"addq %q2, %%r8\n"
"adcq $0, %%r9\n"
/* (r8,r9) += p4 * c1 */
"movq %8, %%rax\n"
"mulq %%r10\n"
"addq %%rax, %%r8\n"
"adcq %%rdx, %%r9\n"
/* Extract r1 */
"movq %%r8, 8(%q6)\n"
"movq $0, %%r8\n"
/* (r9,r8) += p4 */
"addq %%r10, %%r9\n"
"adcq $0, %%r8\n"
/* (r9,r8) += p2 */
"addq %q3, %%r9\n"
"adcq $0, %%r8\n"
/* Extract r2 */
"movq %%r9, 16(%q6)\n"
"movq $0, %%r9\n"
/* (r8,r9) += p3 */
"addq %q4, %%r8\n"
"adcq $0, %%r9\n"
/* Extract r3 */
"movq %%r8, 24(%q6)\n"
/* Extract c */
"movq %%r9, %q0\n"
: "=g"(c)
: "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
: "rax", "rdx", "r8", "r9", "r10", "cc", "memory");
#else
uint128_t c;
uint64_t c0, c1, c2;
uint64_t n0 = l[4], n1 = l[5], n2 = l[6], n3 = l[7];
uint64_t m0, m1, m2, m3, m4, m5;
uint32_t m6;
uint64_t p0, p1, p2, p3;
uint32_t p4;
/* Reduce 512 bits into 385. */
/* m[0..6] = l[0..3] + n[0..3] * SECP256K1_N_C. */
c0 = l[0]; c1 = 0; c2 = 0;
muladd_fast(n0, SECP256K1_N_C_0);
extract_fast(m0);
sumadd_fast(l[1]);
muladd(n1, SECP256K1_N_C_0);
muladd(n0, SECP256K1_N_C_1);
extract(m1);
sumadd(l[2]);
muladd(n2, SECP256K1_N_C_0);
muladd(n1, SECP256K1_N_C_1);
sumadd(n0);
extract(m2);
sumadd(l[3]);
muladd(n3, SECP256K1_N_C_0);
muladd(n2, SECP256K1_N_C_1);
sumadd(n1);
extract(m3);
muladd(n3, SECP256K1_N_C_1);
sumadd(n2);
extract(m4);
sumadd_fast(n3);
extract_fast(m5);
VERIFY_CHECK(c0 <= 1);
m6 = c0;
/* Reduce 385 bits into 258. */
/* p[0..4] = m[0..3] + m[4..6] * SECP256K1_N_C. */
c0 = m0; c1 = 0; c2 = 0;
muladd_fast(m4, SECP256K1_N_C_0);
extract_fast(p0);
sumadd_fast(m1);
muladd(m5, SECP256K1_N_C_0);
muladd(m4, SECP256K1_N_C_1);
extract(p1);
sumadd(m2);
muladd(m6, SECP256K1_N_C_0);
muladd(m5, SECP256K1_N_C_1);
sumadd(m4);
extract(p2);
sumadd_fast(m3);
muladd_fast(m6, SECP256K1_N_C_1);
sumadd_fast(m5);
extract_fast(p3);
p4 = c0 + m6;
VERIFY_CHECK(p4 <= 2);
/* Reduce 258 bits into 256. */
/* r[0..3] = p[0..3] + p[4] * SECP256K1_N_C. */
c = p0 + (uint128_t)SECP256K1_N_C_0 * p4;
r->d[0] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
c += p1 + (uint128_t)SECP256K1_N_C_1 * p4;
r->d[1] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
c += p2 + (uint128_t)p4;
r->d[2] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
c += p3;
r->d[3] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
#endif
/* Final reduction of r. */
secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
}
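/** Compute l = a * b as a 512-bit product. The non-assembly path below is
 *  schoolbook multiplication: partial products are accumulated column by
 *  column with the muladd/extract macros, emitting one 64-bit limb of l per
 *  column. */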
static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, const secp256k1_scalar *b) {
#ifdef USE_ASM_X86_64
const uint64_t *pb = b->d;
__asm__ __volatile__(
/* Preload */
"movq 0(%%rdi), %%r15\n"
"movq 8(%%rdi), %%rbx\n"
"movq 16(%%rdi), %%rcx\n"
"movq 0(%%rdx), %%r11\n"
"movq 8(%%rdx), %%r12\n"
"movq 16(%%rdx), %%r13\n"
"movq 24(%%rdx), %%r14\n"
/* (rax,rdx) = a0 * b0 */
"movq %%r15, %%rax\n"
"mulq %%r11\n"
/* Extract l0 */
"movq %%rax, 0(%%rsi)\n"
/* (r8,r9,r10) = (rdx) */
"movq %%rdx, %%r8\n"
"xorq %%r9, %%r9\n"
"xorq %%r10, %%r10\n"
/* (r8,r9,r10) += a0 * b1 */
"movq %%r15, %%rax\n"
"mulq %%r12\n"
"addq %%rax, %%r8\n"
"adcq %%rdx, %%r9\n"
"adcq $0, %%r10\n"
/* (r8,r9,r10) += a1 * b0 */
"movq %%rbx, %%rax\n"
"mulq %%r11\n"
"addq %%rax, %%r8\n"
"adcq %%rdx, %%r9\n"
"adcq $0, %%r10\n"
/* Extract l1 */
"movq %%r8, 8(%%rsi)\n"
"xorq %%r8, %%r8\n"
/* (r9,r10,r8) += a0 * b2 */
"movq %%r15, %%rax\n"
"mulq %%r13\n"
"addq %%rax, %%r9\n"
"adcq %%rdx, %%r10\n"
"adcq $0, %%r8\n"
/* (r9,r10,r8) += a1 * b1 */
"movq %%rbx, %%rax\n"
"mulq %%r12\n"
"addq %%rax, %%r9\n"
"adcq %%rdx, %%r10\n"
"adcq $0, %%r8\n"
/* (r9,r10,r8) += a2 * b0 */
"movq %%rcx, %%rax\n"
"mulq %%r11\n"
"addq %%rax, %%r9\n"
"adcq %%rdx, %%r10\n"
"adcq $0, %%r8\n"
/* Extract l2 */
"movq %%r9, 16(%%rsi)\n"
"xorq %%r9, %%r9\n"
/* (r10,r8,r9) += a0 * b3 */
"movq %%r15, %%rax\n"
"mulq %%r14\n"
"addq %%rax, %%r10\n"
"adcq %%rdx, %%r8\n"
"adcq $0, %%r9\n"
/* Preload a3 */
"movq 24(%%rdi), %%r15\n"
/* (r10,r8,r9) += a1 * b2 */
"movq %%rbx, %%rax\n"
"mulq %%r13\n"
"addq %%rax, %%r10\n"
"adcq %%rdx, %%r8\n"
"adcq $0, %%r9\n"
/* (r10,r8,r9) += a2 * b1 */
"movq %%rcx, %%rax\n"
"mulq %%r12\n"
"addq %%rax, %%r10\n"
"adcq %%rdx, %%r8\n"
"adcq $0, %%r9\n"
/* (r10,r8,r9) += a3 * b0 */
"movq %%r15, %%rax\n"
"mulq %%r11\n"
"addq %%rax, %%r10\n"
"adcq %%rdx, %%r8\n"
"adcq $0, %%r9\n"
/* Extract l3 */
"movq %%r10, 24(%%rsi)\n"
"xorq %%r10, %%r10\n"
/* (r8,r9,r10) += a1 * b3 */
"movq %%rbx, %%rax\n"
"mulq %%r14\n"
"addq %%rax, %%r8\n"
"adcq %%rdx, %%r9\n"
"adcq $0, %%r10\n"
/* (r8,r9,r10) += a2 * b2 */
"movq %%rcx, %%rax\n"
"mulq %%r13\n"
"addq %%rax, %%r8\n"
"adcq %%rdx, %%r9\n"
"adcq $0, %%r10\n"
/* (r8,r9,r10) += a3 * b1 */
"movq %%r15, %%rax\n"
"mulq %%r12\n"
"addq %%rax, %%r8\n"
"adcq %%rdx, %%r9\n"
"adcq $0, %%r10\n"
/* Extract l4 */
"movq %%r8, 32(%%rsi)\n"
"xorq %%r8, %%r8\n"
/* (r9,r10,r8) += a2 * b3 */
"movq %%rcx, %%rax\n"
"mulq %%r14\n"
"addq %%rax, %%r9\n"
"adcq %%rdx, %%r10\n"
"adcq $0, %%r8\n"
/* (r9,r10,r8) += a3 * b2 */
"movq %%r15, %%rax\n"
"mulq %%r13\n"
"addq %%rax, %%r9\n"
"adcq %%rdx, %%r10\n"
"adcq $0, %%r8\n"
/* Extract l5 */
"movq %%r9, 40(%%rsi)\n"
/* (r10,r8) += a3 * b3 */
"movq %%r15, %%rax\n"
"mulq %%r14\n"
"addq %%rax, %%r10\n"
"adcq %%rdx, %%r8\n"
/* Extract l6 */
"movq %%r10, 48(%%rsi)\n"
/* Extract l7 */
"movq %%r8, 56(%%rsi)\n"
: "+d"(pb)
: "S"(l), "D"(a->d)
: "rax", "rbx", "rcx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "cc", "memory");
#else
/* 160-bit accumulator. */
uint64_t c0 = 0, c1 = 0;
uint32_t c2 = 0;
/* l[0..7] = a[0..3] * b[0..3]. */
muladd_fast(a->d[0], b->d[0]);
extract_fast(l[0]);
muladd(a->d[0], b->d[1]);
muladd(a->d[1], b->d[0]);
extract(l[1]);
muladd(a->d[0], b->d[2]);
muladd(a->d[1], b->d[1]);
muladd(a->d[2], b->d[0]);
extract(l[2]);
muladd(a->d[0], b->d[3]);
muladd(a->d[1], b->d[2]);
muladd(a->d[2], b->d[1]);
muladd(a->d[3], b->d[0]);
extract(l[3]);
muladd(a->d[1], b->d[3]);
muladd(a->d[2], b->d[2]);
muladd(a->d[3], b->d[1]);
extract(l[4]);
muladd(a->d[2], b->d[3]);
muladd(a->d[3], b->d[2]);
extract(l[5]);
muladd_fast(a->d[3], b->d[3]);
extract_fast(l[6]);
VERIFY_CHECK(c1 == 0);
l[7] = c0;
#endif
}
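/** Compute l = a * a as a 512-bit square. Off-diagonal partial products are
 *  added with muladd2, since each cross term a[i]*a[j] with i != j occurs
 *  twice in the square. */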
static void secp256k1_scalar_sqr_512(uint64_t l[8], const secp256k1_scalar *a) {
#ifdef USE_ASM_X86_64
__asm__ __volatile__(
/* Preload */
"movq 0(%%rdi), %%r11\n"
"movq 8(%%rdi), %%r12\n"
"movq 16(%%rdi), %%r13\n"
"movq 24(%%rdi), %%r14\n"
/* (rax,rdx) = a0 * a0 */
"movq %%r11, %%rax\n"
"mulq %%r11\n"
/* Extract l0 */
"movq %%rax, 0(%%rsi)\n"
/* (r8,r9,r10) = (rdx,0) */
"movq %%rdx, %%r8\n"
"xorq %%r9, %%r9\n"
"xorq %%r10, %%r10\n"
/* (r8,r9,r10) += 2 * a0 * a1 */
"movq %%r11, %%rax\n"
"mulq %%r12\n"
"addq %%rax, %%r8\n"
"adcq %%rdx, %%r9\n"
"adcq $0, %%r10\n"
"addq %%rax, %%r8\n"
"adcq %%rdx, %%r9\n"
"adcq $0, %%r10\n"
/* Extract l1 */
"movq %%r8, 8(%%rsi)\n"
"xorq %%r8, %%r8\n"
/* (r9,r10,r8) += 2 * a0 * a2 */
"movq %%r11, %%rax\n"
"mulq %%r13\n"
"addq %%rax, %%r9\n"
"adcq %%rdx, %%r10\n"
"adcq $0, %%r8\n"
"addq %%rax, %%r9\n"
"adcq %%rdx, %%r10\n"
"adcq $0, %%r8\n"
/* (r9,r10,r8) += a1 * a1 */
"movq %%r12, %%rax\n"
"mulq %%r12\n"
"addq %%rax, %%r9\n"
"adcq %%rdx, %%r10\n"
"adcq $0, %%r8\n"
/* Extract l2 */
"movq %%r9, 16(%%rsi)\n"
"xorq %%r9, %%r9\n"
/* (r10,r8,r9) += 2 * a0 * a3 */
"movq %%r11, %%rax\n"
"mulq %%r14\n"
"addq %%rax, %%r10\n"
"adcq %%rdx, %%r8\n"
"adcq $0, %%r9\n"
"addq %%rax, %%r10\n"
"adcq %%rdx, %%r8\n"
"adcq $0, %%r9\n"
/* (r10,r8,r9) += 2 * a1 * a2 */
"movq %%r12, %%rax\n"
"mulq %%r13\n"
"addq %%rax, %%r10\n"
"adcq %%rdx, %%r8\n"
"adcq $0, %%r9\n"
"addq %%rax, %%r10\n"
"adcq %%rdx, %%r8\n"
"adcq $0, %%r9\n"
/* Extract l3 */
"movq %%r10, 24(%%rsi)\n"
"xorq %%r10, %%r10\n"
/* (r8,r9,r10) += 2 * a1 * a3 */
"movq %%r12, %%rax\n"
"mulq %%r14\n"
"addq %%rax, %%r8\n"
"adcq %%rdx, %%r9\n"
"adcq $0, %%r10\n"
"addq %%rax, %%r8\n"
"adcq %%rdx, %%r9\n"
"adcq $0, %%r10\n"
/* (r8,r9,r10) += a2 * a2 */
"movq %%r13, %%rax\n"
"mulq %%r13\n"
"addq %%rax, %%r8\n"
"adcq %%rdx, %%r9\n"
"adcq $0, %%r10\n"
/* Extract l4 */
"movq %%r8, 32(%%rsi)\n"
"xorq %%r8, %%r8\n"
/* (r9,r10,r8) += 2 * a2 * a3 */
"movq %%r13, %%rax\n"
"mulq %%r14\n"
"addq %%rax, %%r9\n"
"adcq %%rdx, %%r10\n"
"adcq $0, %%r8\n"
"addq %%rax, %%r9\n"
"adcq %%rdx, %%r10\n"
"adcq $0, %%r8\n"
/* Extract l5 */
"movq %%r9, 40(%%rsi)\n"
/* (r10,r8) += a3 * a3 */
"movq %%r14, %%rax\n"
"mulq %%r14\n"
"addq %%rax, %%r10\n"
"adcq %%rdx, %%r8\n"
/* Extract l6 */
"movq %%r10, 48(%%rsi)\n"
/* Extract l7 */
"movq %%r8, 56(%%rsi)\n"
:
: "S"(l), "D"(a->d)
: "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc", "memory");
#else
/* 160-bit accumulator. */
uint64_t c0 = 0, c1 = 0;
uint32_t c2 = 0;
/* l[0..7] = a[0..3] * a[0..3]. */
muladd_fast(a->d[0], a->d[0]);
extract_fast(l[0]);
muladd2(a->d[0], a->d[1]);
extract(l[1]);
muladd2(a->d[0], a->d[2]);
muladd(a->d[1], a->d[1]);
extract(l[2]);
muladd2(a->d[0], a->d[3]);
muladd2(a->d[1], a->d[2]);
extract(l[3]);
muladd2(a->d[1], a->d[3]);
muladd(a->d[2], a->d[2]);
extract(l[4]);
muladd2(a->d[2], a->d[3]);
extract(l[5]);
muladd_fast(a->d[3], a->d[3]);
extract_fast(l[6]);
VERIFY_CHECK(c1 == 0);
l[7] = c0;
#endif
}
#undef sumadd
#undef sumadd_fast
#undef muladd
#undef muladd_fast
#undef muladd2
#undef extract
#undef extract_fast
static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
uint64_t l[8];
secp256k1_scalar_mul_512(l, a, b);
secp256k1_scalar_reduce_512(r, l);
}
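/** Shift the scalar right by n bits (1 <= n < 16) and return the n bits that
 *  were shifted out, i.e. the old value of r modulo 2^n. */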
static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
int ret;
VERIFY_CHECK(n > 0);
VERIFY_CHECK(n < 16);
ret = r->d[0] & ((1 << n) - 1);
r->d[0] = (r->d[0] >> n) + (r->d[1] << (64 - n));
r->d[1] = (r->d[1] >> n) + (r->d[2] << (64 - n));
r->d[2] = (r->d[2] >> n) + (r->d[3] << (64 - n));
r->d[3] = (r->d[3] >> n);
return ret;
}
static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) {
uint64_t l[8];
secp256k1_scalar_sqr_512(l, a);
secp256k1_scalar_reduce_512(r, l);
}
#ifdef USE_ENDOMORPHISM
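/** Split a into two 128-bit halves, a = r1 + 2^128 * r2, for use by the
 *  endomorphism-based multiplication (hence the USE_ENDOMORPHISM guard). */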
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
r1->d[0] = a->d[0];
r1->d[1] = a->d[1];
r1->d[2] = 0;
r1->d[3] = 0;
r2->d[0] = a->d[2];
r2->d[1] = a->d[3];
r2->d[2] = 0;
r2->d[3] = 0;
}
#endif
SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
}
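/** Compute r = round(a * b / 2^shift) for shift >= 256: the full 512-bit
 *  product is formed, the low 'shift' bits are dropped, and the final
 *  secp256k1_scalar_cadd_bit adds the highest discarded bit, so the result is
 *  rounded half up rather than truncated. */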
SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) {
uint64_t l[8];
unsigned int shiftlimbs;
unsigned int shiftlow;
unsigned int shifthigh;
VERIFY_CHECK(shift >= 256);
secp256k1_scalar_mul_512(l, a, b);
shiftlimbs = shift >> 6;
shiftlow = shift & 0x3F;
shifthigh = 64 - shiftlow;
r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
}
#endif