#ifndef _SECP256K1_FIELD_REPR_IMPL_H_
#define _SECP256K1_FIELD_REPR_IMPL_H_

#include "util.h"
#include "num.h"
#include "field.h"

#ifdef VERIFY
static void secp256k1_fe_verify(const secp256k1_fe *a) {
    const uint32_t *d = a->n;
    int m = a->normalized ? 1 : 2 * a->magnitude, r = 1;
    r &= (d[0] <= 0x3FFFFFFUL * m);
    r &= (d[1] <= 0x3FFFFFFUL * m);
    r &= (d[2] <= 0x3FFFFFFUL * m);
    r &= (d[3] <= 0x3FFFFFFUL * m);
    r &= (d[4] <= 0x3FFFFFFUL * m);
    r &= (d[5] <= 0x3FFFFFFUL * m);
    r &= (d[6] <= 0x3FFFFFFUL * m);
    r &= (d[7] <= 0x3FFFFFFUL * m);
    r &= (d[8] <= 0x3FFFFFFUL * m);
    r &= (d[9] <= 0x03FFFFFUL * m);
    r &= (a->magnitude >= 0);
    r &= (a->magnitude <= 32);
    if (a->normalized) {
        r &= (a->magnitude <= 1);
        if (r && (d[9] == 0x03FFFFFUL)) {
            uint32_t mid = d[8] & d[7] & d[6] & d[5] & d[4] & d[3] & d[2];
            if (mid == 0x3FFFFFFUL) {
                r &= ((d[1] + 0x40UL + ((d[0] + 0x3D1UL) >> 26)) <= 0x3FFFFFFUL);
            }
        }
    }
    VERIFY_CHECK(r == 1);
}
#endif
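/* For reference (explanatory comment, not in the original): the field prime is
 *   p = 2^256 - 0x1000003D1,
 * whose 10x26-bit limbs are
 *   0x3FFFC2F, 0x3FFFFBF, 0x3FFFFFF (seven times), 0x03FFFFF.
 * The final check above clears r for a "normalized" value that is >= p:
 * (d[0] + 0x3D1) carries out of 26 bits exactly when d[0] >= 0x3FFFC2F, and
 * (d[1] + 0x40) plus that carry exceeds 26 bits exactly when the bottom two
 * limbs reach p's bottom two limbs. */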
static void secp256k1_fe_normalize(secp256k1_fe *r) {
    uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
             t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];

    /* Reduce t9 at the start so there will be at most a single carry from the first pass */
    uint32_t m;
    uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;

    /* The first pass ensures the magnitude is 1; m tracks whether limbs 2..8 are all saturated */
    t0 += x * 0x3D1UL; t1 += (x << 6);
    t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; m = t2;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; m &= t3;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; m &= t4;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; m &= t5;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; m &= t6;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; m &= t7;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; m &= t8;

    /* ...except for a possible carry at bit 22 of t9 (i.e. bit 256 of the field element) */
    VERIFY_CHECK(t9 >> 23 == 0);

    /* At most a single final reduction is needed; check if the value is >= the field characteristic */
    x = (t9 >> 22) | ((t9 == 0x03FFFFFUL) & (m == 0x3FFFFFFUL)
        & ((t1 + 0x40UL + ((t0 + 0x3D1UL) >> 26)) > 0x3FFFFFFUL));

    /* Apply the final reduction unconditionally, for constant-time behaviour */
    t0 += x * 0x3D1UL; t1 += (x << 6);
    t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL;

    /* If t9 didn't carry to bit 22 already, then it should have after any final reduction */
    VERIFY_CHECK(t9 >> 22 == x);

    /* Mask off the possible multiple of 2^256 from the final reduction */
    t9 &= 0x03FFFFFUL;

    r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
    r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9;

#ifdef VERIFY
    r->magnitude = 1;
    r->normalized = 1;
    secp256k1_fe_verify(r);
#endif
}
static void secp256k1_fe_normalize_weak(secp256k1_fe *r) {
    uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
             t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];

    /* Reduce t9 at the start so there will be at most a single carry from the first pass */
    uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;

    /* The first pass ensures the magnitude is 1 */
    t0 += x * 0x3D1UL; t1 += (x << 6);
    t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL;

    /* ...except for a possible carry at bit 22 of t9 (i.e. bit 256 of the field element) */
    VERIFY_CHECK(t9 >> 23 == 0);

    r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
    r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9;

#ifdef VERIFY
    r->magnitude = 1;
    secp256k1_fe_verify(r);
#endif
}
static void secp256k1_fe_normalize_var(secp256k1_fe *r) {
    uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
             t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];

    /* Reduce t9 at the start so there will be at most a single carry from the first pass */
    uint32_t m;
    uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;

    /* The first pass ensures the magnitude is 1; m tracks whether limbs 2..8 are all saturated */
    t0 += x * 0x3D1UL; t1 += (x << 6);
    t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; m = t2;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; m &= t3;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; m &= t4;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; m &= t5;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; m &= t6;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; m &= t7;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; m &= t8;

    /* ...except for a possible carry at bit 22 of t9 (i.e. bit 256 of the field element) */
    VERIFY_CHECK(t9 >> 23 == 0);

    /* At most a single final reduction is needed; check if the value is >= the field characteristic */
    x = (t9 >> 22) | ((t9 == 0x03FFFFFUL) & (m == 0x3FFFFFFUL)
        & ((t1 + 0x40UL + ((t0 + 0x3D1UL) >> 26)) > 0x3FFFFFFUL));

    if (x) {
        t0 += 0x3D1UL; t1 += (x << 6);
        t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
        t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
        t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL;
        t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL;
        t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL;
        t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL;
        t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL;
        t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL;
        t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL;

        /* If t9 didn't carry to bit 22 already, then it should have after any final reduction */
        VERIFY_CHECK(t9 >> 22 == x);

        /* Mask off the possible multiple of 2^256 from the final reduction */
        t9 &= 0x03FFFFFUL;
    }

    r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
    r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9;

#ifdef VERIFY
    r->magnitude = 1;
    r->normalized = 1;
    secp256k1_fe_verify(r);
#endif
}
static int secp256k1_fe_normalizes_to_zero(secp256k1_fe *r) {
    uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
             t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];

    /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */
    uint32_t z0, z1;

    /* Reduce t9 at the start so there will be at most a single carry from the first pass */
    uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;

    /* The first pass ensures the magnitude is 1 */
    t0 += x * 0x3D1UL; t1 += (x << 6);
    t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL; z0  = t0; z1  = t0 ^ 0x3D0UL;
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL; z0 |= t1; z1 &= t1 ^ 0x40UL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; z0 |= t2; z1 &= t2;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; z0 |= t3; z1 &= t3;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; z0 |= t4; z1 &= t4;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; z0 |= t5; z1 &= t5;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; z0 |= t6; z1 &= t6;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; z0 |= t7; z1 &= t7;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; z0 |= t8; z1 &= t8;
    z0 |= t9; z1 &= t9 ^ 0x3C00000UL;

    /* ...except for a possible carry at bit 22 of t9 (i.e. bit 256 of the field element) */
    VERIFY_CHECK(t9 >> 23 == 0);

    return (z0 == 0) | (z1 == 0x3FFFFFFUL);
}
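/* How the z0/z1 trick works (explanatory comment, not in the original): after
 * the first pass the value has magnitude 1, so the only raw representations
 * of zero mod p are 0 itself and p. z0 ORs together all limbs and is 0 iff
 * the value is 0; z1 ANDs each limb XORed against the corresponding limb of
 * p (hence the masks 0x3D0, 0x40 and 0x3C00000, which map p's limbs
 * 0x3FFFC2F, 0x3FFFFBF and 0x03FFFFF to all-ones) and equals 0x3FFFFFF iff
 * the value is exactly p. */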
static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r) {
    uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9;
    uint32_t z0, z1;
    uint32_t x;

    t0 = r->n[0];
    t9 = r->n[9];

    /* Reduce t9 at the start so there will be at most a single carry from the first pass */
    x = t9 >> 22;

    /* The first pass ensures the magnitude is 1 */
    t0 += x * 0x3D1UL;

    /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */
    z0 = t0 & 0x3FFFFFFUL;
    z1 = z0 ^ 0x3D0UL;

    /* Fast return path should catch the majority of cases */
    if ((z0 != 0UL) & (z1 != 0x3FFFFFFUL)) {
        return 0;
    }

    t1 = r->n[1];
    t2 = r->n[2];
    t3 = r->n[3];
    t4 = r->n[4];
    t5 = r->n[5];
    t6 = r->n[6];
    t7 = r->n[7];
    t8 = r->n[8];

    t9 &= 0x03FFFFFUL;
    t1 += (x << 6);

    t1 += (t0 >> 26);
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL; z0 |= t1; z1 &= t1 ^ 0x40UL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; z0 |= t2; z1 &= t2;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; z0 |= t3; z1 &= t3;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; z0 |= t4; z1 &= t4;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; z0 |= t5; z1 &= t5;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; z0 |= t6; z1 &= t6;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; z0 |= t7; z1 &= t7;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; z0 |= t8; z1 &= t8;
    z0 |= t9; z1 &= t9 ^ 0x3C00000UL;

    /* ...except for a possible carry at bit 22 of t9 (i.e. bit 256 of the field element) */
    VERIFY_CHECK(t9 >> 23 == 0);

    return (z0 == 0) | (z1 == 0x3FFFFFFUL);
}
static SECP256K1_INLINE void secp256k1_fe_set_int(secp256k1_fe *r, int a) {
    r->n[0] = a;
    r->n[1] = r->n[2] = r->n[3] = r->n[4] = r->n[5] = r->n[6] = r->n[7] = r->n[8] = r->n[9] = 0;
#ifdef VERIFY
    r->magnitude = 1;
    r->normalized = 1;
    secp256k1_fe_verify(r);
#endif
}
static SECP256K1_INLINE int secp256k1_fe_is_zero(const secp256k1_fe *a) {
    const uint32_t *t = a->n;
#ifdef VERIFY
    VERIFY_CHECK(a->normalized);
    secp256k1_fe_verify(a);
#endif
    return (t[0] | t[1] | t[2] | t[3] | t[4] | t[5] | t[6] | t[7] | t[8] | t[9]) == 0;
}

static SECP256K1_INLINE int secp256k1_fe_is_odd(const secp256k1_fe *a) {
#ifdef VERIFY
    VERIFY_CHECK(a->normalized);
    secp256k1_fe_verify(a);
#endif
    return a->n[0] & 1;
}
static SECP256K1_INLINE void secp256k1_fe_clear(secp256k1_fe *a) {
    int i;
#ifdef VERIFY
    a->magnitude = 0;
    a->normalized = 1;
#endif
    for (i=0; i<10; i++) {
        a->n[i] = 0;
    }
}
static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) {
    int i;
#ifdef VERIFY
    VERIFY_CHECK(a->normalized);
    VERIFY_CHECK(b->normalized);
    secp256k1_fe_verify(a);
    secp256k1_fe_verify(b);
#endif
    for (i = 9; i >= 0; i--) {
        if (a->n[i] > b->n[i]) {
            return 1;
        }
        if (a->n[i] < b->n[i]) {
            return -1;
        }
    }
    return 0;
}
static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a) {
    int i;
    r->n[0] = r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0;
    r->n[5] = r->n[6] = r->n[7] = r->n[8] = r->n[9] = 0;
    for (i=0; i<32; i++) {
        int j;
        for (j=0; j<4; j++) {
            /* Place each pair of input bits at bit position 8*i + 2*j of the
             * 256-bit value, split across the 26-bit limbs. */
            int limb = (8*i+2*j)/26;
            int shift = (8*i+2*j)%26;
            r->n[limb] |= (uint32_t)((a[31-i] >> (2*j)) & 0x3) << shift;
        }
    }
    /* Reject inputs that encode a value >= p. */
    if (r->n[9] == 0x3FFFFFUL && (r->n[8] & r->n[7] & r->n[6] & r->n[5] & r->n[4] & r->n[3] & r->n[2]) == 0x3FFFFFFUL && (r->n[1] + 0x40UL + ((r->n[0] + 0x3D1UL) >> 26)) > 0x3FFFFFFUL) {
        return 0;
    }
#ifdef VERIFY
    r->magnitude = 1;
    r->normalized = 1;
    secp256k1_fe_verify(r);
#endif
    return 1;
}
static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a) {
    int i;
#ifdef VERIFY
    VERIFY_CHECK(a->normalized);
    secp256k1_fe_verify(a);
#endif
    for (i=0; i<32; i++) {
        int j;
        int c = 0;
        for (j=0; j<4; j++) {
            int limb = (8*i+2*j)/26;
            int shift = (8*i+2*j)%26;
            c |= ((a->n[limb] >> shift) & 0x3) << (2 * j);
        }
        r[31-i] = c;
    }
}
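/* Illustrative round-trip sketch (not part of the original file; the
 * SECP256K1_FE_EXAMPLES guard is hypothetical): parse a 32-byte big-endian
 * value, then serialize it again. secp256k1_fe_set_b32 returns 0 for inputs
 * encoding a value >= p, so the result must be checked. */
#ifdef SECP256K1_FE_EXAMPLES
static int secp256k1_fe_example_b32_roundtrip(unsigned char *out32, const unsigned char *in32) {
    secp256k1_fe tmp;
    if (!secp256k1_fe_set_b32(&tmp, in32)) {
        return 0;                       /* input encoded a value >= p */
    }
    secp256k1_fe_get_b32(out32, &tmp);  /* set_b32 produces a normalized value, as get_b32 requires */
    return 1;
}
#endif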
static SECP256K1_INLINE void secp256k1_fe_negate(secp256k1_fe *r, const secp256k1_fe *a, int m) {
#ifdef VERIFY
    VERIFY_CHECK(a->magnitude <= m);
    secp256k1_fe_verify(a);
#endif
    r->n[0] = 0x3FFFC2FUL * 2 * (m + 1) - a->n[0];
    r->n[1] = 0x3FFFFBFUL * 2 * (m + 1) - a->n[1];
    r->n[2] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[2];
    r->n[3] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[3];
    r->n[4] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[4];
    r->n[5] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[5];
    r->n[6] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[6];
    r->n[7] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[7];
    r->n[8] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[8];
    r->n[9] = 0x03FFFFFUL * 2 * (m + 1) - a->n[9];
#ifdef VERIFY
    r->magnitude = m + 1;
    r->normalized = 0;
    secp256k1_fe_verify(r);
#endif
}
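/* Why this negation is correct (explanatory comment, not in the original):
 * each constant is the corresponding limb of p, so the limb-wise expression
 * computes 2*(m+1)*p - a. Since a has magnitude at most m, each of its limbs
 * is at most 2*m times the limb bound, so every subtraction stays
 * nonnegative; the result is congruent to -a mod p and its limbs are bounded
 * by 2*(m+1) times the limb bound, i.e. magnitude m+1. */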
static SECP256K1_INLINE void secp256k1_fe_mul_int(secp256k1_fe *r, int a) {
    r->n[0] *= a;
    r->n[1] *= a;
    r->n[2] *= a;
    r->n[3] *= a;
    r->n[4] *= a;
    r->n[5] *= a;
    r->n[6] *= a;
    r->n[7] *= a;
    r->n[8] *= a;
    r->n[9] *= a;
#ifdef VERIFY
    r->magnitude *= a;
    r->normalized = 0;
    secp256k1_fe_verify(r);
#endif
}

static SECP256K1_INLINE void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_fe *a) {
#ifdef VERIFY
    secp256k1_fe_verify(a);
#endif
    r->n[0] += a->n[0];
    r->n[1] += a->n[1];
    r->n[2] += a->n[2];
    r->n[3] += a->n[3];
    r->n[4] += a->n[4];
    r->n[5] += a->n[5];
    r->n[6] += a->n[6];
    r->n[7] += a->n[7];
    r->n[8] += a->n[8];
    r->n[9] += a->n[9];
#ifdef VERIFY
    r->magnitude += a->magnitude;
    r->normalized = 0;
    secp256k1_fe_verify(r);
#endif
}
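/* Illustrative usage sketch (not part of the original file; the
 * SECP256K1_FE_EXAMPLES guard is hypothetical): additions accumulate
 * magnitude without reducing, so a caller normalizes before the value is
 * compared or serialized. */
#ifdef SECP256K1_FE_EXAMPLES
static void secp256k1_fe_example_add_normalize(secp256k1_fe *sum, const secp256k1_fe *x, const secp256k1_fe *y) {
    *sum = *x;                   /* inherits the magnitude of *x */
    secp256k1_fe_add(sum, y);    /* magnitudes add; result is not normalized */
    secp256k1_fe_normalize(sum); /* back to magnitude 1 and a canonical value < p */
}
#endif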
#ifdef VERIFY
#define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0)
#else
#define VERIFY_BITS(x, n) do { } while(0)
#endif

static SECP256K1_INLINE void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t *SECP256K1_RESTRICT b) {
    uint64_t c, d;
    uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8;
    uint32_t t9, t1, t0, t2, t3, t4, t5, t6, t7;
    const uint32_t M = 0x3FFFFFFUL, R0 = 0x3D10UL, R1 = 0x400UL;

    /* px denotes the coefficient of 2^(26*x) in the full double-width
     * product, i.e. sum(a[i]*b[x-i]). c accumulates the low-half limbs and d
     * the high-half limbs; each high limb u(x) is folded back into limbs x
     * and x+1 via R0 and R1 (see the note after this function). */

    d  = (uint64_t)a[0] * b[9]
       + (uint64_t)a[1] * b[8]
       + (uint64_t)a[2] * b[7]
       + (uint64_t)a[3] * b[6]
       + (uint64_t)a[4] * b[5]
       + (uint64_t)a[5] * b[4]
       + (uint64_t)a[6] * b[3]
       + (uint64_t)a[7] * b[2]
       + (uint64_t)a[8] * b[1]
       + (uint64_t)a[9] * b[0];
    t9 = d & M; d >>= 26;

    c  = (uint64_t)a[0] * b[0];
    d += (uint64_t)a[1] * b[9]
       + (uint64_t)a[2] * b[8]
       + (uint64_t)a[3] * b[7]
       + (uint64_t)a[4] * b[6]
       + (uint64_t)a[5] * b[5]
       + (uint64_t)a[6] * b[4]
       + (uint64_t)a[7] * b[3]
       + (uint64_t)a[8] * b[2]
       + (uint64_t)a[9] * b[1];
    u0 = d & M; d >>= 26; c += u0 * R0;
    t0 = c & M; c >>= 26; c += u0 * R1;

    c += (uint64_t)a[0] * b[1]
       + (uint64_t)a[1] * b[0];
    d += (uint64_t)a[2] * b[9]
       + (uint64_t)a[3] * b[8]
       + (uint64_t)a[4] * b[7]
       + (uint64_t)a[5] * b[6]
       + (uint64_t)a[6] * b[5]
       + (uint64_t)a[7] * b[4]
       + (uint64_t)a[8] * b[3]
       + (uint64_t)a[9] * b[2];
    u1 = d & M; d >>= 26; c += u1 * R0;
    t1 = c & M; c >>= 26; c += u1 * R1;

    c += (uint64_t)a[0] * b[2]
       + (uint64_t)a[1] * b[1]
       + (uint64_t)a[2] * b[0];
    d += (uint64_t)a[3] * b[9]
       + (uint64_t)a[4] * b[8]
       + (uint64_t)a[5] * b[7]
       + (uint64_t)a[6] * b[6]
       + (uint64_t)a[7] * b[5]
       + (uint64_t)a[8] * b[4]
       + (uint64_t)a[9] * b[3];
    u2 = d & M; d >>= 26; c += u2 * R0;
    t2 = c & M; c >>= 26; c += u2 * R1;

    c += (uint64_t)a[0] * b[3]
       + (uint64_t)a[1] * b[2]
       + (uint64_t)a[2] * b[1]
       + (uint64_t)a[3] * b[0];
    d += (uint64_t)a[4] * b[9]
       + (uint64_t)a[5] * b[8]
       + (uint64_t)a[6] * b[7]
       + (uint64_t)a[7] * b[6]
       + (uint64_t)a[8] * b[5]
       + (uint64_t)a[9] * b[4];
    u3 = d & M; d >>= 26; c += u3 * R0;
    t3 = c & M; c >>= 26; c += u3 * R1;

    c += (uint64_t)a[0] * b[4]
       + (uint64_t)a[1] * b[3]
       + (uint64_t)a[2] * b[2]
       + (uint64_t)a[3] * b[1]
       + (uint64_t)a[4] * b[0];
    d += (uint64_t)a[5] * b[9]
       + (uint64_t)a[6] * b[8]
       + (uint64_t)a[7] * b[7]
       + (uint64_t)a[8] * b[6]
       + (uint64_t)a[9] * b[5];
    u4 = d & M; d >>= 26; c += u4 * R0;
    t4 = c & M; c >>= 26; c += u4 * R1;

    c += (uint64_t)a[0] * b[5]
       + (uint64_t)a[1] * b[4]
       + (uint64_t)a[2] * b[3]
       + (uint64_t)a[3] * b[2]
       + (uint64_t)a[4] * b[1]
       + (uint64_t)a[5] * b[0];
    d += (uint64_t)a[6] * b[9]
       + (uint64_t)a[7] * b[8]
       + (uint64_t)a[8] * b[7]
       + (uint64_t)a[9] * b[6];
    u5 = d & M; d >>= 26; c += u5 * R0;
    t5 = c & M; c >>= 26; c += u5 * R1;

    c += (uint64_t)a[0] * b[6]
       + (uint64_t)a[1] * b[5]
       + (uint64_t)a[2] * b[4]
       + (uint64_t)a[3] * b[3]
       + (uint64_t)a[4] * b[2]
       + (uint64_t)a[5] * b[1]
       + (uint64_t)a[6] * b[0];
    d += (uint64_t)a[7] * b[9]
       + (uint64_t)a[8] * b[8]
       + (uint64_t)a[9] * b[7];
    u6 = d & M; d >>= 26; c += u6 * R0;
    t6 = c & M; c >>= 26; c += u6 * R1;

    c += (uint64_t)a[0] * b[7]
       + (uint64_t)a[1] * b[6]
       + (uint64_t)a[2] * b[5]
       + (uint64_t)a[3] * b[4]
       + (uint64_t)a[4] * b[3]
       + (uint64_t)a[5] * b[2]
       + (uint64_t)a[6] * b[1]
       + (uint64_t)a[7] * b[0];
    d += (uint64_t)a[8] * b[9]
       + (uint64_t)a[9] * b[8];
    u7 = d & M; d >>= 26; c += u7 * R0;
    t7 = c & M; c >>= 26; c += u7 * R1;

    c += (uint64_t)a[0] * b[8]
       + (uint64_t)a[1] * b[7]
       + (uint64_t)a[2] * b[6]
       + (uint64_t)a[3] * b[5]
       + (uint64_t)a[4] * b[4]
       + (uint64_t)a[5] * b[3]
       + (uint64_t)a[6] * b[2]
       + (uint64_t)a[7] * b[1]
       + (uint64_t)a[8] * b[0];
    d += (uint64_t)a[9] * b[9];
    u8 = d & M; d >>= 26; c += u8 * R0;

    /* Limbs 3..7 are already fully reduced. */
    r[3] = t3;
    r[4] = t4;
    r[5] = t5;
    r[6] = t6;
    r[7] = t7;

    r[8] = c & M; c >>= 26; c += u8 * R1;
    c   += d * R0 + t9;
    r[9] = c & (M >> 4); c >>= 22; c += d * (R1 << 4);

    /* Fold the bits above position 256 (t9 holds only 22 bits) back into the
     * bottom limbs; R0>>4 and R1>>4 absorb the extra factor of 2^4. */
    d    = c * (R0 >> 4) + t0;
    r[0] = d & M; d >>= 26;
    d   += c * (R1 >> 4) + t1;
    r[1] = d & M; d >>= 26;
    d   += t2;
    r[2] = d;
}
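/* Where R0 and R1 come from (explanatory comment, not in the original):
 * p = 2^256 - 0x1000003D1, so 2^256 == 0x1000003D1 (mod p). The partial
 * products are accumulated at 26-bit limb positions, and the limbs of
 * weight 2^260 and above are folded down using
 *   2^260 == 2^4 * 0x1000003D1 = 0x1000003D10 (mod p),
 * which splits across two adjacent 26-bit limbs as R0 + (R1 << 26), since
 *   0x3D10 + (0x400 << 26) = 0x1000003D10. */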
static SECP256K1_INLINE void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t *a) {
    uint64_t c, d;
    uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8;
    uint32_t t9, t0, t1, t2, t3, t4, t5, t6, t7;
    const uint32_t M = 0x3FFFFFFUL, R0 = 0x3D10UL, R1 = 0x400UL;

    /* Same reduction scheme as secp256k1_fe_mul_inner; each symmetric cross
     * product is computed once with one factor doubled. */

    d  = (uint64_t)(a[0]*2) * a[9]
       + (uint64_t)(a[1]*2) * a[8]
       + (uint64_t)(a[2]*2) * a[7]
       + (uint64_t)(a[3]*2) * a[6]
       + (uint64_t)(a[4]*2) * a[5];
    t9 = d & M; d >>= 26;

    c  = (uint64_t)a[0] * a[0];
    d += (uint64_t)(a[1]*2) * a[9]
       + (uint64_t)(a[2]*2) * a[8]
       + (uint64_t)(a[3]*2) * a[7]
       + (uint64_t)(a[4]*2) * a[6]
       + (uint64_t)a[5] * a[5];
    u0 = d & M; d >>= 26; c += u0 * R0;
    t0 = c & M; c >>= 26; c += u0 * R1;

    c += (uint64_t)(a[0]*2) * a[1];
    d += (uint64_t)(a[2]*2) * a[9]
       + (uint64_t)(a[3]*2) * a[8]
       + (uint64_t)(a[4]*2) * a[7]
       + (uint64_t)(a[5]*2) * a[6];
    u1 = d & M; d >>= 26; c += u1 * R0;
    t1 = c & M; c >>= 26; c += u1 * R1;

    c += (uint64_t)(a[0]*2) * a[2]
       + (uint64_t)a[1] * a[1];
    d += (uint64_t)(a[3]*2) * a[9]
       + (uint64_t)(a[4]*2) * a[8]
       + (uint64_t)(a[5]*2) * a[7]
       + (uint64_t)a[6] * a[6];
    u2 = d & M; d >>= 26; c += u2 * R0;
    t2 = c & M; c >>= 26; c += u2 * R1;

    c += (uint64_t)(a[0]*2) * a[3]
       + (uint64_t)(a[1]*2) * a[2];
    d += (uint64_t)(a[4]*2) * a[9]
       + (uint64_t)(a[5]*2) * a[8]
       + (uint64_t)(a[6]*2) * a[7];
    u3 = d & M; d >>= 26; c += u3 * R0;
    t3 = c & M; c >>= 26; c += u3 * R1;

    c += (uint64_t)(a[0]*2) * a[4]
       + (uint64_t)(a[1]*2) * a[3]
       + (uint64_t)a[2] * a[2];
    d += (uint64_t)(a[5]*2) * a[9]
       + (uint64_t)(a[6]*2) * a[8]
       + (uint64_t)a[7] * a[7];
    u4 = d & M; d >>= 26; c += u4 * R0;
    t4 = c & M; c >>= 26; c += u4 * R1;

    c += (uint64_t)(a[0]*2) * a[5]
       + (uint64_t)(a[1]*2) * a[4]
       + (uint64_t)(a[2]*2) * a[3];
    d += (uint64_t)(a[6]*2) * a[9]
       + (uint64_t)(a[7]*2) * a[8];
    u5 = d & M; d >>= 26; c += u5 * R0;
    t5 = c & M; c >>= 26; c += u5 * R1;

    c += (uint64_t)(a[0]*2) * a[6]
       + (uint64_t)(a[1]*2) * a[5]
       + (uint64_t)(a[2]*2) * a[4]
       + (uint64_t)a[3] * a[3];
    d += (uint64_t)(a[7]*2) * a[9]
       + (uint64_t)a[8] * a[8];
    u6 = d & M; d >>= 26; c += u6 * R0;
    t6 = c & M; c >>= 26; c += u6 * R1;

    c += (uint64_t)(a[0]*2) * a[7]
       + (uint64_t)(a[1]*2) * a[6]
       + (uint64_t)(a[2]*2) * a[5]
       + (uint64_t)(a[3]*2) * a[4];
    d += (uint64_t)(a[8]*2) * a[9];
    u7 = d & M; d >>= 26; c += u7 * R0;
    t7 = c & M; c >>= 26; c += u7 * R1;

    c += (uint64_t)(a[0]*2) * a[8]
       + (uint64_t)(a[1]*2) * a[7]
       + (uint64_t)(a[2]*2) * a[6]
       + (uint64_t)(a[3]*2) * a[5]
       + (uint64_t)a[4] * a[4];
    d += (uint64_t)a[9] * a[9];
    u8 = d & M; d >>= 26; c += u8 * R0;

    /* Limbs 3..7 are already fully reduced. */
    r[3] = t3;
    r[4] = t4;
    r[5] = t5;
    r[6] = t6;
    r[7] = t7;

    r[8] = c & M; c >>= 26; c += u8 * R1;
    c   += d * R0 + t9;
    r[9] = c & (M >> 4); c >>= 22; c += d * (R1 << 4);

    /* Fold the bits above position 256 back into the bottom limbs, as in
     * secp256k1_fe_mul_inner. */
    d    = c * (R0 >> 4) + t0;
    r[0] = d & M; d >>= 26;
    d   += c * (R1 >> 4) + t1;
    r[1] = d & M; d >>= 26;
    d   += t2;
    r[2] = d;
}

static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe *SECP256K1_RESTRICT b) {
#ifdef VERIFY
    VERIFY_CHECK(a->magnitude <= 8);
    VERIFY_CHECK(b->magnitude <= 8);
    secp256k1_fe_verify(a);
    secp256k1_fe_verify(b);
    VERIFY_CHECK(r != b);
#endif
    secp256k1_fe_mul_inner(r->n, a->n, b->n);
#ifdef VERIFY
    r->magnitude = 1;
    r->normalized = 0;
    secp256k1_fe_verify(r);
#endif
}

static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a) {
#ifdef VERIFY
    VERIFY_CHECK(a->magnitude <= 8);
    secp256k1_fe_verify(a);
#endif
    secp256k1_fe_sqr_inner(r->n, a->n);
#ifdef VERIFY
    r->magnitude = 1;
    r->normalized = 0;
    secp256k1_fe_verify(r);
#endif
}
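/* Illustrative consistency check (not part of the original file; the
 * SECP256K1_FE_EXAMPLES guard is hypothetical): squaring is multiplication
 * by self with the symmetric cross products a[i]*a[j] + a[j]*a[i] merged as
 * (a[i]*2)*a[j], so the two inner routines must agree. */
#ifdef SECP256K1_FE_EXAMPLES
static int secp256k1_fe_example_sqr_matches_mul(const uint32_t *a) {
    /* a must be a valid limb array, e.g. the n[] of a normalized secp256k1_fe. */
    uint32_t r1[10], r2[10];
    int i;
    secp256k1_fe_sqr_inner(r1, a);
    secp256k1_fe_mul_inner(r2, a, a);
    for (i = 0; i < 10; i++) {
        if (r1[i] != r2[i]) {
            return 0;
        }
    }
    return 1;
}
#endif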
static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) {
    uint32_t mask0, mask1;
    mask0 = flag + ~((uint32_t)0);
    mask1 = ~mask0;
    r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
    r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
    r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
    r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
    r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1);
    r->n[5] = (r->n[5] & mask0) | (a->n[5] & mask1);
    r->n[6] = (r->n[6] & mask0) | (a->n[6] & mask1);
    r->n[7] = (r->n[7] & mask0) | (a->n[7] & mask1);
    r->n[8] = (r->n[8] & mask0) | (a->n[8] & mask1);
    r->n[9] = (r->n[9] & mask0) | (a->n[9] & mask1);
#ifdef VERIFY
    if (a->magnitude > r->magnitude) {
        r->magnitude = a->magnitude;
    }
    r->normalized &= a->normalized;
#endif
}
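/* How the masks work (explanatory comment, not in the original):
 * mask0 = flag + ~0 is flag - 1 in unsigned arithmetic. For flag == 1,
 * mask0 is all-zero and mask1 = ~mask0 is all-one, so every limb of *a is
 * selected; for flag == 0 the masks swap and *r is left unchanged. No branch
 * on flag is taken, so the selection is constant-time. */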
static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag) {
    uint32_t mask0, mask1;
    mask0 = flag + ~((uint32_t)0);
    mask1 = ~mask0;
    r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
    r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
    r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
    r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
    r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1);
    r->n[5] = (r->n[5] & mask0) | (a->n[5] & mask1);
    r->n[6] = (r->n[6] & mask0) | (a->n[6] & mask1);
    r->n[7] = (r->n[7] & mask0) | (a->n[7] & mask1);
}
static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a) {
#ifdef VERIFY
    VERIFY_CHECK(a->normalized);
#endif
    r->n[0] = a->n[0] | a->n[1] << 26;
    r->n[1] = a->n[1] >> 6 | a->n[2] << 20;
    r->n[2] = a->n[2] >> 12 | a->n[3] << 14;
    r->n[3] = a->n[3] >> 18 | a->n[4] << 8;
    r->n[4] = a->n[4] >> 24 | a->n[5] << 2 | a->n[6] << 28;
    r->n[5] = a->n[6] >> 4 | a->n[7] << 22;
    r->n[6] = a->n[7] >> 10 | a->n[8] << 16;
    r->n[7] = a->n[8] >> 16 | a->n[9] << 10;
}
static SECP256K1_INLINE void secp256k1_fe_from_storage(secp256k1_fe *r, const secp256k1_fe_storage *a) {
    r->n[0] = a->n[0] & 0x3FFFFFFUL;
    r->n[1] = a->n[0] >> 26 | ((a->n[1] << 6) & 0x3FFFFFFUL);
    r->n[2] = a->n[1] >> 20 | ((a->n[2] << 12) & 0x3FFFFFFUL);
    r->n[3] = a->n[2] >> 14 | ((a->n[3] << 18) & 0x3FFFFFFUL);
    r->n[4] = a->n[3] >> 8 | ((a->n[4] << 24) & 0x3FFFFFFUL);
    r->n[5] = (a->n[4] >> 2) & 0x3FFFFFFUL;
    r->n[6] = a->n[4] >> 28 | ((a->n[5] << 4) & 0x3FFFFFFUL);
    r->n[7] = a->n[5] >> 22 | ((a->n[6] << 10) & 0x3FFFFFFUL);
    r->n[8] = a->n[6] >> 16 | ((a->n[7] << 16) & 0x3FFFFFFUL);
    r->n[9] = a->n[7] >> 10;
#ifdef VERIFY
    r->magnitude = 1;
    r->normalized = 1;
#endif
}
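/* Illustrative round trip (not part of the original file; the
 * SECP256K1_FE_EXAMPLES guard is hypothetical): the storage form repacks the
 * 10x26-bit limbs (9*26 + 22 = 256 bits) into eight 32-bit words with no
 * redundancy, so converting a normalized element there and back is exact. */
#ifdef SECP256K1_FE_EXAMPLES
static int secp256k1_fe_example_storage_roundtrip(const secp256k1_fe *x) {
    /* x must be normalized, as secp256k1_fe_to_storage requires. */
    secp256k1_fe_storage s;
    secp256k1_fe y;
    int i;
    secp256k1_fe_to_storage(&s, x);
    secp256k1_fe_from_storage(&y, &s);
    for (i = 0; i < 10; i++) {
        if (y.n[i] != x->n[i]) {
            return 0;
        }
    }
    return 1;
}
#endif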
#endif