6799218 RSA using Solaris Kernel Crypto framework lagging behind OpenSSL
5016936 bignumimpl:big_mul: potential memory leak
6810280 panic from bignum module: vmem_xalloc(): size == 0
--- old/usr/src/common/bignum/amd64/bignum_amd64.c
+++ new/usr/src/common/bignum/amd64/bignum_amd64.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 - * Common Development and Distribution License, Version 1.0 only
6 - * (the "License"). You may not use this file except in compliance
7 - * with the License.
5 + * Common Development and Distribution License (the "License").
6 + * You may not use this file except in compliance with the License.
8 7 *
9 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 9 * or http://www.opensolaris.org/os/licensing.
11 10 * See the License for the specific language governing permissions
12 11 * and limitations under the License.
13 12 *
14 13 * When distributing Covered Code, include this CDDL HEADER in each
15 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 15 * If applicable, add the following below this CDDL HEADER, with the
17 16 * fields enclosed by brackets "[]" replaced with your own identifying
18 17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 18 *
20 19 * CDDL HEADER END
21 20 */
22 21 /*
23 - * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
22 + * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 23 * Use is subject to license terms.
25 24 */
26 25
27 -#pragma ident "%Z%%M% %I% %E% SMI"
28 -
29 26 /*
30 27 * This file contains bignum implementation code that
31 28 * is specific to AMD64, but which is still more appropriate
32 29 * to write in C, rather than assembly language.
33 30 * bignum_amd64_asm.s does all the assembly language code
34 31 * for AMD64 specific bignum support. The assembly language
35 32 * source file has pure code, no data. Let the C compiler
36 33 * generate what is needed to handle the variations in
37 34 * data representation and addressing, for example,
38 35 * statically linked vs PIC.
39 36 */
40 37
41 38 #include "bignum.h"
42 39
43 40 /*
44 - * The bignum interface deals only with arrays of 32-bit "digits".
45 - * The 64-bit bignum functions are internal implementation details.
46 - * If a bignum happens to be aligned on a 64-bit boundary
47 - * and its length is even, then the pure 64-bit implementation
48 - * can be used.
41 + * The bignum interface deals with arrays of 64-bit "chunks" or "digits".
42 + * Data should be aligned on 8-byte address boundaries for best performance.
49 43 */
50 44
51 -#define ISALIGNED64(p) (((uintptr_t)(p) & 7) == 0)
52 -#define ISBIGNUM64(p, len) (ISALIGNED64(p) && (((len) & 1) == 0))
53 45
54 -#if defined(__lint)
55 -
56 -extern uint64_t *P64(uint32_t *addr);
57 -
58 -#else /* lint */
59 -
60 -#define P64(addr) ((uint64_t *)addr)
61 -
62 -#endif /* lint */
63 -
64 -extern uint64_t big_mul_set_vec64(uint64_t *, uint64_t *, int, uint64_t);
65 -extern uint64_t big_mul_add_vec64(uint64_t *, uint64_t *, int, uint64_t);
66 -extern void big_mul_vec64(uint64_t *, uint64_t *, int, uint64_t *, int);
67 -extern void big_sqr_vec64(uint64_t *, uint64_t *, int);
68 -
69 -extern uint32_t big_mul_set_vec32(uint32_t *, uint32_t *, int, uint32_t);
70 -extern uint32_t big_mul_add_vec32(uint32_t *, uint32_t *, int, uint32_t);
71 -extern void big_mul_vec32(uint32_t *, uint32_t *, int, uint32_t *, int);
72 -extern void big_sqr_vec32(uint32_t *, uint32_t *, int);
73 -
74 -uint32_t big_mul_set_vec(uint32_t *, uint32_t *, int, uint32_t);
75 -uint32_t big_mul_add_vec(uint32_t *, uint32_t *, int, uint32_t);
76 -void big_mul_vec(uint32_t *, uint32_t *, int, uint32_t *, int);
77 -void big_sqr_vec(uint32_t *, uint32_t *, int);
78 -
79 -
80 46 void
81 -big_mul_vec(uint32_t *r, uint32_t *a, int alen, uint32_t *b, int blen)
47 +big_mul_vec(BIG_CHUNK_TYPE *r, BIG_CHUNK_TYPE *a, int alen,
48 + BIG_CHUNK_TYPE *b, int blen)
82 49 {
83 - if (!ISALIGNED64(r) || !ISBIGNUM64(a, alen) || !ISBIGNUM64(b, blen)) {
84 - big_mul_vec32(r, a, alen, b, blen);
85 - return;
86 - }
50 + int i;
87 51
88 - big_mul_vec64(P64(r), P64(a), alen / 2, P64(b), blen / 2);
89 -}
90 -
91 -void
92 -big_sqr_vec(uint32_t *r, uint32_t *a, int alen)
93 -{
94 - if (!ISALIGNED64(r) || !ISBIGNUM64(a, alen)) {
95 - big_mul_vec32(r, a, alen, a, alen);
96 - return;
97 - }
98 - big_sqr_vec64(P64(r), P64(a), alen / 2);
99 -}
100 -
101 -/*
102 - * It is OK to cast the 64-bit carry to 32 bit.
103 - * There will be no loss, because although we are multiplying the vector, a,
104 - * by a uint64_t, its value cannot exceed that of a uint32_t.
105 - */
106 -
107 -uint32_t
108 -big_mul_set_vec(uint32_t *r, uint32_t *a, int alen, uint32_t digit)
109 -{
110 - if (!ISALIGNED64(r) || !ISBIGNUM64(a, alen))
111 - return (big_mul_set_vec32(r, a, alen, digit));
112 -
113 - return (big_mul_set_vec64(P64(r), P64(a), alen / 2, digit));
114 -}
115 -uint32_t
116 -big_mul_add_vec(uint32_t *r, uint32_t *a, int alen, uint32_t digit)
117 -{
118 - if (!ISALIGNED64(r) || !ISBIGNUM64(a, alen))
119 - return (big_mul_add_vec32(r, a, alen, digit));
120 -
121 - return (big_mul_add_vec64(P64(r), P64(a), alen / 2, digit));
122 -}
123 -
124 -
125 -void
126 -big_mul_vec64(uint64_t *r, uint64_t *a, int alen, uint64_t *b, int blen)
127 -{
128 - int i;
129 -
130 - r[alen] = big_mul_set_vec64(r, a, alen, b[0]);
52 + r[alen] = big_mul_set_vec(r, a, alen, b[0]);
131 53 for (i = 1; i < blen; ++i)
132 - r[alen + i] = big_mul_add_vec64(r+i, a, alen, b[i]);
133 -}
134 -
135 -void
136 -big_mul_vec32(uint32_t *r, uint32_t *a, int alen, uint32_t *b, int blen)
137 -{
138 - int i;
139 -
140 - r[alen] = big_mul_set_vec32(r, a, alen, b[0]);
141 - for (i = 1; i < blen; ++i)
142 - r[alen + i] = big_mul_add_vec32(r+i, a, alen, b[i]);
143 -}
144 -
145 -void
146 -big_sqr_vec32(uint32_t *r, uint32_t *a, int alen)
147 -{
148 - big_mul_vec32(r, a, alen, a, alen);
149 -}
150 -
151 -
152 -uint32_t
153 -big_mul_set_vec32(uint32_t *r, uint32_t *a, int alen, uint32_t digit)
154 -{
155 - uint64_t p, d, cy;
156 -
157 - d = (uint64_t)digit;
158 - cy = 0;
159 - while (alen != 0) {
160 - p = (uint64_t)a[0] * d + cy;
161 - r[0] = (uint32_t)p;
162 - cy = p >> 32;
163 - ++r;
164 - ++a;
165 - --alen;
166 - }
167 - return ((uint32_t)cy);
168 -}
169 -
170 -uint32_t
171 -big_mul_add_vec32(uint32_t *r, uint32_t *a, int alen, uint32_t digit)
172 -{
173 - uint64_t p, d, cy;
174 -
175 - d = (uint64_t)digit;
176 - cy = 0;
177 - while (alen != 0) {
178 - p = r[0] + (uint64_t)a[0] * d + cy;
179 - r[0] = (uint32_t)p;
180 - cy = p >> 32;
181 - ++r;
182 - ++a;
183 - --alen;
184 - }
185 - return ((uint32_t)cy);
54 + r[alen + i] = big_mul_add_vec(r + i, a, alen, b[i]);
186 55 }
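
For readers following the change, the consolidated big_mul_vec() on the new side performs the same schoolbook multiplication the removed 32-bit fallback did: the first partial product is written with a set-vec pass, and each subsequent digit of b is accumulated with an add-vec pass, the returned carry becoming the next result digit. The standalone sketch below illustrates this with portable 32-bit chunks modeled on the deleted big_mul_set_vec32()/big_mul_add_vec32(); the mul_*32() names and the main() driver are illustrative only and are not part of this webrev.

#include <stdint.h>
#include <stdio.h>

/* Portable equivalent of the removed big_mul_set_vec32(): r = a * digit. */
static uint32_t
mul_set_vec32(uint32_t *r, uint32_t *a, int alen, uint32_t digit)
{
	uint64_t p, d = digit, cy = 0;

	while (alen-- != 0) {
		p = (uint64_t)*a++ * d + cy;
		*r++ = (uint32_t)p;
		cy = p >> 32;
	}
	return ((uint32_t)cy);
}

/* Portable equivalent of the removed big_mul_add_vec32(): r += a * digit. */
static uint32_t
mul_add_vec32(uint32_t *r, uint32_t *a, int alen, uint32_t digit)
{
	uint64_t p, d = digit, cy = 0;

	while (alen-- != 0) {
		p = *r + (uint64_t)*a++ * d + cy;
		*r++ = (uint32_t)p;
		cy = p >> 32;
	}
	return ((uint32_t)cy);
}

/* r[alen + blen] = a[alen] * b[blen], one row of b at a time. */
static void
mul_vec32(uint32_t *r, uint32_t *a, int alen, uint32_t *b, int blen)
{
	int i;

	r[alen] = mul_set_vec32(r, a, alen, b[0]);
	for (i = 1; i < blen; ++i)
		r[alen + i] = mul_add_vec32(r + i, a, alen, b[i]);
}

int
main(void)
{
	/* 0xffffffffffffffff * 0x100000001 == 0x100000000fffffffeffffffff */
	uint32_t a[] = { 0xffffffff, 0xffffffff };	/* least significant chunk first */
	uint32_t b[] = { 0x00000001, 0x00000001 };
	uint32_t r[4];
	int i;

	mul_vec32(r, a, 2, b, 2);
	for (i = 3; i >= 0; --i)
		printf("%08x", r[i]);
	printf("\n");
	return (0);
}

Compiled with any C99 compiler, this prints 0000000100000000fffffffeffffffff, the 128-bit product of the two sample operands; the same row-by-row structure is what big_mul_vec() on the new side drives over BIG_CHUNK_TYPE digits.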