6799218 RSA using Solaris Kernel Crypto framework lagging behind OpenSSL
5016936 bignumimpl:big_mul: potential memory leak
6810280 panic from bignum module: vmem_xalloc(): size == 0

@@ -1,12 +1,11 @@
 /*
  * CDDL HEADER START
  *
  * The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License").  You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
  *
  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  * or http://www.opensolaris.org/os/licensing.
  * See the License for the specific language governing permissions
  * and limitations under the License.

@@ -18,35 +17,33 @@
  * information: Portions Copyright [yyyy] [name of copyright owner]
  *
  * CDDL HEADER END
  */
 /*
- * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
  */
 
-#pragma ident   "%Z%%M% %I%     %E% SMI"
-
 #include <sys/asm_linkage.h>
 
 #if defined(lint) || defined(__lint)
 
 #include <sys/types.h>
 
 /* ARGSUSED */
 uint64_t
-big_mul_set_vec64(uint64_t *r, uint64_t *a, int len, uint64_t digit)
+big_mul_set_vec(uint64_t *r, uint64_t *a, int len, uint64_t digit)
 { return (0); }
 
 /* ARGSUSED */
 uint64_t
-big_mul_add_vec64(uint64_t *r, uint64_t *a, int len, uint64_t digit)
+big_mul_add_vec(uint64_t *r, uint64_t *a, int len, uint64_t digit)
 { return (0); }
 
 /* ARGSUSED */
 void
-big_sqr_vec64(uint64_t *r, uint64_t *a, int len)
+big_sqr_vec(uint64_t *r, uint64_t *a, int len)
 {}
 
 #else   /* lint */
 
 / ------------------------------------------------------------------------

@@ -53,26 +50,23 @@
 /
 /  Implementation of big_mul_set_vec which exploits
 /  the 64X64->128 bit  unsigned multiply instruction.
 /
 /  As defined in Sun's bignum library for pkcs11, bignums are
-/  composed of an array of 32-bit "digits" along with descriptive
-/  information.  The arrays of digits are only required to be
-/  aligned on 32-bit boundary.  This implementation works only
-/  when the two factors and the result happen to be 64 bit aligned
-/  and have an even number of digits.
+/  composed of an array of 64-bit "digits" or "chunks" along with
+/  descriptive information.
 /
 / ------------------------------------------------------------------------
 
 / r = a * digit, r and a are vectors of length len
 / returns the carry digit
 / r and a are 64 bit aligned.
 /
 / uint64_t
-/ big_mul_set_vec64(uint64_t *r, uint64_t *a, int len, uint64_t digit)
+/ big_mul_set_vec(uint64_t *r, uint64_t *a, int len, uint64_t digit)
 /
-        ENTRY(big_mul_set_vec64)
+        ENTRY(big_mul_set_vec)
         xorq    %rax, %rax              / if (len == 0) return (0)
         testq   %rdx, %rdx
         jz      .L17
 
         movq    %rdx, %r8               / Use r8 for len; %rdx is used by mul

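For readers following the patch, here is a minimal C model of what big_mul_set_vec computes: r = a * digit over len 64-bit chunks, returning the carry out of the top chunk. The name model_mul_set_vec and the use of GCC/Clang's unsigned __int128 are illustration-only stand-ins for the hand-written mulq loop in the hunks above; this is a sketch, not the shipped code.

#include <stdint.h>

/*
 * Illustrative model only: r = a * digit, where r and a are vectors
 * of len 64-bit chunks.  Returns the carry out of the top chunk.
 * unsigned __int128 (a GCC/Clang extension) stands in for the
 * 64X64->128 bit mulq instruction the assembly exploits.
 */
uint64_t
model_mul_set_vec(uint64_t *r, uint64_t *a, int len, uint64_t digit)
{
	uint64_t cy = 0;
	int i;

	for (i = 0; i < len; i++) {
		unsigned __int128 p = (unsigned __int128)a[i] * digit + cy;
		r[i] = (uint64_t)p;		/* low 64 bits */
		cy = (uint64_t)(p >> 64);	/* high 64 bits carry forward */
	}
	return (cy);
}
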
@@ -217,34 +211,32 @@
 
 
 .L17:
         movq    %r9, %rax
         ret
-        SET_SIZE(big_mul_set_vec64)
+        SET_SIZE(big_mul_set_vec)
 
+
 / ------------------------------------------------------------------------
 /
 /  Implementation of big_mul_add_vec which exploits
 /  the 64X64->128 bit  unsigned multiply instruction.
 /
 /  As defined in Sun's bignum library for pkcs11, bignums are
-/  composed of an array of 32-bit "digits" along with descriptive
-/  information.  The arrays of digits are only required to be
-/  aligned on 32-bit boundary.  This implementation works only
-/  when the two factors and the result happen to be 64 bit aligned
-/  and have an even number of digits.
+/  composed of an array of 64-bit "digits" or "chunks" along with
+/  descriptive information.
 /
 / ------------------------------------------------------------------------
 
 / r += a * digit, r and a are vectors of length len
 / returns the carry digit
 / r and a are 64 bit aligned.
 /
 / uint64_t
-/ big_mul_add_vec64(uint64_t *r, uint64_t *a, int len, uint64_t digit)
+/ big_mul_add_vec(uint64_t *r, uint64_t *a, int len, uint64_t digit)
 /
-        ENTRY(big_mul_add_vec64)
+        ENTRY(big_mul_add_vec)
         xorq    %rax, %rax              / if (len == 0) return (0)
         testq   %rdx, %rdx
         jz      .L27
 
         movq    %rdx, %r8               / Use r8 for len; %rdx is used by mul

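The multiply-accumulate variant differs only in folding the existing contents of r into the running sum. A corresponding sketch, under the same assumptions as above (again an illustrative model, not the shipped code); note the extra addends cannot overflow the 128-bit intermediate, since (2^64 - 1)^2 + 2 * (2^64 - 1) = 2^128 - 1.

/*
 * Illustrative model only: r += a * digit, returning the final carry.
 */
uint64_t
model_mul_add_vec(uint64_t *r, uint64_t *a, int len, uint64_t digit)
{
	uint64_t cy = 0;
	int i;

	for (i = 0; i < len; i++) {
		unsigned __int128 p = (unsigned __int128)a[i] * digit
		    + r[i] + cy;		/* fits: see note above */
		r[i] = (uint64_t)p;
		cy = (uint64_t)(p >> 64);
	}
	return (cy);
}
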
@@ -434,17 +426,17 @@
 
 
 .L27:
         movq    %r9, %rax
         ret
-        SET_SIZE(big_mul_add_vec64)
+        SET_SIZE(big_mul_add_vec)
 
 
 / void
-/ big_sqr_vec64(uint64_t *r, uint64_t *a, int len)
+/ big_sqr_vec(uint64_t *r, uint64_t *a, int len)
 
-        ENTRY(big_sqr_vec64)
+        ENTRY(big_sqr_vec)
         pushq   %rbx
         pushq   %rbp
         pushq   %r12
         pushq   %r13
         pushq   %r14

@@ -459,11 +451,11 @@
         decq    %r15                    / tlen = len - 1
         movq    %r13, %rdi              / arg1 = tr
         leaq    8(%r14), %rsi           / arg2 = ta + 1
         movq    %r15, %rdx              / arg3 = tlen
         movq    0(%r14), %rcx           / arg4 = ta[0]
-        call    big_mul_set_vec64
+        call    big_mul_set_vec
         movq    %rax, 0(%r13, %r15, 8)  / tr[tlen] = cy
 .L31:
         decq    %r15                    / --tlen
         jz      .L32                    / while (--tlen != 0)
 

@@ -471,11 +463,11 @@
         addq    $8, %r14                / ++ta
         movq    %r13, %rdi              / arg1 = tr
         leaq    8(%r14), %rsi           / arg2 = ta + 1
         movq    %r15, %rdx              / arg3 = tlen
         movq    0(%r14), %rcx           / arg4 = ta[0]
-        call    big_mul_add_vec64
+        call    big_mul_add_vec
         movq    %rax, 0(%r13, %r15, 8)  / tr[tlen] = cy
         jmp     .L31
 
 .L32:
 

@@ -550,8 +542,8 @@
         popq    %rbp
         popq    %rbx
 
         ret
 
-        SET_SIZE(big_sqr_vec64)
+        SET_SIZE(big_sqr_vec)
 
 #endif  /* lint */
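
The big_sqr_vec body visible in the hunks above computes only the off-diagonal products a[i] * a[j], i < j; the doubling and diagonal-square passes fall in the elided lines. The following C model shows the standard schoolbook squaring that this loop structure implies, reusing the model_* helpers sketched earlier. It is a hedged reconstruction under stated assumptions (len >= 2, r holds 2 * len chunks), not the shipped routine.

/*
 * Illustrative model only: r = a * a, where a is len chunks and
 * r is 2 * len chunks.  Assumes len >= 2.
 */
void
model_sqr_vec(uint64_t *r, uint64_t *a, int len)
{
	uint64_t *tr = r + 1;
	uint64_t *ta = a;
	int tlen = len - 1;
	uint64_t cy;
	int i;

	/* 1. Off-diagonal products a[i]*a[j], i < j (the loop above). */
	tr[tlen] = model_mul_set_vec(tr, ta + 1, tlen, ta[0]);
	while (--tlen != 0) {
		tr += 2;
		ta += 1;
		tr[tlen] = model_mul_add_vec(tr, ta + 1, tlen, ta[0]);
	}

	/* 2. Double the off-diagonal region: r[1 .. 2*len-2] <<= 1. */
	cy = 0;
	for (i = 1; i <= 2 * len - 2; i++) {
		uint64_t t = r[i];

		r[i] = (t << 1) | cy;
		cy = t >> 63;
	}
	r[2 * len - 1] = cy;
	r[0] = 0;

	/* 3. Add the diagonal squares a[i]^2 at chunk pairs (2i, 2i+1). */
	cy = 0;
	for (i = 0; i < len; i++) {
		unsigned __int128 sq = (unsigned __int128)a[i] * a[i];
		unsigned __int128 lo = (unsigned __int128)r[2 * i]
		    + (uint64_t)sq + cy;
		unsigned __int128 hi;

		r[2 * i] = (uint64_t)lo;
		hi = (unsigned __int128)r[2 * i + 1] + (uint64_t)(sq >> 64)
		    + (uint64_t)(lo >> 64);
		r[2 * i + 1] = (uint64_t)hi;
		cy = (uint64_t)(hi >> 64);
	}
}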