Print this page
5007142 Add ntohll and htonll to sys/byteorder.h
6717509 Need to use bswap/bswapq for byte swap of 64-bit integer on x32/x64
PSARC 2008/474
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/common/crypto/modes/ccm.c
+++ new/usr/src/common/crypto/modes/ccm.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 #ifndef _KERNEL
27 27 #include <strings.h>
28 28 #include <limits.h>
↓ open down ↓ |
28 lines elided |
↑ open up ↑ |
29 29 #include <assert.h>
30 30 #include <security/cryptoki.h>
31 31 #endif
32 32
33 33 #include <sys/types.h>
34 34 #include <sys/kmem.h>
35 35 #include <modes/modes.h>
36 36 #include <sys/crypto/common.h>
37 37 #include <sys/crypto/impl.h>
38 38
39 +#if defined(__i386) || defined(__amd64)
40 +#include <sys/byteorder.h>
41 +#define UNALIGNED_POINTERS_PERMITTED
42 +#endif
43 +
39 44 /*
40 45 * Encrypt multiple blocks of data in CCM mode. Decrypt for CCM mode
41 46 * is done in another function.
42 47 */
int
ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t remainder = length;
	size_t need;
	uint8_t *datap = (uint8_t *)data;
	uint8_t *blockp;
	uint8_t *lastp;
	void *iov_or_mp;
	offset_t offset;
	uint8_t *out_data_1;
	uint8_t *out_data_2;
	size_t out_data_1_len;
	uint64_t counter;
	uint8_t *mac_buf;

	/*
	 * Not yet a full cipher block's worth of input: stash the bytes
	 * in ccm_remainder until a later call completes the block.
	 */
	if (length + ctx->ccm_remainder_len < block_size) {
		/* accumulate bytes here and return */
		bcopy(datap,
		    (uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
		    length);
		ctx->ccm_remainder_len += length;
		ctx->ccm_copy_to = datap;
		return (CRYPTO_SUCCESS);
	}

	lastp = (uint8_t *)ctx->ccm_cb;
	if (out != NULL)
		crypto_init_ptrs(out, &iov_or_mp, &offset);

	mac_buf = (uint8_t *)ctx->ccm_mac_buf;

	do {
		/* Unprocessed data from last call. */
		if (ctx->ccm_remainder_len > 0) {
			/* top up the partial block saved previously */
			need = block_size - ctx->ccm_remainder_len;

			if (need > remainder)
				return (CRYPTO_DATA_LEN_RANGE);

			bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
			    [ctx->ccm_remainder_len], need);

			blockp = (uint8_t *)ctx->ccm_remainder;
		} else {
			blockp = datap;
		}

		/*
		 * do CBC MAC
		 *
		 * XOR the previous cipher block current clear block.
		 * mac_buf always contain previous cipher block.
		 */
		xor_block(blockp, mac_buf);
		encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);

		/* ccm_cb is the counter block */
		encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb,
		    (uint8_t *)ctx->ccm_tmp);

		lastp = (uint8_t *)ctx->ccm_tmp;

		/*
		 * Increment counter. Counter bits are confined
		 * to the bottom 64 bits of the counter block.
		 * The counter block is stored big-endian; on little-endian
		 * hosts swap to native order, add one, and swap back.
		 */
#ifdef _LITTLE_ENDIAN
		counter = ntohll(ctx->ccm_cb[1] & ctx->ccm_counter_mask);
		counter = htonll(counter + 1);
#else
		counter = ctx->ccm_cb[1] & ctx->ccm_counter_mask;
		counter++;
#endif	/* _LITTLE_ENDIAN */
		counter &= ctx->ccm_counter_mask;
		ctx->ccm_cb[1] =
		    (ctx->ccm_cb[1] & ~(ctx->ccm_counter_mask)) | counter;

		/*
		 * XOR encrypted counter block with the current clear block.
		 */
		xor_block(blockp, lastp);

		ctx->ccm_processed_data_len += block_size;

		if (out == NULL) {
			/* in-place operation: write back over the input */
			if (ctx->ccm_remainder_len > 0) {
				bcopy(blockp, ctx->ccm_copy_to,
				    ctx->ccm_remainder_len);
				bcopy(blockp + ctx->ccm_remainder_len, datap,
				    need);
			}
		} else {
			crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
			    &out_data_1_len, &out_data_2, block_size);

			/* copy block to where it belongs */
			if (out_data_1_len == block_size) {
				copy_block(lastp, out_data_1);
			} else {
				/* ciphertext block straddles two out bufs */
				bcopy(lastp, out_data_1, out_data_1_len);
				if (out_data_2 != NULL) {
					bcopy(lastp + out_data_1_len,
					    out_data_2,
					    block_size - out_data_1_len);
				}
			}
			/* update offset */
			out->cd_offset += block_size;
		}

		/* Update pointer to next block of data to be processed. */
		if (ctx->ccm_remainder_len != 0) {
			datap += need;
			ctx->ccm_remainder_len = 0;
		} else {
			datap += block_size;
		}

		remainder = (size_t)&data[length] - (size_t)datap;

		/* Incomplete last block. */
		if (remainder > 0 && remainder < block_size) {
			bcopy(datap, ctx->ccm_remainder, remainder);
			ctx->ccm_remainder_len = remainder;
			ctx->ccm_copy_to = datap;
			goto out;
		}
		ctx->ccm_copy_to = NULL;

	} while (remainder > 0);

out:
	return (CRYPTO_SUCCESS);
}
201 187
202 188 void
203 189 calculate_ccm_mac(ccm_ctx_t *ctx, uint8_t *ccm_mac,
204 190 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *))
205 191 {
206 192 uint64_t counter;
207 193 uint8_t *counterp, *mac_buf;
208 194 int i;
209 195
210 196 mac_buf = (uint8_t *)ctx->ccm_mac_buf;
211 197
212 198 /* first counter block start with index 0 */
213 199 counter = 0;
214 200 ctx->ccm_cb[1] = (ctx->ccm_cb[1] & ~(ctx->ccm_counter_mask)) | counter;
215 201
216 202 counterp = (uint8_t *)ctx->ccm_tmp;
217 203 encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, counterp);
218 204
219 205 /* calculate XOR of MAC with first counter block */
220 206 for (i = 0; i < ctx->ccm_mac_len; i++) {
221 207 ccm_mac[i] = mac_buf[i] ^ counterp[i];
222 208 }
223 209 }
224 210
/*
 * Finish a CCM encryption: encrypt any buffered partial block in counter
 * mode, fold it into the CBC-MAC, then append the computed MAC to the
 * output.  On success, out receives (remainder + mac_len) bytes.
 */
/* ARGSUSED */
int
ccm_encrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *lastp, *mac_buf, *ccm_mac_p, *macp;
	void *iov_or_mp;
	offset_t offset;
	uint8_t *out_data_1;
	uint8_t *out_data_2;
	size_t out_data_1_len;
	int i;

	/* output must be able to hold final partial block plus the MAC */
	if (out->cd_length < (ctx->ccm_remainder_len + ctx->ccm_mac_len)) {
		return (CRYPTO_DATA_LEN_RANGE);
	}

	/*
	 * When we get here, the number of bytes of payload processed
	 * plus whatever data remains, if any,
	 * should be the same as the number of bytes that's being
	 * passed in the argument during init time.
	 */
	if ((ctx->ccm_processed_data_len + ctx->ccm_remainder_len)
	    != (ctx->ccm_data_len)) {
		return (CRYPTO_DATA_LEN_RANGE);
	}

	mac_buf = (uint8_t *)ctx->ccm_mac_buf;

	if (ctx->ccm_remainder_len > 0) {

		/* ccm_mac_input_buf is not used for encryption */
		macp = (uint8_t *)ctx->ccm_mac_input_buf;
		bzero(macp, block_size);

		/* copy remainder to temporary buffer */
		bcopy(ctx->ccm_remainder, macp, ctx->ccm_remainder_len);

		/* calculate the CBC MAC */
		xor_block(macp, mac_buf);
		encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);

		/* calculate the counter mode */
		lastp = (uint8_t *)ctx->ccm_tmp;
		encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, lastp);

		/* XOR with counter block */
		for (i = 0; i < ctx->ccm_remainder_len; i++) {
			macp[i] ^= lastp[i];
		}
		ctx->ccm_processed_data_len += ctx->ccm_remainder_len;
	}

	/* Calculate the CCM MAC */
	ccm_mac_p = (uint8_t *)ctx->ccm_tmp;
	calculate_ccm_mac(ctx, ccm_mac_p, encrypt_block);

	crypto_init_ptrs(out, &iov_or_mp, &offset);
	crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
	    &out_data_1_len, &out_data_2,
	    ctx->ccm_remainder_len + ctx->ccm_mac_len);

	if (ctx->ccm_remainder_len > 0) {

		/* copy temporary block to where it belongs */
		if (out_data_2 == NULL) {
			/* everything will fit in out_data_1 */
			bcopy(macp, out_data_1, ctx->ccm_remainder_len);
			bcopy(ccm_mac_p, out_data_1 + ctx->ccm_remainder_len,
			    ctx->ccm_mac_len);
		} else {

			if (out_data_1_len < ctx->ccm_remainder_len) {
				/* remainder itself straddles both buffers */
				size_t data_2_len_used;

				bcopy(macp, out_data_1, out_data_1_len);

				data_2_len_used = ctx->ccm_remainder_len
				    - out_data_1_len;

				bcopy((uint8_t *)macp + out_data_1_len,
				    out_data_2, data_2_len_used);
				bcopy(ccm_mac_p, out_data_2 + data_2_len_used,
				    ctx->ccm_mac_len);
			} else {
				bcopy(macp, out_data_1, out_data_1_len);
				if (out_data_1_len == ctx->ccm_remainder_len) {
					/* mac will be in out_data_2 */
					bcopy(ccm_mac_p, out_data_2,
					    ctx->ccm_mac_len);
				} else {
					size_t len_not_used = out_data_1_len -
					    ctx->ccm_remainder_len;
					/*
					 * part of mac in will be in
					 * out_data_1, part of the mac will be
					 * in out_data_2
					 */
					bcopy(ccm_mac_p,
					    out_data_1 + ctx->ccm_remainder_len,
					    len_not_used);
					bcopy(ccm_mac_p + len_not_used,
					    out_data_2,
					    ctx->ccm_mac_len - len_not_used);

				}
			}
		}
	} else {
		/* copy block to where it belongs */
		bcopy(ccm_mac_p, out_data_1, out_data_1_len);
		if (out_data_2 != NULL) {
			bcopy(ccm_mac_p + out_data_1_len, out_data_2,
			    block_size - out_data_1_len);
		}
	}
	out->cd_offset += ctx->ccm_remainder_len + ctx->ccm_mac_len;
	ctx->ccm_remainder_len = 0;
	return (CRYPTO_SUCCESS);
}
349 334
350 335 /*
351 336 * This will only deal with decrypting the last block of the input that
352 337 * might not be a multiple of block length.
353 338 */
354 339 void
355 340 ccm_decrypt_incomplete_block(ccm_ctx_t *ctx,
356 341 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *))
357 342 {
358 343 uint8_t *datap, *outp, *counterp;
359 344 int i;
360 345
361 346 datap = (uint8_t *)ctx->ccm_remainder;
362 347 outp = &((ctx->ccm_pt_buf)[ctx->ccm_processed_data_len]);
363 348
364 349 counterp = (uint8_t *)ctx->ccm_tmp;
365 350 encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, counterp);
366 351
367 352 /* XOR with counter block */
368 353 for (i = 0; i < ctx->ccm_remainder_len; i++) {
369 354 outp[i] = datap[i] ^ counterp[i];
370 355 }
371 356 }
372 357
373 358 /*
374 359 * This will decrypt the cipher text. However, the plaintext won't be
375 360 * returned to the caller. It will be returned when decrypt_final() is
376 361 * called if the MAC matches
377 362 */
378 363 /* ARGSUSED */
379 364 int
380 365 ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
381 366 crypto_data_t *out, size_t block_size,
382 367 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
383 368 void (*copy_block)(uint8_t *, uint8_t *),
384 369 void (*xor_block)(uint8_t *, uint8_t *))
385 370 {
386 371 size_t remainder = length;
387 372 size_t need;
388 373 uint8_t *datap = (uint8_t *)data;
389 374 uint8_t *blockp;
390 375 uint8_t *cbp;
391 376 uint64_t counter;
392 377 size_t pt_len, total_decrypted_len, mac_len, pm_len, pd_len;
393 378 uint8_t *resultp;
394 379 #ifdef _LITTLE_ENDIAN
395 380 uint8_t *p;
396 381 #endif /* _LITTLE_ENDIAN */
397 382
398 383
399 384 pm_len = ctx->ccm_processed_mac_len;
400 385
401 386 if (pm_len > 0) {
402 387 uint8_t *tmp;
403 388 /*
404 389 * all ciphertext has been processed, just waiting for
405 390 * part of the value of the mac
406 391 */
407 392 if ((pm_len + length) > ctx->ccm_mac_len) {
408 393 return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
409 394 }
410 395 tmp = (uint8_t *)ctx->ccm_mac_input_buf;
411 396
412 397 bcopy(datap, tmp + pm_len, length);
413 398
414 399 ctx->ccm_processed_mac_len += length;
415 400 return (CRYPTO_SUCCESS);
416 401 }
417 402
418 403 /*
419 404 * If we decrypt the given data, what total amount of data would
420 405 * have been decrypted?
421 406 */
422 407 pd_len = ctx->ccm_processed_data_len;
423 408 total_decrypted_len = pd_len + length + ctx->ccm_remainder_len;
424 409
425 410 if (total_decrypted_len >
426 411 (ctx->ccm_data_len + ctx->ccm_mac_len)) {
427 412 return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
428 413 }
429 414
430 415 pt_len = ctx->ccm_data_len;
431 416
432 417 if (total_decrypted_len > pt_len) {
433 418 /*
434 419 * part of the input will be the MAC, need to isolate that
435 420 * to be dealt with later. The left-over data in
436 421 * ccm_remainder_len from last time will not be part of the
437 422 * MAC. Otherwise, it would have already been taken out
438 423 * when this call is made last time.
439 424 */
440 425 size_t pt_part = pt_len - pd_len - ctx->ccm_remainder_len;
441 426
442 427 mac_len = length - pt_part;
443 428
444 429 ctx->ccm_processed_mac_len = mac_len;
445 430 bcopy(data + pt_part, ctx->ccm_mac_input_buf, mac_len);
446 431
447 432 if (pt_part + ctx->ccm_remainder_len < block_size) {
448 433 /*
449 434 * since this is last of the ciphertext, will
450 435 * just decrypt with it here
451 436 */
452 437 bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
453 438 [ctx->ccm_remainder_len], pt_part);
454 439 ctx->ccm_remainder_len += pt_part;
455 440 ccm_decrypt_incomplete_block(ctx, encrypt_block);
456 441 ctx->ccm_remainder_len = 0;
457 442 ctx->ccm_processed_data_len += pt_part;
458 443 return (CRYPTO_SUCCESS);
459 444 } else {
460 445 /* let rest of the code handle this */
461 446 length = pt_part;
462 447 }
463 448 } else if (length + ctx->ccm_remainder_len < block_size) {
464 449 /* accumulate bytes here and return */
465 450 bcopy(datap,
466 451 (uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
467 452 length);
468 453 ctx->ccm_remainder_len += length;
469 454 ctx->ccm_copy_to = datap;
470 455 return (CRYPTO_SUCCESS);
471 456 }
472 457
473 458 do {
474 459 /* Unprocessed data from last call. */
475 460 if (ctx->ccm_remainder_len > 0) {
476 461 need = block_size - ctx->ccm_remainder_len;
477 462
478 463 if (need > remainder)
479 464 return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
480 465
481 466 bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
482 467 [ctx->ccm_remainder_len], need);
483 468
484 469 blockp = (uint8_t *)ctx->ccm_remainder;
485 470 } else {
486 471 blockp = datap;
↓ open down ↓ |
156 lines elided |
↑ open up ↑ |
487 472 }
488 473
489 474 /* Calculate the counter mode, ccm_cb is the counter block */
490 475 cbp = (uint8_t *)ctx->ccm_tmp;
491 476 encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, cbp);
492 477
493 478 /*
494 479 * Increment counter.
495 480 * Counter bits are confined to the bottom 64 bits
496 481 */
497 - counter = ctx->ccm_cb[1] & ctx->ccm_counter_mask;
498 482 #ifdef _LITTLE_ENDIAN
499 - p = (uint8_t *)&counter;
500 - counter = (((uint64_t)p[0] << 56) |
501 - ((uint64_t)p[1] << 48) |
502 - ((uint64_t)p[2] << 40) |
503 - ((uint64_t)p[3] << 32) |
504 - ((uint64_t)p[4] << 24) |
505 - ((uint64_t)p[5] << 16) |
506 - ((uint64_t)p[6] << 8) |
507 - (uint64_t)p[7]);
508 -#endif
483 + counter = ntohll(ctx->ccm_cb[1] & ctx->ccm_counter_mask);
484 + counter = htonll(counter + 1);
485 +#else
486 + counter = ctx->ccm_cb[1] & ctx->ccm_counter_mask;
509 487 counter++;
510 -#ifdef _LITTLE_ENDIAN
511 - counter = (((uint64_t)p[0] << 56) |
512 - ((uint64_t)p[1] << 48) |
513 - ((uint64_t)p[2] << 40) |
514 - ((uint64_t)p[3] << 32) |
515 - ((uint64_t)p[4] << 24) |
516 - ((uint64_t)p[5] << 16) |
517 - ((uint64_t)p[6] << 8) |
518 - (uint64_t)p[7]);
519 -#endif
488 +#endif /* _LITTLE_ENDIAN */
520 489 counter &= ctx->ccm_counter_mask;
521 490 ctx->ccm_cb[1] =
522 491 (ctx->ccm_cb[1] & ~(ctx->ccm_counter_mask)) | counter;
523 492
524 493 /* XOR with the ciphertext */
525 494 xor_block(blockp, cbp);
526 495
527 496 /* Copy the plaintext to the "holding buffer" */
528 497 resultp = (uint8_t *)ctx->ccm_pt_buf +
529 498 ctx->ccm_processed_data_len;
530 499 copy_block(cbp, resultp);
531 500
532 501 ctx->ccm_processed_data_len += block_size;
533 502
534 503 ctx->ccm_lastp = blockp;
535 504
536 505 /* Update pointer to next block of data to be processed. */
537 506 if (ctx->ccm_remainder_len != 0) {
538 507 datap += need;
539 508 ctx->ccm_remainder_len = 0;
540 509 } else {
541 510 datap += block_size;
542 511 }
543 512
544 513 remainder = (size_t)&data[length] - (size_t)datap;
545 514
546 515 /* Incomplete last block */
547 516 if (remainder > 0 && remainder < block_size) {
548 517 bcopy(datap, ctx->ccm_remainder, remainder);
549 518 ctx->ccm_remainder_len = remainder;
550 519 ctx->ccm_copy_to = datap;
551 520 if (ctx->ccm_processed_mac_len > 0) {
552 521 /*
553 522 * not expecting anymore ciphertext, just
554 523 * compute plaintext for the remaining input
555 524 */
556 525 ccm_decrypt_incomplete_block(ctx,
557 526 encrypt_block);
558 527 ctx->ccm_processed_data_len += remainder;
559 528 ctx->ccm_remainder_len = 0;
560 529 }
561 530 goto out;
562 531 }
563 532 ctx->ccm_copy_to = NULL;
564 533
565 534 } while (remainder > 0);
566 535
567 536 out:
568 537 return (CRYPTO_SUCCESS);
569 538 }
570 539
/*
 * Finish a CCM decryption: recompute the CBC-MAC over the buffered
 * plaintext, compare it against the MAC received with the ciphertext,
 * and release the plaintext to the caller only if they match.
 */
int
ccm_decrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	size_t mac_remain, pt_len;
	uint8_t *pt, *mac_buf, *macp, *ccm_mac_p;
	void *iov_or_mp;
	offset_t offset;
	uint8_t *out_data_1, *out_data_2;
	size_t out_data_1_len;

	pt_len = ctx->ccm_data_len;

	/* Make sure output buffer can fit all of the plaintext */
	if (out->cd_length < pt_len) {
		return (CRYPTO_DATA_LEN_RANGE);
	}

	pt = ctx->ccm_pt_buf;
	mac_remain = ctx->ccm_processed_data_len;
	mac_buf = (uint8_t *)ctx->ccm_mac_buf;

	macp = (uint8_t *)ctx->ccm_tmp;

	/* CBC-MAC the recovered plaintext, one block at a time */
	while (mac_remain > 0) {

		if (mac_remain < block_size) {
			/* zero-pad the final partial block */
			bzero(macp, block_size);
			bcopy(pt, macp, mac_remain);
			mac_remain = 0;
		} else {
			copy_block(pt, macp);
			mac_remain -= block_size;
			pt += block_size;
		}

		/* calculate the CBC MAC */
		xor_block(macp, mac_buf);
		encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
	}

	/* Calculate the CCM MAC */
	ccm_mac_p = (uint8_t *)ctx->ccm_tmp;
	calculate_ccm_mac((ccm_ctx_t *)ctx, ccm_mac_p, encrypt_block);

	/* compare the input CCM MAC value with what we calculated */
	if (bcmp(ctx->ccm_mac_input_buf, ccm_mac_p, ctx->ccm_mac_len)) {
		/* They don't match */
		return (CRYPTO_INVALID_MAC);
	} else {
		/* MAC verified; copy plaintext into the output buffer(s) */
		crypto_init_ptrs(out, &iov_or_mp, &offset);
		crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
		    &out_data_1_len, &out_data_2, pt_len);
		bcopy(ctx->ccm_pt_buf, out_data_1, out_data_1_len);
		if (out_data_2 != NULL) {
			bcopy((ctx->ccm_pt_buf) + out_data_1_len,
			    out_data_2, pt_len - out_data_1_len);
		}
		out->cd_offset += pt_len;
	}
	return (CRYPTO_SUCCESS);
}
635 604
636 605 int
637 606 ccm_validate_args(CK_AES_CCM_PARAMS *ccm_param, boolean_t is_encrypt_init)
638 607 {
639 608 size_t macSize, nonceSize;
640 609 uint8_t q;
641 610 uint64_t maxValue;
642 611
643 612 /*
644 613 * Check the length of the MAC. The only valid
645 614 * lengths for the MAC are: 4, 6, 8, 10, 12, 14, 16
646 615 */
647 616 macSize = ccm_param->ulMACSize;
648 617 if ((macSize < 4) || (macSize > 16) || ((macSize % 2) != 0)) {
649 618 return (CRYPTO_MECHANISM_PARAM_INVALID);
650 619 }
651 620
652 621 /* Check the nonce length. Valid values are 7, 8, 9, 10, 11, 12, 13 */
653 622 nonceSize = ccm_param->ulNonceSize;
654 623 if ((nonceSize < 7) || (nonceSize > 13)) {
655 624 return (CRYPTO_MECHANISM_PARAM_INVALID);
656 625 }
657 626
658 627 /* q is the length of the field storing the length, in bytes */
659 628 q = (uint8_t)((15 - nonceSize) & 0xFF);
660 629
661 630
662 631 /*
663 632 * If it is decrypt, need to make sure size of ciphertext is at least
664 633 * bigger than MAC len
665 634 */
666 635 if ((!is_encrypt_init) && (ccm_param->ulDataSize < macSize)) {
667 636 return (CRYPTO_MECHANISM_PARAM_INVALID);
668 637 }
669 638
670 639 /*
671 640 * Check to make sure the length of the payload is within the
672 641 * range of values allowed by q
673 642 */
674 643 if (q < 8) {
675 644 maxValue = (1ULL << (q * 8)) - 1;
676 645 } else {
677 646 maxValue = ULONG_MAX;
678 647 }
679 648
680 649 if (ccm_param->ulDataSize > maxValue) {
681 650 return (CRYPTO_MECHANISM_PARAM_INVALID);
682 651 }
683 652 return (CRYPTO_SUCCESS);
684 653 }
685 654
686 655 /*
687 656 * Format the first block used in CBC-MAC (B0) and the initial counter
688 657 * block based on formatting functions and counter generation functions
689 658 * specified in RFC 3610 and NIST publication 800-38C, appendix A
690 659 *
691 660 * b0 is the first block used in CBC-MAC
692 661 * cb0 is the first counter block
693 662 *
694 663 * It's assumed that the arguments b0 and cb0 are preallocated AES blocks
695 664 *
696 665 */
↓ open down ↓ |
167 lines elided |
↑ open up ↑ |
static void
ccm_format_initial_blocks(uchar_t *nonce, ulong_t nonceSize,
    ulong_t authDataSize, uint8_t *b0, ccm_ctx_t *aes_ctx)
{
	uint64_t payloadSize;
	uint8_t t, q, have_adata = 0;
	size_t limit;
	int i, j, k;
	uint64_t mask = 0;
	uint8_t *cb;

	/* q = length-field width in bytes, t = MAC length in bytes */
	q = (uint8_t)((15 - nonceSize) & 0xFF);
	t = (uint8_t)((aes_ctx->ccm_mac_len) & 0xFF);

	/* Construct the first octet of b0 */
	if (authDataSize > 0) {
		have_adata = 1;
	}
	/* flags octet: Adata bit | encoded MAC length | encoded q */
	b0[0] = (have_adata << 6) | (((t - 2) / 2) << 3) | (q - 1);

	/* copy the nonce value into b0 */
	bcopy(nonce, &(b0[1]), nonceSize);

	/* store the length of the payload into b0 */
	bzero(&(b0[1+nonceSize]), q);

	payloadSize = aes_ctx->ccm_data_len;
	limit = 8 < q ? 8 : q;

	/* big-endian encode the payload length into the trailing q bytes */
	for (i = 0, j = 0, k = 15; i < limit; i++, j += 8, k--) {
		b0[k] = (uint8_t)((payloadSize >> j) & 0xFF);
	}

	/* format the counter block */

	cb = (uint8_t *)aes_ctx->ccm_cb;

	cb[0] = 0x07 & (q-1); /* first byte */

	/* copy the nonce value into the counter block */
	bcopy(nonce, &(cb[1]), nonceSize);

	bzero(&(cb[1+nonceSize]), q);

	/* Create the mask for the counter field based on the size of nonce */
	q <<= 3;
	while (q-- > 0) {
		mask |= (1ULL << q);
	}

	/*
	 * The counter block is kept in big-endian (wire) order, so the
	 * mask must be byte-swapped to match on little-endian hosts.
	 */
#ifdef _LITTLE_ENDIAN
	mask = htonll(mask);
#endif
	aes_ctx->ccm_counter_mask = mask;

	/*
	 * During calculation, we start using counter block 1, we will
	 * set it up right here.
	 * We can just set the last byte to have the value 1, because
	 * even with the biggest nonce of 13, the last byte of the
	 * counter block will be used for the counter value.
	 */
	cb[15] = 0x01;
}
772 730
773 731 /*
774 732 * Encode the length of the associated data as
775 733 * specified in RFC 3610 and NIST publication 800-38C, appendix A
776 734 */
777 735 static void
778 736 encode_adata_len(ulong_t auth_data_len, uint8_t *encoded, size_t *encoded_len)
779 737 {
738 +#ifdef UNALIGNED_POINTERS_PERMITTED
739 + uint32_t *lencoded_ptr;
740 +#ifdef _LP64
741 + uint64_t *llencoded_ptr;
742 +#endif
743 +#endif /* UNALIGNED_POINTERS_PERMITTED */
744 +
780 745 if (auth_data_len < ((1ULL<<16) - (1ULL<<8))) {
781 746 /* 0 < a < (2^16-2^8) */
782 747 *encoded_len = 2;
783 748 encoded[0] = (auth_data_len & 0xff00) >> 8;
784 749 encoded[1] = auth_data_len & 0xff;
785 750
786 751 } else if ((auth_data_len >= ((1ULL<<16) - (1ULL<<8))) &&
787 752 (auth_data_len < (1ULL << 31))) {
788 753 /* (2^16-2^8) <= a < 2^32 */
789 754 *encoded_len = 6;
790 755 encoded[0] = 0xff;
791 756 encoded[1] = 0xfe;
757 +#ifdef UNALIGNED_POINTERS_PERMITTED
758 + lencoded_ptr = (uint32_t *)&encoded[2];
759 + *lencoded_ptr = htonl(auth_data_len);
760 +#else
792 761 encoded[2] = (auth_data_len & 0xff000000) >> 24;
793 762 encoded[3] = (auth_data_len & 0xff0000) >> 16;
794 763 encoded[4] = (auth_data_len & 0xff00) >> 8;
795 764 encoded[5] = auth_data_len & 0xff;
765 +#endif /* UNALIGNED_POINTERS_PERMITTED */
766 +
796 767 #ifdef _LP64
797 768 } else {
798 769 /* 2^32 <= a < 2^64 */
799 770 *encoded_len = 10;
800 771 encoded[0] = 0xff;
801 772 encoded[1] = 0xff;
773 +#ifdef UNALIGNED_POINTERS_PERMITTED
774 + llencoded_ptr = (uint64_t *)&encoded[2];
775 + *llencoded_ptr = htonl(auth_data_len);
776 +#else
802 777 encoded[2] = (auth_data_len & 0xff00000000000000) >> 56;
803 778 encoded[3] = (auth_data_len & 0xff000000000000) >> 48;
804 779 encoded[4] = (auth_data_len & 0xff0000000000) >> 40;
805 780 encoded[5] = (auth_data_len & 0xff00000000) >> 32;
806 781 encoded[6] = (auth_data_len & 0xff000000) >> 24;
807 782 encoded[7] = (auth_data_len & 0xff0000) >> 16;
808 783 encoded[8] = (auth_data_len & 0xff00) >> 8;
809 784 encoded[9] = auth_data_len & 0xff;
785 +#endif /* UNALIGNED_POINTERS_PERMITTED */
810 786 #endif /* _LP64 */
811 787 }
812 788 }
813 789
814 790 /*
815 791 * The following function should be call at encrypt or decrypt init time
816 792 * for AES CCM mode.
817 793 */
int
ccm_init(ccm_ctx_t *ctx, unsigned char *nonce, size_t nonce_len,
    unsigned char *auth_data, size_t auth_data_len, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	uint8_t *mac_buf, *datap, *ivp, *authp;
	size_t remainder, processed;
	uint8_t encoded_a[10]; /* max encoded auth data length is 10 octets */
	size_t encoded_a_len = 0;

	mac_buf = (uint8_t *)&(ctx->ccm_mac_buf);

	/*
	 * Format the 1st block for CBC-MAC and construct the
	 * 1st counter block.
	 *
	 * aes_ctx->ccm_iv is used for storing the counter block
	 * mac_buf will store b0 at this time.
	 */
	ccm_format_initial_blocks(nonce, nonce_len,
	    auth_data_len, mac_buf, ctx);

	/* The IV for CBC MAC for AES CCM mode is always zero */
	ivp = (uint8_t *)ctx->ccm_tmp;
	bzero(ivp, block_size);

	xor_block(ivp, mac_buf);

	/* encrypt the nonce */
	encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);

	/* take care of the associated data, if any */
	if (auth_data_len == 0) {
		return (CRYPTO_SUCCESS);
	}

	/* length of the AAD is prepended, encoded per RFC 3610 */
	encode_adata_len(auth_data_len, encoded_a, &encoded_a_len);

	remainder = auth_data_len;

	/* 1st block: it contains encoded associated data, and some data */
	authp = (uint8_t *)ctx->ccm_tmp;
	bzero(authp, block_size);
	bcopy(encoded_a, authp, encoded_a_len);
	processed = block_size - encoded_a_len;
	if (processed > auth_data_len) {
		/* in case auth_data is very small */
		processed = auth_data_len;
	}
	bcopy(auth_data, authp+encoded_a_len, processed);
	/* xor with previous buffer */
	xor_block(authp, mac_buf);
	encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
	remainder -= processed;
	if (remainder == 0) {
		/* a small amount of associated data, it's all done now */
		return (CRYPTO_SUCCESS);
	}

	/* fold the rest of the AAD into the CBC-MAC, block by block */
	do {
		if (remainder < block_size) {
			/*
			 * There's not a block full of data, pad rest of
			 * buffer with zero
			 */
			bzero(authp, block_size);
			bcopy(&(auth_data[processed]), authp, remainder);
			datap = (uint8_t *)authp;
			remainder = 0;
		} else {
			datap = (uint8_t *)(&(auth_data[processed]));
			processed += block_size;
			remainder -= block_size;
		}

		xor_block(datap, mac_buf);
		encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);

	} while (remainder > 0);

	return (CRYPTO_SUCCESS);
}
901 877
/*
 * Validate the CK_AES_CCM_PARAMS in 'param' and initialize the CCM
 * context for either encryption or decryption.  For decryption, a
 * holding buffer for the recovered plaintext is allocated here
 * (plaintext is only released after the MAC verifies).
 */
int
ccm_init_ctx(ccm_ctx_t *ccm_ctx, char *param, int kmflag,
    boolean_t is_encrypt_init, size_t block_size,
    int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
    void (*xor_block)(uint8_t *, uint8_t *))
{
	int rv;
	CK_AES_CCM_PARAMS *ccm_param;

	if (param != NULL) {
		ccm_param = (CK_AES_CCM_PARAMS *)param;

		if ((rv = ccm_validate_args(ccm_param,
		    is_encrypt_init)) != 0) {
			return (rv);
		}

		ccm_ctx->ccm_mac_len = ccm_param->ulMACSize;
		if (is_encrypt_init) {
			ccm_ctx->ccm_data_len = ccm_param->ulDataSize;
		} else {
			/* for decrypt, ulDataSize includes the MAC */
			ccm_ctx->ccm_data_len =
			    ccm_param->ulDataSize - ccm_ctx->ccm_mac_len;
			ccm_ctx->ccm_processed_mac_len = 0;
		}
		ccm_ctx->ccm_processed_data_len = 0;

		ccm_ctx->ccm_flags |= CCM_MODE;
	} else {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
		goto out;
	}

	if (ccm_init(ccm_ctx, ccm_param->nonce, ccm_param->ulNonceSize,
	    ccm_param->authData, ccm_param->ulAuthDataSize, block_size,
	    encrypt_block, xor_block) != 0) {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
		goto out;
	}
	if (!is_encrypt_init) {
		/* allocate buffer for storing decrypted plaintext */
#ifdef _KERNEL
		ccm_ctx->ccm_pt_buf = kmem_alloc(ccm_ctx->ccm_data_len,
		    kmflag);
#else
		ccm_ctx->ccm_pt_buf = malloc(ccm_ctx->ccm_data_len);
#endif
		if (ccm_ctx->ccm_pt_buf == NULL) {
			rv = CRYPTO_HOST_MEMORY;
		}
	}
out:
	return (rv);
}
956 932
957 933 void *
958 934 ccm_alloc_ctx(int kmflag)
959 935 {
960 936 ccm_ctx_t *ccm_ctx;
961 937
962 938 #ifdef _KERNEL
963 939 if ((ccm_ctx = kmem_zalloc(sizeof (ccm_ctx_t), kmflag)) == NULL)
964 940 #else
965 941 if ((ccm_ctx = calloc(1, sizeof (ccm_ctx_t))) == NULL)
966 942 #endif
967 943 return (NULL);
968 944
969 945 ccm_ctx->ccm_flags = CCM_MODE;
970 946 return (ccm_ctx);
971 947 }
↓ open down ↓ |
152 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX