6717509 Need to use bswap/bswapq for byte swap of 64-bit integer on x32/x64 (fix lint)
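Context note: the substantive bswap/bswapq work for 6717509 lives behind the htonll()/ntohll() macros in sys/byteorder.h; in ccm.c the visible change is a lint cleanup (an unused little-endian-only variable is removed), while the CCM counter handling keeps relying on those macros. For orientation only, a minimal portable sketch of the kind of 64-bit byte swap htonll()/ntohll() perform on a little-endian host — the real header is expected to use the bswap/bswapq instruction on x86/amd64, and the helper name below is illustrative, not part of the source:

    #include <stdint.h>

    /* Illustrative fallback only; on x86/amd64 the bswap/bswapq instruction
     * (or a compiler builtin) would normally perform this swap. */
    static uint64_t
    swap64(uint64_t v)
    {
            return (((v & 0x00000000000000ffULL) << 56) |
                ((v & 0x000000000000ff00ULL) << 40) |
                ((v & 0x0000000000ff0000ULL) << 24) |
                ((v & 0x00000000ff000000ULL) <<  8) |
                ((v & 0x000000ff00000000ULL) >>  8) |
                ((v & 0x0000ff0000000000ULL) >> 24) |
                ((v & 0x00ff000000000000ULL) >> 40) |
                ((v & 0xff00000000000000ULL) >> 56));
    }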
--- old/usr/src/common/crypto/modes/ccm.c
+++ new/usr/src/common/crypto/modes/ccm.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 #ifndef _KERNEL
27 27 #include <strings.h>
28 28 #include <limits.h>
29 29 #include <assert.h>
30 30 #include <security/cryptoki.h>
31 31 #endif
32 32
33 33 #include <sys/types.h>
34 34 #include <sys/kmem.h>
35 35 #include <modes/modes.h>
36 36 #include <sys/crypto/common.h>
37 37 #include <sys/crypto/impl.h>
38 38
39 39 #if defined(__i386) || defined(__amd64)
40 40 #include <sys/byteorder.h>
41 41 #define UNALIGNED_POINTERS_PERMITTED
42 42 #endif
43 43
44 44 /*
45 45 * Encrypt multiple blocks of data in CCM mode. Decryption for CCM mode
46 46 * is done in another function.
47 47 */
48 48 int
49 49 ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
50 50 crypto_data_t *out, size_t block_size,
51 51 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
52 52 void (*copy_block)(uint8_t *, uint8_t *),
53 53 void (*xor_block)(uint8_t *, uint8_t *))
54 54 {
55 55 size_t remainder = length;
56 56 size_t need;
57 57 uint8_t *datap = (uint8_t *)data;
58 58 uint8_t *blockp;
59 59 uint8_t *lastp;
60 60 void *iov_or_mp;
61 61 offset_t offset;
62 62 uint8_t *out_data_1;
63 63 uint8_t *out_data_2;
64 64 size_t out_data_1_len;
65 65 uint64_t counter;
66 66 uint8_t *mac_buf;
67 67
68 68 if (length + ctx->ccm_remainder_len < block_size) {
69 69 /* accumulate bytes here and return */
70 70 bcopy(datap,
71 71 (uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
72 72 length);
73 73 ctx->ccm_remainder_len += length;
74 74 ctx->ccm_copy_to = datap;
75 75 return (CRYPTO_SUCCESS);
76 76 }
77 77
78 78 lastp = (uint8_t *)ctx->ccm_cb;
79 79 if (out != NULL)
80 80 crypto_init_ptrs(out, &iov_or_mp, &offset);
81 81
82 82 mac_buf = (uint8_t *)ctx->ccm_mac_buf;
83 83
84 84 do {
85 85 /* Unprocessed data from last call. */
86 86 if (ctx->ccm_remainder_len > 0) {
87 87 need = block_size - ctx->ccm_remainder_len;
88 88
89 89 if (need > remainder)
90 90 return (CRYPTO_DATA_LEN_RANGE);
91 91
92 92 bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
93 93 [ctx->ccm_remainder_len], need);
94 94
95 95 blockp = (uint8_t *)ctx->ccm_remainder;
96 96 } else {
97 97 blockp = datap;
98 98 }
99 99
100 100 /*
101 101 * do CBC MAC
102 102 *
103 103 * XOR the previous cipher block with the current clear block.
104 104 * mac_buf always contains the previous cipher block.
105 105 */
106 106 xor_block(blockp, mac_buf);
107 107 encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
108 108
109 109 /* ccm_cb is the counter block */
110 110 encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb,
111 111 (uint8_t *)ctx->ccm_tmp);
112 112
113 113 lastp = (uint8_t *)ctx->ccm_tmp;
114 114
115 115 /*
116 116 * Increment counter. Counter bits are confined
117 117 * to the bottom 64 bits of the counter block.
118 118 */
119 119 #ifdef _LITTLE_ENDIAN
120 120 counter = ntohll(ctx->ccm_cb[1] & ctx->ccm_counter_mask);
121 121 counter = htonll(counter + 1);
122 122 #else
123 123 counter = ctx->ccm_cb[1] & ctx->ccm_counter_mask;
124 124 counter++;
125 125 #endif /* _LITTLE_ENDIAN */
126 126 counter &= ctx->ccm_counter_mask;
127 127 ctx->ccm_cb[1] =
128 128 (ctx->ccm_cb[1] & ~(ctx->ccm_counter_mask)) | counter;
129 129
130 130 /*
131 131 * XOR encrypted counter block with the current clear block.
132 132 */
133 133 xor_block(blockp, lastp);
134 134
135 135 ctx->ccm_processed_data_len += block_size;
136 136
137 137 if (out == NULL) {
138 138 if (ctx->ccm_remainder_len > 0) {
139 139 bcopy(blockp, ctx->ccm_copy_to,
140 140 ctx->ccm_remainder_len);
141 141 bcopy(blockp + ctx->ccm_remainder_len, datap,
142 142 need);
143 143 }
144 144 } else {
145 145 crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
146 146 &out_data_1_len, &out_data_2, block_size);
147 147
148 148 /* copy block to where it belongs */
149 149 if (out_data_1_len == block_size) {
150 150 copy_block(lastp, out_data_1);
151 151 } else {
152 152 bcopy(lastp, out_data_1, out_data_1_len);
153 153 if (out_data_2 != NULL) {
154 154 bcopy(lastp + out_data_1_len,
155 155 out_data_2,
156 156 block_size - out_data_1_len);
157 157 }
158 158 }
159 159 /* update offset */
160 160 out->cd_offset += block_size;
161 161 }
162 162
163 163 /* Update pointer to next block of data to be processed. */
164 164 if (ctx->ccm_remainder_len != 0) {
165 165 datap += need;
166 166 ctx->ccm_remainder_len = 0;
167 167 } else {
168 168 datap += block_size;
169 169 }
170 170
171 171 remainder = (size_t)&data[length] - (size_t)datap;
172 172
173 173 /* Incomplete last block. */
174 174 if (remainder > 0 && remainder < block_size) {
175 175 bcopy(datap, ctx->ccm_remainder, remainder);
176 176 ctx->ccm_remainder_len = remainder;
177 177 ctx->ccm_copy_to = datap;
178 178 goto out;
179 179 }
180 180 ctx->ccm_copy_to = NULL;
181 181
182 182 } while (remainder > 0);
183 183
184 184 out:
185 185 return (CRYPTO_SUCCESS);
186 186 }
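The counter update inside the loop above is where the endian-dependent 64-bit byte swap happens: on a little-endian host the masked counter bits are brought to host order with ntohll(), incremented, and converted back with htonll(). A minimal standalone sketch of that sequence, assuming Solaris's sys/byteorder.h provides ntohll()/htonll(); the helper name and parameters are illustrative, not part of the file:

    #include <sys/types.h>
    #include <sys/byteorder.h>

    /* Sketch only: the masked counter increment done in the loop above. */
    static void
    increment_ctr(uint64_t *cb1, uint64_t counter_mask)
    {
            uint64_t counter;

            counter = ntohll(*cb1 & counter_mask);  /* counter bits to host order */
            counter = htonll(counter + 1);          /* increment, back to big-endian */
            counter &= counter_mask;                /* drop any carry out of the field */
            *cb1 = (*cb1 & ~counter_mask) | counter;
    }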
187 187
188 188 void
189 189 calculate_ccm_mac(ccm_ctx_t *ctx, uint8_t *ccm_mac,
190 190 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *))
191 191 {
192 192 uint64_t counter;
193 193 uint8_t *counterp, *mac_buf;
194 194 int i;
195 195
196 196 mac_buf = (uint8_t *)ctx->ccm_mac_buf;
197 197
198 198 /* first counter block starts with index 0 */
199 199 counter = 0;
200 200 ctx->ccm_cb[1] = (ctx->ccm_cb[1] & ~(ctx->ccm_counter_mask)) | counter;
201 201
202 202 counterp = (uint8_t *)ctx->ccm_tmp;
203 203 encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, counterp);
204 204
205 205 /* calculate XOR of MAC with first counter block */
206 206 for (i = 0; i < ctx->ccm_mac_len; i++) {
207 207 ccm_mac[i] = mac_buf[i] ^ counterp[i];
208 208 }
209 209 }
210 210
211 211 /* ARGSUSED */
212 212 int
213 213 ccm_encrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
214 214 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
215 215 void (*xor_block)(uint8_t *, uint8_t *))
216 216 {
217 217 uint8_t *lastp, *mac_buf, *ccm_mac_p, *macp;
218 218 void *iov_or_mp;
219 219 offset_t offset;
220 220 uint8_t *out_data_1;
221 221 uint8_t *out_data_2;
222 222 size_t out_data_1_len;
223 223 int i;
224 224
225 225 if (out->cd_length < (ctx->ccm_remainder_len + ctx->ccm_mac_len)) {
226 226 return (CRYPTO_DATA_LEN_RANGE);
227 227 }
228 228
229 229 /*
230 230 * When we get here, the number of payload bytes processed,
231 231 * plus whatever data remains, if any,
232 232 * should equal the payload length that was passed
233 233 * in as an argument at init time.
234 234 */
235 235 if ((ctx->ccm_processed_data_len + ctx->ccm_remainder_len)
236 236 != (ctx->ccm_data_len)) {
237 237 return (CRYPTO_DATA_LEN_RANGE);
238 238 }
239 239
240 240 mac_buf = (uint8_t *)ctx->ccm_mac_buf;
241 241
242 242 if (ctx->ccm_remainder_len > 0) {
243 243
244 244 /* ccm_mac_input_buf is not used for encryption */
245 245 macp = (uint8_t *)ctx->ccm_mac_input_buf;
246 246 bzero(macp, block_size);
247 247
248 248 /* copy remainder to temporary buffer */
249 249 bcopy(ctx->ccm_remainder, macp, ctx->ccm_remainder_len);
250 250
251 251 /* calculate the CBC MAC */
252 252 xor_block(macp, mac_buf);
253 253 encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
254 254
255 255 /* calculate the counter mode */
256 256 lastp = (uint8_t *)ctx->ccm_tmp;
257 257 encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, lastp);
258 258
259 259 /* XOR with counter block */
260 260 for (i = 0; i < ctx->ccm_remainder_len; i++) {
261 261 macp[i] ^= lastp[i];
262 262 }
263 263 ctx->ccm_processed_data_len += ctx->ccm_remainder_len;
264 264 }
265 265
266 266 /* Calculate the CCM MAC */
267 267 ccm_mac_p = (uint8_t *)ctx->ccm_tmp;
268 268 calculate_ccm_mac(ctx, ccm_mac_p, encrypt_block);
269 269
270 270 crypto_init_ptrs(out, &iov_or_mp, &offset);
271 271 crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
272 272 &out_data_1_len, &out_data_2,
273 273 ctx->ccm_remainder_len + ctx->ccm_mac_len);
274 274
275 275 if (ctx->ccm_remainder_len > 0) {
276 276
277 277 /* copy temporary block to where it belongs */
278 278 if (out_data_2 == NULL) {
279 279 /* everything will fit in out_data_1 */
280 280 bcopy(macp, out_data_1, ctx->ccm_remainder_len);
281 281 bcopy(ccm_mac_p, out_data_1 + ctx->ccm_remainder_len,
282 282 ctx->ccm_mac_len);
283 283 } else {
284 284
285 285 if (out_data_1_len < ctx->ccm_remainder_len) {
286 286
287 287 size_t data_2_len_used;
288 288
289 289 bcopy(macp, out_data_1, out_data_1_len);
290 290
291 291 data_2_len_used = ctx->ccm_remainder_len
292 292 - out_data_1_len;
293 293
294 294 bcopy((uint8_t *)macp + out_data_1_len,
295 295 out_data_2, data_2_len_used);
296 296 bcopy(ccm_mac_p, out_data_2 + data_2_len_used,
297 297 ctx->ccm_mac_len);
298 298 } else {
299 299 bcopy(macp, out_data_1, out_data_1_len);
300 300 if (out_data_1_len == ctx->ccm_remainder_len) {
301 301 /* mac will be in out_data_2 */
302 302 bcopy(ccm_mac_p, out_data_2,
303 303 ctx->ccm_mac_len);
304 304 } else {
305 305 size_t len_not_used = out_data_1_len -
306 306 ctx->ccm_remainder_len;
307 307 /*
308 308 * part of the mac will be in
309 309 * out_data_1, part of the mac will be
310 310 * in out_data_2
311 311 */
312 312 bcopy(ccm_mac_p,
313 313 out_data_1 + ctx->ccm_remainder_len,
314 314 len_not_used);
315 315 bcopy(ccm_mac_p + len_not_used,
316 316 out_data_2,
317 317 ctx->ccm_mac_len - len_not_used);
318 318
319 319 }
320 320 }
321 321 }
322 322 } else {
323 323 /* copy block to where it belongs */
324 324 bcopy(ccm_mac_p, out_data_1, out_data_1_len);
325 325 if (out_data_2 != NULL) {
326 326 bcopy(ccm_mac_p + out_data_1_len, out_data_2,
327 327 block_size - out_data_1_len);
328 328 }
329 329 }
330 330 out->cd_offset += ctx->ccm_remainder_len + ctx->ccm_mac_len;
331 331 ctx->ccm_remainder_len = 0;
332 332 return (CRYPTO_SUCCESS);
333 333 }
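The output-splitting cases above are easiest to follow with concrete numbers; the sizes below are illustrative only:

    /*
     * Worked example: with a 5-byte final partial block, a 16-byte MAC, and
     * out_data_1_len = 8, out_data_1 receives the 5 ciphertext bytes plus the
     * first 3 MAC bytes (len_not_used = 8 - 5 = 3), and out_data_2 receives
     * the remaining 13 MAC bytes.
     */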
334 334
335 335 /*
336 336 * This will only deal with decrypting the last block of the input that
337 337 * might not be a multiple of block length.
338 338 */
339 339 void
340 340 ccm_decrypt_incomplete_block(ccm_ctx_t *ctx,
341 341 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *))
342 342 {
343 343 uint8_t *datap, *outp, *counterp;
344 344 int i;
345 345
346 346 datap = (uint8_t *)ctx->ccm_remainder;
347 347 outp = &((ctx->ccm_pt_buf)[ctx->ccm_processed_data_len]);
348 348
349 349 counterp = (uint8_t *)ctx->ccm_tmp;
350 350 encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, counterp);
351 351
352 352 /* XOR with counter block */
353 353 for (i = 0; i < ctx->ccm_remainder_len; i++) {
354 354 outp[i] = datap[i] ^ counterp[i];
355 355 }
356 356 }
357 357
358 358 /*
359 359 * This will decrypt the cipher text. However, the plaintext won't be
360 360 * returned to the caller. It will be returned when decrypt_final() is
361 361 * called, provided the MAC matches.
362 362 */
363 363 /* ARGSUSED */
364 364 int
365 365 ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
366 366 crypto_data_t *out, size_t block_size,
367 367 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
368 368 void (*copy_block)(uint8_t *, uint8_t *),
369 369 void (*xor_block)(uint8_t *, uint8_t *))
370 370 {
371 371 size_t remainder = length;
372 372 size_t need;
373 373 uint8_t *datap = (uint8_t *)data;
374 374 uint8_t *blockp;
375 375 uint8_t *cbp;
376 376 uint64_t counter;
377 377 size_t pt_len, total_decrypted_len, mac_len, pm_len, pd_len;
378 378 uint8_t *resultp;
379 -#ifdef _LITTLE_ENDIAN
380 - uint8_t *p;
381 -#endif /* _LITTLE_ENDIAN */
382 379
383 380
384 381 pm_len = ctx->ccm_processed_mac_len;
385 382
386 383 if (pm_len > 0) {
387 384 uint8_t *tmp;
388 385 /*
389 386 * all ciphertext has been processed; we are just waiting
390 387 * for the rest of the MAC value
391 388 */
392 389 if ((pm_len + length) > ctx->ccm_mac_len) {
393 390 return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
394 391 }
395 392 tmp = (uint8_t *)ctx->ccm_mac_input_buf;
396 393
397 394 bcopy(datap, tmp + pm_len, length);
398 395
399 396 ctx->ccm_processed_mac_len += length;
400 397 return (CRYPTO_SUCCESS);
401 398 }
402 399
403 400 /*
404 401 * If we decrypt the given data, what total amount of data would
405 402 * have been decrypted?
406 403 */
407 404 pd_len = ctx->ccm_processed_data_len;
408 405 total_decrypted_len = pd_len + length + ctx->ccm_remainder_len;
409 406
410 407 if (total_decrypted_len >
411 408 (ctx->ccm_data_len + ctx->ccm_mac_len)) {
412 409 return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
413 410 }
414 411
415 412 pt_len = ctx->ccm_data_len;
416 413
417 414 if (total_decrypted_len > pt_len) {
418 415 /*
419 416 * part of the input will be the MAC, so isolate that
420 417 * to be dealt with later. The left-over data in
421 418 * ccm_remainder_len from last time is not part of the
422 419 * MAC; otherwise, it would already have been taken out
423 420 * during the previous call.
424 421 */
425 422 size_t pt_part = pt_len - pd_len - ctx->ccm_remainder_len;
426 423
427 424 mac_len = length - pt_part;
428 425
429 426 ctx->ccm_processed_mac_len = mac_len;
430 427 bcopy(data + pt_part, ctx->ccm_mac_input_buf, mac_len);
431 428
432 429 if (pt_part + ctx->ccm_remainder_len < block_size) {
433 430 /*
434 431 * since this is the last of the ciphertext,
435 432 * just decrypt it here
436 433 */
437 434 bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
438 435 [ctx->ccm_remainder_len], pt_part);
439 436 ctx->ccm_remainder_len += pt_part;
440 437 ccm_decrypt_incomplete_block(ctx, encrypt_block);
441 438 ctx->ccm_remainder_len = 0;
442 439 ctx->ccm_processed_data_len += pt_part;
443 440 return (CRYPTO_SUCCESS);
444 441 } else {
445 442 /* let rest of the code handle this */
446 443 length = pt_part;
447 444 }
448 445 } else if (length + ctx->ccm_remainder_len < block_size) {
449 446 /* accumulate bytes here and return */
450 447 bcopy(datap,
451 448 (uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
452 449 length);
453 450 ctx->ccm_remainder_len += length;
454 451 ctx->ccm_copy_to = datap;
455 452 return (CRYPTO_SUCCESS);
456 453 }
457 454
458 455 do {
459 456 /* Unprocessed data from last call. */
460 457 if (ctx->ccm_remainder_len > 0) {
461 458 need = block_size - ctx->ccm_remainder_len;
462 459
463 460 if (need > remainder)
464 461 return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
465 462
466 463 bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
467 464 [ctx->ccm_remainder_len], need);
468 465
469 466 blockp = (uint8_t *)ctx->ccm_remainder;
470 467 } else {
471 468 blockp = datap;
472 469 }
473 470
474 471 /* Calculate the counter mode, ccm_cb is the counter block */
475 472 cbp = (uint8_t *)ctx->ccm_tmp;
476 473 encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, cbp);
477 474
478 475 /*
479 476 * Increment counter.
480 477 * Counter bits are confined to the bottom 64 bits
481 478 */
482 479 #ifdef _LITTLE_ENDIAN
483 480 counter = ntohll(ctx->ccm_cb[1] & ctx->ccm_counter_mask);
484 481 counter = htonll(counter + 1);
485 482 #else
486 483 counter = ctx->ccm_cb[1] & ctx->ccm_counter_mask;
487 484 counter++;
488 485 #endif /* _LITTLE_ENDIAN */
489 486 counter &= ctx->ccm_counter_mask;
490 487 ctx->ccm_cb[1] =
491 488 (ctx->ccm_cb[1] & ~(ctx->ccm_counter_mask)) | counter;
492 489
493 490 /* XOR with the ciphertext */
494 491 xor_block(blockp, cbp);
495 492
496 493 /* Copy the plaintext to the "holding buffer" */
497 494 resultp = (uint8_t *)ctx->ccm_pt_buf +
498 495 ctx->ccm_processed_data_len;
499 496 copy_block(cbp, resultp);
500 497
501 498 ctx->ccm_processed_data_len += block_size;
502 499
503 500 ctx->ccm_lastp = blockp;
504 501
505 502 /* Update pointer to next block of data to be processed. */
506 503 if (ctx->ccm_remainder_len != 0) {
507 504 datap += need;
508 505 ctx->ccm_remainder_len = 0;
509 506 } else {
510 507 datap += block_size;
511 508 }
512 509
513 510 remainder = (size_t)&data[length] - (size_t)datap;
514 511
515 512 /* Incomplete last block */
516 513 if (remainder > 0 && remainder < block_size) {
517 514 bcopy(datap, ctx->ccm_remainder, remainder);
518 515 ctx->ccm_remainder_len = remainder;
519 516 ctx->ccm_copy_to = datap;
520 517 if (ctx->ccm_processed_mac_len > 0) {
521 518 /*
522 519 * not expecting any more ciphertext, just
523 520 * compute plaintext for the remaining input
524 521 */
525 522 ccm_decrypt_incomplete_block(ctx,
526 523 encrypt_block);
527 524 ctx->ccm_processed_data_len += remainder;
528 525 ctx->ccm_remainder_len = 0;
529 526 }
530 527 goto out;
531 528 }
532 529 ctx->ccm_copy_to = NULL;
533 530
534 531 } while (remainder > 0);
535 532
536 533 out:
537 534 return (CRYPTO_SUCCESS);
538 535 }
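To make the bookkeeping above concrete, here is a small standalone sketch of how an update of length bytes is split between the remaining ciphertext and the trailing MAC, mirroring the pt_part/mac_len arithmetic; the helper name and signature are hypothetical, not part of this file:

    #include <stddef.h>

    /* Hypothetical helper mirroring the pt_part / mac_len arithmetic above. */
    static void
    split_ct_and_mac(size_t data_len, size_t processed, size_t remainder_len,
        size_t length, size_t *ct_part, size_t *mac_part)
    {
            size_t ct_left = data_len - processed - remainder_len;

            if (length > ct_left) {
                    *ct_part = ct_left;             /* last of the ciphertext */
                    *mac_part = length - ct_left;   /* saved for ccm_decrypt_final() */
            } else {
                    *ct_part = length;              /* all payload, no MAC bytes yet */
                    *mac_part = 0;
            }
    }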
539 536
540 537 int
541 538 ccm_decrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
542 539 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
543 540 void (*copy_block)(uint8_t *, uint8_t *),
544 541 void (*xor_block)(uint8_t *, uint8_t *))
545 542 {
546 543 size_t mac_remain, pt_len;
547 544 uint8_t *pt, *mac_buf, *macp, *ccm_mac_p;
548 545 void *iov_or_mp;
549 546 offset_t offset;
550 547 uint8_t *out_data_1, *out_data_2;
551 548 size_t out_data_1_len;
552 549
553 550 pt_len = ctx->ccm_data_len;
554 551
555 552 /* Make sure output buffer can fit all of the plaintext */
556 553 if (out->cd_length < pt_len) {
557 554 return (CRYPTO_DATA_LEN_RANGE);
558 555 }
559 556
560 557 pt = ctx->ccm_pt_buf;
561 558 mac_remain = ctx->ccm_processed_data_len;
562 559 mac_buf = (uint8_t *)ctx->ccm_mac_buf;
563 560
564 561 macp = (uint8_t *)ctx->ccm_tmp;
565 562
566 563 while (mac_remain > 0) {
567 564
568 565 if (mac_remain < block_size) {
569 566 bzero(macp, block_size);
570 567 bcopy(pt, macp, mac_remain);
571 568 mac_remain = 0;
572 569 } else {
573 570 copy_block(pt, macp);
574 571 mac_remain -= block_size;
575 572 pt += block_size;
576 573 }
577 574
578 575 /* calculate the CBC MAC */
579 576 xor_block(macp, mac_buf);
580 577 encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
581 578 }
582 579
583 580 /* Calculate the CCM MAC */
584 581 ccm_mac_p = (uint8_t *)ctx->ccm_tmp;
585 582 calculate_ccm_mac((ccm_ctx_t *)ctx, ccm_mac_p, encrypt_block);
586 583
587 584 /* compare the input CCM MAC value with what we calculated */
588 585 if (bcmp(ctx->ccm_mac_input_buf, ccm_mac_p, ctx->ccm_mac_len)) {
589 586 /* They don't match */
590 587 return (CRYPTO_INVALID_MAC);
591 588 } else {
592 589 crypto_init_ptrs(out, &iov_or_mp, &offset);
593 590 crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
594 591 &out_data_1_len, &out_data_2, pt_len);
595 592 bcopy(ctx->ccm_pt_buf, out_data_1, out_data_1_len);
596 593 if (out_data_2 != NULL) {
597 594 bcopy((ctx->ccm_pt_buf) + out_data_1_len,
598 595 out_data_2, pt_len - out_data_1_len);
599 596 }
600 597 out->cd_offset += pt_len;
601 598 }
602 599 return (CRYPTO_SUCCESS);
603 600 }
604 601
605 602 int
606 603 ccm_validate_args(CK_AES_CCM_PARAMS *ccm_param, boolean_t is_encrypt_init)
607 604 {
608 605 size_t macSize, nonceSize;
609 606 uint8_t q;
610 607 uint64_t maxValue;
611 608
612 609 /*
613 610 * Check the length of the MAC. The only valid
614 611 * lengths for the MAC are: 4, 6, 8, 10, 12, 14, 16
615 612 */
616 613 macSize = ccm_param->ulMACSize;
617 614 if ((macSize < 4) || (macSize > 16) || ((macSize % 2) != 0)) {
618 615 return (CRYPTO_MECHANISM_PARAM_INVALID);
619 616 }
620 617
621 618 /* Check the nonce length. Valid values are 7, 8, 9, 10, 11, 12, 13 */
622 619 nonceSize = ccm_param->ulNonceSize;
623 620 if ((nonceSize < 7) || (nonceSize > 13)) {
624 621 return (CRYPTO_MECHANISM_PARAM_INVALID);
625 622 }
626 623
627 624 /* q is the length of the field storing the length, in bytes */
628 625 q = (uint8_t)((15 - nonceSize) & 0xFF);
629 626
630 627
631 628 /*
632 629 * For decryption, make sure the size of the ciphertext is at least
633 630 * as large as the MAC length
634 631 */
635 632 if ((!is_encrypt_init) && (ccm_param->ulDataSize < macSize)) {
636 633 return (CRYPTO_MECHANISM_PARAM_INVALID);
637 634 }
638 635
639 636 /*
640 637 * Check to make sure the length of the payload is within the
641 638 * range of values allowed by q
642 639 */
643 640 if (q < 8) {
644 641 maxValue = (1ULL << (q * 8)) - 1;
645 642 } else {
646 643 maxValue = ULONG_MAX;
647 644 }
648 645
649 646 if (ccm_param->ulDataSize > maxValue) {
650 647 return (CRYPTO_MECHANISM_PARAM_INVALID);
651 648 }
652 649 return (CRYPTO_SUCCESS);
653 650 }
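As a worked example of the q-based limit checked above: a 12-byte nonce leaves q = 15 - 12 = 3 octets for the length field, so the payload may be at most 2^24 - 1 bytes. A small illustrative helper (the name and use of UINT64_MAX are assumptions, not the library API, which caps at ULONG_MAX):

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative only: largest payload a given nonce size allows in CCM. */
    static uint64_t
    ccm_max_payload(size_t nonce_size)
    {
            uint8_t q = (uint8_t)(15 - nonce_size); /* octets left for the length */

            return ((q < 8) ? (1ULL << (q * 8)) - 1 : UINT64_MAX);
    }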
654 651
655 652 /*
656 653 * Format the first block used in CBC-MAC (B0) and the initial counter
657 654 * block based on formatting functions and counter generation functions
658 655 * specified in RFC 3610 and NIST publication 800-38C, appendix A
659 656 *
660 657 * b0 is the first block used in CBC-MAC
661 658 * cb0 is the first counter block
662 659 *
663 660 * It's assumed that the arguments b0 and cb0 are preallocated AES blocks
664 661 *
665 662 */
666 663 static void
667 664 ccm_format_initial_blocks(uchar_t *nonce, ulong_t nonceSize,
668 665 ulong_t authDataSize, uint8_t *b0, ccm_ctx_t *aes_ctx)
669 666 {
670 667 uint64_t payloadSize;
671 668 uint8_t t, q, have_adata = 0;
672 669 size_t limit;
673 670 int i, j, k;
674 671 uint64_t mask = 0;
675 672 uint8_t *cb;
676 673
677 674 q = (uint8_t)((15 - nonceSize) & 0xFF);
678 675 t = (uint8_t)((aes_ctx->ccm_mac_len) & 0xFF);
679 676
680 677 /* Construct the first octet of b0 */
681 678 if (authDataSize > 0) {
682 679 have_adata = 1;
683 680 }
684 681 b0[0] = (have_adata << 6) | (((t - 2) / 2) << 3) | (q - 1);
685 682
686 683 /* copy the nonce value into b0 */
687 684 bcopy(nonce, &(b0[1]), nonceSize);
688 685
689 686 /* store the length of the payload into b0 */
690 687 bzero(&(b0[1+nonceSize]), q);
691 688
692 689 payloadSize = aes_ctx->ccm_data_len;
693 690 limit = 8 < q ? 8 : q;
694 691
695 692 for (i = 0, j = 0, k = 15; i < limit; i++, j += 8, k--) {
696 693 b0[k] = (uint8_t)((payloadSize >> j) & 0xFF);
697 694 }
698 695
699 696 /* format the counter block */
700 697
701 698 cb = (uint8_t *)aes_ctx->ccm_cb;
702 699
703 700 cb[0] = 0x07 & (q-1); /* first byte */
704 701
705 702 /* copy the nonce value into the counter block */
706 703 bcopy(nonce, &(cb[1]), nonceSize);
707 704
708 705 bzero(&(cb[1+nonceSize]), q);
709 706
710 707 /* Create the mask for the counter field based on the size of nonce */
711 708 q <<= 3;
712 709 while (q-- > 0) {
713 710 mask |= (1ULL << q);
714 711 }
715 712
716 713 #ifdef _LITTLE_ENDIAN
717 714 mask = htonll(mask);
718 715 #endif
719 716 aes_ctx->ccm_counter_mask = mask;
720 717
721 718 /*
722 719 * During calculation, we start with counter block 1, so we
723 720 * set it up right here.
724 721 * We can just set the last byte to have the value 1, because
725 722 * even with the biggest nonce of 13, the last byte of the
726 723 * counter block will be used for the counter value.
727 724 */
728 725 cb[15] = 0x01;
729 726 }
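A worked example of the flag octets built above, with illustrative parameters (16-byte MAC, 12-byte nonce, AAD present); the helper name is hypothetical:

    #include <stdint.h>

    /*
     * Sketch of the B0 flag octet computed above. With t = 16 (MAC length),
     * q = 3 (15 minus a 12-byte nonce) and AAD present this yields
     * (1 << 6) | (7 << 3) | 2 = 0x7a; the first counter-block octet is then
     * 0x07 & (q - 1) = 0x02.
     */
    static uint8_t
    ccm_b0_flags(uint8_t t, uint8_t q, uint8_t have_adata)
    {
            return ((have_adata << 6) | (((t - 2) / 2) << 3) | (q - 1));
    }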
730 727
731 728 /*
732 729 * Encode the length of the associated data as
733 730 * specified in RFC 3610 and NIST publication 800-38C, appendix A
734 731 */
735 732 static void
736 733 encode_adata_len(ulong_t auth_data_len, uint8_t *encoded, size_t *encoded_len)
737 734 {
738 735 #ifdef UNALIGNED_POINTERS_PERMITTED
739 736 uint32_t *lencoded_ptr;
740 737 #ifdef _LP64
741 738 uint64_t *llencoded_ptr;
742 739 #endif
743 740 #endif /* UNALIGNED_POINTERS_PERMITTED */
744 741
745 742 if (auth_data_len < ((1ULL<<16) - (1ULL<<8))) {
746 743 /* 0 < a < (2^16-2^8) */
747 744 *encoded_len = 2;
748 745 encoded[0] = (auth_data_len & 0xff00) >> 8;
749 746 encoded[1] = auth_data_len & 0xff;
750 747
751 748 } else if ((auth_data_len >= ((1ULL<<16) - (1ULL<<8))) &&
752 749 (auth_data_len < (1ULL << 31))) {
753 750 /* (2^16-2^8) <= a < 2^32 */
754 751 *encoded_len = 6;
755 752 encoded[0] = 0xff;
756 753 encoded[1] = 0xfe;
757 754 #ifdef UNALIGNED_POINTERS_PERMITTED
758 755 lencoded_ptr = (uint32_t *)&encoded[2];
759 756 *lencoded_ptr = htonl(auth_data_len);
760 757 #else
761 758 encoded[2] = (auth_data_len & 0xff000000) >> 24;
762 759 encoded[3] = (auth_data_len & 0xff0000) >> 16;
763 760 encoded[4] = (auth_data_len & 0xff00) >> 8;
764 761 encoded[5] = auth_data_len & 0xff;
765 762 #endif /* UNALIGNED_POINTERS_PERMITTED */
766 763
767 764 #ifdef _LP64
768 765 } else {
769 766 /* 2^32 <= a < 2^64 */
770 767 *encoded_len = 10;
771 768 encoded[0] = 0xff;
772 769 encoded[1] = 0xff;
773 770 #ifdef UNALIGNED_POINTERS_PERMITTED
774 771 llencoded_ptr = (uint64_t *)&encoded[2];
775 772 *llencoded_ptr = htonl(auth_data_len);
776 773 #else
777 774 encoded[2] = (auth_data_len & 0xff00000000000000) >> 56;
778 775 encoded[3] = (auth_data_len & 0xff000000000000) >> 48;
779 776 encoded[4] = (auth_data_len & 0xff0000000000) >> 40;
780 777 encoded[5] = (auth_data_len & 0xff00000000) >> 32;
781 778 encoded[6] = (auth_data_len & 0xff000000) >> 24;
782 779 encoded[7] = (auth_data_len & 0xff0000) >> 16;
783 780 encoded[8] = (auth_data_len & 0xff00) >> 8;
784 781 encoded[9] = auth_data_len & 0xff;
785 782 #endif /* UNALIGNED_POINTERS_PERMITTED */
786 783 #endif /* _LP64 */
787 784 }
788 785 }
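To make the three encoding ranges above concrete (illustrative lengths only):

    /*
     * auth_data_len = 300 (0x012c):       a < 2^16 - 2^8, so encoded_len = 2,
     *     encoded[] = { 0x01, 0x2c }
     * auth_data_len = 70000 (0x00011170): 2^16 - 2^8 <= a, so encoded_len = 6,
     *     encoded[] = { 0xff, 0xfe, 0x00, 0x01, 0x11, 0x70 }
     * Larger (LP64-only) lengths get the 10-octet 0xff 0xff form.
     */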
789 786
790 787 /*
791 788 * The following function should be called at encrypt or decrypt init time
792 789 * for AES CCM mode.
793 790 */
794 791 int
795 792 ccm_init(ccm_ctx_t *ctx, unsigned char *nonce, size_t nonce_len,
796 793 unsigned char *auth_data, size_t auth_data_len, size_t block_size,
797 794 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
798 795 void (*xor_block)(uint8_t *, uint8_t *))
799 796 {
800 797 uint8_t *mac_buf, *datap, *ivp, *authp;
801 798 size_t remainder, processed;
802 799 uint8_t encoded_a[10]; /* max encoded auth data length is 10 octets */
803 800 size_t encoded_a_len = 0;
804 801
805 802 mac_buf = (uint8_t *)&(ctx->ccm_mac_buf);
806 803
807 804 /*
808 805 * Format the 1st block for CBC-MAC and construct the
809 806 * 1st counter block.
810 807 *
811 808 * aes_ctx->ccm_iv is used for storing the counter block;
812 809 * mac_buf will store b0 at this time.
813 810 */
814 811 ccm_format_initial_blocks(nonce, nonce_len,
815 812 auth_data_len, mac_buf, ctx);
816 813
817 814 /* The IV for CBC MAC for AES CCM mode is always zero */
818 815 ivp = (uint8_t *)ctx->ccm_tmp;
819 816 bzero(ivp, block_size);
820 817
821 818 xor_block(ivp, mac_buf);
822 819
823 820 /* encrypt the nonce */
824 821 encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
825 822
826 823 /* take care of the associated data, if any */
827 824 if (auth_data_len == 0) {
828 825 return (CRYPTO_SUCCESS);
829 826 }
830 827
831 828 encode_adata_len(auth_data_len, encoded_a, &encoded_a_len);
832 829
833 830 remainder = auth_data_len;
834 831
835 832 /* 1st block: it contains the encoded AAD length and as much AAD as fits */
836 833 authp = (uint8_t *)ctx->ccm_tmp;
837 834 bzero(authp, block_size);
838 835 bcopy(encoded_a, authp, encoded_a_len);
839 836 processed = block_size - encoded_a_len;
840 837 if (processed > auth_data_len) {
841 838 /* in case auth_data is very small */
842 839 processed = auth_data_len;
843 840 }
844 841 bcopy(auth_data, authp+encoded_a_len, processed);
845 842 /* xor with previous buffer */
846 843 xor_block(authp, mac_buf);
847 844 encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
848 845 remainder -= processed;
849 846 if (remainder == 0) {
850 847 /* a small amount of associated data, it's all done now */
851 848 return (CRYPTO_SUCCESS);
852 849 }
853 850
854 851 do {
855 852 if (remainder < block_size) {
856 853 /*
857 854 * There isn't a full block of data; pad the rest
858 855 * of the buffer with zeros
859 856 */
860 857 bzero(authp, block_size);
861 858 bcopy(&(auth_data[processed]), authp, remainder);
862 859 datap = (uint8_t *)authp;
863 860 remainder = 0;
864 861 } else {
865 862 datap = (uint8_t *)(&(auth_data[processed]));
866 863 processed += block_size;
867 864 remainder -= block_size;
868 865 }
869 866
870 867 xor_block(datap, mac_buf);
871 868 encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
872 869
873 870 } while (remainder > 0);
874 871
875 872 return (CRYPTO_SUCCESS);
876 873 }
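For orientation, the routines in this file are used by the AES provider in roughly the following order (an outline derived from this file, not code from it):

    /*
     * Encrypt:  ccm_init_ctx() (below) / ccm_init()   - validate params, MAC B0 and AAD
     *           ccm_mode_encrypt_contiguous_blocks()  - CBC-MAC + CTR over the payload
     *           ccm_encrypt_final()                   - flush partial block, append the CCM MAC
     *
     * Decrypt:  ccm_init_ctx() / ccm_init()
     *           ccm_mode_decrypt_contiguous_blocks()  - CTR-decrypt into ccm_pt_buf
     *           ccm_decrypt_final()                   - recompute MAC, compare, release plaintext
     */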
877 874
878 875 int
879 876 ccm_init_ctx(ccm_ctx_t *ccm_ctx, char *param, int kmflag,
880 877 boolean_t is_encrypt_init, size_t block_size,
881 878 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
882 879 void (*xor_block)(uint8_t *, uint8_t *))
883 880 {
884 881 int rv;
885 882 CK_AES_CCM_PARAMS *ccm_param;
886 883
887 884 if (param != NULL) {
888 885 ccm_param = (CK_AES_CCM_PARAMS *)param;
889 886
890 887 if ((rv = ccm_validate_args(ccm_param,
891 888 is_encrypt_init)) != 0) {
892 889 return (rv);
893 890 }
894 891
895 892 ccm_ctx->ccm_mac_len = ccm_param->ulMACSize;
896 893 if (is_encrypt_init) {
897 894 ccm_ctx->ccm_data_len = ccm_param->ulDataSize;
898 895 } else {
899 896 ccm_ctx->ccm_data_len =
900 897 ccm_param->ulDataSize - ccm_ctx->ccm_mac_len;
901 898 ccm_ctx->ccm_processed_mac_len = 0;
902 899 }
903 900 ccm_ctx->ccm_processed_data_len = 0;
904 901
905 902 ccm_ctx->ccm_flags |= CCM_MODE;
906 903 } else {
907 904 rv = CRYPTO_MECHANISM_PARAM_INVALID;
908 905 goto out;
909 906 }
910 907
911 908 if (ccm_init(ccm_ctx, ccm_param->nonce, ccm_param->ulNonceSize,
912 909 ccm_param->authData, ccm_param->ulAuthDataSize, block_size,
913 910 encrypt_block, xor_block) != 0) {
914 911 rv = CRYPTO_MECHANISM_PARAM_INVALID;
915 912 goto out;
916 913 }
917 914 if (!is_encrypt_init) {
918 915 /* allocate buffer for storing decrypted plaintext */
919 916 #ifdef _KERNEL
920 917 ccm_ctx->ccm_pt_buf = kmem_alloc(ccm_ctx->ccm_data_len,
921 918 kmflag);
922 919 #else
923 920 ccm_ctx->ccm_pt_buf = malloc(ccm_ctx->ccm_data_len);
924 921 #endif
925 922 if (ccm_ctx->ccm_pt_buf == NULL) {
926 923 rv = CRYPTO_HOST_MEMORY;
927 924 }
928 925 }
929 926 out:
930 927 return (rv);
931 928 }
932 929
933 930 void *
934 931 ccm_alloc_ctx(int kmflag)
935 932 {
936 933 ccm_ctx_t *ccm_ctx;
937 934
938 935 #ifdef _KERNEL
939 936 if ((ccm_ctx = kmem_zalloc(sizeof (ccm_ctx_t), kmflag)) == NULL)
940 937 #else
941 938 if ((ccm_ctx = calloc(1, sizeof (ccm_ctx_t))) == NULL)
942 939 #endif
943 940 return (NULL);
944 941
945 942 ccm_ctx->ccm_flags = CCM_MODE;
946 943 return (ccm_ctx);
947 944 }