5007142 Add ntohll and htonll to sys/byteorder.h
6717509 Need to use bswap/bswapq for byte swap of 64-bit integer on x32/x64
PSARC 2008/474
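For reference, the new htonll()/ntohll() routines introduced by this change perform a full 64-bit byte swap: a single bswapq on amd64, and two 32-bit bswaps plus a swap of the halves on i386. A hedged, portable C sketch of the same operation (the helper name swap64 is made up and is not part of the change):

#include <sys/types.h>

/* illustrative only: what htonll()/ntohll() compute */
static uint64_t
swap64(uint64_t x)
{
	return (((x & 0x00000000000000ffULL) << 56) |
	    ((x & 0x000000000000ff00ULL) << 40) |
	    ((x & 0x0000000000ff0000ULL) << 24) |
	    ((x & 0x00000000ff000000ULL) << 8) |
	    ((x & 0x000000ff00000000ULL) >> 8) |
	    ((x & 0x0000ff0000000000ULL) >> 24) |
	    ((x & 0x00ff000000000000ULL) >> 40) |
	    ((x & 0xff00000000000000ULL) >> 56));
}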
--- old/usr/src/uts/intel/ia32/ml/i86_subr.s
+++ new/usr/src/uts/intel/ia32/ml/i86_subr.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.
29 29 * Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T
30 30 * All Rights Reserved
31 31 */
32 32
33 33 /*
34 34 * General assembly language routines.
35 35 * It is the intent of this file to contain routines that are
36 36 * independent of the specific kernel architecture, and those that are
37 37 * common across kernel architectures.
38 38 * As architectures diverge, and implementations of specific
39 39 * architecture-dependent routines change, the routines should be moved
40 40 * from this file into the respective ../`arch -k`/subr.s file.
41 41 */
42 42
43 43 #include <sys/asm_linkage.h>
44 44 #include <sys/asm_misc.h>
45 45 #include <sys/panic.h>
46 46 #include <sys/ontrap.h>
47 47 #include <sys/regset.h>
48 48 #include <sys/privregs.h>
49 49 #include <sys/reboot.h>
50 50 #include <sys/psw.h>
51 51 #include <sys/x86_archext.h>
52 52
53 53 #if defined(__lint)
54 54 #include <sys/types.h>
55 55 #include <sys/systm.h>
56 56 #include <sys/thread.h>
57 57 #include <sys/archsystm.h>
58 58 #include <sys/byteorder.h>
59 59 #include <sys/dtrace.h>
60 60 #include <sys/ftrace.h>
61 61 #else /* __lint */
62 62 #include "assym.h"
63 63 #endif /* __lint */
64 64 #include <sys/dditypes.h>
65 65
66 66 /*
67 67 * on_fault()
68 68 * Catch lofault faults. Like setjmp except it returns one
69 69 * if code following causes uncorrectable fault. Turned off
70 70 * by calling no_fault().
71 71 */
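A hedged usage sketch of the on_fault()/no_fault() pairing described above (the wrapper function, its arguments, and the <sys/systm.h> declaration site are assumptions for illustration):

#include <sys/types.h>
#include <sys/systm.h>	/* assumed to declare on_fault()/no_fault() */

/* illustrative only: probe a byte that may fault */
static int
peek_byte(volatile uchar_t *addr, uchar_t *valp)
{
	label_t ljb;

	if (on_fault(&ljb)) {
		/* a lofault fault unwound back to the on_fault() call */
		no_fault();
		return (-1);
	}
	*valp = *addr;		/* access that may fault */
	no_fault();		/* disarm the handler */
	return (0);
}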
72 72
73 73 #if defined(__lint)
74 74
75 75 /* ARGSUSED */
76 76 int
77 77 on_fault(label_t *ljb)
78 78 { return (0); }
79 79
80 80 void
81 81 no_fault(void)
82 82 {}
83 83
84 84 #else /* __lint */
85 85
86 86 #if defined(__amd64)
87 87
88 88 ENTRY(on_fault)
89 89 movq %gs:CPU_THREAD, %rsi
90 90 leaq catch_fault(%rip), %rdx
91 91 movq %rdi, T_ONFAULT(%rsi) /* jumpbuf in t_onfault */
92 92 movq %rdx, T_LOFAULT(%rsi) /* catch_fault in t_lofault */
93 93 jmp setjmp /* let setjmp do the rest */
94 94
95 95 catch_fault:
96 96 movq %gs:CPU_THREAD, %rsi
97 97 movq T_ONFAULT(%rsi), %rdi /* address of save area */
98 98 xorl %eax, %eax
99 99 movq %rax, T_ONFAULT(%rsi) /* turn off onfault */
100 100 movq %rax, T_LOFAULT(%rsi) /* turn off lofault */
101 101 jmp longjmp /* let longjmp do the rest */
102 102 SET_SIZE(on_fault)
103 103
104 104 ENTRY(no_fault)
105 105 movq %gs:CPU_THREAD, %rsi
106 106 xorl %eax, %eax
107 107 movq %rax, T_ONFAULT(%rsi) /* turn off onfault */
108 108 movq %rax, T_LOFAULT(%rsi) /* turn off lofault */
109 109 ret
110 110 SET_SIZE(no_fault)
111 111
112 112 #elif defined(__i386)
113 -
113 +
114 114 ENTRY(on_fault)
115 115 movl %gs:CPU_THREAD, %edx
116 116 movl 4(%esp), %eax /* jumpbuf address */
117 117 leal catch_fault, %ecx
118 118 movl %eax, T_ONFAULT(%edx) /* jumpbuf in t_onfault */
119 119 movl %ecx, T_LOFAULT(%edx) /* catch_fault in t_lofault */
120 120 jmp setjmp /* let setjmp do the rest */
121 121
122 122 catch_fault:
123 123 movl %gs:CPU_THREAD, %edx
124 124 xorl %eax, %eax
125 125 movl T_ONFAULT(%edx), %ecx /* address of save area */
126 126 movl %eax, T_ONFAULT(%edx) /* turn off onfault */
127 127 movl %eax, T_LOFAULT(%edx) /* turn off lofault */
128 128 pushl %ecx
129 129 call longjmp /* let longjmp do the rest */
130 130 SET_SIZE(on_fault)
131 131
132 132 ENTRY(no_fault)
133 133 movl %gs:CPU_THREAD, %edx
134 134 xorl %eax, %eax
135 135 movl %eax, T_ONFAULT(%edx) /* turn off onfault */
136 136 movl %eax, T_LOFAULT(%edx) /* turn off lofault */
137 137 ret
138 138 SET_SIZE(no_fault)
139 139
140 140 #endif /* __i386 */
141 141 #endif /* __lint */
142 142
143 143 /*
144 144 * Default trampoline code for on_trap() (see <sys/ontrap.h>). We just
145 145 * do a longjmp(&curthread->t_ontrap->ot_jmpbuf) if this is ever called.
146 146 */
147 147
148 148 #if defined(lint)
149 149
150 150 void
151 151 on_trap_trampoline(void)
152 152 {}
153 153
154 154 #else /* __lint */
155 155
156 156 #if defined(__amd64)
157 157
158 158 ENTRY(on_trap_trampoline)
159 159 movq %gs:CPU_THREAD, %rsi
160 160 movq T_ONTRAP(%rsi), %rdi
161 161 addq $OT_JMPBUF, %rdi
162 162 jmp longjmp
163 163 SET_SIZE(on_trap_trampoline)
164 164
165 165 #elif defined(__i386)
166 166
167 167 ENTRY(on_trap_trampoline)
168 168 movl %gs:CPU_THREAD, %eax
169 169 movl T_ONTRAP(%eax), %eax
170 170 addl $OT_JMPBUF, %eax
171 171 pushl %eax
172 172 call longjmp
173 173 SET_SIZE(on_trap_trampoline)
174 174
175 175 #endif /* __i386 */
176 176 #endif /* __lint */
177 177
178 178 /*
179 179 * Push a new element on to the t_ontrap stack. Refer to <sys/ontrap.h> for
180 180 * more information about the on_trap() mechanism. If the on_trap_data is the
181 181 * same as the topmost stack element, we just modify that element.
182 182 */
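A hedged usage sketch of the on_trap() mechanism described above (the wrapper function is made up; OT_DATA_ACCESS and no_trap() are assumed from <sys/ontrap.h>):

#include <sys/types.h>
#include <sys/ontrap.h>

/* illustrative only: load a word while tolerating a data-access trap */
static int
protected_load(volatile uint32_t *addr, uint32_t *valp)
{
	on_trap_data_t otd;

	if (on_trap(&otd, OT_DATA_ACCESS)) {
		/* a protected trap unwound back to the on_trap() call */
		no_trap();
		return (-1);
	}
	*valp = *addr;
	no_trap();
	return (0);
}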
183 183 #if defined(lint)
184 184
185 185 /*ARGSUSED*/
186 186 int
187 187 on_trap(on_trap_data_t *otp, uint_t prot)
188 188 { return (0); }
189 189
190 190 #else /* __lint */
191 191
192 192 #if defined(__amd64)
193 193
194 194 ENTRY(on_trap)
195 195 movw %si, OT_PROT(%rdi) /* ot_prot = prot */
196 196 movw $0, OT_TRAP(%rdi) /* ot_trap = 0 */
197 197 leaq on_trap_trampoline(%rip), %rdx /* rdx = &on_trap_trampoline */
198 198 movq %rdx, OT_TRAMPOLINE(%rdi) /* ot_trampoline = rdx */
199 199 xorl %ecx, %ecx
200 200 movq %rcx, OT_HANDLE(%rdi) /* ot_handle = NULL */
201 201 movq %rcx, OT_PAD1(%rdi) /* ot_pad1 = NULL */
202 202 movq %gs:CPU_THREAD, %rdx /* rdx = curthread */
203 203 movq T_ONTRAP(%rdx), %rcx /* rcx = curthread->t_ontrap */
204 204 cmpq %rdi, %rcx /* if (otp == %rcx) */
205 205 je 0f /* don't modify t_ontrap */
206 206
207 207 movq %rcx, OT_PREV(%rdi) /* ot_prev = t_ontrap */
208 208 movq %rdi, T_ONTRAP(%rdx) /* curthread->t_ontrap = otp */
209 209
210 210 0: addq $OT_JMPBUF, %rdi /* &ot_jmpbuf */
211 211 jmp setjmp
212 212 SET_SIZE(on_trap)
213 213
214 214 #elif defined(__i386)
215 215
216 216 ENTRY(on_trap)
217 217 movl 4(%esp), %eax /* %eax = otp */
218 218 movl 8(%esp), %edx /* %edx = prot */
219 219
220 220 movw %dx, OT_PROT(%eax) /* ot_prot = prot */
221 221 movw $0, OT_TRAP(%eax) /* ot_trap = 0 */
222 222 leal on_trap_trampoline, %edx /* %edx = &on_trap_trampoline */
223 223 movl %edx, OT_TRAMPOLINE(%eax) /* ot_trampoline = %edx */
224 224 movl $0, OT_HANDLE(%eax) /* ot_handle = NULL */
225 225 movl $0, OT_PAD1(%eax) /* ot_pad1 = NULL */
226 226 movl %gs:CPU_THREAD, %edx /* %edx = curthread */
227 227 movl T_ONTRAP(%edx), %ecx /* %ecx = curthread->t_ontrap */
228 228 cmpl %eax, %ecx /* if (otp == %ecx) */
229 229 je 0f /* don't modify t_ontrap */
230 230
231 231 movl %ecx, OT_PREV(%eax) /* ot_prev = t_ontrap */
232 232 movl %eax, T_ONTRAP(%edx) /* curthread->t_ontrap = otp */
233 233
234 234 0: addl $OT_JMPBUF, %eax /* %eax = &ot_jmpbuf */
235 235 movl %eax, 4(%esp) /* put %eax back on the stack */
236 236 jmp setjmp /* let setjmp do the rest */
237 237 SET_SIZE(on_trap)
238 238
239 239 #endif /* __i386 */
240 240 #endif /* __lint */
241 241
242 242 /*
243 243 * Setjmp and longjmp implement non-local gotos using state vectors
244 244 * type label_t.
245 245 */
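A hedged illustration of the contract the setjmp()/longjmp() assembly below implements: setjmp() saves the callee-saved registers, stack pointer, and return address into the label_t and returns 0; a later longjmp() on the same label_t restores that state, making the original setjmp() call site return 1 (do_work() and recover() are hypothetical):

#include <sys/types.h>

static void do_work(void);	/* hypothetical */
static void recover(void);	/* hypothetical */

/* illustrative only */
static void
example(void)
{
	label_t env;

	if (setjmp(&env) == 0) {
		/* first return: state saved, value 0 */
		do_work();	/* may eventually call longjmp(&env) */
	} else {
		/* second return: reached via longjmp(), value 1 */
		recover();
	}
}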
246 246
247 247 #if defined(__lint)
248 248
249 249 /* ARGSUSED */
250 250 int
251 251 setjmp(label_t *lp)
252 252 { return (0); }
253 253
254 254 /* ARGSUSED */
255 255 void
256 256 longjmp(label_t *lp)
257 257 {}
258 258
259 259 #else /* __lint */
260 260
261 261 #if LABEL_PC != 0
262 262 #error LABEL_PC MUST be defined as 0 for setjmp/longjmp to work as coded
263 263 #endif /* LABEL_PC != 0 */
264 264
265 265 #if defined(__amd64)
266 266
267 267 ENTRY(setjmp)
268 268 movq %rsp, LABEL_SP(%rdi)
269 269 movq %rbp, LABEL_RBP(%rdi)
270 270 movq %rbx, LABEL_RBX(%rdi)
271 271 movq %r12, LABEL_R12(%rdi)
272 272 movq %r13, LABEL_R13(%rdi)
273 273 movq %r14, LABEL_R14(%rdi)
274 274 movq %r15, LABEL_R15(%rdi)
275 275 movq (%rsp), %rdx /* return address */
276 276 movq %rdx, (%rdi) /* LABEL_PC is 0 */
277 277 xorl %eax, %eax /* return 0 */
278 278 ret
279 279 SET_SIZE(setjmp)
280 280
281 281 ENTRY(longjmp)
282 282 movq LABEL_SP(%rdi), %rsp
283 283 movq LABEL_RBP(%rdi), %rbp
284 284 movq LABEL_RBX(%rdi), %rbx
285 285 movq LABEL_R12(%rdi), %r12
286 286 movq LABEL_R13(%rdi), %r13
287 287 movq LABEL_R14(%rdi), %r14
288 288 movq LABEL_R15(%rdi), %r15
289 289 movq (%rdi), %rdx /* return address; LABEL_PC is 0 */
290 290 movq %rdx, (%rsp)
291 291 xorl %eax, %eax
292 292 incl %eax /* return 1 */
293 293 ret
294 294 SET_SIZE(longjmp)
295 295
296 296 #elif defined(__i386)
297 297
298 298 ENTRY(setjmp)
299 299 movl 4(%esp), %edx /* address of save area */
300 300 movl %ebp, LABEL_EBP(%edx)
301 301 movl %ebx, LABEL_EBX(%edx)
302 302 movl %esi, LABEL_ESI(%edx)
303 303 movl %edi, LABEL_EDI(%edx)
304 304 movl %esp, 4(%edx)
305 305 movl (%esp), %ecx /* %eip (return address) */
306 306 movl %ecx, (%edx) /* LABEL_PC is 0 */
307 307 subl %eax, %eax /* return 0 */
308 308 ret
309 309 SET_SIZE(setjmp)
310 310
311 311 ENTRY(longjmp)
312 312 movl 4(%esp), %edx /* address of save area */
313 313 movl LABEL_EBP(%edx), %ebp
314 314 movl LABEL_EBX(%edx), %ebx
315 315 movl LABEL_ESI(%edx), %esi
316 316 movl LABEL_EDI(%edx), %edi
317 317 movl 4(%edx), %esp
318 318 movl (%edx), %ecx /* %eip (return addr); LABEL_PC is 0 */
319 319 movl $1, %eax
320 320 addl $4, %esp /* pop ret adr */
321 321 jmp *%ecx /* indirect */
322 322 SET_SIZE(longjmp)
323 323
324 324 #endif /* __i386 */
325 325 #endif /* __lint */
326 326
327 327 /*
328 328 * if a() calls b() calls caller(),
329 329 * caller() returns return address in a().
330 330 * (Note: We assume a() and b() are C routines which do the normal entry/exit
331 331 * sequence.)
332 332 */
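A tiny hedged illustration of that contract (function names made up):

#include <sys/types.h>

/* illustrative only */
static void
b(void)
{
	caddr_t pc = caller();	/* return address back in a() */

	(void) pc;
}

static void
a(void)
{
	b();
}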
333 333
334 334 #if defined(__lint)
335 335
336 336 caddr_t
337 337 caller(void)
338 338 { return (0); }
339 339
340 340 #else /* __lint */
341 341
342 342 #if defined(__amd64)
343 343
344 344 ENTRY(caller)
345 345 movq 8(%rbp), %rax /* b()'s return pc, in a() */
346 346 ret
347 347 SET_SIZE(caller)
348 348
349 349 #elif defined(__i386)
350 350
351 351 ENTRY(caller)
352 352 movl 4(%ebp), %eax /* b()'s return pc, in a() */
353 353 ret
354 354 SET_SIZE(caller)
355 355
356 356 #endif /* __i386 */
357 357 #endif /* __lint */
358 358
359 359 /*
360 360 * if a() calls callee(), callee() returns the
361 361 * return address in a();
362 362 */
363 363
364 364 #if defined(__lint)
365 365
366 366 caddr_t
367 367 callee(void)
368 368 { return (0); }
369 369
370 370 #else /* __lint */
371 371
372 372 #if defined(__amd64)
373 373
374 374 ENTRY(callee)
375 375 movq (%rsp), %rax /* callee()'s return pc, in a() */
376 376 ret
377 377 SET_SIZE(callee)
378 378
379 379 #elif defined(__i386)
380 380
381 381 ENTRY(callee)
382 382 movl (%esp), %eax /* callee()'s return pc, in a() */
383 383 ret
384 384 SET_SIZE(callee)
385 385
386 386 #endif /* __i386 */
387 387 #endif /* __lint */
388 388
389 389 /*
390 390 * return the current frame pointer
391 391 */
392 392
393 393 #if defined(__lint)
394 394
395 395 greg_t
396 396 getfp(void)
397 397 { return (0); }
398 398
399 399 #else /* __lint */
400 400
401 401 #if defined(__amd64)
402 402
403 403 ENTRY(getfp)
404 404 movq %rbp, %rax
405 405 ret
406 406 SET_SIZE(getfp)
407 407
408 408 #elif defined(__i386)
409 409
410 410 ENTRY(getfp)
411 411 movl %ebp, %eax
412 412 ret
413 413 SET_SIZE(getfp)
414 414
415 415 #endif /* __i386 */
416 416 #endif /* __lint */
417 417
418 418 /*
419 419 * Invalidate a single page table entry in the TLB
420 420 */
421 421
422 422 #if defined(__lint)
423 423
424 424 /* ARGSUSED */
425 425 void
426 426 mmu_tlbflush_entry(caddr_t m)
427 427 {}
428 428
429 429 #else /* __lint */
430 430
431 431 #if defined(__amd64)
432 432
433 433 ENTRY(mmu_tlbflush_entry)
434 434 invlpg (%rdi)
435 435 ret
436 436 SET_SIZE(mmu_tlbflush_entry)
437 437
438 438 #elif defined(__i386)
439 439
440 440 ENTRY(mmu_tlbflush_entry)
441 441 movl 4(%esp), %eax
442 442 invlpg (%eax)
443 443 ret
444 444 SET_SIZE(mmu_tlbflush_entry)
445 445
446 446 #endif /* __i386 */
447 447 #endif /* __lint */
448 448
449 449
450 450 /*
451 451 * Get/Set the value of various control registers
452 452 */
453 453
454 454 #if defined(__lint)
455 455
456 456 ulong_t
457 457 getcr0(void)
458 458 { return (0); }
459 459
460 460 /* ARGSUSED */
461 461 void
462 462 setcr0(ulong_t value)
463 463 {}
464 464
465 465 ulong_t
466 466 getcr2(void)
467 467 { return (0); }
468 468
469 469 ulong_t
470 470 getcr3(void)
471 471 { return (0); }
472 472
473 473 #if !defined(__xpv)
474 474 /* ARGSUSED */
475 475 void
476 476 setcr3(ulong_t val)
477 477 {}
478 478
479 479 void
480 480 reload_cr3(void)
481 481 {}
482 482 #endif
483 483
484 484 ulong_t
485 485 getcr4(void)
486 486 { return (0); }
487 487
488 488 /* ARGSUSED */
489 489 void
490 490 setcr4(ulong_t val)
491 491 {}
492 492
493 493 #if defined(__amd64)
494 494
495 495 ulong_t
496 496 getcr8(void)
497 497 { return (0); }
498 498
499 499 /* ARGSUSED */
500 500 void
501 501 setcr8(ulong_t val)
502 502 {}
503 503
504 504 #endif /* __amd64 */
505 505
506 506 #else /* __lint */
507 507
508 508 #if defined(__amd64)
509 509
510 510 ENTRY(getcr0)
511 511 movq %cr0, %rax
512 512 ret
513 513 SET_SIZE(getcr0)
514 514
515 515 ENTRY(setcr0)
516 516 movq %rdi, %cr0
517 517 ret
518 518 SET_SIZE(setcr0)
519 519
520 520 ENTRY(getcr2)
521 521 #if defined(__xpv)
522 522 movq %gs:CPU_VCPU_INFO, %rax
523 523 movq VCPU_INFO_ARCH_CR2(%rax), %rax
524 524 #else
525 525 movq %cr2, %rax
526 526 #endif
527 527 ret
528 528 SET_SIZE(getcr2)
529 529
530 530 ENTRY(getcr3)
531 531 movq %cr3, %rax
532 532 ret
533 533 SET_SIZE(getcr3)
534 534
535 535 #if !defined(__xpv)
536 536
537 537 ENTRY(setcr3)
538 538 movq %rdi, %cr3
539 539 ret
540 540 SET_SIZE(setcr3)
541 541
542 542 ENTRY(reload_cr3)
543 543 movq %cr3, %rdi
544 544 movq %rdi, %cr3
545 545 ret
546 546 SET_SIZE(reload_cr3)
547 547
548 548 #endif /* __xpv */
549 549
550 550 ENTRY(getcr4)
551 551 movq %cr4, %rax
552 552 ret
553 553 SET_SIZE(getcr4)
554 554
555 555 ENTRY(setcr4)
556 556 movq %rdi, %cr4
557 557 ret
558 558 SET_SIZE(setcr4)
559 559
560 560 ENTRY(getcr8)
561 561 movq %cr8, %rax
562 562 ret
563 563 SET_SIZE(getcr8)
564 564
565 565 ENTRY(setcr8)
566 566 movq %rdi, %cr8
567 567 ret
568 568 SET_SIZE(setcr8)
569 569
570 570 #elif defined(__i386)
571 571
572 572 ENTRY(getcr0)
573 573 movl %cr0, %eax
574 574 ret
575 575 SET_SIZE(getcr0)
576 576
577 577 ENTRY(setcr0)
578 578 movl 4(%esp), %eax
579 579 movl %eax, %cr0
580 580 ret
581 581 SET_SIZE(setcr0)
582 582
583 583 ENTRY(getcr2)
584 584 #if defined(__xpv)
585 585 movl %gs:CPU_VCPU_INFO, %eax
586 586 movl VCPU_INFO_ARCH_CR2(%eax), %eax
587 587 #else
588 588 movl %cr2, %eax
589 589 #endif
590 590 ret
591 591 SET_SIZE(getcr2)
592 592
593 593 ENTRY(getcr3)
594 594 movl %cr3, %eax
595 595 ret
596 596 SET_SIZE(getcr3)
597 597
598 598 #if !defined(__xpv)
599 599
600 600 ENTRY(setcr3)
601 601 movl 4(%esp), %eax
602 602 movl %eax, %cr3
603 603 ret
604 604 SET_SIZE(setcr3)
605 605
606 606 ENTRY(reload_cr3)
607 607 movl %cr3, %eax
608 608 movl %eax, %cr3
609 609 ret
610 610 SET_SIZE(reload_cr3)
611 611
612 612 #endif /* __xpv */
613 613
614 614 ENTRY(getcr4)
615 615 movl %cr4, %eax
616 616 ret
617 617 SET_SIZE(getcr4)
618 618
619 619 ENTRY(setcr4)
620 620 movl 4(%esp), %eax
621 621 movl %eax, %cr4
622 622 ret
623 623 SET_SIZE(setcr4)
624 624
625 625 #endif /* __i386 */
626 626 #endif /* __lint */
627 627
628 628 #if defined(__lint)
629 629
630 630 /*ARGSUSED*/
631 631 uint32_t
632 632 __cpuid_insn(struct cpuid_regs *regs)
633 633 { return (0); }
634 634
635 635 #else /* __lint */
636 636
637 637 #if defined(__amd64)
638 638
639 639 ENTRY(__cpuid_insn)
640 640 movq %rbx, %r8
641 641 movq %rcx, %r9
642 642 movq %rdx, %r11
643 643 movl (%rdi), %eax /* %eax = regs->cp_eax */
644 644 movl 0x4(%rdi), %ebx /* %ebx = regs->cp_ebx */
645 645 movl 0x8(%rdi), %ecx /* %ecx = regs->cp_ecx */
646 646 movl 0xc(%rdi), %edx /* %edx = regs->cp_edx */
647 647 cpuid
648 648 movl %eax, (%rdi) /* regs->cp_eax = %eax */
649 649 movl %ebx, 0x4(%rdi) /* regs->cp_ebx = %ebx */
650 650 movl %ecx, 0x8(%rdi) /* regs->cp_ecx = %ecx */
651 651 movl %edx, 0xc(%rdi) /* regs->cp_edx = %edx */
652 652 movq %r8, %rbx
653 653 movq %r9, %rcx
654 654 movq %r11, %rdx
655 655 ret
656 656 SET_SIZE(__cpuid_insn)
657 657
658 658 #elif defined(__i386)
659 659
660 660 ENTRY(__cpuid_insn)
661 661 pushl %ebp
662 662 movl 0x8(%esp), %ebp /* %ebp = regs */
663 663 pushl %ebx
664 664 pushl %ecx
665 665 pushl %edx
666 666 movl (%ebp), %eax /* %eax = regs->cp_eax */
667 667 movl 0x4(%ebp), %ebx /* %ebx = regs->cp_ebx */
668 668 movl 0x8(%ebp), %ecx /* %ecx = regs->cp_ecx */
669 669 movl 0xc(%ebp), %edx /* %edx = regs->cp_edx */
670 670 cpuid
671 671 movl %eax, (%ebp) /* regs->cp_eax = %eax */
672 672 movl %ebx, 0x4(%ebp) /* regs->cp_ebx = %ebx */
673 673 movl %ecx, 0x8(%ebp) /* regs->cp_ecx = %ecx */
674 674 movl %edx, 0xc(%ebp) /* regs->cp_edx = %edx */
675 675 popl %edx
676 676 popl %ecx
677 677 popl %ebx
678 678 popl %ebp
679 679 ret
680 680 SET_SIZE(__cpuid_insn)
681 681
682 682 #endif /* __i386 */
683 683 #endif /* __lint */
684 684
685 685 #if defined(__xpv)
686 686 /*
687 687 * Defined in C
688 688 */
689 689 #else
690 690
691 691 #if defined(__lint)
692 692
693 693 /*ARGSUSED*/
694 694 void
695 695 i86_monitor(volatile uint32_t *addr, uint32_t extensions, uint32_t hints)
696 696 { return; }
697 697
698 698 #else /* __lint */
699 699
700 700 #if defined(__amd64)
701 701
702 702 ENTRY_NP(i86_monitor)
703 703 pushq %rbp
704 704 movq %rsp, %rbp
705 705 movq %rdi, %rax /* addr */
706 706 movq %rsi, %rcx /* extensions */
707 707 /* rdx contains input arg3: hints */
708 708 .byte 0x0f, 0x01, 0xc8 /* monitor */
709 709 leave
710 710 ret
711 711 SET_SIZE(i86_monitor)
712 712
713 713 #elif defined(__i386)
714 714
715 715 ENTRY_NP(i86_monitor)
716 716 pushl %ebp
717 717 movl %esp, %ebp
718 718 movl 0x8(%ebp),%eax /* addr */
719 719 movl 0xc(%ebp),%ecx /* extensions */
720 720 movl 0x10(%ebp),%edx /* hints */
721 721 .byte 0x0f, 0x01, 0xc8 /* monitor */
722 722 leave
723 723 ret
724 724 SET_SIZE(i86_monitor)
725 725
726 726 #endif /* __i386 */
727 727 #endif /* __lint */
728 728
729 729 #if defined(__lint)
730 730
731 731 /*ARGSUSED*/
732 732 void
733 733 i86_mwait(uint32_t data, uint32_t extensions)
734 734 { return; }
735 735
736 736 #else /* __lint */
737 737
738 738 #if defined(__amd64)
739 739
740 740 ENTRY_NP(i86_mwait)
741 741 pushq %rbp
742 742 movq %rsp, %rbp
743 743 movq %rdi, %rax /* data */
744 744 movq %rsi, %rcx /* extensions */
745 745 .byte 0x0f, 0x01, 0xc9 /* mwait */
746 746 leave
747 747 ret
748 748 SET_SIZE(i86_mwait)
749 749
750 750 #elif defined(__i386)
751 751
752 752 ENTRY_NP(i86_mwait)
753 753 pushl %ebp
754 754 movl %esp, %ebp
755 755 movl 0x8(%ebp),%eax /* data */
756 756 movl 0xc(%ebp),%ecx /* extensions */
757 757 .byte 0x0f, 0x01, 0xc9 /* mwait */
758 758 leave
759 759 ret
760 760 SET_SIZE(i86_mwait)
761 761
762 762 #endif /* __i386 */
763 763 #endif /* __lint */
764 764
765 765 #if defined(__lint)
766 766
767 767 hrtime_t
768 768 tsc_read(void)
769 769 {
770 770 return (0);
771 771 }
772 772
773 773 #else /* __lint */
774 774
775 775 #if defined(__amd64)
776 776
777 777 ENTRY_NP(tsc_read)
778 778 movq %rbx, %r11
779 779 movl $0, %eax
780 780 cpuid
781 781 rdtsc
782 782 movq %r11, %rbx
783 783 shlq $32, %rdx
784 784 orq %rdx, %rax
785 785 ret
786 786 .globl _tsc_mfence_start
787 787 _tsc_mfence_start:
788 788 mfence
789 789 rdtsc
790 790 shlq $32, %rdx
791 791 orq %rdx, %rax
792 792 ret
793 793 .globl _tsc_mfence_end
794 794 _tsc_mfence_end:
795 795 .globl _tscp_start
796 796 _tscp_start:
797 797 .byte 0x0f, 0x01, 0xf9 /* rdtscp instruction */
798 798 shlq $32, %rdx
799 799 orq %rdx, %rax
800 800 ret
801 801 .globl _tscp_end
802 802 _tscp_end:
803 803 .globl _no_rdtsc_start
804 804 _no_rdtsc_start:
805 805 xorl %edx, %edx
806 806 xorl %eax, %eax
807 807 ret
808 808 .globl _no_rdtsc_end
809 809 _no_rdtsc_end:
810 810 .globl _tsc_lfence_start
811 811 _tsc_lfence_start:
812 812 lfence
813 813 rdtsc
814 814 shlq $32, %rdx
815 815 orq %rdx, %rax
816 816 ret
817 817 .globl _tsc_lfence_end
818 818 _tsc_lfence_end:
819 819 SET_SIZE(tsc_read)
820 820
821 821 #else /* __i386 */
822 822
823 823 ENTRY_NP(tsc_read)
824 824 pushl %ebx
825 825 movl $0, %eax
826 826 cpuid
827 827 rdtsc
828 828 popl %ebx
829 829 ret
830 830 .globl _tsc_mfence_start
831 831 _tsc_mfence_start:
832 832 mfence
833 833 rdtsc
834 834 ret
835 835 .globl _tsc_mfence_end
836 836 _tsc_mfence_end:
837 837 .globl _tscp_start
838 838 _tscp_start:
839 839 .byte 0x0f, 0x01, 0xf9 /* rdtscp instruction */
840 840 ret
841 841 .globl _tscp_end
842 842 _tscp_end:
843 843 .globl _no_rdtsc_start
844 844 _no_rdtsc_start:
845 845 xorl %edx, %edx
846 846 xorl %eax, %eax
847 847 ret
848 848 .globl _no_rdtsc_end
849 849 _no_rdtsc_end:
850 850 .globl _tsc_lfence_start
851 851 _tsc_lfence_start:
852 852 lfence
853 853 rdtsc
854 854 ret
855 855 .globl _tsc_lfence_end
856 856 _tsc_lfence_end:
857 857 SET_SIZE(tsc_read)
858 858
859 859 #endif /* __i386 */
860 860
861 861 #endif /* __lint */
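An editorial note on the amd64 path above: rdtsc returns the low 32 bits of the counter in %eax and the high 32 bits in %edx, and the shlq $32/orq pair merges them into the single 64-bit return value. A hedged C rendering of that merge (helper name made up):

#include <sys/types.h>

/* illustrative only: how %edx:%eax from rdtsc become one 64-bit value */
static uint64_t
merge_tsc(uint32_t lo, uint32_t hi)
{
	return (((uint64_t)hi << 32) | lo);
}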
862 862
863 863
864 864 #endif /* __xpv */
865 865
866 866 #ifdef __lint
867 867 /*
 868 868  * Do not use this function for obtaining a clock tick. This
 869 869  * is called by callers who do not need to have a guaranteed
 870 870  * correct tick value. The proper routine to use is tsc_read().
871 871 */
872 872 hrtime_t
873 873 randtick(void)
874 874 {
875 875 return (0);
876 876 }
877 877 #else
878 878 #if defined(__amd64)
879 879 ENTRY_NP(randtick)
880 880 rdtsc
881 881 shlq $32, %rdx
882 882 orq %rdx, %rax
883 883 ret
884 884 SET_SIZE(randtick)
885 885 #else
886 886 ENTRY_NP(randtick)
887 887 rdtsc
888 888 ret
889 889 SET_SIZE(randtick)
890 890 #endif /* __i386 */
891 891 #endif /* __lint */
892 892 /*
893 893 * Insert entryp after predp in a doubly linked list.
894 894 */
895 895
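A hedged C rendering of the pointer updates performed by the _insque assembly below; the element layout (a forward pointer followed by a back pointer) is taken from the inline comments, and the struct and function names are made up:

#include <sys/types.h>

/* illustrative only */
struct qelem {
	struct qelem	*q_forw;
	struct qelem	*q_back;
};

static void
insque_c(struct qelem *entryp, struct qelem *predp)
{
	entryp->q_back = predp;			/* entryp->back = predp */
	entryp->q_forw = predp->q_forw;		/* entryp->forw = predp->forw */
	predp->q_forw->q_back = entryp;		/* old predp->forw->back = entryp */
	predp->q_forw = entryp;			/* predp->forw = entryp */
}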
896 896 #if defined(__lint)
897 897
898 898 /*ARGSUSED*/
899 899 void
900 900 _insque(caddr_t entryp, caddr_t predp)
901 901 {}
902 902
903 903 #else /* __lint */
904 904
905 905 #if defined(__amd64)
906 906
907 907 ENTRY(_insque)
908 - movq (%rsi), %rax /* predp->forw */
908 + movq (%rsi), %rax /* predp->forw */
909 909 movq %rsi, CPTRSIZE(%rdi) /* entryp->back = predp */
910 910 movq %rax, (%rdi) /* entryp->forw = predp->forw */
911 911 movq %rdi, (%rsi) /* predp->forw = entryp */
912 912 movq %rdi, CPTRSIZE(%rax) /* predp->forw->back = entryp */
913 913 ret
914 914 SET_SIZE(_insque)
915 915
916 916 #elif defined(__i386)
917 917
918 918 ENTRY(_insque)
919 919 movl 8(%esp), %edx
920 920 movl 4(%esp), %ecx
921 921 movl (%edx), %eax /* predp->forw */
922 922 movl %edx, CPTRSIZE(%ecx) /* entryp->back = predp */
923 923 movl %eax, (%ecx) /* entryp->forw = predp->forw */
924 924 movl %ecx, (%edx) /* predp->forw = entryp */
925 925 movl %ecx, CPTRSIZE(%eax) /* predp->forw->back = entryp */
926 926 ret
927 927 SET_SIZE(_insque)
928 928
929 929 #endif /* __i386 */
930 930 #endif /* __lint */
931 931
932 932 /*
933 933 * Remove entryp from a doubly linked list
934 934 */
935 935
936 936 #if defined(__lint)
937 937
938 938 /*ARGSUSED*/
939 939 void
940 940 _remque(caddr_t entryp)
941 941 {}
942 942
943 943 #else /* __lint */
944 944
945 945 #if defined(__amd64)
946 946
947 947 ENTRY(_remque)
948 948 movq (%rdi), %rax /* entry->forw */
949 949 movq CPTRSIZE(%rdi), %rdx /* entry->back */
950 950 movq %rax, (%rdx) /* entry->back->forw = entry->forw */
951 951 movq %rdx, CPTRSIZE(%rax) /* entry->forw->back = entry->back */
952 952 ret
953 953 SET_SIZE(_remque)
954 954
955 955 #elif defined(__i386)
956 956
957 957 ENTRY(_remque)
958 958 movl 4(%esp), %ecx
959 959 movl (%ecx), %eax /* entry->forw */
960 960 movl CPTRSIZE(%ecx), %edx /* entry->back */
961 961 movl %eax, (%edx) /* entry->back->forw = entry->forw */
962 962 movl %edx, CPTRSIZE(%eax) /* entry->forw->back = entry->back */
963 963 ret
964 964 SET_SIZE(_remque)
965 965
966 966 #endif /* __i386 */
967 967 #endif /* __lint */
968 968
969 969 /*
970 970 * Returns the number of
971 971 * non-NULL bytes in string argument.
972 972 */
973 973
974 974 #if defined(__lint)
975 975
976 976 /* ARGSUSED */
977 977 size_t
978 978 strlen(const char *str)
979 979 { return (0); }
980 980
981 981 #else /* __lint */
982 982
983 983 #if defined(__amd64)
984 984
985 985 /*
986 986 * This is close to a simple transliteration of a C version of this
987 987 * routine. We should either just -make- this be a C version, or
988 988 * justify having it in assembler by making it significantly faster.
989 989 *
990 990 * size_t
991 991 * strlen(const char *s)
992 992 * {
993 993 * const char *s0;
994 994 * #if defined(DEBUG)
995 995 * if ((uintptr_t)s < KERNELBASE)
996 996 * panic(.str_panic_msg);
997 997 * #endif
998 998 * for (s0 = s; *s; s++)
999 999 * ;
1000 1000 * return (s - s0);
1001 1001 * }
1002 1002 */
1003 1003
1004 1004 ENTRY(strlen)
1005 1005 #ifdef DEBUG
1006 1006 movq postbootkernelbase(%rip), %rax
1007 1007 cmpq %rax, %rdi
1008 1008 jae str_valid
1009 1009 pushq %rbp
1010 1010 movq %rsp, %rbp
1011 1011 leaq .str_panic_msg(%rip), %rdi
1012 1012 xorl %eax, %eax
1013 1013 call panic
1014 1014 #endif /* DEBUG */
1015 1015 str_valid:
1016 1016 cmpb $0, (%rdi)
1017 1017 movq %rdi, %rax
1018 1018 je .null_found
1019 1019 .align 4
1020 1020 .strlen_loop:
1021 1021 incq %rdi
1022 1022 cmpb $0, (%rdi)
1023 1023 jne .strlen_loop
1024 1024 .null_found:
1025 1025 subq %rax, %rdi
1026 1026 movq %rdi, %rax
1027 1027 ret
1028 1028 SET_SIZE(strlen)
1029 1029
1030 1030 #elif defined(__i386)
1031 1031
1032 1032 ENTRY(strlen)
1033 1033 #ifdef DEBUG
1034 1034 movl postbootkernelbase, %eax
1035 1035 cmpl %eax, 4(%esp)
1036 1036 jae str_valid
1037 1037 pushl %ebp
1038 1038 movl %esp, %ebp
1039 1039 pushl $.str_panic_msg
1040 1040 call panic
1041 1041 #endif /* DEBUG */
1042 1042
1043 1043 str_valid:
1044 1044 movl 4(%esp), %eax /* %eax = string address */
1045 1045 testl $3, %eax /* if %eax not word aligned */
1046 1046 jnz .not_word_aligned /* goto .not_word_aligned */
1047 1047 .align 4
1048 1048 .word_aligned:
1049 1049 movl (%eax), %edx /* move 1 word from (%eax) to %edx */
1050 1050 movl $0x7f7f7f7f, %ecx
1051 1051 andl %edx, %ecx /* %ecx = %edx & 0x7f7f7f7f */
1052 1052 addl $4, %eax /* next word */
1053 1053 addl $0x7f7f7f7f, %ecx /* %ecx += 0x7f7f7f7f */
1054 1054 orl %edx, %ecx /* %ecx |= %edx */
1055 1055 andl $0x80808080, %ecx /* %ecx &= 0x80808080 */
1056 1056 cmpl $0x80808080, %ecx /* if no null byte in this word */
1057 1057 je .word_aligned /* goto .word_aligned */
1058 1058 subl $4, %eax /* post-incremented */
1059 1059 .not_word_aligned:
1060 1060 cmpb $0, (%eax) /* if a byte in (%eax) is null */
1061 1061 je .null_found /* goto .null_found */
1062 1062 incl %eax /* next byte */
1063 1063 testl $3, %eax /* if %eax not word aligned */
1064 1064 jnz .not_word_aligned /* goto .not_word_aligned */
1065 1065 jmp .word_aligned /* goto .word_aligned */
1066 1066 .align 4
1067 1067 .null_found:
1068 1068 subl 4(%esp), %eax /* %eax -= string address */
1069 1069 ret
1070 1070 SET_SIZE(strlen)
1071 1071
1072 1072 #endif /* __i386 */
1073 1073
1074 1074 #ifdef DEBUG
1075 1075 .text
1076 1076 .str_panic_msg:
1077 1077 .string "strlen: argument below kernelbase"
1078 1078 #endif /* DEBUG */
1079 1079
1080 1080 #endif /* __lint */
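An editorial aside on the i386 word-at-a-time loop above: the 0x7f7f7f7f/0x80808080 arithmetic sets the high bit of each byte lane exactly when that byte is nonzero, so the loop keeps scanning whole words until a word fails the test. A hedged C rendering of that test (helper name made up):

#include <sys/types.h>

/* illustrative only: nonzero result means some byte of w is zero */
static int
has_nul_byte(uint32_t w)
{
	uint32_t m = (w & 0x7f7f7f7fU) + 0x7f7f7f7fU;

	return (((m | w) & 0x80808080U) != 0x80808080U);
}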
1081 1081
1082 1082 /*
1083 - * Berkley 4.3 introduced symbolically named interrupt levels
1083 + * Berkeley 4.3 introduced symbolically named interrupt levels
1084 1084  * as a way to deal with priority in a machine-independent fashion.
1085 1085 * Numbered priorities are machine specific, and should be
1086 1086 * discouraged where possible.
1087 1087 *
1088 1088 * Note, for the machine specific priorities there are
1089 1089 * examples listed for devices that use a particular priority.
1090 1090 * It should not be construed that all devices of that
1091 1091  * type should be at that priority. It is currently where
1092 1092  * the current devices fit into the priority scheme based
1093 1093  * upon time criticality.
1094 1094 *
1095 1095 * The underlying assumption of these assignments is that
1096 1096 * IPL 10 is the highest level from which a device
1097 1097 * routine can call wakeup. Devices that interrupt from higher
1098 1098 * levels are restricted in what they can do. If they need
1099 1099  * kernel services they should schedule a routine at a lower
1100 1100 * level (via software interrupt) to do the required
1101 1101 * processing.
1102 1102 *
1103 1103 * Examples of this higher usage:
1104 1104 * Level Usage
1105 1105 * 14 Profiling clock (and PROM uart polling clock)
1106 1106 * 12 Serial ports
1107 1107 *
1108 1108 * The serial ports request lower level processing on level 6.
1109 1109 *
1110 1110 * Also, almost all splN routines (where N is a number or a
1111 1111 * mnemonic) will do a RAISE(), on the assumption that they are
1112 1112 * never used to lower our priority.
1113 1113 * The exceptions are:
1114 1114 * spl8() Because you can't be above 15 to begin with!
1115 1115 * splzs() Because this is used at boot time to lower our
1116 1116 * priority, to allow the PROM to poll the uart.
1117 1117 * spl0() Used to lower priority to 0.
1118 1118 */
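A hedged sketch of the pairing these routines assume: a splN() call raises (or, for the SETPRI-based entry points, sets) the priority and returns the previous level, which the caller later passes back to splx() (the wrapper function is made up):

/* illustrative only: typical raise/restore pairing */
static void
touch_shared_state(void)
{
	int s = splzs();	/* block serial-port interrupts */

	/* ... manipulate data shared with the interrupt handler ... */

	splx(s);		/* restore the previous priority level */
}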
1119 1119
1120 1120 #if defined(__lint)
1121 1121
1122 1122 int spl0(void) { return (0); }
1123 1123 int spl6(void) { return (0); }
1124 1124 int spl7(void) { return (0); }
1125 1125 int spl8(void) { return (0); }
1126 1126 int splhigh(void) { return (0); }
1127 1127 int splhi(void) { return (0); }
1128 1128 int splzs(void) { return (0); }
1129 1129
1130 1130 /* ARGSUSED */
1131 1131 void
1132 1132 splx(int level)
1133 1133 {}
1134 1134
1135 1135 #else /* __lint */
1136 1136
1137 1137 #if defined(__amd64)
1138 1138
1139 1139 #define SETPRI(level) \
1140 1140 movl $/**/level, %edi; /* new priority */ \
1141 1141 jmp do_splx /* redirect to do_splx */
1142 1142
1143 1143 #define RAISE(level) \
1144 1144 movl $/**/level, %edi; /* new priority */ \
1145 1145 jmp splr /* redirect to splr */
1146 1146
1147 1147 #elif defined(__i386)
1148 1148
1149 1149 #define SETPRI(level) \
1150 1150 pushl $/**/level; /* new priority */ \
1151 1151 call do_splx; /* invoke common splx code */ \
1152 1152 addl $4, %esp; /* unstack arg */ \
1153 1153 ret
1154 1154
1155 1155 #define RAISE(level) \
1156 1156 pushl $/**/level; /* new priority */ \
1157 1157 call splr; /* invoke common splr code */ \
1158 1158 addl $4, %esp; /* unstack args */ \
1159 1159 ret
1160 1160
1161 1161 #endif /* __i386 */
1162 1162
1163 1163 /* locks out all interrupts, including memory errors */
1164 1164 ENTRY(spl8)
1165 1165 SETPRI(15)
1166 1166 SET_SIZE(spl8)
1167 1167
1168 1168 /* just below the level that profiling runs */
1169 1169 ENTRY(spl7)
1170 1170 RAISE(13)
1171 1171 SET_SIZE(spl7)
1172 1172
1173 1173 /* sun specific - highest priority onboard serial i/o asy ports */
1174 1174 ENTRY(splzs)
1175 1175 SETPRI(12) /* Can't be a RAISE, as it's used to lower us */
1176 1176 SET_SIZE(splzs)
1177 1177
1178 1178 ENTRY(splhi)
1179 1179 ALTENTRY(splhigh)
1180 1180 ALTENTRY(spl6)
1181 1181 ALTENTRY(i_ddi_splhigh)
1182 1182
1183 1183 RAISE(DISP_LEVEL)
1184 1184
1185 1185 SET_SIZE(i_ddi_splhigh)
1186 1186 SET_SIZE(spl6)
1187 1187 SET_SIZE(splhigh)
1188 1188 SET_SIZE(splhi)
1189 1189
1190 1190 /* allow all interrupts */
1191 1191 ENTRY(spl0)
1192 1192 SETPRI(0)
1193 1193 SET_SIZE(spl0)
1194 1194
1195 1195
1196 - /* splx implentation */
1196 + /* splx implementation */
1197 1197 ENTRY(splx)
1198 1198 jmp do_splx /* redirect to common splx code */
1199 1199 SET_SIZE(splx)
1200 1200
1201 1201 #endif /* __lint */
1202 1202
1203 1203 #if defined(__i386)
1204 1204
1205 1205 /*
1206 1206 * Read and write the %gs register
1207 1207 */
1208 1208
1209 1209 #if defined(__lint)
1210 1210
1211 1211 /*ARGSUSED*/
1212 1212 uint16_t
1213 1213 getgs(void)
1214 1214 { return (0); }
1215 1215
1216 1216 /*ARGSUSED*/
1217 1217 void
1218 1218 setgs(uint16_t sel)
1219 1219 {}
1220 1220
1221 1221 #else /* __lint */
1222 1222
1223 1223 ENTRY(getgs)
1224 1224 clr %eax
1225 1225 movw %gs, %ax
1226 1226 ret
1227 1227 SET_SIZE(getgs)
1228 1228
1229 1229 ENTRY(setgs)
1230 1230 movw 4(%esp), %gs
1231 1231 ret
1232 1232 SET_SIZE(setgs)
1233 1233
1234 1234 #endif /* __lint */
1235 1235 #endif /* __i386 */
1236 1236
1237 1237 #if defined(__lint)
1238 1238
1239 1239 void
1240 1240 pc_reset(void)
1241 1241 {}
1242 1242
1243 1243 void
1244 1244 efi_reset(void)
1245 1245 {}
1246 1246
1247 1247 #else /* __lint */
1248 1248
1249 1249 ENTRY(wait_500ms)
1250 1250 push %ebx
1251 1251 movl $50000, %ebx
1252 1252 1:
1253 1253 call tenmicrosec
1254 1254 decl %ebx
1255 1255 jnz 1b
1256 1256 pop %ebx
1257 1257 ret
1258 1258 SET_SIZE(wait_500ms)
1259 1259
1260 1260 #define RESET_METHOD_KBC 1
1261 1261 #define RESET_METHOD_PORT92 2
1262 1262 #define RESET_METHOD_PCI 4
1263 1263
1264 1264 DGDEF3(pc_reset_methods, 4, 8)
1265 1265 .long RESET_METHOD_KBC|RESET_METHOD_PORT92|RESET_METHOD_PCI;
1266 1266
1267 1267 ENTRY(pc_reset)
1268 1268
1269 1269 #if defined(__i386)
1270 1270 testl $RESET_METHOD_KBC, pc_reset_methods
1271 1271 #elif defined(__amd64)
1272 1272 testl $RESET_METHOD_KBC, pc_reset_methods(%rip)
1273 1273 #endif
1274 1274 jz 1f
1275 1275
1276 1276 /
1277 1277 / Try the classic keyboard controller-triggered reset.
1278 1278 /
1279 1279 movw $0x64, %dx
1280 1280 movb $0xfe, %al
1281 1281 outb (%dx)
1282 1282
1283 1283 / Wait up to 500 milliseconds here for the keyboard controller
1284 1284 / to pull the reset line. On some systems where the keyboard
1285 1285 / controller is slow to pull the reset line, the next reset method
1286 1286 / may be executed (which may be bad if those systems hang when the
1287 1287 / next reset method is used, e.g. Ferrari 3400 (doesn't like port 92),
1288 1288 / and Ferrari 4000 (doesn't like the cf9 reset method))
1289 1289
1290 1290 call wait_500ms
1291 1291
1292 1292 1:
1293 1293 #if defined(__i386)
1294 1294 testl $RESET_METHOD_PORT92, pc_reset_methods
1295 1295 #elif defined(__amd64)
1296 1296 testl $RESET_METHOD_PORT92, pc_reset_methods(%rip)
1297 1297 #endif
1298 1298 jz 3f
1299 1299
1300 1300 /
1301 1301 / Try port 0x92 fast reset
1302 1302 /
1303 1303 movw $0x92, %dx
1304 1304 inb (%dx)
1305 1305 cmpb $0xff, %al / If port's not there, we should get back 0xFF
1306 1306 je 1f
1307 1307 testb $1, %al / If bit 0
1308 1308 jz 2f / is clear, jump to perform the reset
1309 1309 andb $0xfe, %al / otherwise,
1310 1310 outb (%dx) / clear bit 0 first, then
1311 1311 2:
1312 1312 orb $1, %al / Set bit 0
1313 1313 outb (%dx) / and reset the system
1314 1314 1:
1315 1315
1316 1316 call wait_500ms
1317 1317
1318 1318 3:
1319 1319 #if defined(__i386)
1320 1320 testl $RESET_METHOD_PCI, pc_reset_methods
1321 1321 #elif defined(__amd64)
1322 1322 testl $RESET_METHOD_PCI, pc_reset_methods(%rip)
1323 1323 #endif
1324 1324 jz 4f
1325 1325
1326 1326 / Try the PCI (soft) reset vector (should work on all modern systems,
1327 1327 / but has been shown to cause problems on 450NX systems, and some newer
1328 1328 / systems (e.g. ATI IXP400-equipped systems))
1329 1329 / When resetting via this method, 2 writes are required. The first
1330 1330 / targets bit 1 (0=hard reset without power cycle, 1=hard reset with
1331 1331 / power cycle).
1332 1332 / The reset occurs on the second write, during bit 2's transition from
1333 1333 / 0->1.
1334 1334 movw $0xcf9, %dx
1335 1335 movb $0x2, %al / Reset mode = hard, no power cycle
1336 1336 outb (%dx)
1337 1337 movb $0x6, %al
1338 1338 outb (%dx)
1339 1339
1340 1340 call wait_500ms
1341 1341
1342 1342 4:
1343 1343 /
1344 1344 / port 0xcf9 failed also. Last-ditch effort is to
1345 1345 / triple-fault the CPU.
1346 1346 / Also, use triple fault for EFI firmware
1347 1347 /
1348 1348 ENTRY(efi_reset)
1349 1349 #if defined(__amd64)
1350 1350 pushq $0x0
1351 1351 pushq $0x0 / IDT base of 0, limit of 0 + 2 unused bytes
1352 1352 lidt (%rsp)
1353 1353 #elif defined(__i386)
1354 1354 pushl $0x0
1355 1355 pushl $0x0 / IDT base of 0, limit of 0 + 2 unused bytes
1356 1356 lidt (%esp)
1357 1357 #endif
1358 1358 int $0x0 / Trigger interrupt, generate triple-fault
1359 1359
1360 1360 cli
1361 1361 hlt / Wait forever
1362 1362 /*NOTREACHED*/
1363 1363 SET_SIZE(efi_reset)
1364 1364 SET_SIZE(pc_reset)
1365 1365
1366 1366 #endif /* __lint */
1367 1367
1368 1368 /*
1369 1369 * C callable in and out routines
1370 1370 */
1371 1371
1372 1372 #if defined(__lint)
1373 1373
1374 1374 /* ARGSUSED */
1375 1375 void
1376 1376 outl(int port_address, uint32_t val)
1377 1377 {}
1378 1378
1379 1379 #else /* __lint */
1380 1380
1381 1381 #if defined(__amd64)
1382 1382
1383 1383 ENTRY(outl)
1384 1384 movw %di, %dx
1385 1385 movl %esi, %eax
1386 1386 outl (%dx)
1387 1387 ret
1388 1388 SET_SIZE(outl)
1389 1389
1390 1390 #elif defined(__i386)
1391 1391
1392 1392 .set PORT, 4
1393 1393 .set VAL, 8
1394 1394
1395 1395 ENTRY(outl)
1396 1396 movw PORT(%esp), %dx
1397 1397 movl VAL(%esp), %eax
1398 1398 outl (%dx)
1399 1399 ret
1400 1400 SET_SIZE(outl)
1401 1401
1402 1402 #endif /* __i386 */
1403 1403 #endif /* __lint */
1404 1404
1405 1405 #if defined(__lint)
1406 1406
1407 1407 /* ARGSUSED */
1408 1408 void
1409 1409 outw(int port_address, uint16_t val)
1410 1410 {}
1411 1411
1412 1412 #else /* __lint */
1413 1413
1414 1414 #if defined(__amd64)
1415 1415
1416 1416 ENTRY(outw)
1417 1417 movw %di, %dx
1418 1418 movw %si, %ax
1419 1419 D16 outl (%dx) /* XX64 why not outw? */
1420 1420 ret
1421 1421 SET_SIZE(outw)
1422 1422
1423 1423 #elif defined(__i386)
1424 1424
1425 1425 ENTRY(outw)
1426 1426 movw PORT(%esp), %dx
1427 1427 movw VAL(%esp), %ax
1428 1428 D16 outl (%dx)
1429 1429 ret
1430 1430 SET_SIZE(outw)
1431 1431
1432 1432 #endif /* __i386 */
1433 1433 #endif /* __lint */
1434 1434
1435 1435 #if defined(__lint)
1436 1436
1437 1437 /* ARGSUSED */
1438 1438 void
1439 1439 outb(int port_address, uint8_t val)
1440 1440 {}
1441 1441
1442 1442 #else /* __lint */
1443 1443
1444 1444 #if defined(__amd64)
1445 1445
1446 1446 ENTRY(outb)
1447 1447 movw %di, %dx
1448 1448 movb %sil, %al
1449 1449 outb (%dx)
1450 1450 ret
1451 1451 SET_SIZE(outb)
1452 1452
1453 1453 #elif defined(__i386)
1454 1454
1455 1455 ENTRY(outb)
1456 1456 movw PORT(%esp), %dx
1457 1457 movb VAL(%esp), %al
1458 1458 outb (%dx)
1459 1459 ret
1460 1460 SET_SIZE(outb)
1461 1461
1462 1462 #endif /* __i386 */
1463 1463 #endif /* __lint */
1464 1464
1465 1465 #if defined(__lint)
1466 1466
1467 1467 /* ARGSUSED */
1468 1468 uint32_t
1469 1469 inl(int port_address)
1470 1470 { return (0); }
1471 1471
1472 1472 #else /* __lint */
1473 1473
1474 1474 #if defined(__amd64)
1475 1475
1476 1476 ENTRY(inl)
1477 1477 xorl %eax, %eax
1478 1478 movw %di, %dx
1479 1479 inl (%dx)
1480 1480 ret
1481 1481 SET_SIZE(inl)
1482 1482
1483 1483 #elif defined(__i386)
1484 1484
1485 1485 ENTRY(inl)
1486 1486 movw PORT(%esp), %dx
1487 1487 inl (%dx)
1488 1488 ret
1489 1489 SET_SIZE(inl)
1490 1490
1491 1491 #endif /* __i386 */
1492 1492 #endif /* __lint */
1493 1493
1494 1494 #if defined(__lint)
1495 1495
1496 1496 /* ARGSUSED */
1497 1497 uint16_t
1498 1498 inw(int port_address)
1499 1499 { return (0); }
1500 1500
1501 1501 #else /* __lint */
1502 1502
1503 1503 #if defined(__amd64)
1504 1504
1505 1505 ENTRY(inw)
1506 1506 xorl %eax, %eax
1507 1507 movw %di, %dx
1508 1508 D16 inl (%dx)
1509 1509 ret
1510 1510 SET_SIZE(inw)
1511 1511
1512 1512 #elif defined(__i386)
1513 1513
1514 1514 ENTRY(inw)
1515 1515 subl %eax, %eax
1516 1516 movw PORT(%esp), %dx
1517 1517 D16 inl (%dx)
1518 1518 ret
1519 1519 SET_SIZE(inw)
1520 1520
1521 1521 #endif /* __i386 */
1522 1522 #endif /* __lint */
1523 1523
1524 1524
1525 1525 #if defined(__lint)
1526 1526
1527 1527 /* ARGSUSED */
1528 1528 uint8_t
1529 1529 inb(int port_address)
1530 1530 { return (0); }
1531 1531
1532 1532 #else /* __lint */
1533 1533
1534 1534 #if defined(__amd64)
1535 1535
1536 1536 ENTRY(inb)
1537 1537 xorl %eax, %eax
1538 1538 movw %di, %dx
1539 1539 inb (%dx)
1540 1540 ret
1541 1541 SET_SIZE(inb)
1542 1542
1543 1543 #elif defined(__i386)
1544 1544
1545 1545 ENTRY(inb)
1546 1546 subl %eax, %eax
1547 1547 movw PORT(%esp), %dx
1548 1548 inb (%dx)
1549 1549 ret
1550 1550 SET_SIZE(inb)
1551 1551
1552 1552 #endif /* __i386 */
1553 1553 #endif /* __lint */
1554 1554
1555 1555
1556 1556 #if defined(__lint)
1557 1557
1558 1558 /* ARGSUSED */
1559 1559 void
1560 1560 repoutsw(int port, uint16_t *addr, int cnt)
1561 1561 {}
1562 1562
1563 1563 #else /* __lint */
1564 1564
1565 1565 #if defined(__amd64)
1566 1566
1567 1567 ENTRY(repoutsw)
1568 1568 movl %edx, %ecx
1569 1569 movw %di, %dx
1570 1570 rep
1571 1571 D16 outsl
1572 1572 ret
1573 1573 SET_SIZE(repoutsw)
1574 1574
1575 1575 #elif defined(__i386)
1576 1576
1577 1577 /*
1578 1578 * The arguments and saved registers are on the stack in the
1579 1579 * following order:
1580 1580 * | cnt | +16
1581 1581 * | *addr | +12
1582 1582 * | port | +8
1583 1583 * | eip | +4
1584 1584 * | esi | <-- %esp
1585 1585 * If additional values are pushed onto the stack, make sure
1586 1586 * to adjust the following constants accordingly.
1587 1587 */
1588 1588 .set PORT, 8
1589 1589 .set ADDR, 12
1590 1590 .set COUNT, 16
1591 1591
1592 1592 ENTRY(repoutsw)
1593 1593 pushl %esi
1594 1594 movl PORT(%esp), %edx
1595 1595 movl ADDR(%esp), %esi
1596 1596 movl COUNT(%esp), %ecx
1597 1597 rep
1598 1598 D16 outsl
1599 1599 popl %esi
1600 1600 ret
1601 1601 SET_SIZE(repoutsw)
1602 1602
1603 1603 #endif /* __i386 */
1604 1604 #endif /* __lint */
1605 1605
1606 1606
1607 1607 #if defined(__lint)
1608 1608
1609 1609 /* ARGSUSED */
1610 1610 void
1611 1611 repinsw(int port_addr, uint16_t *addr, int cnt)
1612 1612 {}
1613 1613
1614 1614 #else /* __lint */
1615 1615
1616 1616 #if defined(__amd64)
1617 1617
1618 1618 ENTRY(repinsw)
1619 1619 movl %edx, %ecx
1620 1620 movw %di, %dx
1621 1621 rep
1622 1622 D16 insl
1623 1623 ret
1624 1624 SET_SIZE(repinsw)
1625 1625
1626 1626 #elif defined(__i386)
1627 1627
1628 1628 ENTRY(repinsw)
1629 1629 pushl %edi
1630 1630 movl PORT(%esp), %edx
1631 1631 movl ADDR(%esp), %edi
1632 1632 movl COUNT(%esp), %ecx
1633 1633 rep
1634 1634 D16 insl
1635 1635 popl %edi
1636 1636 ret
1637 1637 SET_SIZE(repinsw)
1638 1638
1639 1639 #endif /* __i386 */
1640 1640 #endif /* __lint */
1641 1641
1642 1642
1643 1643 #if defined(__lint)
1644 1644
1645 1645 /* ARGSUSED */
1646 1646 void
1647 1647 repinsb(int port, uint8_t *addr, int count)
1648 1648 {}
1649 1649
1650 1650 #else /* __lint */
1651 1651
1652 1652 #if defined(__amd64)
1653 1653
1654 1654 ENTRY(repinsb)
1655 1655 movl %edx, %ecx
1656 1656 movw %di, %dx
1657 1657 movq %rsi, %rdi
1658 1658 rep
1659 1659 insb
1660 1660 ret
1661 1661 SET_SIZE(repinsb)
1662 1662
1663 1663 #elif defined(__i386)
1664 1664
1665 1665 /*
1666 1666 * The arguments and saved registers are on the stack in the
1667 1667 * following order:
1668 1668 * | cnt | +16
1669 1669 * | *addr | +12
1670 1670 * | port | +8
1671 1671 * | eip | +4
1672 1672 * | esi | <-- %esp
1673 1673 * If additional values are pushed onto the stack, make sure
1674 1674 * to adjust the following constants accordingly.
1675 1675 */
1676 1676 .set IO_PORT, 8
1677 1677 .set IO_ADDR, 12
1678 1678 .set IO_COUNT, 16
1679 1679
1680 1680 ENTRY(repinsb)
1681 1681 pushl %edi
1682 1682 movl IO_ADDR(%esp), %edi
1683 1683 movl IO_COUNT(%esp), %ecx
1684 1684 movl IO_PORT(%esp), %edx
1685 1685 rep
1686 1686 insb
1687 1687 popl %edi
1688 1688 ret
1689 1689 SET_SIZE(repinsb)
1690 1690
1691 1691 #endif /* __i386 */
1692 1692 #endif /* __lint */
1693 1693
1694 1694
1695 1695 /*
1696 1696 * Input a stream of 32-bit words.
1697 1697 * NOTE: count is a DWORD count.
1698 1698 */
1699 1699 #if defined(__lint)
1700 1700
1701 1701 /* ARGSUSED */
1702 1702 void
1703 1703 repinsd(int port, uint32_t *addr, int count)
1704 1704 {}
1705 1705
1706 1706 #else /* __lint */
1707 1707
1708 1708 #if defined(__amd64)
1709 1709
1710 1710 ENTRY(repinsd)
1711 1711 movl %edx, %ecx
1712 1712 movw %di, %dx
1713 1713 movq %rsi, %rdi
1714 1714 rep
1715 1715 insl
1716 1716 ret
1717 1717 SET_SIZE(repinsd)
1718 1718
1719 1719 #elif defined(__i386)
1720 1720
1721 1721 ENTRY(repinsd)
1722 1722 pushl %edi
1723 1723 movl IO_ADDR(%esp), %edi
1724 1724 movl IO_COUNT(%esp), %ecx
1725 1725 movl IO_PORT(%esp), %edx
1726 1726 rep
1727 1727 insl
1728 1728 popl %edi
1729 1729 ret
1730 1730 SET_SIZE(repinsd)
1731 1731
1732 1732 #endif /* __i386 */
1733 1733 #endif /* __lint */
1734 1734
1735 1735 /*
1736 1736 * Output a stream of bytes
1737 1737 * NOTE: count is a byte count
1738 1738 */
1739 1739 #if defined(__lint)
1740 1740
1741 1741 /* ARGSUSED */
1742 1742 void
1743 1743 repoutsb(int port, uint8_t *addr, int count)
1744 1744 {}
1745 1745
1746 1746 #else /* __lint */
1747 1747
1748 1748 #if defined(__amd64)
1749 1749
1750 1750 ENTRY(repoutsb)
1751 1751 movl %edx, %ecx
1752 1752 movw %di, %dx
1753 1753 rep
1754 1754 outsb
1755 1755 ret
1756 1756 SET_SIZE(repoutsb)
1757 1757
1758 1758 #elif defined(__i386)
1759 1759
1760 1760 ENTRY(repoutsb)
1761 1761 pushl %esi
1762 1762 movl IO_ADDR(%esp), %esi
1763 1763 movl IO_COUNT(%esp), %ecx
1764 1764 movl IO_PORT(%esp), %edx
1765 1765 rep
1766 1766 outsb
1767 1767 popl %esi
1768 1768 ret
1769 1769 SET_SIZE(repoutsb)
1770 1770
1771 1771 #endif /* __i386 */
1772 1772 #endif /* __lint */
1773 1773
1774 1774 /*
1775 1775 * Output a stream of 32-bit words
1776 1776 * NOTE: count is a DWORD count
1777 1777 */
1778 1778 #if defined(__lint)
1779 1779
1780 1780 /* ARGSUSED */
1781 1781 void
1782 1782 repoutsd(int port, uint32_t *addr, int count)
1783 1783 {}
1784 1784
1785 1785 #else /* __lint */
1786 1786
1787 1787 #if defined(__amd64)
1788 1788
1789 1789 ENTRY(repoutsd)
1790 1790 movl %edx, %ecx
1791 1791 movw %di, %dx
1792 1792 rep
1793 1793 outsl
1794 1794 ret
1795 1795 SET_SIZE(repoutsd)
1796 1796
1797 1797 #elif defined(__i386)
1798 1798
1799 1799 ENTRY(repoutsd)
1800 1800 pushl %esi
1801 1801 movl IO_ADDR(%esp), %esi
1802 1802 movl IO_COUNT(%esp), %ecx
1803 1803 movl IO_PORT(%esp), %edx
1804 1804 rep
1805 1805 outsl
1806 1806 popl %esi
1807 1807 ret
1808 1808 SET_SIZE(repoutsd)
1809 1809
1810 1810 #endif /* __i386 */
1811 1811 #endif /* __lint */
1812 1812
1813 1813 /*
1814 1814 * void int3(void)
1815 1815 * void int18(void)
1816 1816 * void int20(void)
1817 1817 * void int_cmci(void)
1818 1818 */
1819 1819
1820 1820 #if defined(__lint)
1821 1821
1822 1822 void
1823 1823 int3(void)
1824 1824 {}
1825 1825
1826 1826 void
1827 1827 int18(void)
1828 1828 {}
1829 1829
1830 1830 void
1831 1831 int20(void)
1832 1832 {}
1833 1833
1834 1834 void
1835 1835 int_cmci(void)
1836 1836 {}
1837 1837
1838 1838 #else /* __lint */
1839 1839
1840 1840 ENTRY(int3)
1841 1841 int $T_BPTFLT
1842 1842 ret
1843 1843 SET_SIZE(int3)
1844 1844
1845 1845 ENTRY(int18)
1846 1846 int $T_MCE
1847 1847 ret
1848 1848 SET_SIZE(int18)
1849 1849
1850 1850 ENTRY(int20)
1851 1851 movl boothowto, %eax
1852 1852 andl $RB_DEBUG, %eax
1853 1853 jz 1f
1854 1854
1855 1855 int $T_DBGENTR
1856 1856 1:
1857 1857 rep; ret /* use 2 byte return instruction when branch target */
1858 1858 /* AMD Software Optimization Guide - Section 6.2 */
1859 1859 SET_SIZE(int20)
1860 1860
1861 1861 ENTRY(int_cmci)
1862 1862 int $T_ENOEXTFLT
1863 1863 ret
1864 1864 SET_SIZE(int_cmci)
1865 1865
1866 1866 #endif /* __lint */
1867 1867
1868 1868 #if defined(__lint)
1869 1869
1870 1870 /* ARGSUSED */
1871 1871 int
1872 1872 scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
1873 1873 { return (0); }
1874 1874
1875 1875 #else /* __lint */
1876 1876
1877 1877 #if defined(__amd64)
1878 1878
1879 1879 ENTRY(scanc)
1880 1880 /* rdi == size */
1881 1881 /* rsi == cp */
1882 1882 /* rdx == table */
1883 1883 /* rcx == mask */
1884 1884 addq %rsi, %rdi /* end = &cp[size] */
1885 -.scanloop:
1885 +.scanloop:
1886 1886 cmpq %rdi, %rsi /* while (cp < end */
1887 1887 jnb .scandone
1888 1888 movzbq (%rsi), %r8 /* %r8 = *cp */
1889 1889 incq %rsi /* cp++ */
1890 1890 testb %cl, (%r8, %rdx)
1891 1891 jz .scanloop /* && (table[*cp] & mask) == 0) */
1892 1892 decq %rsi /* (fix post-increment) */
1893 1893 .scandone:
1894 1894 movl %edi, %eax
1895 1895 subl %esi, %eax /* return (end - cp) */
1896 1896 ret
1897 1897 SET_SIZE(scanc)
1898 1898
1899 1899 #elif defined(__i386)
1900 -
1900 +
1901 1901 ENTRY(scanc)
1902 1902 pushl %edi
1903 1903 pushl %esi
1904 1904 movb 24(%esp), %cl /* mask = %cl */
1905 1905 movl 16(%esp), %esi /* cp = %esi */
1906 1906 movl 20(%esp), %edx /* table = %edx */
1907 1907 movl %esi, %edi
1908 1908 addl 12(%esp), %edi /* end = &cp[size]; */
1909 1909 .scanloop:
1910 1910 cmpl %edi, %esi /* while (cp < end */
1911 1911 jnb .scandone
1912 1912 movzbl (%esi), %eax /* %al = *cp */
1913 1913 incl %esi /* cp++ */
1914 1914 movb (%edx, %eax), %al /* %al = table[*cp] */
1915 1915 testb %al, %cl
1916 1916 jz .scanloop /* && (table[*cp] & mask) == 0) */
1917 1917 dec %esi /* post-incremented */
1918 1918 .scandone:
1919 1919 movl %edi, %eax
1920 1920 subl %esi, %eax /* return (end - cp) */
1921 1921 popl %esi
1922 1922 popl %edi
1923 1923 ret
1924 1924 SET_SIZE(scanc)
1925 1925
1926 -#endif /* __i386 */
1926 +#endif /* __i386 */
1927 1927 #endif /* __lint */
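For reference, a hedged C equivalent of scanc(), reconstructed from the inline comments in the assembly above (the function name scanc_c is made up):

#include <sys/types.h>

/* illustrative only: mirrors the loop commented in the assembly */
static int
scanc_c(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
{
	uchar_t *end = &cp[size];

	while (cp < end && (table[*cp] & mask) == 0)
		cp++;
	return ((int)(end - cp));
}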
1928 1928
1929 1929 /*
1930 1930 * Replacement functions for ones that are normally inlined.
1931 1931 * In addition to the copy in i86.il, they are defined here just in case.
1932 1932 */
1933 1933
1934 1934 #if defined(__lint)
1935 1935
1936 1936 ulong_t
1937 1937 intr_clear(void)
1938 1938 { return (0); }
1939 1939
1940 1940 ulong_t
1941 1941 clear_int_flag(void)
1942 1942 { return (0); }
1943 1943
1944 1944 #else /* __lint */
1945 1945
1946 1946 #if defined(__amd64)
1947 1947
1948 1948 ENTRY(intr_clear)
1949 1949 ENTRY(clear_int_flag)
1950 1950 pushfq
1951 1951 popq %rax
1952 1952 #if defined(__xpv)
1953 1953 leaq xpv_panicking, %rdi
1954 1954 movl (%rdi), %edi
1955 1955 cmpl $0, %edi
1956 1956 jne 2f
1957 1957 CLIRET(%rdi, %dl) /* returns event mask in %dl */
1958 1958 /*
1959 1959 * Synthesize the PS_IE bit from the event mask bit
1960 1960 */
1961 1961 andq $_BITNOT(PS_IE), %rax
1962 1962 testb $1, %dl
1963 1963 jnz 1f
1964 1964 orq $PS_IE, %rax
1965 1965 1:
1966 1966 ret
1967 1967 2:
1968 1968 #endif
1969 1969 CLI(%rdi)
1970 1970 ret
1971 1971 SET_SIZE(clear_int_flag)
1972 1972 SET_SIZE(intr_clear)
1973 1973
1974 1974 #elif defined(__i386)
1975 1975
1976 1976 ENTRY(intr_clear)
1977 1977 ENTRY(clear_int_flag)
1978 1978 pushfl
1979 1979 popl %eax
1980 1980 #if defined(__xpv)
1981 1981 leal xpv_panicking, %edx
1982 1982 movl (%edx), %edx
1983 1983 cmpl $0, %edx
1984 1984 jne 2f
1985 1985 CLIRET(%edx, %cl) /* returns event mask in %cl */
1986 1986 /*
1987 1987 * Synthesize the PS_IE bit from the event mask bit
1988 1988 */
1989 1989 andl $_BITNOT(PS_IE), %eax
1990 1990 testb $1, %cl
1991 1991 jnz 1f
1992 1992 orl $PS_IE, %eax
1993 1993 1:
1994 1994 ret
1995 1995 2:
1996 1996 #endif
1997 1997 CLI(%edx)
1998 1998 ret
1999 1999 SET_SIZE(clear_int_flag)
2000 2000 SET_SIZE(intr_clear)
2001 2001
2002 2002 #endif /* __i386 */
2003 2003 #endif /* __lint */
2004 2004
2005 2005 #if defined(__lint)
2006 2006
2007 2007 struct cpu *
2008 2008 curcpup(void)
2009 2009 { return 0; }
2010 2010
2011 2011 #else /* __lint */
2012 2012
2013 2013 #if defined(__amd64)
2014 2014
2015 2015 ENTRY(curcpup)
2016 2016 movq %gs:CPU_SELF, %rax
2017 2017 ret
2018 2018 SET_SIZE(curcpup)
2019 2019
2020 2020 #elif defined(__i386)
2021 2021
2022 2022 ENTRY(curcpup)
2023 2023 movl %gs:CPU_SELF, %eax
2024 2024 ret
2025 2025 SET_SIZE(curcpup)
2026 2026
2027 2027 #endif /* __i386 */
2028 2028 #endif /* __lint */
2029 2029
2030 +/* htonll(), ntohll(), htonl(), ntohl(), htons(), ntohs()
2031 + * These functions reverse the byte order of the input parameter and return
2032 + * the result. This is to convert the byte order from host byte order
2033 + * (little endian) to network byte order (big endian), or vice versa.
2034 + */
2035 +
2030 2036 #if defined(__lint)
2031 2037
2032 -/* ARGSUSED */
2038 +uint64_t
2039 +htonll(uint64_t i)
2040 +{ return (i); }
2041 +
2042 +uint64_t
2043 +ntohll(uint64_t i)
2044 +{ return (i); }
2045 +
2033 2046 uint32_t
2034 2047 htonl(uint32_t i)
2035 -{ return (0); }
2048 +{ return (i); }
2036 2049
2037 -/* ARGSUSED */
2038 2050 uint32_t
2039 2051 ntohl(uint32_t i)
2040 -{ return (0); }
2052 +{ return (i); }
2041 2053
2054 +uint16_t
2055 +htons(uint16_t i)
2056 +{ return (i); }
2057 +
2058 +uint16_t
2059 +ntohs(uint16_t i)
2060 +{ return (i); }
2061 +
2042 2062 #else /* __lint */
2043 2063
2044 2064 #if defined(__amd64)
2045 2065
2066 + ENTRY(htonll)
2067 + ALTENTRY(ntohll)
2068 + movq %rdi, %rax
2069 + bswapq %rax
2070 + ret
2071 + SET_SIZE(ntohll)
2072 + SET_SIZE(htonll)
2073 +
2046 2074 /* XX64 there must be shorter sequences for this */
2047 2075 ENTRY(htonl)
2048 2076 ALTENTRY(ntohl)
2049 2077 movl %edi, %eax
2050 2078 bswap %eax
2051 2079 ret
2052 2080 SET_SIZE(ntohl)
2053 2081 SET_SIZE(htonl)
2054 2082
2055 -#elif defined(__i386)
2056 -
2057 - ENTRY(htonl)
2058 - ALTENTRY(ntohl)
2059 - movl 4(%esp), %eax
2060 - bswap %eax
2061 - ret
2062 - SET_SIZE(ntohl)
2063 - SET_SIZE(htonl)
2064 -
2065 -#endif /* __i386 */
2066 -#endif /* __lint */
2067 -
2068 -#if defined(__lint)
2069 -
2070 -/* ARGSUSED */
2071 -uint16_t
2072 -htons(uint16_t i)
2073 -{ return (0); }
2074 -
2075 -/* ARGSUSED */
2076 -uint16_t
2077 -ntohs(uint16_t i)
2078 -{ return (0); }
2079 -
2080 -
2081 -#else /* __lint */
2082 -
2083 -#if defined(__amd64)
2084 -
2085 2083 /* XX64 there must be better sequences for this */
2086 2084 ENTRY(htons)
2087 2085 ALTENTRY(ntohs)
2088 2086 movl %edi, %eax
2089 2087 bswap %eax
2090 2088 shrl $16, %eax
2091 2089 ret
2092 - SET_SIZE(ntohs)
2090 + SET_SIZE(ntohs)
2093 2091 SET_SIZE(htons)
2094 2092
2095 2093 #elif defined(__i386)
2096 2094
2095 + ENTRY(htonll)
2096 + ALTENTRY(ntohll)
2097 + movl 4(%esp), %edx
2098 + movl 8(%esp), %eax
2099 + bswap %edx
2100 + bswap %eax
2101 + ret
2102 + SET_SIZE(ntohll)
2103 + SET_SIZE(htonll)
2104 +
2105 + ENTRY(htonl)
2106 + ALTENTRY(ntohl)
2107 + movl 4(%esp), %eax
2108 + bswap %eax
2109 + ret
2110 + SET_SIZE(ntohl)
2111 + SET_SIZE(htonl)
2112 +
2097 2113 ENTRY(htons)
2098 2114 ALTENTRY(ntohs)
2099 2115 movl 4(%esp), %eax
2100 2116 bswap %eax
2101 2117 shrl $16, %eax
2102 2118 ret
2103 2119 SET_SIZE(ntohs)
2104 2120 SET_SIZE(htons)
2105 2121
2106 2122 #endif /* __i386 */
2107 2123 #endif /* __lint */
2108 2124
2109 2125
2110 2126 #if defined(__lint)
2111 2127
2112 2128 /* ARGSUSED */
2113 2129 void
2114 2130 intr_restore(ulong_t i)
2115 2131 { return; }
2116 2132
2117 2133 /* ARGSUSED */
2118 2134 void
2119 2135 restore_int_flag(ulong_t i)
2120 2136 { return; }
2121 2137
2122 2138 #else /* __lint */
2123 2139
2124 2140 #if defined(__amd64)
2125 2141
2126 2142 ENTRY(intr_restore)
2127 2143 ENTRY(restore_int_flag)
2128 2144 testq $PS_IE, %rdi
2129 2145 jz 1f
2130 2146 #if defined(__xpv)
2131 2147 leaq xpv_panicking, %rsi
2132 2148 movl (%rsi), %esi
2133 2149 cmpl $0, %esi
2134 2150 jne 1f
2135 2151 /*
2136 2152 * Since we're -really- running unprivileged, our attempt
2137 2153 * to change the state of the IF bit will be ignored.
2138 2154 * The virtual IF bit is tweaked by CLI and STI.
2139 2155 */
2140 2156 IE_TO_EVENT_MASK(%rsi, %rdi)
2141 2157 #else
2142 2158 sti
2143 2159 #endif
2144 2160 1:
2145 2161 ret
2146 2162 SET_SIZE(restore_int_flag)
2147 2163 SET_SIZE(intr_restore)
2148 2164
2149 2165 #elif defined(__i386)
2150 2166
2151 2167 ENTRY(intr_restore)
2152 2168 ENTRY(restore_int_flag)
2153 2169 testl $PS_IE, 4(%esp)
2154 2170 jz 1f
2155 2171 #if defined(__xpv)
2156 2172 leal xpv_panicking, %edx
2157 2173 movl (%edx), %edx
2158 2174 cmpl $0, %edx
2159 2175 jne 1f
2160 2176 /*
2161 2177 * Since we're -really- running unprivileged, our attempt
2162 2178 * to change the state of the IF bit will be ignored.
2163 2179 * The virtual IF bit is tweaked by CLI and STI.
2164 2180 */
2165 2181 IE_TO_EVENT_MASK(%edx, 4(%esp))
2166 2182 #else
2167 2183 sti
2168 2184 #endif
2169 2185 1:
2170 2186 ret
2171 2187 SET_SIZE(restore_int_flag)
2172 2188 SET_SIZE(intr_restore)
2173 2189
2174 2190 #endif /* __i386 */
2175 2191 #endif /* __lint */
2176 2192
2177 2193 #if defined(__lint)
2178 2194
2179 2195 void
2180 2196 sti(void)
2181 2197 {}
2182 2198
2183 2199 void
2184 2200 cli(void)
2185 2201 {}
2186 2202
2187 2203 #else /* __lint */
2188 2204
2189 2205 ENTRY(sti)
2190 2206 STI
2191 2207 ret
2192 2208 SET_SIZE(sti)
2193 2209
2194 2210 ENTRY(cli)
2195 2211 #if defined(__amd64)
2196 2212 CLI(%rax)
2197 2213 #elif defined(__i386)
2198 2214 CLI(%eax)
2199 2215 #endif /* __i386 */
2200 2216 ret
2201 2217 SET_SIZE(cli)
2202 2218
2203 2219 #endif /* __lint */
2204 2220
2205 2221 #if defined(__lint)
2206 2222
2207 2223 dtrace_icookie_t
2208 2224 dtrace_interrupt_disable(void)
2209 2225 { return (0); }
2210 2226
2211 2227 #else /* __lint */
2212 2228
2213 2229 #if defined(__amd64)
2214 2230
2215 2231 ENTRY(dtrace_interrupt_disable)
2216 2232 pushfq
2217 2233 popq %rax
2218 2234 #if defined(__xpv)
2219 2235 leaq xpv_panicking, %rdi
2220 2236 movl (%rdi), %edi
2221 2237 cmpl $0, %edi
2222 2238 jne 1f
2223 2239 CLIRET(%rdi, %dl) /* returns event mask in %dl */
2224 2240 /*
2225 2241 * Synthesize the PS_IE bit from the event mask bit
2226 2242 */
2227 2243 andq $_BITNOT(PS_IE), %rax
2228 2244 testb $1, %dl
2229 2245 jnz 1f
2230 2246 orq $PS_IE, %rax
2231 2247 1:
2232 2248 #else
2233 2249 CLI(%rdx)
2234 2250 #endif
2235 2251 ret
2236 2252 SET_SIZE(dtrace_interrupt_disable)
2237 2253
2238 2254 #elif defined(__i386)
2239 2255
2240 2256 ENTRY(dtrace_interrupt_disable)
2241 2257 pushfl
2242 2258 popl %eax
2243 2259 #if defined(__xpv)
2244 2260 leal xpv_panicking, %edx
2245 2261 movl (%edx), %edx
2246 2262 cmpl $0, %edx
2247 2263 jne 1f
2248 2264 CLIRET(%edx, %cl) /* returns event mask in %cl */
2249 2265 /*
2250 2266 * Synthesize the PS_IE bit from the event mask bit
2251 2267 */
2252 2268 andl $_BITNOT(PS_IE), %eax
2253 2269 testb $1, %cl
2254 2270 jnz 1f
2255 2271 orl $PS_IE, %eax
2256 2272 1:
2257 2273 #else
2258 2274 CLI(%edx)
2259 2275 #endif
2260 2276 ret
2261 2277 SET_SIZE(dtrace_interrupt_disable)
2262 2278
2263 2279 #endif /* __i386 */
2264 2280 #endif /* __lint */
2265 2281
2266 2282 #if defined(__lint)
2267 2283
2268 2284 /*ARGSUSED*/
2269 2285 void
2270 2286 dtrace_interrupt_enable(dtrace_icookie_t cookie)
2271 2287 {}
2272 2288
2273 2289 #else /* __lint */
2274 2290
2275 2291 #if defined(__amd64)
2276 2292
2277 2293 ENTRY(dtrace_interrupt_enable)
2278 2294 pushq %rdi
2279 2295 popfq
2280 2296 #if defined(__xpv)
2281 2297 leaq xpv_panicking, %rdx
2282 2298 movl (%rdx), %edx
2283 2299 cmpl $0, %edx
2284 2300 jne 1f
2285 2301 /*
2286 2302 * Since we're -really- running unprivileged, our attempt
2287 2303 * to change the state of the IF bit will be ignored. The
2288 2304 * virtual IF bit is tweaked by CLI and STI.
2289 2305 */
2290 2306 IE_TO_EVENT_MASK(%rdx, %rdi)
2291 2307 #endif
2292 2308 ret
2293 2309 SET_SIZE(dtrace_interrupt_enable)
2294 2310
2295 2311 #elif defined(__i386)
2296 2312
2297 2313 ENTRY(dtrace_interrupt_enable)
2298 2314 movl 4(%esp), %eax
2299 2315 pushl %eax
2300 2316 popfl
2301 2317 #if defined(__xpv)
2302 2318 leal xpv_panicking, %edx
2303 2319 movl (%edx), %edx
2304 2320 cmpl $0, %edx
2305 2321 jne 1f
2306 2322 /*
2307 2323 * Since we're -really- running unprivileged, our attempt
2308 2324 * to change the state of the IF bit will be ignored. The
2309 2325 * virtual IF bit is tweaked by CLI and STI.
2310 2326 */
2311 2327 IE_TO_EVENT_MASK(%edx, %eax)
2312 2328 #endif
2313 2329 ret
2314 2330 SET_SIZE(dtrace_interrupt_enable)
2315 2331
2316 2332 #endif /* __i386 */
2317 2333 #endif /* __lint */
2318 2334
2319 2335
2320 2336 #if defined(lint)
2321 2337
2322 2338 void
2323 2339 dtrace_membar_producer(void)
2324 2340 {}
2325 2341
2326 2342 void
2327 2343 dtrace_membar_consumer(void)
2328 2344 {}
2329 2345
2330 2346 #else /* __lint */
2331 2347
2332 2348 ENTRY(dtrace_membar_producer)
2333 2349 rep; ret /* use 2 byte return instruction when branch target */
2334 2350 /* AMD Software Optimization Guide - Section 6.2 */
2335 2351 SET_SIZE(dtrace_membar_producer)
2336 2352
2337 2353 ENTRY(dtrace_membar_consumer)
2338 2354 rep; ret /* use 2 byte return instruction when branch target */
2339 2355 /* AMD Software Optimization Guide - Section 6.2 */
2340 2356 SET_SIZE(dtrace_membar_consumer)
2341 2357
2342 2358 #endif /* __lint */
2343 2359
2344 2360 #if defined(__lint)
2345 2361
2346 2362 kthread_id_t
2347 2363 threadp(void)
2348 2364 { return ((kthread_id_t)0); }
2349 2365
2350 2366 #else /* __lint */
2351 2367
2352 2368 #if defined(__amd64)
2353 2369
2354 2370 ENTRY(threadp)
2355 2371 movq %gs:CPU_THREAD, %rax
2356 2372 ret
2357 2373 SET_SIZE(threadp)
2358 2374
2359 2375 #elif defined(__i386)
2360 2376
2361 2377 ENTRY(threadp)
2362 2378 movl %gs:CPU_THREAD, %eax
2363 2379 ret
2364 2380 SET_SIZE(threadp)
2365 2381
2366 2382 #endif /* __i386 */
2367 2383 #endif /* __lint */
2368 2384
2369 2385 /*
2370 2386 * Checksum routine for Internet Protocol Headers
2371 2387 */
2372 2388
2373 2389 #if defined(__lint)
2374 2390
2375 2391 /* ARGSUSED */
2376 2392 unsigned int
2377 2393 ip_ocsum(
2378 2394 ushort_t *address, /* ptr to 1st message buffer */
2379 2395 int halfword_count, /* length of data */
2380 2396 unsigned int sum) /* partial checksum */
2381 2397 {
2382 2398 int i;
2383 2399 unsigned int psum = 0; /* partial sum */
2384 2400
2385 2401 for (i = 0; i < halfword_count; i++, address++) {
2386 2402 psum += *address;
2387 2403 }
2388 2404
2389 2405 while ((psum >> 16) != 0) {
2390 2406 psum = (psum & 0xffff) + (psum >> 16);
2391 2407 }
2392 2408
2393 2409 psum += sum;
2394 2410
2395 2411 while ((psum >> 16) != 0) {
2396 2412 psum = (psum & 0xffff) + (psum >> 16);
2397 2413 }
2398 2414
2399 2415 return (psum);
2400 2416 }
2401 2417
2402 2418 #else /* __lint */
2403 2419
2404 2420 #if defined(__amd64)
2405 2421
2406 2422 ENTRY(ip_ocsum)
2407 2423 pushq %rbp
2408 2424 movq %rsp, %rbp
2409 2425 #ifdef DEBUG
2410 2426 movq postbootkernelbase(%rip), %rax
2411 2427 cmpq %rax, %rdi
2412 2428 jnb 1f
2413 2429 xorl %eax, %eax
2414 2430 movq %rdi, %rsi
2415 2431 leaq .ip_ocsum_panic_msg(%rip), %rdi
2416 2432 call panic
2417 2433 /*NOTREACHED*/
2418 2434 .ip_ocsum_panic_msg:
2419 2435 .string "ip_ocsum: address 0x%p below kernelbase\n"
2420 2436 1:
2421 2437 #endif
2422 2438 movl %esi, %ecx /* halfword_count */
2423 2439 movq %rdi, %rsi /* address */
2424 2440 /* partial sum in %edx */
2425 2441 xorl %eax, %eax
2426 2442 testl %ecx, %ecx
2427 2443 jz .ip_ocsum_done
2428 2444 testq $3, %rsi
2429 2445 jnz .ip_csum_notaligned
2430 2446 .ip_csum_aligned: /* XX64 opportunities for 8-byte operations? */
2431 2447 .next_iter:
2432 2448 /* XX64 opportunities for prefetch? */
2433 2449 /* XX64 compute csum with 64 bit quantities? */
2434 2450 subl $32, %ecx
2435 2451 jl .less_than_32
2436 2452
2437 2453 addl 0(%rsi), %edx
2438 2454 .only60:
2439 2455 adcl 4(%rsi), %eax
2440 2456 .only56:
2441 2457 adcl 8(%rsi), %edx
2442 2458 .only52:
2443 2459 adcl 12(%rsi), %eax
2444 2460 .only48:
2445 2461 adcl 16(%rsi), %edx
2446 2462 .only44:
2447 2463 adcl 20(%rsi), %eax
2448 2464 .only40:
2449 2465 adcl 24(%rsi), %edx
2450 2466 .only36:
2451 2467 adcl 28(%rsi), %eax
2452 2468 .only32:
2453 2469 adcl 32(%rsi), %edx
2454 2470 .only28:
2455 2471 adcl 36(%rsi), %eax
2456 2472 .only24:
2457 2473 adcl 40(%rsi), %edx
2458 2474 .only20:
2459 2475 adcl 44(%rsi), %eax
2460 2476 .only16:
2461 2477 adcl 48(%rsi), %edx
2462 2478 .only12:
2463 2479 adcl 52(%rsi), %eax
2464 2480 .only8:
2465 2481 adcl 56(%rsi), %edx
2466 2482 .only4:
2467 2483 adcl 60(%rsi), %eax /* could be adding -1 and -1 with a carry */
2468 2484 .only0:
2469 2485 adcl $0, %eax /* could be adding -1 in eax with a carry */
2470 2486 adcl $0, %eax
2471 2487
2472 2488 addq $64, %rsi
2473 2489 testl %ecx, %ecx
2474 2490 jnz .next_iter
2475 2491
2476 2492 .ip_ocsum_done:
2477 2493 addl %eax, %edx
2478 2494 adcl $0, %edx
2479 2495 movl %edx, %eax /* form a 16 bit checksum by */
2480 2496 shrl $16, %eax /* adding two halves of 32 bit checksum */
2481 2497 addw %dx, %ax
2482 2498 adcw $0, %ax
2483 2499 andl $0xffff, %eax
2484 2500 leave
2485 2501 ret
2486 2502
2487 2503 .ip_csum_notaligned:
2488 2504 xorl %edi, %edi
2489 2505 movw (%rsi), %di
2490 2506 addl %edi, %edx
2491 2507 adcl $0, %edx
2492 2508 addq $2, %rsi
2493 2509 decl %ecx
2494 2510 jmp .ip_csum_aligned
2495 2511
2496 2512 .less_than_32:
2497 2513 addl $32, %ecx
2498 2514 testl $1, %ecx
2499 2515 jz .size_aligned
2500 2516 andl $0xfe, %ecx
2501 2517 movzwl (%rsi, %rcx, 2), %edi
2502 2518 addl %edi, %edx
2503 2519 adcl $0, %edx
2504 2520 .size_aligned:
2505 2521 movl %ecx, %edi
2506 2522 shrl $1, %ecx
2507 2523 shl $1, %edi
2508 2524 subq $64, %rdi
2509 2525 addq %rdi, %rsi
2510 2526 leaq .ip_ocsum_jmptbl(%rip), %rdi
2511 2527 leaq (%rdi, %rcx, 8), %rdi
2512 2528 xorl %ecx, %ecx
2513 2529 clc
2514 2530 jmp *(%rdi)
2515 2531
2516 2532 .align 8
2517 2533 .ip_ocsum_jmptbl:
2518 2534 .quad .only0, .only4, .only8, .only12, .only16, .only20
2519 2535 .quad .only24, .only28, .only32, .only36, .only40, .only44
2520 2536 .quad .only48, .only52, .only56, .only60
2521 2537 SET_SIZE(ip_ocsum)
2522 2538
2523 2539 #elif defined(__i386)
2524 2540
2525 2541 ENTRY(ip_ocsum)
2526 2542 pushl %ebp
2527 2543 movl %esp, %ebp
2528 2544 pushl %ebx
2529 2545 pushl %esi
2530 2546 pushl %edi
2531 2547 movl 12(%ebp), %ecx /* count of half words */
2532 2548 movl 16(%ebp), %edx /* partial checksum */
2533 2549 movl 8(%ebp), %esi
2534 2550 xorl %eax, %eax
2535 2551 testl %ecx, %ecx
2536 2552 jz .ip_ocsum_done
2537 2553
2538 2554 testl $3, %esi
2539 2555 jnz .ip_csum_notaligned
2540 2556 .ip_csum_aligned:
2541 2557 .next_iter:
2542 2558 subl $32, %ecx
2543 2559 jl .less_than_32
2544 2560
2545 2561 addl 0(%esi), %edx
2546 2562 .only60:
2547 2563 adcl 4(%esi), %eax
2548 2564 .only56:
2549 2565 adcl 8(%esi), %edx
2550 2566 .only52:
2551 2567 adcl 12(%esi), %eax
2552 2568 .only48:
2553 2569 adcl 16(%esi), %edx
2554 2570 .only44:
2555 2571 adcl 20(%esi), %eax
2556 2572 .only40:
2557 2573 adcl 24(%esi), %edx
2558 2574 .only36:
2559 2575 adcl 28(%esi), %eax
2560 2576 .only32:
2561 2577 adcl 32(%esi), %edx
2562 2578 .only28:
2563 2579 adcl 36(%esi), %eax
2564 2580 .only24:
2565 2581 adcl 40(%esi), %edx
2566 2582 .only20:
2567 2583 adcl 44(%esi), %eax
2568 2584 .only16:
2569 2585 adcl 48(%esi), %edx
2570 2586 .only12:
2571 2587 adcl 52(%esi), %eax
2572 2588 .only8:
2573 2589 adcl 56(%esi), %edx
2574 2590 .only4:
2575 2591 adcl 60(%esi), %eax /* We could be adding -1 and -1 with a carry */
2576 2592 .only0:
2577 2593 adcl $0, %eax /* we could be adding -1 in eax with a carry */
2578 2594 adcl $0, %eax
2579 2595
2580 2596 addl $64, %esi
2581 2597 andl %ecx, %ecx
2582 2598 jnz .next_iter
2583 2599
2584 2600 .ip_ocsum_done:
2585 2601 addl %eax, %edx
2586 2602 adcl $0, %edx
2587 2603 movl %edx, %eax /* form a 16 bit checksum by */
2588 2604 shrl $16, %eax /* adding two halves of 32 bit checksum */
2589 2605 addw %dx, %ax
2590 2606 adcw $0, %ax
2591 2607 andl $0xffff, %eax
2592 2608 popl %edi /* restore registers */
2593 2609 popl %esi
2594 2610 popl %ebx
2595 2611 leave
2596 2612 ret
2597 2613
2598 2614 .ip_csum_notaligned:
2599 2615 xorl %edi, %edi
2600 2616 movw (%esi), %di
2601 2617 addl %edi, %edx
2602 2618 adcl $0, %edx
2603 2619 addl $2, %esi
2604 2620 decl %ecx
2605 2621 jmp .ip_csum_aligned
2606 2622
2607 2623 .less_than_32:
2608 2624 addl $32, %ecx
2609 2625 testl $1, %ecx
2610 2626 jz .size_aligned
2611 2627 andl $0xfe, %ecx
2612 2628 movzwl (%esi, %ecx, 2), %edi
2613 2629 addl %edi, %edx
2614 2630 adcl $0, %edx
2615 2631 .size_aligned:
2616 2632 movl %ecx, %edi
2617 2633 shrl $1, %ecx
2618 2634 shl $1, %edi
2619 2635 subl $64, %edi
2620 2636 addl %edi, %esi
2621 2637 movl $.ip_ocsum_jmptbl, %edi
2622 2638 lea (%edi, %ecx, 4), %edi
2623 2639 xorl %ecx, %ecx
2624 2640 clc
2625 2641 jmp *(%edi)
2626 2642 SET_SIZE(ip_ocsum)
2627 2643
2628 2644 .data
2629 2645 .align 4
2630 2646
2631 2647 .ip_ocsum_jmptbl:
2632 2648 .long .only0, .only4, .only8, .only12, .only16, .only20
2633 2649 .long .only24, .only28, .only32, .only36, .only40, .only44
2634 2650 .long .only48, .only52, .only56, .only60
2635 2651
2636 2652
2637 2653 #endif /* __i386 */
2638 2654 #endif /* __lint */
2639 2655
2640 2656 /*
2641 2657 * multiply two long numbers and yield a u_longlong_t result, callable from C.
2642 2658 * Provided to manipulate hrtime_t values.
2643 2659 */
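As a quick illustration (not part of this change), the C equivalent of mul32()
widens one operand before multiplying so the full 64-bit product is preserved;
callers use it to scale hrtime_t values. Names below are illustrative only:

	/* C equivalent of mul32(). */
	unsigned long long
	mul32_c(uint_t a, uint_t b)
	{
		return ((unsigned long long)a * b);	/* full 32x32 -> 64 product */
	}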
2644 2660 #if defined(__lint)
2645 2661
2646 2662 /* result = a * b; */
2647 2663
2648 2664 /* ARGSUSED */
2649 2665 unsigned long long
2650 2666 mul32(uint_t a, uint_t b)
2651 2667 { return (0); }
2652 2668
2653 2669 #else /* __lint */
2654 2670
2655 2671 #if defined(__amd64)
2656 2672
2657 2673 ENTRY(mul32)
2658 2674 xorl %edx, %edx /* XX64 joe, paranoia? */
2659 2675 movl %edi, %eax
2660 2676 mull %esi
2661 2677 shlq $32, %rdx
2662 2678 orq %rdx, %rax
2663 2679 ret
2664 2680 SET_SIZE(mul32)
2665 2681
2666 2682 #elif defined(__i386)
2667 2683
2668 2684 ENTRY(mul32)
2669 2685 movl 8(%esp), %eax
2670 2686 movl 4(%esp), %ecx
2671 2687 mull %ecx
2672 2688 ret
2673 2689 SET_SIZE(mul32)
2674 2690
2675 2691 #endif /* __i386 */
2676 2692 #endif /* __lint */
2677 2693
2678 2694 #if defined(notused)
2679 2695 #if defined(__lint)
2680 2696 /* ARGSUSED */
2681 2697 void
2682 2698 load_pte64(uint64_t *pte, uint64_t pte_value)
2683 2699 {}
2684 2700 #else /* __lint */
2685 2701 .globl load_pte64
2686 2702 load_pte64:
2687 2703 movl 4(%esp), %eax
2688 2704 movl 8(%esp), %ecx
2689 2705 movl 12(%esp), %edx
2690 2706 movl %edx, 4(%eax)
2691 2707 movl %ecx, (%eax)
2692 2708 ret
2693 2709 #endif /* __lint */
2694 2710 #endif /* notused */
2695 2711
2696 2712 #if defined(__lint)
2697 2713
2698 2714 /*ARGSUSED*/
2699 2715 void
2700 2716 scan_memory(caddr_t addr, size_t size)
2701 2717 {}
2702 2718
2703 2719 #else /* __lint */
2704 2720
2705 2721 #if defined(__amd64)
2706 2722
2707 2723 ENTRY(scan_memory)
2708 2724 shrq $3, %rsi /* convert %rsi from byte to quadword count */
2709 2725 jz .scanm_done
2710 2726 movq %rsi, %rcx /* move count into rep control register */
2711 2727 movq %rdi, %rsi /* move addr into lodsq control reg. */
2712 2728 rep lodsq /* scan the memory range */
2713 2729 .scanm_done:
2714 2730 rep; ret /* use 2 byte return instruction when branch target */
2715 2731 /* AMD Software Optimization Guide - Section 6.2 */
2716 2732 SET_SIZE(scan_memory)
2717 2733
2718 2734 #elif defined(__i386)
2719 2735
2720 2736 ENTRY(scan_memory)
2721 2737 pushl %ecx
2722 2738 pushl %esi
2723 2739 movl 16(%esp), %ecx /* move 2nd arg into rep control register */
2724 2740 shrl $2, %ecx /* convert from byte count to word count */
2725 2741 jz .scanm_done
2726 2742 movl 12(%esp), %esi /* move 1st arg into lodsw control register */
2727 2743 .byte 0xf3 /* rep prefix. lame assembler. sigh. */
2728 2744 lodsl
2729 2745 .scanm_done:
2730 2746 popl %esi
2731 2747 popl %ecx
2732 2748 ret
2733 2749 SET_SIZE(scan_memory)
2734 2750
2735 2751 #endif /* __i386 */
2736 2752 #endif /* __lint */
2737 2753
2738 2754
2739 2755 #if defined(__lint)
2740 2756
2741 2757 /*ARGSUSED */
2742 2758 int
2743 2759 lowbit(ulong_t i)
2744 2760 { return (0); }
2745 2761
2746 2762 #else /* __lint */
2747 2763
2748 2764 #if defined(__amd64)
2749 2765
2750 2766 ENTRY(lowbit)
2751 2767 movl $-1, %eax
2752 2768 bsfq %rdi, %rax
2753 2769 incl %eax
2754 2770 ret
2755 2771 SET_SIZE(lowbit)
2756 2772
2757 2773 #elif defined(__i386)
2758 2774
2759 2775 ENTRY(lowbit)
2760 2776 movl $-1, %eax
2761 2777 bsfl 4(%esp), %eax
2762 2778 incl %eax
2763 2779 ret
2764 2780 SET_SIZE(lowbit)
2765 2781
2766 2782 #endif /* __i386 */
2767 2783 #endif /* __lint */
2768 2784
2769 2785 #if defined(__lint)
2770 2786
2771 2787 /*ARGSUSED*/
2772 2788 int
2773 2789 highbit(ulong_t i)
2774 2790 { return (0); }
2775 2791
2776 2792 #else /* __lint */
2777 2793
2778 2794 #if defined(__amd64)
2779 2795
2780 2796 ENTRY(highbit)
2781 2797 movl $-1, %eax
2782 2798 bsrq %rdi, %rax
2783 2799 incl %eax
2784 2800 ret
2785 2801 SET_SIZE(highbit)
2786 2802
2787 2803 #elif defined(__i386)
2788 2804
2789 2805 ENTRY(highbit)
2790 2806 movl $-1, %eax
2791 2807 bsrl 4(%esp), %eax
2792 2808 incl %eax
2793 2809 ret
2794 2810 SET_SIZE(highbit)
2795 2811
2796 2812 #endif /* __i386 */
2797 2813 #endif /* __lint */
2798 2814
2799 2815 #if defined(__lint)
2800 2816
2801 2817 /*ARGSUSED*/
2802 2818 uint64_t
2803 2819 rdmsr(uint_t r)
2804 2820 { return (0); }
2805 2821
2806 2822 /*ARGSUSED*/
2807 2823 void
2808 2824 wrmsr(uint_t r, const uint64_t val)
2809 2825 {}
2810 2826
2811 2827 /*ARGSUSED*/
2812 2828 uint64_t
2813 2829 xrdmsr(uint_t r)
2814 2830 { return (0); }
2815 2831
2816 2832 /*ARGSUSED*/
2817 2833 void
2818 2834 xwrmsr(uint_t r, const uint64_t val)
2819 2835 {}
2820 2836
2821 2837 void
2822 2838 invalidate_cache(void)
2823 2839 {}
2824 2840
2825 2841 #else /* __lint */
2826 2842
2827 2843 #define XMSR_ACCESS_VAL $0x9c5a203a
2828 2844
2829 2845 #if defined(__amd64)
2830 2846
2831 2847 ENTRY(rdmsr)
2832 2848 movl %edi, %ecx
2833 2849 rdmsr
2834 2850 shlq $32, %rdx
2835 2851 orq %rdx, %rax
2836 2852 ret
2837 2853 SET_SIZE(rdmsr)
2838 2854
2839 2855 ENTRY(wrmsr)
2840 2856 movq %rsi, %rdx
2841 2857 shrq $32, %rdx
2842 2858 movl %esi, %eax
2843 2859 movl %edi, %ecx
2844 2860 wrmsr
2845 2861 ret
2846 2862 SET_SIZE(wrmsr)
2847 2863
2848 2864 ENTRY(xrdmsr)
2849 2865 pushq %rbp
2850 2866 movq %rsp, %rbp
2851 2867 movl %edi, %ecx
2852 2868 movl XMSR_ACCESS_VAL, %edi /* this value is needed to access MSR */
2853 2869 rdmsr
2854 2870 shlq $32, %rdx
2855 2871 orq %rdx, %rax
2856 2872 leave
2857 2873 ret
2858 2874 SET_SIZE(xrdmsr)
2859 2875
2860 2876 ENTRY(xwrmsr)
2861 2877 pushq %rbp
2862 2878 movq %rsp, %rbp
2863 2879 movl %edi, %ecx
2864 2880 movl XMSR_ACCESS_VAL, %edi /* this value is needed to access MSR */
2865 2881 movq %rsi, %rdx
2866 2882 shrq $32, %rdx
2867 2883 movl %esi, %eax
2868 2884 wrmsr
2869 2885 leave
2870 2886 ret
2871 2887 SET_SIZE(xwrmsr)
2872 2888
2873 2889 #elif defined(__i386)
2874 2890
2875 2891 ENTRY(rdmsr)
2876 2892 movl 4(%esp), %ecx
2877 2893 rdmsr
2878 2894 ret
2879 2895 SET_SIZE(rdmsr)
2880 2896
2881 2897 ENTRY(wrmsr)
2882 2898 movl 4(%esp), %ecx
2883 2899 movl 8(%esp), %eax
2884 2900 movl 12(%esp), %edx
2885 2901 wrmsr
2886 2902 ret
2887 2903 SET_SIZE(wrmsr)
2888 2904
2889 2905 ENTRY(xrdmsr)
2890 2906 pushl %ebp
2891 2907 movl %esp, %ebp
2892 2908 movl 8(%esp), %ecx
2893 2909 pushl %edi
2894 2910 movl XMSR_ACCESS_VAL, %edi /* this value is needed to access MSR */
2895 2911 rdmsr
2896 2912 popl %edi
2897 2913 leave
2898 2914 ret
2899 2915 SET_SIZE(xrdmsr)
2900 2916
2901 2917 ENTRY(xwrmsr)
2902 2918 pushl %ebp
2903 2919 movl %esp, %ebp
2904 2920 movl 8(%esp), %ecx
2905 2921 movl 12(%esp), %eax
2906 2922 movl 16(%esp), %edx
2907 2923 pushl %edi
2908 2924 movl XMSR_ACCESS_VAL, %edi /* this value is needed to access MSR */
2909 2925 wrmsr
2910 2926 popl %edi
2911 2927 leave
2912 2928 ret
2913 2929 SET_SIZE(xwrmsr)
2914 2930
2915 2931 #endif /* __i386 */
2916 2932
2917 2933 ENTRY(invalidate_cache)
2918 2934 wbinvd
2919 2935 ret
2920 2936 SET_SIZE(invalidate_cache)
2921 2937
2922 2938 #endif /* __lint */
2923 2939
2924 2940 #if defined(__lint)
2925 2941
2926 2942 /*ARGSUSED*/
2927 2943 void
2928 2944 getcregs(struct cregs *crp)
2929 2945 {}
2930 2946
2931 2947 #else /* __lint */
2932 2948
2933 2949 #if defined(__amd64)
2934 2950
2935 2951 ENTRY_NP(getcregs)
2936 2952 #if defined(__xpv)
2937 2953 /*
2938 2954 * Only a few of the hardware control registers or descriptor tables
2939 2955 * are directly accessible to us, so just zero the structure.
2940 2956 *
2941 2957 * XXPV Perhaps it would be helpful for the hypervisor to return
2942 2958 * virtualized versions of these for post-mortem use.
2943 2959 * (Need to reevaluate - perhaps it already does!)
2944 2960 */
2945 2961 pushq %rdi /* save *crp */
2946 2962 movq $CREGSZ, %rsi
2947 2963 call bzero
2948 2964 popq %rdi
2949 2965
2950 2966 /*
2951 2967 * Dump what limited information we can
2952 2968 */
2953 2969 movq %cr0, %rax
2954 2970 movq %rax, CREG_CR0(%rdi) /* cr0 */
2955 2971 movq %cr2, %rax
2956 2972 movq %rax, CREG_CR2(%rdi) /* cr2 */
2957 2973 movq %cr3, %rax
2958 2974 movq %rax, CREG_CR3(%rdi) /* cr3 */
2959 2975 movq %cr4, %rax
2960 2976 movq %rax, CREG_CR4(%rdi) /* cr4 */
2961 2977
2962 2978 #else /* __xpv */
2963 2979
2964 2980 #define GETMSR(r, off, d) \
2965 2981 movl $r, %ecx; \
2966 2982 rdmsr; \
2967 2983 movl %eax, off(d); \
2968 2984 movl %edx, off+4(d)
2969 2985
2970 2986 xorl %eax, %eax
2971 2987 movq %rax, CREG_GDT+8(%rdi)
2972 2988 sgdt CREG_GDT(%rdi) /* 10 bytes */
2973 2989 movq %rax, CREG_IDT+8(%rdi)
2974 2990 sidt CREG_IDT(%rdi) /* 10 bytes */
2975 2991 movq %rax, CREG_LDT(%rdi)
2976 2992 sldt CREG_LDT(%rdi) /* 2 bytes */
2977 2993 movq %rax, CREG_TASKR(%rdi)
2978 2994 str CREG_TASKR(%rdi) /* 2 bytes */
2979 2995 movq %cr0, %rax
2980 2996 movq %rax, CREG_CR0(%rdi) /* cr0 */
2981 2997 movq %cr2, %rax
2982 2998 movq %rax, CREG_CR2(%rdi) /* cr2 */
2983 2999 movq %cr3, %rax
2984 3000 movq %rax, CREG_CR3(%rdi) /* cr3 */
2985 3001 movq %cr4, %rax
2986 3002 movq %rax, CREG_CR4(%rdi) /* cr4 */
2987 3003 movq %cr8, %rax
2988 3004 movq %rax, CREG_CR8(%rdi) /* cr8 */
2989 3005 GETMSR(MSR_AMD_KGSBASE, CREG_KGSBASE, %rdi)
2990 3006 GETMSR(MSR_AMD_EFER, CREG_EFER, %rdi)
2991 3007 #endif /* __xpv */
2992 3008 ret
2993 3009 SET_SIZE(getcregs)
2994 3010
2995 3011 #undef GETMSR
2996 3012
2997 3013 #elif defined(__i386)
2998 3014
2999 3015 ENTRY_NP(getcregs)
3000 3016 #if defined(__xpv)
3001 3017 /*
3002 3018 * Only a few of the hardware control registers or descriptor tables
3003 3019 * are directly accessible to us, so just zero the structure.
3004 3020 *
3005 3021 * XXPV Perhaps it would be helpful for the hypervisor to return
3006 3022 * virtualized versions of these for post-mortem use.
3007 3023 * (Need to reevaluate - perhaps it already does!)
3008 3024 */
3009 3025 movl 4(%esp), %edx
3010 3026 pushl $CREGSZ
3011 3027 pushl %edx
3012 3028 call bzero
3013 3029 addl $8, %esp
3014 3030 movl 4(%esp), %edx
3015 3031
3016 3032 /*
3017 3033 * Dump what limited information we can
3018 3034 */
3019 3035 movl %cr0, %eax
3020 3036 movl %eax, CREG_CR0(%edx) /* cr0 */
3021 3037 movl %cr2, %eax
3022 3038 movl %eax, CREG_CR2(%edx) /* cr2 */
3023 3039 movl %cr3, %eax
3024 3040 movl %eax, CREG_CR3(%edx) /* cr3 */
3025 3041 movl %cr4, %eax
3026 3042 movl %eax, CREG_CR4(%edx) /* cr4 */
3027 3043
3028 3044 #else /* __xpv */
3029 3045
3030 3046 movl 4(%esp), %edx
3031 3047 movw $0, CREG_GDT+6(%edx)
3032 3048 movw $0, CREG_IDT+6(%edx)
3033 3049 sgdt CREG_GDT(%edx) /* gdt */
3034 3050 sidt CREG_IDT(%edx) /* idt */
3035 3051 sldt CREG_LDT(%edx) /* ldt */
3036 3052 str CREG_TASKR(%edx) /* task */
3037 3053 movl %cr0, %eax
3038 3054 movl %eax, CREG_CR0(%edx) /* cr0 */
3039 3055 movl %cr2, %eax
3040 3056 movl %eax, CREG_CR2(%edx) /* cr2 */
3041 3057 movl %cr3, %eax
3042 3058 movl %eax, CREG_CR3(%edx) /* cr3 */
3043 3059 testl $X86_LARGEPAGE, x86_feature
3044 3060 jz .nocr4
3045 3061 movl %cr4, %eax
3046 3062 movl %eax, CREG_CR4(%edx) /* cr4 */
3047 3063 jmp .skip
3048 3064 .nocr4:
3049 3065 movl $0, CREG_CR4(%edx)
3050 3066 .skip:
3051 3067 #endif
3052 3068 ret
3053 3069 SET_SIZE(getcregs)
3054 3070
3055 3071 #endif /* __i386 */
3056 3072 #endif /* __lint */
3057 3073
3058 3074
3059 3075 /*
3060 3076 * A panic trigger is a word which is updated atomically and can only be set
3061 3077 * once. We atomically store 0xDEFACEDD and load the old value. If the
3062 3078 * previous value was 0, we succeed and return 1; otherwise return 0.
3063 3079 * This allows a partially corrupt trigger to still trigger correctly. DTrace
3064 3080 * has its own version of this function to allow it to panic correctly from
3065 3081 * probe context.
3066 3082 */
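A C-level sketch of the trigger logic described above (illustrative only; the
kernel uses the assembly below, and atomic_swap_32() from <sys/atomic.h> is
assumed as the atomic-exchange primitive):

	#include <sys/atomic.h>

	int
	panic_trigger_sketch(int *tp)
	{
		/* Atomically store the poison value and fetch the old contents. */
		uint32_t old = atomic_swap_32((volatile uint32_t *)tp, 0xdefacedd);

		/* Only the first caller observes 0 and wins the right to panic. */
		return (old == 0 ? 1 : 0);
	}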
3067 3083 #if defined(__lint)
3068 3084
3069 3085 /*ARGSUSED*/
3070 3086 int
3071 3087 panic_trigger(int *tp)
3072 3088 { return (0); }
3073 3089
3074 3090 /*ARGSUSED*/
3075 3091 int
3076 3092 dtrace_panic_trigger(int *tp)
3077 3093 { return (0); }
3078 3094
3079 3095 #else /* __lint */
3080 3096
3081 3097 #if defined(__amd64)
3082 3098
3083 3099 ENTRY_NP(panic_trigger)
3084 3100 xorl %eax, %eax
3085 3101 movl $0xdefacedd, %edx
3086 3102 lock
3087 3103 xchgl %edx, (%rdi)
3088 3104 cmpl $0, %edx
3089 3105 je 0f
3090 3106 movl $0, %eax
3091 3107 ret
3092 3108 0: movl $1, %eax
3093 3109 ret
3094 3110 SET_SIZE(panic_trigger)
3095 3111
3096 3112 ENTRY_NP(dtrace_panic_trigger)
3097 3113 xorl %eax, %eax
3098 3114 movl $0xdefacedd, %edx
3099 3115 lock
3100 3116 xchgl %edx, (%rdi)
3101 3117 cmpl $0, %edx
3102 3118 je 0f
3103 3119 movl $0, %eax
3104 3120 ret
3105 3121 0: movl $1, %eax
3106 3122 ret
3107 3123 SET_SIZE(dtrace_panic_trigger)
3108 3124
3109 3125 #elif defined(__i386)
3110 3126
3111 3127 ENTRY_NP(panic_trigger)
3112 3128 movl 4(%esp), %edx / %edx = address of trigger
3113 3129 movl $0xdefacedd, %eax / %eax = 0xdefacedd
3114 3130 lock / assert lock
3115 3131 xchgl %eax, (%edx) / exchange %eax and the trigger
3116 3132 cmpl $0, %eax / if (%eax == 0x0)
3117 3133 je 0f / return (1);
3118 3134 movl $0, %eax / else
3119 3135 ret / return (0);
3120 3136 0: movl $1, %eax
3121 3137 ret
3122 3138 SET_SIZE(panic_trigger)
3123 3139
3124 3140 ENTRY_NP(dtrace_panic_trigger)
3125 3141 movl 4(%esp), %edx / %edx = address of trigger
3126 3142 movl $0xdefacedd, %eax / %eax = 0xdefacedd
3127 3143 lock / assert lock
3128 3144 xchgl %eax, (%edx) / exchange %eax and the trigger
3129 3145 cmpl $0, %eax / if (%eax == 0x0)
3130 3146 je 0f / return (1);
3131 3147 movl $0, %eax / else
3132 3148 ret / return (0);
3133 3149 0: movl $1, %eax
3134 3150 ret
3135 3151 SET_SIZE(dtrace_panic_trigger)
3136 3152
3137 3153 #endif /* __i386 */
3138 3154 #endif /* __lint */
3139 3155
3140 3156 /*
3141 3157 * The panic() and cmn_err() functions invoke vpanic() as a common entry point
3142 3158 * into the panic code implemented in panicsys(). vpanic() is responsible
3143 3159 * for passing through the format string and arguments, and constructing a
3144 3160 * regs structure on the stack into which it saves the current register
3145 3161 * values. If we are not dying due to a fatal trap, these registers will
3146 3162 * then be preserved in panicbuf as the current processor state. Before
3147 3163 * invoking panicsys(), vpanic() activates the first panic trigger (see
3148 3164 * common/os/panic.c) and switches to the panic_stack if successful. Note that
3149 3165 * DTrace takes a slightly different panic path if it must panic from probe
3150 3166 * context. Instead of calling panic, it calls into dtrace_vpanic(), which
3151 3167 * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
3152 3168 * branches back into vpanic().
3153 3169 */
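Rough call-flow summary of the path described above (illustrative only, not
part of the diff):

	/*
	 * panic(fmt, ...) / cmn_err(CE_PANIC, ...)
	 *     -> vpanic(fmt, alist)
	 *          save registers on the stack
	 *          panic_trigger(&panic_quiesce)
	 *          if first to trigger: switch to panic_stack
	 *          build a struct regs snapshot
	 *          panicsys(fmt, alist, &regs, on_panic_stack)
	 *
	 * DTrace probe context:
	 *     dtrace_vpanic(fmt, alist)
	 *          same register save, calls dtrace_panic_trigger(),
	 *          then branches into vpanic_common
	 */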
3154 3170 #if defined(__lint)
3155 3171
3156 3172 /*ARGSUSED*/
3157 3173 void
3158 3174 vpanic(const char *format, va_list alist)
3159 3175 {}
3160 3176
3161 3177 /*ARGSUSED*/
3162 3178 void
3163 3179 dtrace_vpanic(const char *format, va_list alist)
3164 3180 {}
3165 3181
3166 3182 #else /* __lint */
3167 3183
3168 3184 #if defined(__amd64)
3169 3185
3170 3186 ENTRY_NP(vpanic) /* Initial stack layout: */
3171 3187
3172 3188 pushq %rbp /* | %rip | 0x60 */
3173 3189 movq %rsp, %rbp /* | %rbp | 0x58 */
3174 3190 pushfq /* | rfl | 0x50 */
3175 3191 pushq %r11 /* | %r11 | 0x48 */
3176 3192 pushq %r10 /* | %r10 | 0x40 */
3177 3193 pushq %rbx /* | %rbx | 0x38 */
3178 3194 pushq %rax /* | %rax | 0x30 */
3179 3195 pushq %r9 /* | %r9 | 0x28 */
3180 3196 pushq %r8 /* | %r8 | 0x20 */
3181 3197 pushq %rcx /* | %rcx | 0x18 */
3182 3198 pushq %rdx /* | %rdx | 0x10 */
3183 3199 pushq %rsi /* | %rsi | 0x8 alist */
3184 3200 pushq %rdi /* | %rdi | 0x0 format */
3185 3201
3186 3202 movq %rsp, %rbx /* %rbx = current %rsp */
3187 3203
3188 3204 leaq panic_quiesce(%rip), %rdi /* %rdi = &panic_quiesce */
3189 3205 call panic_trigger /* %eax = panic_trigger() */
3190 3206
3191 3207 vpanic_common:
3192 3208 /*
3193 3209 * The panic_trigger result is in %eax from the call above, and
3194 3210 * dtrace_panic places it in %eax before branching here.
3195 3211 * The rdmsr instructions that follow below will clobber %eax so
3196 3212 * we stash the panic_trigger result in %r11d.
3197 3213 */
3198 3214 movl %eax, %r11d
3199 3215 cmpl $0, %r11d
3200 3216 je 0f
3201 3217
3202 3218 /*
3203 3219 * If panic_trigger() was successful, we are the first to initiate a
3204 3220 * panic: we now switch to the reserved panic_stack before continuing.
3205 3221 */
3206 3222 leaq panic_stack(%rip), %rsp
3207 3223 addq $PANICSTKSIZE, %rsp
3208 3224 0: subq $REGSIZE, %rsp
3209 3225 /*
3210 3226 * Now that we've got everything set up, store the register values as
3211 3227 * they were when we entered vpanic() to the designated location in
3212 3228 * the regs structure we allocated on the stack.
3213 3229 */
3214 3230 movq 0x0(%rbx), %rcx
3215 3231 movq %rcx, REGOFF_RDI(%rsp)
3216 3232 movq 0x8(%rbx), %rcx
3217 3233 movq %rcx, REGOFF_RSI(%rsp)
3218 3234 movq 0x10(%rbx), %rcx
3219 3235 movq %rcx, REGOFF_RDX(%rsp)
3220 3236 movq 0x18(%rbx), %rcx
3221 3237 movq %rcx, REGOFF_RCX(%rsp)
3222 3238 movq 0x20(%rbx), %rcx
3223 3239
3224 3240 movq %rcx, REGOFF_R8(%rsp)
3225 3241 movq 0x28(%rbx), %rcx
3226 3242 movq %rcx, REGOFF_R9(%rsp)
3227 3243 movq 0x30(%rbx), %rcx
3228 3244 movq %rcx, REGOFF_RAX(%rsp)
3229 3245 movq 0x38(%rbx), %rcx
3230 3246 movq %rcx, REGOFF_RBX(%rsp)
3231 3247 movq 0x58(%rbx), %rcx
3232 3248
3233 3249 movq %rcx, REGOFF_RBP(%rsp)
3234 3250 movq 0x40(%rbx), %rcx
3235 3251 movq %rcx, REGOFF_R10(%rsp)
3236 3252 movq 0x48(%rbx), %rcx
3237 3253 movq %rcx, REGOFF_R11(%rsp)
3238 3254 movq %r12, REGOFF_R12(%rsp)
3239 3255
3240 3256 movq %r13, REGOFF_R13(%rsp)
3241 3257 movq %r14, REGOFF_R14(%rsp)
3242 3258 movq %r15, REGOFF_R15(%rsp)
3243 3259
3244 3260 xorl %ecx, %ecx
3245 3261 movw %ds, %cx
3246 3262 movq %rcx, REGOFF_DS(%rsp)
3247 3263 movw %es, %cx
3248 3264 movq %rcx, REGOFF_ES(%rsp)
3249 3265 movw %fs, %cx
3250 3266 movq %rcx, REGOFF_FS(%rsp)
3251 3267 movw %gs, %cx
3252 3268 movq %rcx, REGOFF_GS(%rsp)
3253 3269
3254 3270 movq $0, REGOFF_TRAPNO(%rsp)
3255 3271
3256 3272 movq $0, REGOFF_ERR(%rsp)
3257 3273 leaq vpanic(%rip), %rcx
3258 3274 movq %rcx, REGOFF_RIP(%rsp)
3259 3275 movw %cs, %cx
3260 3276 movzwq %cx, %rcx
3261 3277 movq %rcx, REGOFF_CS(%rsp)
3262 3278 movq 0x50(%rbx), %rcx
3263 3279 movq %rcx, REGOFF_RFL(%rsp)
3264 3280 movq %rbx, %rcx
3265 3281 addq $0x60, %rcx
3266 3282 movq %rcx, REGOFF_RSP(%rsp)
3267 3283 movw %ss, %cx
3268 3284 movzwq %cx, %rcx
3269 3285 movq %rcx, REGOFF_SS(%rsp)
3270 3286
3271 3287 /*
3272 3288 * panicsys(format, alist, rp, on_panic_stack)
3273 3289 */
3274 3290 movq REGOFF_RDI(%rsp), %rdi /* format */
3275 3291 movq REGOFF_RSI(%rsp), %rsi /* alist */
3276 3292 movq %rsp, %rdx /* struct regs */
3277 3293 movl %r11d, %ecx /* on_panic_stack */
3278 3294 call panicsys
3279 3295 addq $REGSIZE, %rsp
3280 3296 popq %rdi
3281 3297 popq %rsi
3282 3298 popq %rdx
3283 3299 popq %rcx
3284 3300 popq %r8
3285 3301 popq %r9
3286 3302 popq %rax
3287 3303 popq %rbx
3288 3304 popq %r10
3289 3305 popq %r11
3290 3306 popfq
3291 3307 leave
3292 3308 ret
3293 3309 SET_SIZE(vpanic)
3294 3310
3295 3311 ENTRY_NP(dtrace_vpanic) /* Initial stack layout: */
3296 3312
3297 3313 pushq %rbp /* | %rip | 0x60 */
3298 3314 movq %rsp, %rbp /* | %rbp | 0x58 */
3299 3315 pushfq /* | rfl | 0x50 */
3300 3316 pushq %r11 /* | %r11 | 0x48 */
3301 3317 pushq %r10 /* | %r10 | 0x40 */
3302 3318 pushq %rbx /* | %rbx | 0x38 */
3303 3319 pushq %rax /* | %rax | 0x30 */
3304 3320 pushq %r9 /* | %r9 | 0x28 */
3305 3321 pushq %r8 /* | %r8 | 0x20 */
3306 3322 pushq %rcx /* | %rcx | 0x18 */
3307 3323 pushq %rdx /* | %rdx | 0x10 */
3308 3324 pushq %rsi /* | %rsi | 0x8 alist */
3309 3325 pushq %rdi /* | %rdi | 0x0 format */
3310 3326
3311 3327 movq %rsp, %rbx /* %rbx = current %rsp */
3312 3328
3313 3329 leaq panic_quiesce(%rip), %rdi /* %rdi = &panic_quiesce */
3314 3330 call dtrace_panic_trigger /* %eax = dtrace_panic_trigger() */
3315 3331 jmp vpanic_common
3316 3332
3317 3333 SET_SIZE(dtrace_vpanic)
3318 3334
3319 3335 #elif defined(__i386)
3320 3336
3321 3337 ENTRY_NP(vpanic) / Initial stack layout:
3322 3338
3323 3339 pushl %ebp / | %eip | 20
3324 3340 movl %esp, %ebp / | %ebp | 16
3325 3341 pushl %eax / | %eax | 12
3326 3342 pushl %ebx / | %ebx | 8
3327 3343 pushl %ecx / | %ecx | 4
3328 3344 pushl %edx / | %edx | 0
3329 3345
3330 3346 movl %esp, %ebx / %ebx = current stack pointer
3331 3347
3332 3348 lea panic_quiesce, %eax / %eax = &panic_quiesce
3333 3349 pushl %eax / push &panic_quiesce
3334 3350 call panic_trigger / %eax = panic_trigger()
3335 3351 addl $4, %esp / reset stack pointer
3336 3352
3337 3353 vpanic_common:
3338 3354 cmpl $0, %eax / if (%eax == 0)
3339 3355 je 0f / goto 0f;
3340 3356
3341 3357 /*
3342 3358 * If panic_trigger() was successful, we are the first to initiate a
3343 3359 * panic: we now switch to the reserved panic_stack before continuing.
3344 3360 */
3345 3361 lea panic_stack, %esp / %esp = panic_stack
3346 3362 addl $PANICSTKSIZE, %esp / %esp += PANICSTKSIZE
3347 3363
3348 3364 0: subl $REGSIZE, %esp / allocate struct regs
3349 3365
3350 3366 /*
3351 3367 * Now that we've got everything set up, store the register values as
3352 3368 * they were when we entered vpanic() to the designated location in
3353 3369 * the regs structure we allocated on the stack.
3354 3370 */
3355 3371 #if !defined(__GNUC_AS__)
3356 3372 movw %gs, %edx
3357 3373 movl %edx, REGOFF_GS(%esp)
3358 3374 movw %fs, %edx
3359 3375 movl %edx, REGOFF_FS(%esp)
3360 3376 movw %es, %edx
3361 3377 movl %edx, REGOFF_ES(%esp)
3362 3378 movw %ds, %edx
3363 3379 movl %edx, REGOFF_DS(%esp)
3364 3380 #else /* __GNUC_AS__ */
3365 3381 mov %gs, %edx
3366 3382 mov %edx, REGOFF_GS(%esp)
3367 3383 mov %fs, %edx
3368 3384 mov %edx, REGOFF_FS(%esp)
3369 3385 mov %es, %edx
3370 3386 mov %edx, REGOFF_ES(%esp)
3371 3387 mov %ds, %edx
3372 3388 mov %edx, REGOFF_DS(%esp)
3373 3389 #endif /* __GNUC_AS__ */
3374 3390 movl %edi, REGOFF_EDI(%esp)
3375 3391 movl %esi, REGOFF_ESI(%esp)
3376 3392 movl 16(%ebx), %ecx
3377 3393 movl %ecx, REGOFF_EBP(%esp)
3378 3394 movl %ebx, %ecx
3379 3395 addl $20, %ecx
3380 3396 movl %ecx, REGOFF_ESP(%esp)
3381 3397 movl 8(%ebx), %ecx
3382 3398 movl %ecx, REGOFF_EBX(%esp)
3383 3399 movl 0(%ebx), %ecx
3384 3400 movl %ecx, REGOFF_EDX(%esp)
3385 3401 movl 4(%ebx), %ecx
3386 3402 movl %ecx, REGOFF_ECX(%esp)
3387 3403 movl 12(%ebx), %ecx
3388 3404 movl %ecx, REGOFF_EAX(%esp)
3389 3405 movl $0, REGOFF_TRAPNO(%esp)
3390 3406 movl $0, REGOFF_ERR(%esp)
3391 3407 lea vpanic, %ecx
3392 3408 movl %ecx, REGOFF_EIP(%esp)
3393 3409 #if !defined(__GNUC_AS__)
3394 3410 movw %cs, %edx
3395 3411 #else /* __GNUC_AS__ */
3396 3412 mov %cs, %edx
3397 3413 #endif /* __GNUC_AS__ */
3398 3414 movl %edx, REGOFF_CS(%esp)
3399 3415 pushfl
3400 3416 popl %ecx
3401 3417 #if defined(__xpv)
3402 3418 /*
3403 3419 * Synthesize the PS_IE bit from the event mask bit
3404 3420 */
3405 3421 CURTHREAD(%edx)
3406 3422 KPREEMPT_DISABLE(%edx)
3407 3423 EVENT_MASK_TO_IE(%edx, %ecx)
3408 3424 CURTHREAD(%edx)
3409 3425 KPREEMPT_ENABLE_NOKP(%edx)
3410 3426 #endif
3411 3427 movl %ecx, REGOFF_EFL(%esp)
3412 3428 movl $0, REGOFF_UESP(%esp)
3413 3429 #if !defined(__GNUC_AS__)
3414 3430 movw %ss, %edx
3415 3431 #else /* __GNUC_AS__ */
3416 3432 mov %ss, %edx
3417 3433 #endif /* __GNUC_AS__ */
3418 3434 movl %edx, REGOFF_SS(%esp)
3419 3435
3420 3436 	movl %esp, %ecx / %ecx = &regs
3421 3437 pushl %eax / push on_panic_stack
3422 3438 	pushl %ecx / push &regs
3423 3439 movl 12(%ebp), %ecx / %ecx = alist
3424 3440 pushl %ecx / push alist
3425 3441 movl 8(%ebp), %ecx / %ecx = format
3426 3442 pushl %ecx / push format
3427 3443 call panicsys / panicsys();
3428 3444 addl $16, %esp / pop arguments
3429 3445
3430 3446 addl $REGSIZE, %esp
3431 3447 popl %edx
3432 3448 popl %ecx
3433 3449 popl %ebx
3434 3450 popl %eax
3435 3451 leave
3436 3452 ret
3437 3453 SET_SIZE(vpanic)
3438 3454
3439 3455 ENTRY_NP(dtrace_vpanic) / Initial stack layout:
3440 3456
3441 3457 pushl %ebp / | %eip | 20
3442 3458 movl %esp, %ebp / | %ebp | 16
3443 3459 pushl %eax / | %eax | 12
3444 3460 pushl %ebx / | %ebx | 8
3445 3461 pushl %ecx / | %ecx | 4
3446 3462 pushl %edx / | %edx | 0
3447 3463
3448 3464 movl %esp, %ebx / %ebx = current stack pointer
3449 3465
3450 3466 lea panic_quiesce, %eax / %eax = &panic_quiesce
3451 3467 pushl %eax / push &panic_quiesce
3452 3468 call dtrace_panic_trigger / %eax = dtrace_panic_trigger()
3453 3469 addl $4, %esp / reset stack pointer
3454 3470 jmp vpanic_common / jump back to common code
3455 3471
3456 3472 SET_SIZE(dtrace_vpanic)
3457 3473
3458 3474 #endif /* __i386 */
3459 3475 #endif /* __lint */
3460 3476
3461 3477 #if defined(__lint)
3462 3478
3463 3479 void
3464 3480 hres_tick(void)
3465 3481 {}
3466 3482
3467 3483 int64_t timedelta;
3468 3484 hrtime_t hres_last_tick;
3469 3485 volatile timestruc_t hrestime;
3470 3486 int64_t hrestime_adj;
3471 3487 volatile int hres_lock;
3472 3488 hrtime_t hrtime_base;
3473 3489
3474 3490 #else /* __lint */
3475 3491
3476 3492 DGDEF3(hrestime, _MUL(2, CLONGSIZE), 8)
3477 3493 .NWORD 0, 0
3478 3494
3479 3495 DGDEF3(hrestime_adj, 8, 8)
3480 3496 .long 0, 0
3481 3497
3482 3498 DGDEF3(hres_last_tick, 8, 8)
3483 3499 .long 0, 0
3484 3500
3485 3501 DGDEF3(timedelta, 8, 8)
3486 3502 .long 0, 0
3487 3503
3488 3504 DGDEF3(hres_lock, 4, 8)
3489 3505 .long 0
3490 3506
3491 3507 /*
3492 3508 * initialized to a non zero value to make pc_gethrtime()
3493 3509 * work correctly even before clock is initialized
3494 3510 */
3495 3511 DGDEF3(hrtime_base, 8, 8)
3496 3512 .long _MUL(NSEC_PER_CLOCK_TICK, 6), 0
3497 3513
3498 3514 DGDEF3(adj_shift, 4, 4)
3499 3515 .long ADJ_SHIFT
3500 3516
3501 3517 #if defined(__amd64)
3502 3518
3503 3519 ENTRY_NP(hres_tick)
3504 3520 pushq %rbp
3505 3521 movq %rsp, %rbp
3506 3522
3507 3523 /*
3508 3524 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
3509 3525 * hres_last_tick can only be modified while holding CLOCK_LOCK).
3510 3526 * At worst, performing this now instead of under CLOCK_LOCK may
3511 3527 * introduce some jitter in pc_gethrestime().
3512 3528 */
3513 3529 call *gethrtimef(%rip)
3514 3530 movq %rax, %r8
3515 3531
3516 3532 leaq hres_lock(%rip), %rax
3517 3533 movb $-1, %dl
3518 3534 .CL1:
3519 3535 xchgb %dl, (%rax)
3520 3536 testb %dl, %dl
3521 3537 jz .CL3 /* got it */
3522 3538 .CL2:
3523 3539 cmpb $0, (%rax) /* possible to get lock? */
3524 3540 pause
3525 3541 jne .CL2
3526 3542 jmp .CL1 /* yes, try again */
3527 3543 .CL3:
3528 3544 /*
3529 3545 * compute the interval since last time hres_tick was called
3530 3546 * and adjust hrtime_base and hrestime accordingly
3531 3547 * hrtime_base is an 8 byte value (in nsec), hrestime is
3532 3548 * a timestruc_t (sec, nsec)
3533 3549 */
3534 3550 leaq hres_last_tick(%rip), %rax
3535 3551 movq %r8, %r11
3536 3552 subq (%rax), %r8
3537 3553 addq %r8, hrtime_base(%rip) /* add interval to hrtime_base */
3538 3554 addq %r8, hrestime+8(%rip) /* add interval to hrestime.tv_nsec */
3539 3555 /*
3540 3556 * Now that we have CLOCK_LOCK, we can update hres_last_tick
3541 3557 */
3542 3558 movq %r11, (%rax)
3543 3559
3544 3560 call __adj_hrestime
3545 3561
3546 3562 /*
3547 3563 * release the hres_lock
3548 3564 */
3549 3565 incl hres_lock(%rip)
3550 3566 leave
3551 3567 ret
3552 3568 SET_SIZE(hres_tick)
3553 3569
3554 3570 #elif defined(__i386)
3555 3571
3556 3572 ENTRY_NP(hres_tick)
3557 3573 pushl %ebp
3558 3574 movl %esp, %ebp
3559 3575 pushl %esi
3560 3576 pushl %ebx
3561 3577
3562 3578 /*
3563 3579 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
3564 3580 * hres_last_tick can only be modified while holding CLOCK_LOCK).
3565 3581 * At worst, performing this now instead of under CLOCK_LOCK may
3566 3582 * introduce some jitter in pc_gethrestime().
3567 3583 */
3568 3584 call *gethrtimef
3569 3585 movl %eax, %ebx
3570 3586 movl %edx, %esi
3571 3587
3572 3588 movl $hres_lock, %eax
3573 3589 movl $-1, %edx
3574 3590 .CL1:
3575 3591 xchgb %dl, (%eax)
3576 3592 testb %dl, %dl
3577 3593 jz .CL3 / got it
3578 3594 .CL2:
3579 3595 cmpb $0, (%eax) / possible to get lock?
3580 3596 pause
3581 3597 jne .CL2
3582 3598 jmp .CL1 / yes, try again
3583 3599 .CL3:
3584 3600 /*
3585 3601 * compute the interval since last time hres_tick was called
3586 3602 * and adjust hrtime_base and hrestime accordingly
3587 3603 * hrtime_base is an 8 byte value (in nsec), hrestime is
3588 3604 * timestruc_t (sec, nsec)
3589 3605 */
3590 3606
3591 3607 lea hres_last_tick, %eax
3592 3608
3593 3609 movl %ebx, %edx
3594 3610 movl %esi, %ecx
3595 3611
3596 3612 subl (%eax), %edx
3597 3613 sbbl 4(%eax), %ecx
3598 3614
3599 3615 addl %edx, hrtime_base / add interval to hrtime_base
3600 3616 adcl %ecx, hrtime_base+4
3601 3617
3602 3618 addl %edx, hrestime+4 / add interval to hrestime.tv_nsec
3603 3619
3604 3620 /
3605 3621 / Now that we have CLOCK_LOCK, we can update hres_last_tick.
3606 3622 /
3607 3623 movl %ebx, (%eax)
3608 3624 movl %esi, 4(%eax)
3609 3625
3610 3626 / get hrestime at this moment. used as base for pc_gethrestime
3611 3627 /
3612 3628 / Apply adjustment, if any
3613 3629 /
3614 3630 / #define HRES_ADJ (NSEC_PER_CLOCK_TICK >> ADJ_SHIFT)
3615 3631 / (max_hres_adj)
3616 3632 /
3617 3633 / void
3618 3634 / adj_hrestime()
3619 3635 / {
3620 3636 / long long adj;
3621 3637 /
3622 3638 / if (hrestime_adj == 0)
3623 3639 / adj = 0;
3624 3640 / else if (hrestime_adj > 0) {
3625 3641 / if (hrestime_adj < HRES_ADJ)
3626 3642 / adj = hrestime_adj;
3627 3643 / else
3628 3644 / adj = HRES_ADJ;
3629 3645 / }
3630 3646 / else {
3631 3647 / if (hrestime_adj < -(HRES_ADJ))
3632 3648 / adj = -(HRES_ADJ);
3633 3649 / else
3634 3650 / adj = hrestime_adj;
3635 3651 / }
3636 3652 /
3637 3653 / timedelta -= adj;
3638 3654 / hrestime_adj = timedelta;
3639 3655 / hrestime.tv_nsec += adj;
3640 3656 /
3641 3657 / while (hrestime.tv_nsec >= NANOSEC) {
3642 3658 / one_sec++;
3643 3659 / hrestime.tv_sec++;
3644 3660 / hrestime.tv_nsec -= NANOSEC;
3645 3661 / }
3646 3662 / }
3647 3663 __adj_hrestime:
3648 3664 movl hrestime_adj, %esi / if (hrestime_adj == 0)
3649 3665 movl hrestime_adj+4, %edx
3650 3666 andl %esi, %esi
3651 3667 jne .CL4 / no
3652 3668 andl %edx, %edx
3653 3669 jne .CL4 / no
3654 3670 subl %ecx, %ecx / yes, adj = 0;
3655 3671 subl %edx, %edx
3656 3672 jmp .CL5
3657 3673 .CL4:
3658 3674 subl %ecx, %ecx
3659 3675 subl %eax, %eax
3660 3676 subl %esi, %ecx
3661 3677 sbbl %edx, %eax
3662 3678 andl %eax, %eax / if (hrestime_adj > 0)
3663 3679 jge .CL6
3664 3680
3665 3681 / In the following comments, HRES_ADJ is used, while in the code
3666 3682 / max_hres_adj is used.
3667 3683 /
3668 3684 / The test for "hrestime_adj < HRES_ADJ" is complicated because
3669 3685 / hrestime_adj is 64-bits, while HRES_ADJ is 32-bits. We rely
3670 3686 / on the logical equivalence of:
3671 3687 /
3672 3688 / !(hrestime_adj < HRES_ADJ)
3673 3689 /
3674 3690 / and the two step sequence:
3675 3691 /
3676 3692 / (HRES_ADJ - lsw(hrestime_adj)) generates a Borrow/Carry
3677 3693 /
3678 3694 / which computes whether or not the least significant 32-bits
3679 3695 / of hrestime_adj is greater than HRES_ADJ, followed by:
3680 3696 /
3681 3697 / Previous Borrow/Carry + -1 + msw(hrestime_adj) generates a Carry
3682 3698 /
3683 3699 / which generates a carry whenever step 1 is true or the most
3684 3700 / significant long of the longlong hrestime_adj is non-zero.
3685 3701
3686 3702 movl max_hres_adj, %ecx / hrestime_adj is positive
3687 3703 subl %esi, %ecx
3688 3704 movl %edx, %eax
3689 3705 adcl $-1, %eax
3690 3706 jnc .CL7
3691 3707 movl max_hres_adj, %ecx / adj = HRES_ADJ;
3692 3708 subl %edx, %edx
3693 3709 jmp .CL5
3694 3710
3695 3711 / The following computation is similar to the one above.
3696 3712 /
3697 3713 / The test for "hrestime_adj < -(HRES_ADJ)" is complicated because
3698 3714 / hrestime_adj is 64-bits, while HRES_ADJ is 32-bits. We rely
3699 3715 / on the logical equivalence of:
3700 3716 /
3701 3717 / (hrestime_adj > -HRES_ADJ)
3702 3718 /
3703 3719 / and the two step sequence:
3704 3720 /
3705 3721 / (HRES_ADJ + lsw(hrestime_adj)) generates a Carry
3706 3722 /
3707 3723 / which means the least significant 32-bits of hrestime_adj is
3708 3724 / greater than -HRES_ADJ, followed by:
3709 3725 /
3710 3726 / Previous Carry + 0 + msw(hrestime_adj) generates a Carry
3711 3727 /
3712 3728 / which generates a carry only when step 1 is true and the most
3713 3729 / significant long of the longlong hrestime_adj is -1.
3714 3730
3715 3731 .CL6: / hrestime_adj is negative
3716 3732 movl %esi, %ecx
3717 3733 addl max_hres_adj, %ecx
3718 3734 movl %edx, %eax
3719 3735 adcl $0, %eax
3720 3736 jc .CL7
3721 3737 xor %ecx, %ecx
3722 3738 subl max_hres_adj, %ecx / adj = -(HRES_ADJ);
3723 3739 movl $-1, %edx
3724 3740 jmp .CL5
3725 3741 .CL7:
3726 3742 movl %esi, %ecx / adj = hrestime_adj;
3727 3743 .CL5:
3728 3744 movl timedelta, %esi
3729 3745 subl %ecx, %esi
3730 3746 movl timedelta+4, %eax
3731 3747 sbbl %edx, %eax
3732 3748 movl %esi, timedelta
3733 3749 movl %eax, timedelta+4 / timedelta -= adj;
3734 3750 movl %esi, hrestime_adj
3735 3751 movl %eax, hrestime_adj+4 / hrestime_adj = timedelta;
3736 3752 addl hrestime+4, %ecx
3737 3753
3738 3754 movl %ecx, %eax / eax = tv_nsec
3739 3755 1:
3740 3756 cmpl $NANOSEC, %eax / if ((unsigned long)tv_nsec >= NANOSEC)
3741 3757 jb .CL8 / no
3742 3758 incl one_sec / yes, one_sec++;
3743 3759 incl hrestime / hrestime.tv_sec++;
3744 3760 addl $-NANOSEC, %eax / tv_nsec -= NANOSEC
3745 3761 jmp 1b / check for more seconds
3746 3762
3747 3763 .CL8:
3748 3764 movl %eax, hrestime+4 / store final into hrestime.tv_nsec
3749 3765 incl hres_lock / release the hres_lock
3750 3766
3751 3767 popl %ebx
3752 3768 popl %esi
3753 3769 leave
3754 3770 ret
3755 3771 SET_SIZE(hres_tick)
3756 3772
3757 3773 #endif /* __i386 */
3758 3774 #endif /* __lint */
3759 3775
3760 3776 /*
3761 3777 * void prefetch_smap_w(void *)
3762 3778 *
3763 3779 * Prefetch ahead within a linear list of smap structures.
3764 3780 * Not implemented for ia32. Stub for compatibility.
3765 3781 */
3766 3782
3767 3783 #if defined(__lint)
3768 3784
3769 3785 /*ARGSUSED*/
3770 3786 void prefetch_smap_w(void *smp)
3771 3787 {}
3772 3788
3773 3789 #else /* __lint */
3774 3790
3775 3791 ENTRY(prefetch_smap_w)
3776 3792 rep; ret /* use 2 byte return instruction when branch target */
3777 3793 /* AMD Software Optimization Guide - Section 6.2 */
3778 3794 SET_SIZE(prefetch_smap_w)
3779 3795
3780 3796 #endif /* __lint */
3781 3797
3782 3798 /*
3783 3799 * prefetch_page_r(page_t *)
3784 3800 * issue prefetch instructions for a page_t
3785 3801 */
3786 3802 #if defined(__lint)
3787 3803
3788 3804 /*ARGSUSED*/
3789 3805 void
3790 3806 prefetch_page_r(void *pp)
3791 3807 {}
3792 3808
3793 3809 #else /* __lint */
3794 3810
3795 3811 ENTRY(prefetch_page_r)
3796 3812 rep; ret /* use 2 byte return instruction when branch target */
3797 3813 /* AMD Software Optimization Guide - Section 6.2 */
3798 3814 SET_SIZE(prefetch_page_r)
3799 3815
3800 3816 #endif /* __lint */
3801 3817
3802 3818 #if defined(__lint)
3803 3819
3804 3820 /*ARGSUSED*/
3805 3821 int
3806 3822 bcmp(const void *s1, const void *s2, size_t count)
3807 3823 { return (0); }
3808 3824
3809 3825 #else /* __lint */
3810 3826
3811 3827 #if defined(__amd64)
3812 3828
3813 3829 ENTRY(bcmp)
3814 3830 pushq %rbp
3815 3831 movq %rsp, %rbp
3816 3832 #ifdef DEBUG
3817 3833 movq postbootkernelbase(%rip), %r11
3818 3834 cmpq %r11, %rdi
3819 3835 jb 0f
3820 3836 cmpq %r11, %rsi
3821 3837 jnb 1f
3822 3838 0: leaq .bcmp_panic_msg(%rip), %rdi
3823 3839 xorl %eax, %eax
3824 3840 call panic
3825 3841 1:
3826 3842 #endif /* DEBUG */
3827 3843 call memcmp
3828 3844 testl %eax, %eax
3829 3845 setne %dl
3830 3846 leave
3831 3847 movzbl %dl, %eax
3832 3848 ret
3833 3849 SET_SIZE(bcmp)
3834 3850
3835 3851 #elif defined(__i386)
3836 3852
3837 3853 #define ARG_S1 8
3838 3854 #define ARG_S2 12
3839 3855 #define ARG_LENGTH 16
3840 3856
3841 3857 ENTRY(bcmp)
3842 3858 pushl %ebp
3843 3859 movl %esp, %ebp / create new stack frame
3844 3860 #ifdef DEBUG
3845 3861 movl postbootkernelbase, %eax
3846 3862 cmpl %eax, ARG_S1(%ebp)
3847 3863 jb 0f
3848 3864 cmpl %eax, ARG_S2(%ebp)
3849 3865 jnb 1f
3850 3866 0: pushl $.bcmp_panic_msg
3851 3867 call panic
3852 3868 1:
3853 3869 #endif /* DEBUG */
3854 3870
3855 3871 pushl %edi / save register variable
3856 3872 movl ARG_S1(%ebp), %eax / %eax = address of string 1
3857 3873 movl ARG_S2(%ebp), %ecx / %ecx = address of string 2
3858 3874 cmpl %eax, %ecx / if the same string
3859 3875 je .equal / goto .equal
3860 3876 movl ARG_LENGTH(%ebp), %edi / %edi = length in bytes
3861 3877 cmpl $4, %edi / if %edi < 4
3862 3878 jb .byte_check / goto .byte_check
3863 3879 .align 4
3864 3880 .word_loop:
3865 3881 movl (%ecx), %edx / move 1 word from (%ecx) to %edx
3866 3882 leal -4(%edi), %edi / %edi -= 4
3867 3883 cmpl (%eax), %edx / compare 1 word from (%eax) with %edx
3868 3884 jne .word_not_equal / if not equal, goto .word_not_equal
3869 3885 leal 4(%ecx), %ecx / %ecx += 4 (next word)
3870 3886 leal 4(%eax), %eax / %eax += 4 (next word)
3871 3887 cmpl $4, %edi / if %edi >= 4
3872 3888 jae .word_loop / goto .word_loop
3873 3889 .byte_check:
3874 3890 cmpl $0, %edi / if %edi == 0
3875 3891 je .equal / goto .equal
3876 3892 jmp .byte_loop / goto .byte_loop (checks in bytes)
3877 3893 .word_not_equal:
3878 3894 leal 4(%edi), %edi / %edi += 4 (post-decremented)
3879 3895 .align 4
3880 3896 .byte_loop:
3881 3897 movb (%ecx), %dl / move 1 byte from (%ecx) to %dl
3882 3898 cmpb %dl, (%eax) / compare %dl with 1 byte from (%eax)
3883 3899 jne .not_equal / if not equal, goto .not_equal
3884 3900 incl %ecx / %ecx++ (next byte)
3885 3901 incl %eax / %eax++ (next byte)
3886 3902 decl %edi / %edi--
3887 3903 jnz .byte_loop / if not zero, goto .byte_loop
3888 3904 .equal:
3889 3905 xorl %eax, %eax / %eax = 0
3890 3906 popl %edi / restore register variable
3891 3907 leave / restore old stack frame
3892 3908 ret / return (NULL)
3893 3909 .align 4
3894 3910 .not_equal:
3895 3911 movl $1, %eax / return 1
3896 3912 popl %edi / restore register variable
3897 3913 leave / restore old stack frame
3898 3914 ret / return (NULL)
3899 3915 SET_SIZE(bcmp)
3900 3916
3901 3917 #endif /* __i386 */
3902 3918
3903 3919 #ifdef DEBUG
3904 3920 .text
3905 3921 .bcmp_panic_msg:
3906 3922 .string "bcmp: arguments below kernelbase"
3907 3923 #endif /* DEBUG */
3908 3924
3909 3925 #endif /* __lint */
3910 3926
3911 3927 #if defined(__lint)
3912 3928
3913 3929 uint_t
3914 3930 bsrw_insn(uint16_t mask)
3915 3931 {
3916 3932 uint_t index = sizeof (mask) * NBBY - 1;
3917 3933
3918 3934 while ((mask & (1 << index)) == 0)
3919 3935 index--;
3920 3936 return (index);
3921 3937 }
3922 3938
3923 3939 #else /* __lint */
3924 3940
3925 3941 #if defined(__amd64)
3926 3942
3927 3943 ENTRY_NP(bsrw_insn)
3928 3944 xorl %eax, %eax
3929 3945 bsrw %di, %ax
3930 3946 ret
3931 3947 SET_SIZE(bsrw_insn)
3932 3948
3933 3949 #elif defined(__i386)
3934 3950
3935 3951 ENTRY_NP(bsrw_insn)
3936 3952 movw 4(%esp), %cx
3937 3953 xorl %eax, %eax
3938 3954 bsrw %cx, %ax
3939 3955 ret
3940 3956 SET_SIZE(bsrw_insn)
3941 3957
3942 3958 #endif /* __i386 */
3943 3959 #endif /* __lint */
3944 3960
3945 3961 #if defined(__lint)
3946 3962
3947 3963 uint_t
3948 3964 atomic_btr32(uint32_t *pending, uint_t pil)
3949 3965 {
3950 3966 return (*pending &= ~(1 << pil));
3951 3967 }
3952 3968
3953 3969 #else /* __lint */
3954 3970
3955 3971 #if defined(__i386)
3956 3972
3957 3973 ENTRY_NP(atomic_btr32)
3958 3974 movl 4(%esp), %ecx
3959 3975 movl 8(%esp), %edx
3960 3976 xorl %eax, %eax
3961 3977 lock
3962 3978 btrl %edx, (%ecx)
3963 3979 setc %al
3964 3980 ret
3965 3981 SET_SIZE(atomic_btr32)
3966 3982
3967 3983 #endif /* __i386 */
3968 3984 #endif /* __lint */
3969 3985
3970 3986 #if defined(__lint)
3971 3987
3972 3988 /*ARGSUSED*/
3973 3989 void
3974 3990 switch_sp_and_call(void *newsp, void (*func)(uint_t, uint_t), uint_t arg1,
3975 3991 uint_t arg2)
3976 3992 {}
3977 3993
3978 3994 #else /* __lint */
3979 3995
3980 3996 #if defined(__amd64)
3981 3997
3982 3998 ENTRY_NP(switch_sp_and_call)
3983 3999 pushq %rbp
3984 4000 movq %rsp, %rbp /* set up stack frame */
3985 4001 movq %rdi, %rsp /* switch stack pointer */
3986 4002 movq %rdx, %rdi /* pass func arg 1 */
3987 4003 movq %rsi, %r11 /* save function to call */
3988 4004 movq %rcx, %rsi /* pass func arg 2 */
3989 4005 call *%r11 /* call function */
3990 4006 leave /* restore stack */
3991 4007 ret
3992 4008 SET_SIZE(switch_sp_and_call)
3993 4009
3994 4010 #elif defined(__i386)
3995 4011
3996 4012 ENTRY_NP(switch_sp_and_call)
3997 4013 pushl %ebp
3998 4014 mov %esp, %ebp /* set up stack frame */
3999 4015 movl 8(%ebp), %esp /* switch stack pointer */
4000 4016 pushl 20(%ebp) /* push func arg 2 */
4001 4017 pushl 16(%ebp) /* push func arg 1 */
4002 4018 call *12(%ebp) /* call function */
4003 4019 addl $8, %esp /* pop arguments */
4004 4020 leave /* restore stack */
4005 4021 ret
4006 4022 SET_SIZE(switch_sp_and_call)
4007 4023
4008 4024 #endif /* __i386 */
4009 4025 #endif /* __lint */
4010 4026
4011 4027 #if defined(__lint)
4012 4028
4013 4029 void
4014 4030 kmdb_enter(void)
4015 4031 {}
4016 4032
4017 4033 #else /* __lint */
4018 4034
4019 4035 #if defined(__amd64)
4020 4036
4021 4037 ENTRY_NP(kmdb_enter)
4022 4038 pushq %rbp
4023 4039 movq %rsp, %rbp
4024 4040
4025 4041 /*
4026 4042 * Save flags, do a 'cli' then return the saved flags
4027 4043 */
4028 4044 call intr_clear
4029 4045
4030 4046 int $T_DBGENTR
4031 4047
4032 4048 /*
4033 4049 * Restore the saved flags
4034 4050 */
4035 4051 movq %rax, %rdi
4036 4052 call intr_restore
4037 4053
4038 4054 leave
4039 4055 ret
4040 4056 SET_SIZE(kmdb_enter)
4041 4057
4042 4058 #elif defined(__i386)
4043 4059
4044 4060 ENTRY_NP(kmdb_enter)
4045 4061 pushl %ebp
4046 4062 movl %esp, %ebp
4047 4063
4048 4064 /*
4049 4065 * Save flags, do a 'cli' then return the saved flags
4050 4066 */
4051 4067 call intr_clear
4052 4068
4053 4069 int $T_DBGENTR
4054 4070
4055 4071 /*
4056 4072 * Restore the saved flags
4057 4073 */
4058 4074 pushl %eax
4059 4075 call intr_restore
4060 4076 addl $4, %esp
4061 4077
4062 4078 leave
4063 4079 ret
4064 4080 SET_SIZE(kmdb_enter)
4065 4081
4066 4082 #endif /* __i386 */
4067 4083 #endif /* __lint */
4068 4084
4069 4085 #if defined(__lint)
4070 4086
4071 4087 void
4072 4088 return_instr(void)
4073 4089 {}
4074 4090
4075 4091 #else /* __lint */
4076 4092
4077 4093 ENTRY_NP(return_instr)
4078 4094 rep; ret /* use 2 byte instruction when branch target */
4079 4095 /* AMD Software Optimization Guide - Section 6.2 */
4080 4096 SET_SIZE(return_instr)
4081 4097
4082 4098 #endif /* __lint */
4083 4099
4084 4100 #if defined(__lint)
4085 4101
4086 4102 ulong_t
4087 4103 getflags(void)
4088 4104 {
4089 4105 return (0);
4090 4106 }
4091 4107
4092 4108 #else /* __lint */
4093 4109
4094 4110 #if defined(__amd64)
4095 4111
4096 4112 ENTRY(getflags)
4097 4113 pushfq
4098 4114 popq %rax
4099 4115 #if defined(__xpv)
4100 4116 CURTHREAD(%rdi)
4101 4117 KPREEMPT_DISABLE(%rdi)
4102 4118 /*
4103 4119 * Synthesize the PS_IE bit from the event mask bit
4104 4120 */
4105 4121 CURVCPU(%r11)
4106 4122 andq $_BITNOT(PS_IE), %rax
4107 4123 XEN_TEST_UPCALL_MASK(%r11)
4108 4124 jnz 1f
4109 4125 orq $PS_IE, %rax
4110 4126 1:
4111 4127 KPREEMPT_ENABLE_NOKP(%rdi)
4112 4128 #endif
4113 4129 ret
4114 4130 SET_SIZE(getflags)
4115 4131
4116 4132 #elif defined(__i386)
4117 4133
4118 4134 ENTRY(getflags)
4119 4135 pushfl
4120 4136 popl %eax
4121 4137 #if defined(__xpv)
4122 4138 CURTHREAD(%ecx)
4123 4139 KPREEMPT_DISABLE(%ecx)
4124 4140 /*
4125 4141 * Synthesize the PS_IE bit from the event mask bit
4126 4142 */
4127 4143 CURVCPU(%edx)
4128 4144 andl $_BITNOT(PS_IE), %eax
4129 4145 XEN_TEST_UPCALL_MASK(%edx)
4130 4146 jnz 1f
4131 4147 orl $PS_IE, %eax
4132 4148 1:
4133 4149 KPREEMPT_ENABLE_NOKP(%ecx)
4134 4150 #endif
4135 4151 ret
4136 4152 SET_SIZE(getflags)
4137 4153
4138 4154 #endif /* __i386 */
4139 4155
4140 4156 #endif /* __lint */
4141 4157
4142 4158 #if defined(__lint)
4143 4159
4144 4160 ftrace_icookie_t
4145 4161 ftrace_interrupt_disable(void)
4146 4162 { return (0); }
4147 4163
4148 4164 #else /* __lint */
4149 4165
4150 4166 #if defined(__amd64)
4151 4167
4152 4168 ENTRY(ftrace_interrupt_disable)
4153 4169 pushfq
4154 4170 popq %rax
4155 4171 CLI(%rdx)
4156 4172 ret
4157 4173 SET_SIZE(ftrace_interrupt_disable)
4158 4174
4159 4175 #elif defined(__i386)
4160 4176
4161 4177 ENTRY(ftrace_interrupt_disable)
4162 4178 pushfl
4163 4179 popl %eax
4164 4180 CLI(%edx)
4165 4181 ret
4166 4182 SET_SIZE(ftrace_interrupt_disable)
4167 4183
4168 4184 #endif /* __i386 */
4169 4185 #endif /* __lint */
4170 4186
4171 4187 #if defined(__lint)
4172 4188
4173 4189 /*ARGSUSED*/
4174 4190 void
4175 4191 ftrace_interrupt_enable(ftrace_icookie_t cookie)
4176 4192 {}
4177 4193
4178 4194 #else /* __lint */
4179 4195
4180 4196 #if defined(__amd64)
4181 4197
4182 4198 ENTRY(ftrace_interrupt_enable)
4183 4199 pushq %rdi
4184 4200 popfq
4185 4201 ret
4186 4202 SET_SIZE(ftrace_interrupt_enable)
4187 4203
4188 4204 #elif defined(__i386)
4189 4205
4190 4206 ENTRY(ftrace_interrupt_enable)
4191 4207 movl 4(%esp), %eax
4192 4208 pushl %eax
4193 4209 popfl
4194 4210 ret
4195 4211 SET_SIZE(ftrace_interrupt_enable)
4196 4212
4197 4213 #endif /* __i386 */
4198 4214 #endif /* __lint */