/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>
#include <asm/hw_irq.h>

/*
 * System calls.
 */
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS

	.globl system_call_common
system_call_common:
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	mfcr	r2
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	std	r9,GPR13(r1)
	mflr	r10
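	/*
	 * The user's r9-r12 are already gone by this point: the exception
	 * prolog used them as scratch (r9 carried the user's r13, saved
	 * to GPR13 above), which is why zeros were stored in their frame
	 * slots rather than the original values.
	 */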
	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r2,r11,28,(63-28)
	li	r11,0xc01	/* trap 0xc00 (syscall); low bit set means
				 * the nvgprs are not saved in this frame
				 * (save_nvgprs clears it once they are) */
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r3,ORIG_GPR3(r1)
	std	r2,_CCR(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */

#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	beq	33f
	/* if from user, see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	ld	r10,LPPACA_DTLIDX(r10)	/* get log write index */
	cmpd	cr1,r11,r10
	beq+	cr1,33f
	bl	.accumulate_stolen_time
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */

	/*
	 * A syscall should always be called with interrupts enabled
	 * so we just unconditionally hard-enable here. When some kind
	 * of irq tracing is used, we additionally check that the
	 * condition is correct.
	 */
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
	lbz	r10,PACASOFTIRQEN(r13)
	xori	r10,r10,1
1:	tdnei	r10,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ld	r11,PACAKMSR(r13)
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	/* We do need to set SOFTE in the stack frame or the return
	 * from interrupt will be painful
	 */
	li	r10,1
	std	r10,SOFTE(r1)

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
.Lsyscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4		/* entries are pairs of 8-byte pointers */
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT

	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
#endif
	/*
	 * Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	ld	r10,PACAKMSR(r13)
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
.Lsyscall_error_cont:
	ld	r7,_NIP(r1)
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
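	/*
	 * On Book3S, r10 still holds the kernel MSR loaded from PACAKMSR
	 * when we hard-disabled above, so clearing MSR_RI in it below
	 * yields exactly the MSR value to run with while r13 is being
	 * restored.
	 */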
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1			/* clear MSR.RI */
#endif /* CONFIG_PPC_BOOK3S */

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	.Lsyscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for the call number to look up in the table (r0).
	 */
	mr	r0,r3
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	.Lsyscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit

syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	   If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -_LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set. */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ld	r10,PACAKMSR(r13)
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit

	.section	".toc","aw"
DSCR_DEFAULT:
	.tc dscr_default[TC],dscr_default
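	/*
	 * dscr_default holds the system-wide default DSCR value; _switch
	 * below loads it for tasks that have not set their own value
	 * (thread.dscr_inherit == 0).
	 */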
	.section	".text"

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	mfspr	r25,SPRN_DSCR
	std	r25,THREAD_DSCR(r3)
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	MTMSRD(r22)
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	/*
	 * If we optimise away the clear of the reservation in system
	 * calls because we know the CPU tracks the address of the
	 * reservation, then we need to clear it here to cover the
	 * case that the kernel context switch path has no larx
	 * instructions.
	 */
BEGIN_FTR_SECTION
	ldarx	r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S
BEGIN_FTR_SECTION
  BEGIN_FTR_SECTION_NESTED(95)
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
  FTR_SECTION_ELSE_NESTED(95)
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
  ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_1T_SEGMENT, 95)
FTR_SECTION_ELSE
	b	2f
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_SLB)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)	/* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)	/* Save ESID */

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync
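	/*
	 * Note the ordering of the SLB shadow update above: the ESID is
	 * cleared first and rewritten last, so the shadow entry is never
	 * seen valid with a stale VSID.
	 */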
2:
#endif /* CONFIG_PPC_BOOK3S */

	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	lwz	r6,THREAD_DSCR_INHERIT(r4)
	ld	r7,DSCR_DEFAULT@toc(2)
	ld	r0,THREAD_DSCR(r4)
	cmpwi	r6,0
	bne	1f
	ld	r0,0(r7)
1:	cmpd	r0,r25
	beq	2f
	mtspr	SPRN_DSCR,r0
2:
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	ld	r10,PACAKMSR(r13)	/* Get kernel MSR without EE */
	mtmsrd	r10,1			/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	andi.	r3,r3,MSR_PR
	beq	resume_kernel

	/* Check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
	beq	restore

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.restore_interrupts
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	bl	.restore_interrupts
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_notify_resume
	b	.ret_from_except

resume_kernel:
#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq+	restore
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq	/* cr0.eq = (preempt_count == 0) && (SOFTE != 0) */
	bne	restore

	/*
	 * Here we are preempting the current task.  We want to make
	 * sure we are soft-disabled first.
	 */
	SOFT_DISABLE_INTS(r3,r4)
1:	bl	.preempt_schedule_irq

	/* Re-test the flags and loop again if necessary */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
#endif /* CONFIG_PREEMPT */

	.globl	fast_exc_return_irq
fast_exc_return_irq:
restore:
	/*
	 * This is the main kernel exit path. First we check if we
	 * are about to re-enable interrupts
	 */
	ld	r5,SOFTE(r1)
	lbz	r6,PACASOFTIRQEN(r13)
	cmpwi	cr0,r5,0
	beq	restore_irq_off

	/* We are enabling, were we already enabled? Yes, just return */
	cmpwi	cr0,r6,1
	beq	cr0,do_restore

	/*
	 * We are about to soft-enable interrupts (we are hard disabled
	 * at this point). We check if there's anything that needs to
	 * be replayed first.
	 */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	bne-	restore_check_irq_replay

	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move on. We will hard-enable as a side
	 * effect of rfi
	 */
restore_no_replay:
	TRACE_ENABLE_INTS
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13);

	/*
	 * Final return path. BookE is handled in a different file
	 */
do_restore:
#ifdef CONFIG_PPC_BOOK3E
	b	.exception_return_book3e
#else
	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Some code paths such as load_up_fpu or altivec return directly
	 * here. They run entirely hard-disabled and do not alter the
	 * interrupt state. They also don't use lwarx/stwcx. and thus
	 * are known not to leave dangling reservations.
	 */
	.globl	fast_exception_return
fast_exception_return:
	ld	r3,_MSR(r1)
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	ld	r4,PACAKMSR(r13)	/* Get kernel MSR without EE */
	andc	r4,r4,r0		/* r0 contains MSR_RI here */
	mtmsrd	r4,1

	/*
	 * r13 is our per-cpu area; only restore it if we are returning
	 * to userspace, since the value stored in the stack frame may
	 * belong to another CPU.
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */

	/*
	 * We are returning to a context with interrupts soft-disabled.
	 *
	 * However, we may also be about to hard-enable, so we need to
	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
	 * or that bit can get out of sync and bad things will happen
	 */
restore_irq_off:
	ld	r3,_MSR(r1)
	lbz	r7,PACAIRQHAPPENED(r13)
	andi.	r0,r3,MSR_EE
	beq	1f
	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
	stb	r7,PACAIRQHAPPENED(r13)
1:	li	r0,0
	stb	r0,PACASOFTIRQEN(r13);
	TRACE_DISABLE_INTS
	b	do_restore

	/*
	 * Something did happen, check if a re-emit is needed
	 * (this also clears paca->irq_happened)
	 */
restore_check_irq_replay:
	/* XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return. That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page
	 */
	bl	.__check_irq_replay
	cmpwi	cr0,r3,0
	beq	restore_no_replay

	/*
	 * We need to re-emit an interrupt. We do so by re-using our
	 * existing exception frame. We first change the trap value,
	 * but we need to ensure we preserve the low nibble of it
	 */
	ld	r4,_TRAP(r1)
	clrldi	r4,r4,60
	or	r4,r4,r3
	std	r4,_TRAP(r1)

	/*
	 * Then find the right handler and call it. Interrupts are
	 * still soft-disabled and we keep them that way.
	 */
	cmpwi	cr0,r3,0x500
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	.do_IRQ
	b	.ret_from_except
1:	cmpwi	cr0,r3,0x900
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	.timer_interrupt
	b	.ret_from_except
#ifdef CONFIG_PPC_BOOK3E
1:	cmpwi	cr0,r3,0x280
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	.doorbell_exception
	b	.ret_from_except
#endif /* CONFIG_PPC_BOOK3E */
1:	b	.ret_from_except	/* What else to do here? */

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
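/*
 * Kernel linear-mapping addresses are converted to real addresses below
 * simply by clearing their top two bits (the clrldi instructions), since
 * the linear map starts at 0xc000000000000000.
 */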
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1	/* rotate MSR_EE up to the MSB and clear it */
	rotldi	r7,r7,16	/* rotate the other bits back into place */
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
	andc	r6,r0,r9
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi	r4,r4,2			/* convert to realmode address */

	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &.rtas_restore_regs */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

	.align	3
1:	.llong	.rtas_restore_regs

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	GET_PACA(r13)

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
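	/* The CCR and MSR are saved below as well: OF can clobber the CR,
	 * and the saved MSR is what gets us back into 64-bit mode after
	 * OF returns.
	 */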
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r10
	mfmsr	r11
	std	r10,_CCR(r1)
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	mtlr	r4

	/* Switch MSR to 32 bits mode
	 */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
	mtmsr	r11
#else /* CONFIG_PPC_BOOK3E */
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12	/* clear MSR_SF (64-bit mode) */
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12	/* clear MSR_ISF (64-bit interrupt mode) */
	mtmsrd	r11
#endif /* CONFIG_PPC_BOOK3E */
	isync

	/* Enter PROM here... */
	blrl

	/* Just make sure that r1 top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	MTMSRD(r0)
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	blr

_GLOBAL(ftrace_caller)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)	/* save LR in the parent frame's LR slot */
	ld	r4, 16(r11)
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr
#else
_GLOBAL(mcount)
	blr

_GLOBAL(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5,ftrace_trace_function)
	ld	r5,0(r5)	/* load the function descriptor pointer */
	ld	r5,0(r5)	/* load the entry point from the descriptor */
	mtctr	r5
	bctrl
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	ld	r4, 128(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* get the parent address */
	ld	r11, 112(r1)
	addi	r3, r11, 16

	bl	.prepare_ftrace_return
	nop

	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
	blr

_GLOBAL(return_to_handler)
	/* need to save return values */
	std	r4,  -24(r1)
	std	r3,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	bl	.ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)
	ld	r4,  -24(r1)
	ld	r3,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr

_GLOBAL(mod_return_to_handler)
	/* need to save return values */
	std	r4,  -32(r1)
	std	r3,  -24(r1)
	/* save TOC */
	std	r2,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	/*
	 * We are in a module using the module's TOC.
	 * Switch to our TOC to run inside the core kernel.
	 */
	ld	r2, PACATOC(r13)

	bl	.ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)
	ld	r4,  -32(r1)
	ld	r3,  -24(r1)
	ld	r2,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */