/*
 * x86 semaphore implementation.
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Portions Copyright 1999 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
 */

#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/frame.h>

#define __ASM_HALF_REG(reg)	__ASM_SEL(reg, e##reg)
#define __ASM_HALF_SIZE(inst)	__ASM_SEL(inst##w, inst##l)

#ifdef CONFIG_X86_32

/*
 * The semaphore operations have a special calling sequence that
 * allows us to do a simpler in-line version of them. These routines
 * need to convert that sequence back into the C sequence when
 * there is contention on the semaphore.
 *
 * %eax contains the semaphore pointer on entry. Save the C-clobbered
 * registers (%eax, %edx and %ecx), except %eax, which is either a
 * return value or just clobbered.
 */

#define save_common_regs \
	pushl %ecx

#define restore_common_regs \
	popl %ecx

	/* Avoid uglifying the argument copying x86-64 needs to do. */
	.macro movq src, dst
	.endm

#else

/*
 * x86-64 rwsem wrappers
 *
 * This interfaces the inline asm code to the slow-path
 * C routines. We need to save the call-clobbered regs
 * that the asm does not mark as clobbered, and move the
 * argument from %rax to %rdi.
 *
 * NOTE! We don't need to save %rax, because the functions
 * will always return the semaphore pointer in %rax (which
 * is also the input argument to these helpers).
 *
 * The following can clobber %rdx because the asm clobbers it:
 *	call_rwsem_down_write_failed
 *	call_rwsem_wake
 * but %rdi, %rsi, %rcx, %r8-r11 always need saving.
 */

#define save_common_regs \
	pushq %rdi; \
	pushq %rsi; \
	pushq %rcx; \
	pushq %r8; \
	pushq %r9; \
	pushq %r10; \
	pushq %r11

#define restore_common_regs \
	popq %r11; \
	popq %r10; \
	popq %r9; \
	popq %r8; \
	popq %rcx; \
	popq %rsi; \
	popq %rdi

#endif

/* Fix up special calling conventions */
ENTRY(call_rwsem_down_read_failed)
	FRAME_BEGIN
	save_common_regs
	__ASM_SIZE(push,) %__ASM_REG(dx)
	movq %rax,%rdi
	call rwsem_down_read_failed
	__ASM_SIZE(pop,) %__ASM_REG(dx)
	restore_common_regs
	FRAME_END
	ret
ENDPROC(call_rwsem_down_read_failed)

ENTRY(call_rwsem_down_write_failed)
	FRAME_BEGIN
	save_common_regs
	movq %rax,%rdi
	call rwsem_down_write_failed
	restore_common_regs
	FRAME_END
	ret
ENDPROC(call_rwsem_down_write_failed)

ENTRY(call_rwsem_wake)
	FRAME_BEGIN
	/* do nothing if still outstanding active readers */
	__ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
	jnz 1f
	save_common_regs
	movq %rax,%rdi
	call rwsem_wake
	restore_common_regs
1:	FRAME_END
	ret
ENDPROC(call_rwsem_wake)

ENTRY(call_rwsem_downgrade_wake)
	FRAME_BEGIN
	save_common_regs
	__ASM_SIZE(push,) %__ASM_REG(dx)
	movq %rax,%rdi
	call rwsem_downgrade_wake
	__ASM_SIZE(pop,) %__ASM_REG(dx)
	restore_common_regs
	FRAME_END
	ret
ENDPROC(call_rwsem_downgrade_wake)
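
/*
 * For orientation, a sketch of the fast path that enters these thunks.
 * This is an assumption reconstructed from the inline asm in
 * <asm/rwsem.h> of the same era, not part of this file: down_read()
 * expands to roughly the following on x86-64, and only the contended
 * case falls through to the thunk, with the semaphore pointer still
 * live in %rax -- exactly the calling convention described above:
 *
 *	lock incq (%rax)	# sem->count += 1 (reader bias)
 *	jns 1f			# sign clear: no waiters, fast path done
 *	call call_rwsem_down_read_failed	# thunk moves %rax to %rdi
 * 1:
 */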
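
/*
 * Similarly, a sketch (same assumption, from <asm/rwsem.h>) of the
 * up_read() fast path, which explains the half-register %dx/%edx test
 * in call_rwsem_wake above: xadd leaves the old count in %rdx, whose
 * low half holds the active-lock count, so the thunk can decrement it
 * and skip the wakeup while other readers are still active:
 *
 *	movq $-1,%rdx		# -RWSEM_ACTIVE_READ_BIAS
 *	lock xaddq %rdx,(%rax)	# old count -> %rdx, sem->count -= 1
 *	jns 1f			# new count non-negative: no waiters
 *	call call_rwsem_wake	# checks %edx before calling rwsem_wake
 * 1:
 */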