v4.10.11
/*
 * x86 semaphore implementation.
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Portions Copyright 1999 Red Hat, Inc.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
 */

#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/frame.h>

#define __ASM_HALF_REG(reg)	__ASM_SEL(reg, e##reg)
#define __ASM_HALF_SIZE(inst)	__ASM_SEL(inst##w, inst##l)

#ifdef CONFIG_X86_32

/*
 * The semaphore operations have a special calling sequence that
 * allows us to use a simpler in-line version of them. These routines
 * need to convert that sequence back into the C sequence when
 * there is contention on the semaphore.
 *
 * %eax contains the semaphore pointer on entry. Save the C-clobbered
 * registers (%eax, %edx and %ecx) except %eax, which is either a return
 * value or just gets clobbered. The same is true for %edx, so make sure
 * GCC reloads it after the slow path by making it hold a temporary;
 * for example, see ____down_write().
 */

#define save_common_regs \
	pushl %ecx

#define restore_common_regs \
	popl %ecx

	/* Avoid uglifying the argument copying x86-64 needs to do. */
	.macro movq src, dst
	.endm

#else

/*
 * x86-64 rwsem wrappers
 *
 * This interfaces the inline asm code to the slow-path
 * C routines. We need to save the call-clobbered regs
 * that the asm does not mark as clobbered, and move the
 * argument from %rax to %rdi.
 *
 * NOTE! We don't need to save %rax, because the functions
 * will always return the semaphore pointer in %rax (which
 * is also the input argument to these helpers)
 *
 * The following can clobber %rdx because the asm clobbers it:
 *   call_rwsem_down_write_failed
 *   call_rwsem_wake
 * but %rdi, %rsi, %rcx, %r8-r11 always need saving.
 */

#define save_common_regs \
	pushq %rdi; \
	pushq %rsi; \
	pushq %rcx; \
	pushq %r8;  \
	pushq %r9;  \
	pushq %r10; \
	pushq %r11

#define restore_common_regs \
	popq %r11; \
	popq %r10; \
	popq %r9; \
	popq %r8; \
	popq %rcx; \
	popq %rsi; \
	popq %rdi

#endif

/* Fix up special calling conventions */
ENTRY(call_rwsem_down_read_failed)
	FRAME_BEGIN
	save_common_regs
	__ASM_SIZE(push,) %__ASM_REG(dx)
	movq %rax,%rdi
	call rwsem_down_read_failed
	__ASM_SIZE(pop,) %__ASM_REG(dx)
	restore_common_regs
	FRAME_END
	ret
ENDPROC(call_rwsem_down_read_failed)

ENTRY(call_rwsem_down_write_failed)
	FRAME_BEGIN
	save_common_regs
	movq %rax,%rdi
	call rwsem_down_write_failed
	restore_common_regs
	FRAME_END
	ret
ENDPROC(call_rwsem_down_write_failed)

ENTRY(call_rwsem_down_write_failed_killable)
	FRAME_BEGIN
	save_common_regs
	movq %rax,%rdi
	call rwsem_down_write_failed_killable
	restore_common_regs
	FRAME_END
	ret
ENDPROC(call_rwsem_down_write_failed_killable)

ENTRY(call_rwsem_wake)
	FRAME_BEGIN
	/* do nothing if still outstanding active readers */
	__ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
	jnz 1f
	save_common_regs
	movq %rax,%rdi
	call rwsem_wake
	restore_common_regs
1:	FRAME_END
	ret
ENDPROC(call_rwsem_wake)

ENTRY(call_rwsem_downgrade_wake)
	FRAME_BEGIN
	save_common_regs
	__ASM_SIZE(push,) %__ASM_REG(dx)
	movq %rax,%rdi
	call rwsem_downgrade_wake
	__ASM_SIZE(pop,) %__ASM_REG(dx)
	restore_common_regs
	FRAME_END
	ret
ENDPROC(call_rwsem_downgrade_wake)
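
For context on the calling convention these stubs repair: the rwsem fast paths are inline asm in arch/x86/include/asm/rwsem.h, and they hand the struct rw_semaphore pointer to these helpers in %eax/%rax rather than in the normal first-argument register. That is why each stub does movq %rax,%rdi before calling the C slow path on x86-64 (the SysV ABI expects the first argument in %rdi), and why x86-32 only has to push %ecx by hand: %eax already carries the pointer and the return value, and the inline asm treats %edx as a scratch output that GCC reloads itself (the ____down_write() reference in the comment above). A rough sketch of the down_read fast path as emitted on x86-64, reconstructed here for illustration rather than quoted verbatim from the header:

	/* the "a" constraint preloads the rw_semaphore pointer into %rax */
	lock incq (%rax)		/* atomically add the reader bias to sem->count */
	jns 1f				/* result non-negative: no writer, fast path done */
	call call_rwsem_down_read_failed	/* stub saves regs, moves %rax into %rdi */
1:

The write-side fast path is similar but does its lock xadd through %rdx, which is why the x86-64 comment above lets call_rwsem_down_write_failed and call_rwsem_wake clobber %rdx while the read and downgrade stubs push and pop it around the call.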
v3.1
/*
 * x86 semaphore implementation.
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Portions Copyright 1999 Red Hat, Inc.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
 */

#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/dwarf2.h>

#define __ASM_HALF_REG(reg)	__ASM_SEL(reg, e##reg)
#define __ASM_HALF_SIZE(inst)	__ASM_SEL(inst##w, inst##l)

#ifdef CONFIG_X86_32

/*
 * The semaphore operations have a special calling sequence that
 * allows us to use a simpler in-line version of them. These routines
 * need to convert that sequence back into the C sequence when
 * there is contention on the semaphore.
 *
 * %eax contains the semaphore pointer on entry. Save the C-clobbered
 * registers (%eax, %edx and %ecx) except %eax, which is either a return
 * value or just gets clobbered.
 */

#define save_common_regs \
	pushl_cfi %ecx; CFI_REL_OFFSET ecx, 0

#define restore_common_regs \
	popl_cfi %ecx; CFI_RESTORE ecx

	/* Avoid uglifying the argument copying x86-64 needs to do. */
	.macro movq src, dst
	.endm

#else

/*
 * x86-64 rwsem wrappers
 *
 * This interfaces the inline asm code to the slow-path
 * C routines. We need to save the call-clobbered regs
 * that the asm does not mark as clobbered, and move the
 * argument from %rax to %rdi.
 *
 * NOTE! We don't need to save %rax, because the functions
 * will always return the semaphore pointer in %rax (which
 * is also the input argument to these helpers)
 *
 * The following can clobber %rdx because the asm clobbers it:
 *   call_rwsem_down_write_failed
 *   call_rwsem_wake
 * but %rdi, %rsi, %rcx, %r8-r11 always need saving.
 */

#define save_common_regs \
	pushq_cfi %rdi; CFI_REL_OFFSET rdi, 0; \
	pushq_cfi %rsi; CFI_REL_OFFSET rsi, 0; \
	pushq_cfi %rcx; CFI_REL_OFFSET rcx, 0; \
	pushq_cfi %r8;  CFI_REL_OFFSET r8,  0; \
	pushq_cfi %r9;  CFI_REL_OFFSET r9,  0; \
	pushq_cfi %r10; CFI_REL_OFFSET r10, 0; \
	pushq_cfi %r11; CFI_REL_OFFSET r11, 0

#define restore_common_regs \
	popq_cfi %r11; CFI_RESTORE r11; \
	popq_cfi %r10; CFI_RESTORE r10; \
	popq_cfi %r9;  CFI_RESTORE r9; \
	popq_cfi %r8;  CFI_RESTORE r8; \
	popq_cfi %rcx; CFI_RESTORE rcx; \
	popq_cfi %rsi; CFI_RESTORE rsi; \
	popq_cfi %rdi; CFI_RESTORE rdi

#endif

/* Fix up special calling conventions */
ENTRY(call_rwsem_down_read_failed)
	CFI_STARTPROC
	save_common_regs
	__ASM_SIZE(push,_cfi) %__ASM_REG(dx)
	CFI_REL_OFFSET __ASM_REG(dx), 0
	movq %rax,%rdi
	call rwsem_down_read_failed
	__ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
	CFI_RESTORE __ASM_REG(dx)
	restore_common_regs
	ret
	CFI_ENDPROC
ENDPROC(call_rwsem_down_read_failed)

ENTRY(call_rwsem_down_write_failed)
	CFI_STARTPROC
	save_common_regs
	movq %rax,%rdi
	call rwsem_down_write_failed
	restore_common_regs
	ret
	CFI_ENDPROC
ENDPROC(call_rwsem_down_write_failed)

ENTRY(call_rwsem_wake)
	CFI_STARTPROC
	/* do nothing if still outstanding active readers */
	__ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
	jnz 1f
	save_common_regs
	movq %rax,%rdi
	call rwsem_wake
	restore_common_regs
1:	ret
	CFI_ENDPROC
ENDPROC(call_rwsem_wake)

ENTRY(call_rwsem_downgrade_wake)
	CFI_STARTPROC
	save_common_regs
	__ASM_SIZE(push,_cfi) %__ASM_REG(dx)
	CFI_REL_OFFSET __ASM_REG(dx), 0
	movq %rax,%rdi
	call rwsem_downgrade_wake
	__ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
	CFI_RESTORE __ASM_REG(dx)
	restore_common_regs
	ret
	CFI_ENDPROC
ENDPROC(call_rwsem_downgrade_wake)
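
The v3.1 version predates the FRAME_BEGIN/FRAME_END helpers from asm/frame.h used in the v4.10.11 listing (those set up a conventional %rbp frame when CONFIG_FRAME_POINTER is enabled and expand to nothing otherwise). Instead it annotates every push and pop by hand for the DWARF unwinder, via the *_cfi wrappers and CFI_* macros from asm/dwarf2.h. As a rough sketch of what the 64-bit wrappers expanded to when the assembler supported .cfi directives, assuming the usual definitions of that era rather than quoting them verbatim:

	.macro pushq_cfi reg
	pushq \reg
	CFI_ADJUST_CFA_OFFSET 8		/* the stack grew by one quadword */
	.endm

	.macro popq_cfi reg
	popq \reg
	CFI_ADJUST_CFA_OFFSET -8	/* and shrank again on the way out */
	.endm

CFI_REL_OFFSET then records where each saved register sits relative to the frame's CFA, and CFI_RESTORE marks it as restored; when the assembler lacked .cfi support, all of these annotations expanded to nothing and only the bare push/pop remained.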