v6.9.4
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * vlock.S - simple voting lock implementation for ARM
 *
 * Created by:	Dave Martin, 2012-08-16
 * Copyright:	(C) 2012-2013  Linaro Limited
 *
 * This algorithm is described in more detail in
 * Documentation/arch/arm/vlocks.rst.
 */

#include <linux/linkage.h>
#include "vlock.h"

.arch armv7-a

/* Select different code if voting flags can fit in a single word. */
#if VLOCK_VOTING_SIZE > 4
#define FEW(x...)
#define MANY(x...) x
#else
#define FEW(x...) x
#define MANY(x...)
#endif
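
/*
 * VLOCK_VOTING_OFFSET and VLOCK_VOTING_SIZE come from vlock.h.  Each CPU
 * owns one voting flag byte, so when VLOCK_VOTING_SIZE is no more than 4
 * the whole voting area fits in one 32-bit word: the FEW() lines are
 * compiled in and the wait loop in vlock_trylock below polls all flags
 * with a single word load.  For larger clusters the MANY() lines are used
 * instead and the loop walks the voting area one word at a time.
 */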

@ voting lock for first-man coordination

.macro voting_begin rbase:req, rcpu:req, rscratch:req
	mov	\rscratch, #1
	strb	\rscratch, [\rbase, \rcpu]
	dmb
.endm

.macro voting_end rbase:req, rcpu:req, rscratch:req
	dmb
	mov	\rscratch, #0
	strb	\rscratch, [\rbase, \rcpu]
	dsb	st
	sev
.endm
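
@ Together the two macros bracket the owner check-and-vote in vlock_trylock:
@ voting_begin raises this CPU's voting flag (the DMB orders the flag store
@ before the subsequent owner accesses), and voting_end lowers it again and
@ issues DSB + SEV so that any CPU sleeping in the WFE wait loop below
@ re-examines the voting flags.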

/*
 * The vlock structure must reside in Strongly-Ordered or Device memory.
 * This implementation deliberately eliminates most of the barriers which
 * would be required for other memory types, and assumes that independent
 * writes to neighbouring locations within a cacheline do not interfere
 * with one another.
 */

@ r0: lock structure base
@ r1: CPU ID (0-based index within cluster)
ENTRY(vlock_trylock)
	add	r1, r1, #VLOCK_VOTING_OFFSET

	voting_begin	r0, r1, r2

	ldrb	r2, [r0, #VLOCK_OWNER_OFFSET]	@ check whether lock is held
	cmp	r2, #VLOCK_OWNER_NONE
	bne	trylock_fail			@ fail if so

	@ Control dependency implies strb not observable before previous ldrb.

	strb	r1, [r0, #VLOCK_OWNER_OFFSET]	@ submit my vote

	voting_end	r0, r1, r2		@ implies DMB

	@ Wait for the current round of voting to finish:

 MANY(	mov	r3, #VLOCK_VOTING_OFFSET			)
0:
 MANY(	ldr	r2, [r0, r3]					)
 FEW(	ldr	r2, [r0, #VLOCK_VOTING_OFFSET]			)
	cmp	r2, #0
	wfene
	bne	0b
 MANY(	add	r3, r3, #4					)
 MANY(	cmp	r3, #VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE	)
 MANY(	bne	0b						)

	@ Check who won:

	dmb
	ldrb	r2, [r0, #VLOCK_OWNER_OFFSET]
	eor	r0, r1, r2			@ zero if I won, else nonzero
	bx	lr

trylock_fail:
	voting_end	r0, r1, r2
	mov	r0, #1				@ nonzero indicates that I lost
	bx	lr
ENDPROC(vlock_trylock)

@ r0: lock structure base
ENTRY(vlock_unlock)
	dmb
	mov	r1, #VLOCK_OWNER_NONE
	strb	r1, [r0, #VLOCK_OWNER_OFFSET]
	dsb	st
	sev
	bx	lr
ENDPROC(vlock_unlock)
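
For readers who prefer C, here is a rough model of the election the assembly above implements (the algorithm described in Documentation/arch/arm/vlocks.rst). It is not kernel code: the struct layout, the NR_CPUS_MODEL cluster size, the VOTE_NONE value and the *_model function names are made up for illustration, and plain volatile accesses stand in for the Strongly-Ordered/Device-memory accesses, barriers and WFE/SEV signalling the assembly relies on. Treat it purely as a reading aid.

#include <stdint.h>

#define NR_CPUS_MODEL	4	/* illustrative cluster size */
#define VOTE_NONE	0xff	/* illustrative stand-in for VLOCK_OWNER_NONE */

struct vlock_model {
	volatile uint8_t owner;				/* corresponds to VLOCK_OWNER_OFFSET */
	volatile uint8_t voting[NR_CPUS_MODEL];		/* corresponds to the voting flag bytes */
};

/* Returns 0 if this CPU won the lock, nonzero otherwise. */
static int vlock_trylock_model(struct vlock_model *v, unsigned int cpu)
{
	unsigned int i;

	v->voting[cpu] = 1;			/* voting_begin: announce participation */

	if (v->owner != VOTE_NONE) {		/* lock already held: bail out */
		v->voting[cpu] = 0;		/* voting_end */
		return 1;
	}

	v->owner = (uint8_t)cpu;		/* submit my vote; a later voter may overwrite it */
	v->voting[cpu] = 0;			/* voting_end */

	for (i = 0; i < NR_CPUS_MODEL; i++)	/* wait for the current voting round to finish */
		while (v->voting[i])
			;

	return v->owner == cpu ? 0 : 1;		/* whoever's vote survived owns the lock */
}

static void vlock_unlock_model(struct vlock_model *v)
{
	v->owner = VOTE_NONE;			/* release: mark the lock unowned */
}

As in the assembly, a return value of 0 from the trylock means this CPU won the election; any other value means the lock was already held or another CPU's vote prevailed, and the caller must not touch the protected resource.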