1/*
2 * Copyright 2011 Calxeda, Inc.
3 * Based on PPC version Copyright 2007 MontaVista Software, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17#ifndef ASM_EDAC_H
18#define ASM_EDAC_H
19/*
20 * ECC atomic, DMA, SMP and interrupt safe scrub function.
21 * Implements the per arch edac_atomic_scrub() that EDAC use for software
22 * ECC scrubbing. It reads memory and then writes back the original
23 * value, allowing the hardware to detect and correct memory errors.
24 */
25
26static inline void edac_atomic_scrub(void *va, u32 size)
27{
28#if __LINUX_ARM_ARCH__ >= 6
29 unsigned int *virt_addr = va;
30 unsigned int temp, temp2;
31 unsigned int i;
32
33 for (i = 0; i < size / sizeof(*virt_addr); i++, virt_addr++) {
34 /* Very carefully read and write to memory atomically
35 * so we are interrupt, DMA and SMP safe.
36 */
37 __asm__ __volatile__("\n"
38 "1: ldrex %0, [%2]\n"
39 " strex %1, %0, [%2]\n"
40 " teq %1, #0\n"
41 " bne 1b\n"
42 : "=&r"(temp), "=&r"(temp2)
43 : "r"(virt_addr)
44 : "cc");
45 }
46#endif
47}
48
49#endif
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Copyright 2011 Calxeda, Inc.
4 * Based on PPC version Copyright 2007 MontaVista Software, Inc.
5 */
6#ifndef ASM_EDAC_H
7#define ASM_EDAC_H
8/*
9 * ECC atomic, DMA, SMP and interrupt safe scrub function.
10 * Implements the per arch edac_atomic_scrub() that EDAC use for software
11 * ECC scrubbing. It reads memory and then writes back the original
12 * value, allowing the hardware to detect and correct memory errors.
13 */
14
15static inline void edac_atomic_scrub(void *va, u32 size)
16{
17#if __LINUX_ARM_ARCH__ >= 6
18 unsigned int *virt_addr = va;
19 unsigned int temp, temp2;
20 unsigned int i;
21
22 for (i = 0; i < size / sizeof(*virt_addr); i++, virt_addr++) {
23 /* Very carefully read and write to memory atomically
24 * so we are interrupt, DMA and SMP safe.
25 */
26 __asm__ __volatile__("\n"
27 "1: ldrex %0, [%2]\n"
28 " strex %1, %0, [%2]\n"
29 " teq %1, #0\n"
30 " bne 1b\n"
31 : "=&r"(temp), "=&r"(temp2)
32 : "r"(virt_addr)
33 : "cc");
34 }
35#endif
36}
37
38#endif