@ NOTE(review): removed a "Loading..." web-page artifact left over from scraping; not part of the source file.
/*
 *  linux/arch/arm/mm/tlbv4.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  ARM architecture version 4 TLB handling functions.
 *  These assume a split I/D TLBs, and no write buffer.
 *
 *  Processors: ARM720T
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/asm-offsets.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"

	.align	5
/*
 *	v4_flush_user_tlb_range(start, end, mm)
 *
 *	Invalidate a range of TLB entries in the specified user address space.
 *
 *	- start - range start address (r0)
 *	- end	- range end address (r1)
 *	- mm	- mm_struct describing address space (r2, via vma)
 */
	.align	5
ENTRY(v4_flush_user_tlb_range)
	vma_vm_mm ip, r2			@ ip = vma->vm_mm
	act_mm	r3				@ get current->active_mm
	eors	r3, ip, r3			@ == mm ?
	movne	pc, lr				@ no, we dont do anything
.v4_flush_kern_tlb_range:
	bic	r0, r0, #0x0ff			@ clear bits 0-11: round start
	bic	r0, r0, #0xf00			@ down to a page boundary
1:	mcr	p15, 0, r0, c8, c7, 1		@ invalidate TLB entry (by MVA)
	add	r0, r0, #PAGE_SZ		@ advance one page
	cmp	r0, r1
	blo	1b				@ loop while r0 < end
	mov	pc, lr

/*
 *	v4_flush_kern_tlb_range(start, end)
 *
 *	Invalidate a range of TLB entries in the specified kernel
 *	address range.
 *
 *	- start - virtual address (may not be aligned)
 *	- end	- virtual address (may not be aligned)
 *
 *	Same body as the user variant minus the active_mm check, so it is
 *	aliased onto the .v4_flush_kern_tlb_range local entry point above.
 */
	.globl	v4_flush_kern_tlb_range
	.equ	v4_flush_kern_tlb_range, .v4_flush_kern_tlb_range

	__INITDATA

	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
	define_tlb_functions v4, v4_tlb_flags
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/tlbv4.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 *  ARM architecture version 4 TLB handling functions.
 *  These assume a split I/D TLBs, and no write buffer.
 *
 *  Processors: ARM720T
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"

	.align	5
/*
 *	v4_flush_user_tlb_range(start, end, mm)
 *
 *	Invalidate a range of TLB entries in the specified user address space.
 *
 *	- start - range start address (r0)
 *	- end	- range end address (r1)
 *	- mm	- mm_struct describing address space (r2, via vma)
 */
	.align	5
ENTRY(v4_flush_user_tlb_range)
	vma_vm_mm ip, r2			@ ip = vma->vm_mm
	act_mm	r3				@ get current->active_mm
	eors	r3, ip, r3			@ == mm ?
	retne	lr				@ no, we dont do anything
.v4_flush_kern_tlb_range:
	bic	r0, r0, #0x0ff			@ clear bits 0-11: round start
	bic	r0, r0, #0xf00			@ down to a page boundary
1:	mcr	p15, 0, r0, c8, c7, 1		@ invalidate TLB entry (by MVA)
	add	r0, r0, #PAGE_SZ		@ advance one page
	cmp	r0, r1
	blo	1b				@ loop while r0 < end
	ret	lr

/*
 *	v4_flush_kern_tlb_range(start, end)
 *
 *	Invalidate a range of TLB entries in the specified kernel
 *	address range.
 *
 *	- start - virtual address (may not be aligned)
 *	- end	- virtual address (may not be aligned)
 *
 *	Same body as the user variant minus the active_mm check, so it is
 *	aliased onto the .v4_flush_kern_tlb_range local entry point above.
 */
	.globl	v4_flush_kern_tlb_range
	.equ	v4_flush_kern_tlb_range, .v4_flush_kern_tlb_range

	__INITDATA

	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
	define_tlb_functions v4, v4_tlb_flags