/*
 *  linux/arch/arm/mm/tlbv4.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  ARM architecture version 4 TLB handling functions.
 *  These assume a split I/D TLBs, and no write buffer.
 *
 *  Processors: ARM720T
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"
21
22 .align 5
23/*
24 * v4_flush_user_tlb_range(start, end, mm)
25 *
26 * Invalidate a range of TLB entries in the specified user address space.
27 *
28 * - start - range start address
29 * - end - range end address
30 * - mm - mm_struct describing address space
31 */
32 .align 5
33ENTRY(v4_flush_user_tlb_range)
34 vma_vm_mm ip, r2
35 act_mm r3 @ get current->active_mm
36 eors r3, ip, r3 @ == mm ?
37 retne lr @ no, we dont do anything
38.v4_flush_kern_tlb_range:
39 bic r0, r0, #0x0ff
40 bic r0, r0, #0xf00
411: mcr p15, 0, r0, c8, c7, 1 @ invalidate TLB entry
42 add r0, r0, #PAGE_SZ
43 cmp r0, r1
44 blo 1b
45 ret lr
46
/*
 *	v4_flush_kern_tlb_range(start, end)
 *
 *	Invalidate a range of TLB entries in the specified kernel
 *	address range.
 *
 *	- start - virtual address (may not be aligned)
 *	- end   - virtual address (may not be aligned)
 *
 *	Kernel mappings need no active_mm check, so this entry point
 *	simply aliases the local label inside v4_flush_user_tlb_range
 *	that lies just past that check.
 */
	.globl	v4_flush_kern_tlb_range
	.equ	v4_flush_kern_tlb_range, .v4_flush_kern_tlb_range
58
59 __INITDATA
60
61 /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
62 define_tlb_functions v4, v4_tlb_flags
/*
 *  linux/arch/arm/mm/tlbv4.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  ARM architecture version 4 TLB handling functions.
 *  These assume a split I/D TLBs, and no write buffer.
 *
 *  Processors: ARM720T
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/asm-offsets.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"

	.align	5
/*
 *	v4_flush_user_tlb_range(start, end, mm)
 *
 *	Invalidate a range of TLB entries in the specified user
 *	address space.
 *
 *	- start - range start address
 *	- end   - range end address
 *	- mm    - mm_struct describing address space
 *
 *	NOTE(review): this older revision returns with "mov pc, lr";
 *	<asm/assembler.h> (which provides the "ret" macro) is not
 *	included here, so the plain-mov form is kept intentionally.
 */
	.align	5
ENTRY(v4_flush_user_tlb_range)
	vma_vm_mm ip, r2			@ ip = mm of the range's address space
	act_mm	r3				@ get current->active_mm
	eors	r3, ip, r3			@ == mm ?
	movne	pc, lr				@ no, we don't do anything
.v4_flush_kern_tlb_range:
	bic	r0, r0, #0x0ff			@ clear bits 0..11: round start
	bic	r0, r0, #0xf00			@ down to a page boundary
1:	mcr	p15, 0, r0, c8, c7, 1		@ invalidate TLB entry for this page
	add	r0, r0, #PAGE_SZ		@ step to the next page
	cmp	r0, r1				@ reached end yet?
	blo	1b				@ no - keep invalidating
	mov	pc, lr
45
/*
 *	v4_flush_kern_tlb_range(start, end)
 *
 *	Invalidate a range of TLB entries in the specified kernel
 *	address range.
 *
 *	- start - virtual address (may not be aligned)
 *	- end   - virtual address (may not be aligned)
 *
 *	Kernel mappings need no active_mm check, so this entry point
 *	simply aliases the local label inside v4_flush_user_tlb_range
 *	that lies just past that check.
 */
	.globl	v4_flush_kern_tlb_range
	.equ	v4_flush_kern_tlb_range, .v4_flush_kern_tlb_range
57
58 __INITDATA
59
60 /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
61 define_tlb_functions v4, v4_tlb_flags