/*
 *  linux/arch/arm/lib/copypage-armv4mc.S
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us thrashing
 * the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

/*
 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
 * specific hacks for copying pages efficiently.
 */
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)
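/*
 * Note on minicache_pgprot: L_PTE_PRESENT and L_PTE_YOUNG make the entry
 * immediately usable without a first-access fault, while L_PTE_MT_MINICACHE
 * selects the memory type that routes accesses through the mini data cache
 * instead of the main D-cache (see the file header above).
 */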
static DEFINE_SPINLOCK(minicache_lock);

/*
 * ARMv4 mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 *
 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
 * instruction.  If your processor does not supply this, you have to write your
 * own copy_user_highpage that does the right thing.
 */
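/*
 * Loop-count arithmetic: each pass of the loop below moves 64 bytes (four
 * LDM/STM pairs of 16 bytes each), so the counter is seeded with
 * PAGE_SIZE / 64.  A destination D line is invalidated before every 32 bytes
 * of stores, matching the 32-byte cache line of the SA11x0/XScale parts this
 * code targets.  The "@ n" annotations are rough per-instruction cycle
 * estimates.
 */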
static void __naked
mc_copy_user_page(void *from, void *to)
{
	asm volatile(
	"stmfd	sp!, {r4, lr}			@ 2\n\
	mov	r4, %2				@ 1\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
1:	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4+1\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r4, r4, #1			@ 1\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmneia	%0!, {r2, r3, ip, lr}		@ 4\n\
	bne	1b				@ 1\n\
	ldmfd	sp!, {r4, pc}			@ 3"
	:
	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
}

void v4_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto = kmap_atomic(to, KM_USER1);

	/* write back the source page's cache lines unless it is already clean */
	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

	spin_lock(&minicache_lock);

	/* map the source page at the minicache window, then copy through it */
	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
	flush_tlb_kernel_page(0xffff8000);

	mc_copy_user_page((void *)0xffff8000, kto);

	spin_unlock(&minicache_lock);

	kunmap_atomic(kto, KM_USER1);
}

/*
 * ARMv4 optimised clear_user_page
 */
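/*
 * Zeroing does not need the minicache window: there is nothing to read, and
 * each destination D line is invalidated just before it is written, so on
 * these read-allocate D-caches the zero stores go straight to the write
 * buffer without allocating (and thrashing) main D-cache lines.
 */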
void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
	asm volatile("\
	mov	r1, %2				@ 1\n\
	mov	r2, #0				@ 1\n\
	mov	r3, #0				@ 1\n\
	mov	ip, #0				@ 1\n\
	mov	lr, #0				@ 1\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r1, r1, #1			@ 1\n\
	bne	1b				@ 1"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 64)
	: "r1", "r2", "r3", "ip", "lr");
	kunmap_atomic(kaddr, KM_USER0);
}

struct cpu_user_fns v4_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = v4_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= v4_mc_copy_user_highpage,
};
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/lib/copypage-armv4mc.S
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us thrashing
 * the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)

static DEFINE_RAW_SPINLOCK(minicache_lock);

/*
 * ARMv4 mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 *
 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
 * instruction.  If your processor does not supply this, you have to write your
 * own copy_user_highpage that does the right thing.
 */
static void mc_copy_user_page(void *from, void *to)
{
	int tmp;

	asm volatile ("\
	.syntax unified\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
1:	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4+1\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	%2, %2, #1			@ 1\n\
	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
	ldmiane	%0!, {r2, r3, ip, lr}		@ 4\n\
	bne	1b				@ "
	: "+&r" (from), "+&r" (to), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64)
	: "r2", "r3", "ip", "lr");
}
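/*
 * Unlike the older __naked variant above, this version lets the compiler
 * manage the stack frame: "from", "to" and the loop counter are passed as
 * read-write/earlyclobber operands ("+&r" / "=&r"), and only the scratch
 * registers r2, r3, ip and lr are clobbered explicitly.  The ".syntax
 * unified" directive goes with the ldmiane spelling (unified syntax) of what
 * the older code wrote as ldmneia.
 */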
void v4_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto = kmap_atomic(to);

	/* write back the source page's cache lines unless it is already clean */
	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping_file(from), from);

	raw_spin_lock(&minicache_lock);

	/* map the source page at the minicache window, then copy through it */
	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	raw_spin_unlock(&minicache_lock);

	kunmap_atomic(kto);
}

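/*
 * Compared with the older variant above: the KM_USER* kmap slots are gone
 * from kmap_atomic()/kunmap_atomic(), the file-backed mapping is looked up
 * with page_mapping_file(), and set_top_pte() installs the temporary PTE at
 * COPYPAGE_MINICACHE and is expected to take care of the TLB maintenance
 * that the explicit flush_tlb_kernel_page() used to do.  The lock is a raw
 * spinlock so this short critical section remains a true spinlock even on
 * preempt-RT configurations.
 */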
/*
 * ARMv4 optimised clear_user_page
 */
void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
	asm volatile("\
	mov	r1, %2				@ 1\n\
	mov	r2, #0				@ 1\n\
	mov	r3, #0				@ 1\n\
	mov	ip, #0				@ 1\n\
	mov	lr, #0				@ 1\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r1, r1, #1			@ 1\n\
	bne	1b				@ 1"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 64)
	: "r1", "r2", "r3", "ip", "lr");
	kunmap_atomic(kaddr);
}

struct cpu_user_fns v4_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = v4_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= v4_mc_copy_user_highpage,
};
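/*
 * v4_mc_user_fns is __initdata: during early boot the processor setup code
 * copies the selected cpu_user_fns into the global "cpu_user", after which
 * this structure is no longer referenced.  The generic ARM page helpers then
 * dispatch through it, roughly as sketched below (paraphrased from
 * arch/arm/include/asm/page.h; exact names vary between kernel versions):
 *
 *	extern struct cpu_user_fns cpu_user;
 *
 *	#define __cpu_clear_user_highpage	cpu_user.cpu_clear_user_highpage
 *	#define __cpu_copy_user_highpage	cpu_user.cpu_copy_user_highpage
 *
 *	#define clear_user_highpage(page, vaddr) \
 *		__cpu_clear_user_highpage(page, vaddr)
 *	#define copy_user_highpage(to, from, vaddr, vma) \
 *		__cpu_copy_user_highpage(to, from, vaddr, vma)
 */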