/*
 * linux/arch/arm/mm/copypage-v6.c
 *
 * Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>

#include "mm.h"

#if SHMLBA > 16384
#error FIX ME
#endif

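/*
 * On an aliasing VIPT D-cache, SHMLBA gives the cache colour
 * granularity: with 4 KiB pages and SHMLBA of at most 16 KiB there are
 * at most four colours, so the per-colour copy windows used below need
 * at most four page slots each.  CACHE_COLOUR() extracts the colour
 * bits, roughly (vaddr & (SHMLBA - 1)) >> PAGE_SHIFT.  The windows are
 * shared by every CPU, hence the lock below serialising their use.
 */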
static DEFINE_RAW_SPINLOCK(v6_lock);

/*
 * Copy the user page.  No aliasing to deal with, so we can just
 * attack the kernel's existing mapping of these pages.
 */
static void v6_copy_user_highpage_nonaliasing(struct page *to,
	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kfrom = kmap_atomic(from);
	kto = kmap_atomic(to);
	copy_page(kto, kfrom);
	kunmap_atomic(kto);
	kunmap_atomic(kfrom);
}
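/*
 * kmap_atomic() slots are stack-like, so the unmaps above are issued
 * in the reverse order of the maps, releasing the most recently taken
 * mapping first.
 */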

/*
 * Clear the user page.  No aliasing to deal with, so we can just
 * attack the kernel's existing mapping of this page.
 */
static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

/*
 * Discard data in the kernel mapping for the new page.
 * FIXME: this relies on the MCRR cache range operation being supported.
 */
static void discard_old_kernel_data(void *kto)
{
	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
	   :
	   : "r" (kto),
	     "r" ((unsigned long)kto + PAGE_SIZE - 1)
	   : "cc");
}
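/*
 * A note on the MCRR above (a hedged reading of the ARM1136 cache-range
 * ops): p15 c6 via MCRR is the "invalidate data cache range" operation,
 * whose register operands are the inclusive end (%1) and start (%0)
 * virtual addresses, so the whole destination page is dropped from the
 * D-cache without writeback.  Discarding rather than cleaning is safe
 * here because every byte of the page is about to be rewritten.
 */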

/*
 * Copy the page, taking account of the cache colour.
 */
static void v6_copy_user_highpage_aliasing(struct page *to,
	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long kfrom, kto;

	/*
	 * Write back the source page's kernel mapping if it may be
	 * dirty, so the copy made through the aliased window below
	 * sees up-to-date data.
	 */
	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(to));

	/*
	 * Now copy the page using the same cache colour as the
	 * page's ultimate destination.
	 */
	raw_spin_lock(&v6_lock);

	kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
	kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);

	set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));
	set_top_pte(kto, mk_pte(to, PAGE_KERNEL));

	copy_page((void *)kto, (void *)kfrom);

	raw_spin_unlock(&v6_lock);
}
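/*
 * Worked example of the window arithmetic, assuming 4 KiB pages and
 * the window bases this file used historically (COPYPAGE_V6_FROM =
 * 0xffff8000, COPYPAGE_V6_TO = 0xffffc000): a vaddr of colour 2 gives
 *
 *	kfrom = 0xffff8000 + (2 << 12) = 0xffffa000
 *	kto   = 0xffffc000 + (2 << 12) = 0xffffe000
 *
 * set_top_pte() is assumed to rewrite the window PTE and flush its TLB
 * entry; earlier revisions called flush_tlb_kernel_page() explicitly
 * after set_pte_ext().
 */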

/*
 * Clear the user page.  We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the user
 * page.
 */
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
{
	unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(page));

	/*
	 * Now clear the page using the same cache colour as
	 * the page's ultimate destination.
	 */
	raw_spin_lock(&v6_lock);

	set_top_pte(to, mk_pte(page, PAGE_KERNEL));
	clear_page((void *)to);

	raw_spin_unlock(&v6_lock);
}
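/*
 * The discard above matters even though the page is only being
 * cleared: stale dirty lines in the kernel's own, differently coloured
 * mapping could otherwise be evicted later and overwrite the freshly
 * zeroed data in memory.
 */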

struct cpu_user_fns v6_user_fns __initdata = {
	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
	.cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing,
};

/*
 * The non-aliasing routines are the compile-time default; if the boot
 * CPU turns out to have an aliasing VIPT cache, switch to the
 * colour-aware routines before userspace starts.
 */
static int __init v6_userpage_init(void)
{
	if (cache_is_vipt_aliasing()) {
		cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
	}

	return 0;
}

core_initcall(v6_userpage_init);
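/*
 * How these hooks are reached (a sketch, assuming the usual ARM glue
 * in <asm/page.h>): generic mm code calls clear_user_highpage() and
 * copy_user_highpage(), which on a multi-user-fns kernel expand to
 * indirect calls through the cpu_user table, e.g.
 *
 *	cpu_user.cpu_copy_user_highpage(to, from, vaddr, vma);
 *
 * so the pointers installed by the initcall above take effect for all
 * subsequent user-page copies and clears.
 */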