/* SPDX-License-Identifier: GPL-2.0 */
/* clear_page.S: UltraSparc optimized clear page.
 *
 * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
 */

#include <asm/visasm.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/export.h>

	/* What we used to do was lock a TLB entry into a specific
	 * TLB slot, clear the page with interrupts disabled, then
	 * restore the original TLB entry.  This was great for
	 * disturbing the TLB as little as possible, but it meant
	 * we had to keep interrupts disabled for a long time.
	 *
	 * Now, we simply use the normal TLB loading mechanism,
	 * and this makes the cpu choose a slot all by itself.
	 * Then we do a normal TLB flush on exit.  We need only
	 * disable preemption during the clear.
	 */
28 .text
29
30 .globl _clear_page
31 EXPORT_SYMBOL(_clear_page)
32_clear_page: /* %o0=dest */
33 ba,pt %xcc, clear_page_common
34 clr %o4
35
36 /* This thing is pretty important, it shows up
37 * on the profiles via do_anonymous_page().
38 */
39 .align 32
40 .globl clear_user_page
41 EXPORT_SYMBOL(clear_user_page)
42clear_user_page: /* %o0=dest, %o1=vaddr */
43 lduw [%g6 + TI_PRE_COUNT], %o2
44 sethi %hi(PAGE_OFFSET), %g2
45 sethi %hi(PAGE_SIZE), %o4
46
47 ldx [%g2 + %lo(PAGE_OFFSET)], %g2
48 sethi %hi(PAGE_KERNEL_LOCKED), %g3
49
50 ldx [%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
51 sub %o0, %g2, %g1 ! paddr
52
53 and %o1, %o4, %o0 ! vaddr D-cache alias bit
54
55 or %g1, %g3, %g1 ! TTE data
56 sethi %hi(TLBTEMP_BASE), %o3
57
58 add %o2, 1, %o4
59 add %o0, %o3, %o0 ! TTE vaddr
60
61 /* Disable preemption. */
62 mov TLB_TAG_ACCESS, %g3
63 stw %o4, [%g6 + TI_PRE_COUNT]
64
65 /* Load TLB entry. */
66 rdpr %pstate, %o4
67 wrpr %o4, PSTATE_IE, %pstate
68 stxa %o0, [%g3] ASI_DMMU
69 stxa %g1, [%g0] ASI_DTLB_DATA_IN
70 sethi %hi(KERNBASE), %g1
71 flush %g1
72 wrpr %o4, 0x0, %pstate
73
74 mov 1, %o4
75
clear_page_common:
	/* Common clearing loop.  In: %o0 = mapped dest, %o4 = demap
	 * flag; when %o4 != 0, %o2 = preempt count to restore and
	 * %g1-via-%o0 names the temporary mapping to demap.
	 */
	VISEntryHalf
	membar		#StoreLoad | #StoreStore | #LoadStore
	fzero		%f0
	sethi		%hi(PAGE_SIZE/64), %o1
	mov		%o0, %g1		! remember vaddr for tlbflush
	fzero		%f2
	or		%o1, %lo(PAGE_SIZE/64), %o1
	faddd		%f0, %f2, %f4		! 0 + 0 and 0 * 0 are both 0:
	fmuld		%f0, %f2, %f6		! spread zeroes into %f4-%f14
	faddd		%f0, %f2, %f8		! using both FP pipes
	fmuld		%f0, %f2, %f10

	faddd		%f0, %f2, %f12
	fmuld		%f0, %f2, %f14
1:	stda		%f0, [%o0 + %g0] ASI_BLK_P	! 64-byte block store of zeroes
	subcc		%o1, 1, %o1			! %o1 = blocks remaining
	bne,pt		%icc, 1b
	 add		%o0, 0x40, %o0			! delay slot: advance dest
	membar		#Sync				! drain block stores
	VISExitHalf

	brz,pn		%o4, out		! _clear_page path: nothing to undo
	 nop

	/* Demap the temporary TLB entry and re-enable preemption. */
	stxa		%g0, [%g1] ASI_DMMU_DEMAP
	membar		#Sync
	stw		%o2, [%g6 + TI_PRE_COUNT]

out:	retl
	 nop

/* clear_page.S: UltraSparc optimized clear page.
 *
 * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
 */

#include <asm/visasm.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/export.h>

	/* What we used to do was lock a TLB entry into a specific
	 * TLB slot, clear the page with interrupts disabled, then
	 * restore the original TLB entry.  This was great for
	 * disturbing the TLB as little as possible, but it meant
	 * we had to keep interrupts disabled for a long time.
	 *
	 * Now, we simply use the normal TLB loading mechanism,
	 * and this makes the cpu choose a slot all by itself.
	 * Then we do a normal TLB flush on exit.  We need only
	 * disable preemption during the clear.
	 */
27 .text
28
29 .globl _clear_page
30 EXPORT_SYMBOL(_clear_page)
31_clear_page: /* %o0=dest */
32 ba,pt %xcc, clear_page_common
33 clr %o4
34
35 /* This thing is pretty important, it shows up
36 * on the profiles via do_anonymous_page().
37 */
38 .align 32
39 .globl clear_user_page
40 EXPORT_SYMBOL(clear_user_page)
41clear_user_page: /* %o0=dest, %o1=vaddr */
42 lduw [%g6 + TI_PRE_COUNT], %o2
43 sethi %hi(PAGE_OFFSET), %g2
44 sethi %hi(PAGE_SIZE), %o4
45
46 ldx [%g2 + %lo(PAGE_OFFSET)], %g2
47 sethi %hi(PAGE_KERNEL_LOCKED), %g3
48
49 ldx [%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
50 sub %o0, %g2, %g1 ! paddr
51
52 and %o1, %o4, %o0 ! vaddr D-cache alias bit
53
54 or %g1, %g3, %g1 ! TTE data
55 sethi %hi(TLBTEMP_BASE), %o3
56
57 add %o2, 1, %o4
58 add %o0, %o3, %o0 ! TTE vaddr
59
60 /* Disable preemption. */
61 mov TLB_TAG_ACCESS, %g3
62 stw %o4, [%g6 + TI_PRE_COUNT]
63
64 /* Load TLB entry. */
65 rdpr %pstate, %o4
66 wrpr %o4, PSTATE_IE, %pstate
67 stxa %o0, [%g3] ASI_DMMU
68 stxa %g1, [%g0] ASI_DTLB_DATA_IN
69 sethi %hi(KERNBASE), %g1
70 flush %g1
71 wrpr %o4, 0x0, %pstate
72
73 mov 1, %o4
74
clear_page_common:
	/* Common clearing loop.  In: %o0 = mapped dest, %o4 = demap
	 * flag; when %o4 != 0, %o2 = preempt count to restore and
	 * %g1-via-%o0 names the temporary mapping to demap.
	 */
	VISEntryHalf
	membar		#StoreLoad | #StoreStore | #LoadStore
	fzero		%f0
	sethi		%hi(PAGE_SIZE/64), %o1
	mov		%o0, %g1		! remember vaddr for tlbflush
	fzero		%f2
	or		%o1, %lo(PAGE_SIZE/64), %o1
	faddd		%f0, %f2, %f4		! 0 + 0 and 0 * 0 are both 0:
	fmuld		%f0, %f2, %f6		! spread zeroes into %f4-%f14
	faddd		%f0, %f2, %f8		! using both FP pipes
	fmuld		%f0, %f2, %f10

	faddd		%f0, %f2, %f12
	fmuld		%f0, %f2, %f14
1:	stda		%f0, [%o0 + %g0] ASI_BLK_P	! 64-byte block store of zeroes
	subcc		%o1, 1, %o1			! %o1 = blocks remaining
	bne,pt		%icc, 1b
	 add		%o0, 0x40, %o0			! delay slot: advance dest
	membar		#Sync				! drain block stores
	VISExitHalf

	brz,pn		%o4, out		! _clear_page path: nothing to undo
	 nop

	/* Demap the temporary TLB entry and re-enable preemption. */
	stxa		%g0, [%g1] ASI_DMMU_DEMAP
	membar		#Sync
	stw		%o2, [%g6 + TI_PRE_COUNT]

out:	retl
	 nop
