arch/sparc/lib/clear_page.S — UltraSPARC optimized page clearing.
Two snapshots of this file follow: Linux v6.2 first, then Linux v3.5.6.

Linux v6.2:
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/* clear_page.S: UltraSparc optimized clear page.
  3 *
  4 * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
  5 * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
  6 */
  7
  8#include <linux/pgtable.h>
  9#include <asm/visasm.h>
 10#include <asm/thread_info.h>
 11#include <asm/page.h>
 
 12#include <asm/spitfire.h>
 13#include <asm/head.h>
 14#include <asm/export.h>
 15
 16	/* What we used to do was lock a TLB entry into a specific
 17	 * TLB slot, clear the page with interrupts disabled, then
 18	 * restore the original TLB entry.  This was great for
 19	 * disturbing the TLB as little as possible, but it meant
 20	 * we had to keep interrupts disabled for a long time.
 21	 *
 22	 * Now, we simply use the normal TLB loading mechanism,
 23	 * and this makes the cpu choose a slot all by itself.
 24	 * Then we do a normal TLB flush on exit.  We need only
 25	 * disable preemption during the clear.
 26	 */
 27
  28	.text
  29
	/* _clear_page(dest): clear a page through the address in %o0.
	 * %o4 is cleared so that clear_page_common skips the TLB demap
	 * and preempt-count restore on exit (no temporary mapping made).
	 */
  30	.globl		_clear_page
  31	EXPORT_SYMBOL(_clear_page)
  32_clear_page:		/* %o0=dest */
  33	ba,pt		%xcc, clear_page_common
  34	 clr		%o4
  35
  36	/* This thing is pretty important, it shows up
  37	 * on the profiles via do_anonymous_page().
  38	 */
  39	.align		32
  40	.globl		clear_user_page
  41	EXPORT_SYMBOL(clear_user_page)
	/* clear_user_page(dest, vaddr): install a temporary DTLB entry
	 * mapping the page at TLBTEMP_BASE + (vaddr & PAGE_SIZE) -- i.e.
	 * with the same D-cache alias colour as the user mapping -- clear
	 * through it, then demap it.  Preemption is disabled throughout.
	 */
  42clear_user_page:	/* %o0=dest, %o1=vaddr */
	/* %o2 = saved preempt count, restored after the clear. */
  43	lduw		[%g6 + TI_PRE_COUNT], %o2
  44	sethi		%hi(PAGE_OFFSET), %g2
	/* %o4 = PAGE_SIZE, used below as the D-cache alias mask
	 * (sethi suffices; NOTE(review): assumes PAGE_SIZE has no bits
	 * below bit 10 -- confirm against asm/page.h).
	 */
  45	sethi		%hi(PAGE_SIZE), %o4
  46
	/* PAGE_OFFSET is a runtime variable in this version; load it. */
  47	ldx		[%g2 + %lo(PAGE_OFFSET)], %g2
  48	sethi		%hi(PAGE_KERNEL_LOCKED), %g3
  49
  50	ldx		[%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
  51	sub		%o0, %g2, %g1		! paddr
  52
  53	and		%o1, %o4, %o0		! vaddr D-cache alias bit
  54
  55	or		%g1, %g3, %g1		! TTE data
  56	sethi		%hi(TLBTEMP_BASE), %o3
  57
	/* %o4 = preempt count + 1; stored below to disable preemption. */
  58	add		%o2, 1, %o4
  59	add		%o0, %o3, %o0		! TTE vaddr
  60
  61	/* Disable preemption.  */
  62	mov		TLB_TAG_ACCESS, %g3
  63	stw		%o4, [%g6 + TI_PRE_COUNT]
  64
  65	/* Load TLB entry.  */
	/* wrpr xors the register with the immediate, so PSTATE_IE is
	 * toggled off here: the tag-access/data-in store pair must not
	 * be split by another DTLB update from an interrupt.
	 */
  66	rdpr		%pstate, %o4
  67	wrpr		%o4, PSTATE_IE, %pstate
  68	stxa		%o0, [%g3] ASI_DMMU
  69	stxa		%g1, [%g0] ASI_DTLB_DATA_IN
	/* NOTE(review): flush of a mapped kernel address appears to be
	 * the required synchronization after the ASI stores, before
	 * %pstate (and thus interrupts) is restored -- confirm against
	 * the UltraSPARC MMU requirements.
	 */
  70	sethi		%hi(KERNBASE), %g1
  71	flush		%g1
  72	wrpr		%o4, 0x0, %pstate
  73
	/* Flag for clear_page_common: demap + preempt restore needed. */
  74	mov		1, %o4
  75
	/* clear_page_common: zero PAGE_SIZE bytes at %o0 using 64-byte
	 * VIS block stores.  In: %o0 = dest, %o4 = cleanup flag; when
	 * %o4 != 0, %o2 holds the saved preempt count and the entry
	 * mapping %o0 is demapped on exit.
	 */
  76clear_page_common:
  77	VISEntryHalf
  78	membar		#StoreLoad | #StoreStore | #LoadStore
  79	fzero		%f0
  80	sethi		%hi(PAGE_SIZE/64), %o1
  81	mov		%o0, %g1		! remember vaddr for tlbflush
  82	fzero		%f2
	/* %o1 = PAGE_SIZE/64 = number of 64-byte block stores to issue. */
  83	or		%o1, %lo(PAGE_SIZE/64), %o1
	/* 0+0 and 0*0 propagate zero into %f4-%f14, so %f0-%f14 form one
	 * all-zero 64-byte register block for stda below.
	 */
  84	faddd		%f0, %f2, %f4
  85	fmuld		%f0, %f2, %f6
  86	faddd		%f0, %f2, %f8
  87	fmuld		%f0, %f2, %f10
  88
  89	faddd		%f0, %f2, %f12
  90	fmuld		%f0, %f2, %f14
  911:	stda		%f0, [%o0 + %g0] ASI_BLK_P
  92	subcc		%o1, 1, %o1
  93	bne,pt		%icc, 1b
  94	 add		%o0, 0x40, %o0
  95	membar		#Sync
  96	VISExitHalf
  97
	/* _clear_page path (%o4 == 0): nothing to demap. */
  98	brz,pn		%o4, out
  99	 nop
 100
	/* Demap the temporary entry (start vaddr saved in %g1), then
	 * restore the original preempt count, re-enabling preemption.
	 */
 101	stxa		%g0, [%g1] ASI_DMMU_DEMAP
 102	membar		#Sync
 103	stw		%o2, [%g6 + TI_PRE_COUNT]
 104
 105out:	retl
 106	 nop
 107
107
Linux v3.5.6 (older snapshot of the same file, for comparison):

  1/* clear_page.S: UltraSparc optimized clear page.
  2 *
  3 * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
  4 * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
  5 */
  6
 
  7#include <asm/visasm.h>
  8#include <asm/thread_info.h>
  9#include <asm/page.h>
 10#include <asm/pgtable.h>
 11#include <asm/spitfire.h>
 12#include <asm/head.h>
 
 13
 14	/* What we used to do was lock a TLB entry into a specific
 15	 * TLB slot, clear the page with interrupts disabled, then
 16	 * restore the original TLB entry.  This was great for
 17	 * disturbing the TLB as little as possible, but it meant
 18	 * we had to keep interrupts disabled for a long time.
 19	 *
 20	 * Now, we simply use the normal TLB loading mechanism,
 21	 * and this makes the cpu choose a slot all by itself.
 22	 * Then we do a normal TLB flush on exit.  We need only
 23	 * disable preemption during the clear.
 24	 */
 25
  26	.text
  27
	/* _clear_page(dest): clear a page through the address in %o0.
	 * %o4 is cleared so that clear_page_common skips the TLB demap
	 * and preempt-count restore on exit (no temporary mapping made).
	 */
  28	.globl		_clear_page
  
  29_clear_page:		/* %o0=dest */
  30	ba,pt		%xcc, clear_page_common
  31	 clr		%o4
  32
  33	/* This thing is pretty important, it shows up
  34	 * on the profiles via do_anonymous_page().
  35	 */
  36	.align		32
  37	.globl		clear_user_page
  
	/* clear_user_page(dest, vaddr): install a temporary DTLB entry
	 * mapping the page at TLBTEMP_BASE + (vaddr & PAGE_SIZE) -- i.e.
	 * with the same D-cache alias colour as the user mapping -- clear
	 * through it, then demap it.  Preemption is disabled throughout.
	 */
  38clear_user_page:	/* %o0=dest, %o1=vaddr */
	/* %o2 = saved preempt count, restored after the clear. */
  39	lduw		[%g6 + TI_PRE_COUNT], %o2
  40	sethi		%uhi(PAGE_OFFSET), %g2
	/* %o4 = PAGE_SIZE, used below as the D-cache alias mask
	 * (sethi suffices; NOTE(review): assumes PAGE_SIZE has no bits
	 * below bit 10 -- confirm against asm/page.h).
	 */
  41	sethi		%hi(PAGE_SIZE), %o4
  42
	/* In this version PAGE_OFFSET is a compile-time constant: %uhi
	 * yields its upper 22 bits, shifted here into bits 63..42.
	 */
  43	sllx		%g2, 32, %g2
  44	sethi		%hi(PAGE_KERNEL_LOCKED), %g3
  45
  46	ldx		[%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
  47	sub		%o0, %g2, %g1		! paddr
  48
  49	and		%o1, %o4, %o0		! vaddr D-cache alias bit
  50
  51	or		%g1, %g3, %g1		! TTE data
  52	sethi		%hi(TLBTEMP_BASE), %o3
  53
	/* %o4 = preempt count + 1; stored below to disable preemption. */
  54	add		%o2, 1, %o4
  55	add		%o0, %o3, %o0		! TTE vaddr
  56
  57	/* Disable preemption.  */
  58	mov		TLB_TAG_ACCESS, %g3
  59	stw		%o4, [%g6 + TI_PRE_COUNT]
  60
  61	/* Load TLB entry.  */
	/* wrpr xors the register with the immediate, so PSTATE_IE is
	 * toggled off here: the tag-access/data-in store pair must not
	 * be split by another DTLB update from an interrupt.
	 */
  62	rdpr		%pstate, %o4
  63	wrpr		%o4, PSTATE_IE, %pstate
  64	stxa		%o0, [%g3] ASI_DMMU
  65	stxa		%g1, [%g0] ASI_DTLB_DATA_IN
	/* NOTE(review): flush of a mapped kernel address appears to be
	 * the required synchronization after the ASI stores, before
	 * %pstate (and thus interrupts) is restored -- confirm against
	 * the UltraSPARC MMU requirements.
	 */
  66	sethi		%hi(KERNBASE), %g1
  67	flush		%g1
  68	wrpr		%o4, 0x0, %pstate
  69
	/* Flag for clear_page_common: demap + preempt restore needed. */
  70	mov		1, %o4
  71
	/* clear_page_common: zero PAGE_SIZE bytes at %o0 using 64-byte
	 * VIS block stores.  In: %o0 = dest, %o4 = cleanup flag; when
	 * %o4 != 0, %o2 holds the saved preempt count and the entry
	 * mapping %o0 is demapped on exit.
	 */
  72clear_page_common:
  73	VISEntryHalf
  74	membar		#StoreLoad | #StoreStore | #LoadStore
  75	fzero		%f0
  76	sethi		%hi(PAGE_SIZE/64), %o1
  77	mov		%o0, %g1		! remember vaddr for tlbflush
  78	fzero		%f2
	/* %o1 = PAGE_SIZE/64 = number of 64-byte block stores to issue. */
  79	or		%o1, %lo(PAGE_SIZE/64), %o1
	/* 0+0 and 0*0 propagate zero into %f4-%f14, so %f0-%f14 form one
	 * all-zero 64-byte register block for stda below.
	 */
  80	faddd		%f0, %f2, %f4
  81	fmuld		%f0, %f2, %f6
  82	faddd		%f0, %f2, %f8
  83	fmuld		%f0, %f2, %f10
  84
  85	faddd		%f0, %f2, %f12
  86	fmuld		%f0, %f2, %f14
  871:	stda		%f0, [%o0 + %g0] ASI_BLK_P
  88	subcc		%o1, 1, %o1
  89	bne,pt		%icc, 1b
  90	 add		%o0, 0x40, %o0
  91	membar		#Sync
  92	VISExitHalf
  93
	/* _clear_page path (%o4 == 0): nothing to demap. */
  94	brz,pn		%o4, out
  95	 nop
  96
	/* Demap the temporary entry (start vaddr saved in %g1), then
	 * restore the original preempt count, re-enabling preemption.
	 */
  97	stxa		%g0, [%g1] ASI_DMMU_DEMAP
  98	membar		#Sync
  99	stw		%o2, [%g6 + TI_PRE_COUNT]
 100
 101out:	retl
 102	 nop
 103
103