v3.1
/*
 *  linux/arch/arm/lib/copypage-xscale.S
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us thrashing
 * the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

/*
 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
 * specific hacks for copying pages efficiently.
 */
#define COPYPAGE_MINICACHE	0xffff8000

#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)

static DEFINE_SPINLOCK(minicache_lock);

/*
 * XScale mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 */
static void __naked
mc_copy_user_page(void *from, void *to)
{
	/*
	 * Strangely enough, best performance is achieved
	 * when prefetching destination as well.  (NP)
	 */
	asm volatile(
	"stmfd	sp!, {r4, r5, lr}		\n\
	mov	lr, %2				\n\
	pld	[r0, #0]			\n\
	pld	[r0, #32]			\n\
	pld	[r1, #0]			\n\
	pld	[r1, #32]			\n\
1:	pld	[r0, #64]			\n\
	pld	[r0, #96]			\n\
	pld	[r1, #64]			\n\
	pld	[r1, #96]			\n\
2:	ldrd	r2, [r0], #8			\n\
	ldrd	r4, [r0], #8			\n\
	mov	ip, r1				\n\
	strd	r2, [r1], #8			\n\
	ldrd	r2, [r0], #8			\n\
	strd	r4, [r1], #8			\n\
	ldrd	r4, [r0], #8			\n\
	strd	r2, [r1], #8			\n\
	strd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	ldrd	r2, [r0], #8			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	ldrd	r4, [r0], #8			\n\
	mov	ip, r1				\n\
	strd	r2, [r1], #8			\n\
	ldrd	r2, [r0], #8			\n\
	strd	r4, [r1], #8			\n\
	ldrd	r4, [r0], #8			\n\
	strd	r2, [r1], #8			\n\
	strd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	lr, lr, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bgt	1b				\n\
	beq	2b				\n\
	ldmfd	sp!, {r4, r5, pc}		"
	:
	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
}

void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto = kmap_atomic(to, KM_USER1);

	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

	spin_lock(&minicache_lock);

	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
	flush_tlb_kernel_page(COPYPAGE_MINICACHE);

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	spin_unlock(&minicache_lock);

	kunmap_atomic(kto, KM_USER1);
}

/*
 * XScale optimised clear_user_page
 */
void
xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
	asm volatile(
	"mov	r1, %2				\n\
	mov	r2, #0				\n\
	mov	r3, #0				\n\
1:	mov	ip, %0				\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	r1, r1, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bne	1b"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 32)
	: "r1", "r2", "r3", "ip");
	kunmap_atomic(kaddr, KM_USER0);
}

struct cpu_user_fns xscale_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = xscale_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= xscale_mc_copy_user_highpage,
};
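
The xscale_mc_user_fns table above is the hook through which this file's code is reached: the generic ARM highmem copy and clear helpers dispatch through a per-CPU function table that is filled in once at boot from the processor's proc_info entry. A rough sketch of the consuming side, patterned from memory on the MULTI_USER wiring in arch/arm/include/asm/page.h (treat the exact names and macros as illustrative, not a quotation of either kernel version shown here):

/*
 * Sketch only: approximate shape of the MULTI_USER dispatch in
 * arch/arm/include/asm/page.h.  Names are from memory, not copied
 * from the v3.1 or v5.9 trees.
 */
struct page;
struct vm_area_struct;

struct cpu_user_fns {
	void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma);
};

extern struct cpu_user_fns cpu_user;	/* set at boot from proc_info */

#define __cpu_clear_user_highpage	cpu_user.cpu_clear_user_highpage
#define __cpu_copy_user_highpage	cpu_user.cpu_copy_user_highpage

#define clear_user_highpage(page, vaddr)	\
	__cpu_clear_user_highpage(page, vaddr)

#define copy_user_highpage(to, from, vaddr, vma)	\
	__cpu_copy_user_highpage(to, from, vaddr, vma)

Because the selection happens once at boot, this file only needs to export the two callbacks and the __initdata table; core mm code never calls it directly.
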
v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/lib/copypage-xscale.S
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us thrashing
 * the main data cache on page faults.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)

static DEFINE_RAW_SPINLOCK(minicache_lock);

/*
 * XScale mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 */
static void mc_copy_user_page(void *from, void *to)
{
	int tmp;

	/*
	 * Strangely enough, best performance is achieved
	 * when prefetching destination as well.  (NP)
	 */
	asm volatile ("\
.arch xscale					\n\
	pld	[%0, #0]			\n\
	pld	[%0, #32]			\n\
	pld	[%1, #0]			\n\
	pld	[%1, #32]			\n\
1:	pld	[%0, #64]			\n\
	pld	[%0, #96]			\n\
	pld	[%1, #64]			\n\
	pld	[%1, #96]			\n\
2:	ldrd	r2, r3, [%0], #8		\n\
	ldrd	r4, r5, [%0], #8		\n\
	mov	ip, %1				\n\
	strd	r2, r3, [%1], #8		\n\
	ldrd	r2, r3, [%0], #8		\n\
	strd	r4, r5, [%1], #8		\n\
	ldrd	r4, r5, [%0], #8		\n\
	strd	r2, r3, [%1], #8		\n\
	strd	r4, r5, [%1], #8		\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	ldrd	r2, r3, [%0], #8		\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	ldrd	r4, r5, [%0], #8		\n\
	mov	ip, %1				\n\
	strd	r2, r3, [%1], #8		\n\
	ldrd	r2, r3, [%0], #8		\n\
	strd	r4, r5, [%1], #8		\n\
	ldrd	r4, r5, [%0], #8		\n\
	strd	r2, r3, [%1], #8		\n\
	strd	r4, r5, [%1], #8		\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	%2, %2, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bgt	1b				\n\
	beq	2b				"
	: "+&r" (from), "+&r" (to), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64 - 1)
	: "r2", "r3", "r4", "r5", "ip");
}

void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto = kmap_atomic(to);

	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping_file(from), from);

	raw_spin_lock(&minicache_lock);

	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	raw_spin_unlock(&minicache_lock);

	kunmap_atomic(kto);
}

/*
 * XScale optimised clear_user_page
 */
void
xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
	asm volatile("\
.arch xscale					\n\
	mov	r1, %2				\n\
	mov	r2, #0				\n\
	mov	r3, #0				\n\
1:	mov	ip, %0				\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	strd	r2, r3, [%0], #8		\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	r1, r1, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bne	1b"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 32)
	: "r1", "r2", "r3", "ip");
	kunmap_atomic(kaddr);
}

struct cpu_user_fns xscale_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = xscale_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= xscale_mc_copy_user_highpage,
};
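
The v5.9 code does the same job as the v3.1 code; the visible differences are mechanical. minicache_lock is now a raw spinlock, which keeps spinning even in configurations where ordinary spinlocks may sleep. The inline assembly carries an explicit .arch xscale directive and ldrd/strd forms that name both registers of each pair, which stricter assemblers expect, and the __naked wrapper with hard-coded r0/r1 has given way to ordinary "+&r"/"=&r" operand constraints plus a clobber list so the compiler allocates the registers. Finally, the open-coded set_pte_ext(TOP_PTE(...)) and flush_tlb_kernel_page() pair is folded into set_top_pte(), and the COPYPAGE_MINICACHE constant no longer appears in this file (it comes in via the local mm.h include). A minimal sketch of roughly what set_top_pte() does, assuming the helper defined in arch/arm/mm/mm.h; the body here is illustrative, not reproduced from the v5.9 tree:

/*
 * Illustrative only: approximate body of set_top_pte() as defined in
 * arch/arm/mm/mm.h.  It installs a PTE in one of the reserved
 * top-of-address-space slots and flushes the stale TLB entry for that
 * kernel virtual address.
 */
static inline void set_top_pte(unsigned long va, pte_t pte)
{
	pte_t *ptep = pte_offset_kernel(top_pmd, va);

	set_pte_ext(ptep, pte, 0);
	local_flush_tlb_kernel_page(va);
}

Either way the sequence in xscale_mc_copy_user_highpage() is the same: map the source page at COPYPAGE_MINICACHE with the mini-cache memory type, flush the stale TLB entry, run the assembly copy loop, then drop the mapping lock and the atomic kmap of the destination.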