/*
 * NOTE(review): this listing was scraped from an online kernel source
 * browser. It contains TWO historical versions of the IA-64 cache-flush
 * routines (arch/ia64/lib/flush.S): first the Linux v3.15 version, then —
 * after the "v5.14.15" marker further down — the Linux v5.14.15 version.
 * The decimal prefixes on each line are browser line numbers, not code.
 */
/*
 * Cache flushing routines.
 *
 * Copyright (C) 1999-2001, 2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 05/28/05 Zoltan Menyhart	Dynamic stride size
 */
  9
 10#include <asm/asmmacro.h>
 
 11
 12
	/*
	 * flush_icache_range(start, end)
	 *
	 *	Make i-cache(s) coherent with d-caches.
	 *
	 *	Must deal with the range from start to end-1 but nothing else
	 *	(need to be careful not to touch addresses that may be unmapped).
	 *
	 *	Note: "in0" and "in1" are preserved for debugging purposes.
	 */
	.section .kprobes.text,"ax"	// kprobes must not probe a routine it needs for coherency
GLOBAL_ENTRY(flush_icache_range)

	.prologue
	alloc	r2=ar.pfs,2,0,0,0	// 2 input args: in0 = start, in1 = end
	movl	r3=ia64_i_cache_stride_shift
	mov	r21=1
	;;
	ld8	r20=[r3]		// r20: stride shift
	sub	r22=in1,r0,1		// r22: last byte address (end - 1)
	;;
	shr.u	r23=in0,r20		// start / (stride size)
	shr.u	r22=r22,r20		// (last byte address) / (stride size)
	shl	r21=r21,r20		// r21: stride size of the i-cache(s)
	;;
	sub	r8=r22,r23		// number of strides - 1
	shl	r24=r23,r20		// r24: addresses for "fc.i" =
					//	"start" rounded down to stride boundary
	.save	ar.lc,r3
	mov	r3=ar.lc		// save ar.lc (restored before return)
	;;

	.body
	mov	ar.lc=r8		// cloop executes r8+1 = "number of strides" iterations
	;;
	/*
	 * 32 byte aligned loop, even number of (actually 2) bundles
	 */
.Loop:	fc.i	r24			// make line coherent in i-caches; issuable on M0 only
	add	r24=r21,r24		// we flush "stride size" bytes per iteration
	nop.i	0
	br.cloop.sptk.few .Loop
	;;
	sync.i				// wait for the fc.i operations to become visible
	;;
	srlz.i				// serialize the instruction stream
	;;
	mov	ar.lc=r3		// restore ar.lc
	br.ret.sptk.many rp		// in0/in1 deliberately untouched (debugging aid)
END(flush_icache_range)
 
 63
	/*
	 * clflush_cache_range(start, size)
	 *
	 *	Flush cache lines from start to start+size-1.
	 *
	 *	Must deal with the range from start to start+size-1 but nothing
	 *	else (need to be careful not to touch addresses that may be
	 *	unmapped).
	 *
	 *	Note: "in0" and "in1" are preserved for debugging purposes.
	 */
	.section .kprobes.text,"ax"	// kprobes must not probe a routine it needs for coherency
GLOBAL_ENTRY(clflush_cache_range)

	.prologue
	alloc	r2=ar.pfs,2,0,0,0	// 2 input args: in0 = start, in1 = size
	movl	r3=ia64_cache_stride_shift
	mov	r21=1
	add	r22=in1,in0		// r22: end = start + size
	;;
	ld8	r20=[r3]		// r20: stride shift
	sub	r22=r22,r0,1		// r22: last byte address (end - 1)
	;;
	shr.u	r23=in0,r20		// start / (stride size)
	shr.u	r22=r22,r20		// (last byte address) / (stride size)
	shl	r21=r21,r20		// r21: flush stride size of the cache(s)
					//	(NOT the i-cache stride: this routine uses
					//	ia64_cache_stride_shift and "fc")
	;;
	sub	r8=r22,r23		// number of strides - 1
	shl	r24=r23,r20		// r24: addresses for "fc" =
					//	"start" rounded down to stride
					//	boundary
	.save	ar.lc,r3
	mov	r3=ar.lc		// save ar.lc (restored before return)
	;;

	.body
	mov	ar.lc=r8		// cloop executes r8+1 = "number of strides" iterations
	;;
	/*
	 * 32 byte aligned loop, even number of (actually 2) bundles
	 */
.Loop_fc:
	fc	r24		// flush & invalidate the line; issuable on M0 only
	add	r24=r21,r24	// we flush "stride size" bytes per iteration
	nop.i	0
	br.cloop.sptk.few .Loop_fc
	;;
	sync.i			// wait for the fc operations to complete
	;;
	srlz.i			// serialize the instruction stream
	;;
	mov	ar.lc=r3		// restore ar.lc
	br.ret.sptk.many rp		// in0/in1 deliberately untouched (debugging aid)
END(clflush_cache_range)
/* ---- Linux v5.14.15 version of the same file (arch/ia64/lib/flush.S) follows ---- */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Cache flushing routines.
 *
 * Copyright (C) 1999-2001, 2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 05/28/05 Zoltan Menyhart	Dynamic stride size
 */
 10
 11#include <asm/asmmacro.h>
 12#include <asm/export.h>
 13
 14
	/*
	 * flush_icache_range(start, end)
	 *
	 *	Make i-cache(s) coherent with d-caches.
	 *
	 *	Must deal with the range from start to end-1 but nothing else
	 *	(need to be careful not to touch addresses that may be unmapped).
	 *
	 *	Note: "in0" and "in1" are preserved for debugging purposes.
	 */
	.section .kprobes.text,"ax"	// kprobes must not probe a routine it needs for coherency
GLOBAL_ENTRY(flush_icache_range)

	.prologue
	alloc	r2=ar.pfs,2,0,0,0	// 2 input args: in0 = start, in1 = end
	movl	r3=ia64_i_cache_stride_shift
	mov	r21=1
	;;
	ld8	r20=[r3]		// r20: stride shift
	sub	r22=in1,r0,1		// r22: last byte address (end - 1)
	;;
	shr.u	r23=in0,r20		// start / (stride size)
	shr.u	r22=r22,r20		// (last byte address) / (stride size)
	shl	r21=r21,r20		// r21: stride size of the i-cache(s)
	;;
	sub	r8=r22,r23		// number of strides - 1
	shl	r24=r23,r20		// r24: addresses for "fc.i" =
					//	"start" rounded down to stride boundary
	.save	ar.lc,r3
	mov	r3=ar.lc		// save ar.lc (restored before return)
	;;

	.body
	mov	ar.lc=r8		// cloop executes r8+1 = "number of strides" iterations
	;;
	/*
	 * 32 byte aligned loop, even number of (actually 2) bundles
	 */
.Loop:	fc.i	r24			// make line coherent in i-caches; issuable on M0 only
	add	r24=r21,r24		// we flush "stride size" bytes per iteration
	nop.i	0
	br.cloop.sptk.few .Loop
	;;
	sync.i				// wait for the fc.i operations to become visible
	;;
	srlz.i				// serialize the instruction stream
	;;
	mov	ar.lc=r3		// restore ar.lc
	br.ret.sptk.many rp		// in0/in1 deliberately untouched (debugging aid)
END(flush_icache_range)
EXPORT_SYMBOL_GPL(flush_icache_range)	// needs <asm/export.h>, included at top of file
 66
	/*
	 * clflush_cache_range(start, size)
	 *
	 *	Flush cache lines from start to start+size-1.
	 *
	 *	Must deal with the range from start to start+size-1 but nothing
	 *	else (need to be careful not to touch addresses that may be
	 *	unmapped).
	 *
	 *	Note: "in0" and "in1" are preserved for debugging purposes.
	 */
	.section .kprobes.text,"ax"	// kprobes must not probe a routine it needs for coherency
GLOBAL_ENTRY(clflush_cache_range)

	.prologue
	alloc	r2=ar.pfs,2,0,0,0	// 2 input args: in0 = start, in1 = size
	movl	r3=ia64_cache_stride_shift
	mov	r21=1
	add	r22=in1,in0		// r22: end = start + size
	;;
	ld8	r20=[r3]		// r20: stride shift
	sub	r22=r22,r0,1		// r22: last byte address (end - 1)
	;;
	shr.u	r23=in0,r20		// start / (stride size)
	shr.u	r22=r22,r20		// (last byte address) / (stride size)
	shl	r21=r21,r20		// r21: flush stride size of the cache(s)
					//	(NOT the i-cache stride: this routine uses
					//	ia64_cache_stride_shift and "fc")
	;;
	sub	r8=r22,r23		// number of strides - 1
	shl	r24=r23,r20		// r24: addresses for "fc" =
					//	"start" rounded down to stride
					//	boundary
	.save	ar.lc,r3
	mov	r3=ar.lc		// save ar.lc (restored before return)
	;;

	.body
	mov	ar.lc=r8		// cloop executes r8+1 = "number of strides" iterations
	;;
	/*
	 * 32 byte aligned loop, even number of (actually 2) bundles
	 */
.Loop_fc:
	fc	r24		// flush & invalidate the line; issuable on M0 only
	add	r24=r21,r24	// we flush "stride size" bytes per iteration
	nop.i	0
	br.cloop.sptk.few .Loop_fc
	;;
	sync.i			// wait for the fc operations to complete
	;;
	srlz.i			// serialize the instruction stream
	;;
	mov	ar.lc=r3		// restore ar.lc
	br.ret.sptk.many rp		// in0/in1 deliberately untouched (debugging aid)
END(clflush_cache_range)