arch/ia64/lib/flush.S — IA-64 (Itanium) cache-flushing routines from the Linux kernel.
Two revisions of the file follow: first the file as of kernel v5.14.15, then the
same file as of kernel v4.17. (Source-browser navigation text has been removed.)
  1/* SPDX-License-Identifier: GPL-2.0-only */
  2/*
  3 * Cache flushing routines.
  4 *
  5 * Copyright (C) 1999-2001, 2005 Hewlett-Packard Co
  6 *	David Mosberger-Tang <davidm@hpl.hp.com>
  7 *
  8 * 05/28/05 Zoltan Menyhart	Dynamic stride size
  9 */
 10
 11#include <asm/asmmacro.h>
 12#include <asm/export.h>
 13
 14
 15	/*
 16	 * flush_icache_range(start,end)
 17	 *
 18	 *	Make i-cache(s) coherent with d-caches.
 19	 *
 20	 *	Must deal with range from start to end-1 but nothing else (need to
 21	 *	be careful not to touch addresses that may be unmapped).
 22	 *
 23	 *	Note: "in0" and "in1" are preserved for debugging purposes.
 24	 */
 25	.section .kprobes.text,"ax"
  26GLOBAL_ENTRY(flush_icache_range)
  27
  28	.prologue
  29	alloc	r2=ar.pfs,2,0,0,0	// frame: 2 inputs (in0=start, in1=end), no locals/outputs
  30	movl	r3=ia64_i_cache_stride_shift	// r3 = &ia64_i_cache_stride_shift
  31 	mov	r21=1			// seed for stride size (becomes 1 << shift)
  32	;;
  33	ld8	r20=[r3]		// r20: stride shift
  34	sub	r22=in1,r0,1		// last byte address (= end - 0 - 1)
  35	;;
  36	shr.u	r23=in0,r20		// start / (stride size)
  37	shr.u	r22=r22,r20		// (last byte address) / (stride size)
  38	shl	r21=r21,r20		// r21: stride size of the i-cache(s)
  39	;;
  40	sub	r8=r22,r23		// number of strides - 1
  41	shl	r24=r23,r20		// r24: addresses for "fc.i" =
  42					//	"start" rounded down to stride boundary
  43	.save	ar.lc,r3
  44	mov	r3=ar.lc		// save ar.lc (clobbered by the counted loop below)
  45	;;
  46
  47	.body
  48	mov	ar.lc=r8		// counted-loop trip count = strides - 1
  49	;;
  50	/*
  51	 * 32 byte aligned loop, even number of (actually 2) bundles
  52	 */
  53.Loop:	fc.i	r24			// issuable on M0 only
  54	add	r24=r21,r24		// we flush "stride size" bytes per iteration
  55	nop.i	0
  56	br.cloop.sptk.few .Loop		// decrement ar.lc, loop while non-zero
  57	;;
  58	sync.i				// ensure the fc.i flushes have completed
  59	;;
  60	srlz.i				// serialize: make the flushes visible to instruction fetch
  61	;;
  62	mov	ar.lc=r3		// restore ar.lc
  63	br.ret.sptk.many rp		// in0/in1 left untouched for debugging (see header)
  64END(flush_icache_range)
  65EXPORT_SYMBOL_GPL(flush_icache_range)
 66
 67	/*
 68	 * clflush_cache_range(start,size)
 69	 *
 70	 *	Flush cache lines from start to start+size-1.
 71	 *
 72	 *	Must deal with range from start to start+size-1 but nothing else
 73	 *	(need to be careful not to touch addresses that may be
 74	 *	unmapped).
 75	 *
 76	 *	Note: "in0" and "in1" are preserved for debugging purposes.
 77	 */
 78	.section .kprobes.text,"ax"
  79GLOBAL_ENTRY(clflush_cache_range)
  80
  81	.prologue
  82	alloc	r2=ar.pfs,2,0,0,0	// frame: 2 inputs (in0=start, in1=size), no locals/outputs
  83	movl	r3=ia64_cache_stride_shift	// r3 = &ia64_cache_stride_shift
  84	mov	r21=1			// seed for stride size (becomes 1 << shift)
  85	add     r22=in1,in0		// r22 = start + size (one past the range)
  86	;;
  87	ld8	r20=[r3]		// r20: stride shift
  88	sub	r22=r22,r0,1		// last byte address (= start + size - 1)
  89	;;
  90	shr.u	r23=in0,r20		// start / (stride size)
  91	shr.u	r22=r22,r20		// (last byte address) / (stride size)
  92	shl	r21=r21,r20		// r21: cache stride size (comment fixed: this is the d-side flush, not i-cache)
  93	;;
  94	sub	r8=r22,r23		// number of strides - 1
  95	shl	r24=r23,r20		// r24: addresses for "fc" =
  96					//	"start" rounded down to stride
  97					//	boundary
  98	.save	ar.lc,r3
  99	mov	r3=ar.lc		// save ar.lc (clobbered by the counted loop below)
 100	;;
 101
 102	.body
 103	mov	ar.lc=r8		// counted-loop trip count = strides - 1
 104	;;
 105	/*
 106	 * 32 byte aligned loop, even number of (actually 2) bundles
 107	 */
 108.Loop_fc:
 109	fc	r24		// issuable on M0 only
 110	add	r24=r21,r24	// we flush "stride size" bytes per iteration
 111	nop.i	0
 112	br.cloop.sptk.few .Loop_fc	// decrement ar.lc, loop while non-zero
 113	;;
 114	sync.i			// ensure the fc flushes have completed
 115	;;
 116	srlz.i			// serialize the flushes
 117	;;
 118	mov	ar.lc=r3		// restore ar.lc
 119	br.ret.sptk.many rp		// in0/in1 left untouched for debugging (see header)
 120END(clflush_cache_range)
The second copy of the same file follows, as of kernel v4.17 (before the
SPDX license identifier line was added to the header).
  1/*
  2 * Cache flushing routines.
  3 *
  4 * Copyright (C) 1999-2001, 2005 Hewlett-Packard Co
  5 *	David Mosberger-Tang <davidm@hpl.hp.com>
  6 *
  7 * 05/28/05 Zoltan Menyhart	Dynamic stride size
  8 */
  9
 10#include <asm/asmmacro.h>
 11#include <asm/export.h>
 12
 13
 14	/*
 15	 * flush_icache_range(start,end)
 16	 *
 17	 *	Make i-cache(s) coherent with d-caches.
 18	 *
 19	 *	Must deal with range from start to end-1 but nothing else (need to
 20	 *	be careful not to touch addresses that may be unmapped).
 21	 *
 22	 *	Note: "in0" and "in1" are preserved for debugging purposes.
 23	 */
 24	.section .kprobes.text,"ax"
  25GLOBAL_ENTRY(flush_icache_range)
  26
  27	.prologue
  28	alloc	r2=ar.pfs,2,0,0,0	// frame: 2 inputs (in0=start, in1=end), no locals/outputs
  29	movl	r3=ia64_i_cache_stride_shift	// r3 = &ia64_i_cache_stride_shift
  30 	mov	r21=1			// seed for stride size (becomes 1 << shift)
  31	;;
  32	ld8	r20=[r3]		// r20: stride shift
  33	sub	r22=in1,r0,1		// last byte address (= end - 0 - 1)
  34	;;
  35	shr.u	r23=in0,r20		// start / (stride size)
  36	shr.u	r22=r22,r20		// (last byte address) / (stride size)
  37	shl	r21=r21,r20		// r21: stride size of the i-cache(s)
  38	;;
  39	sub	r8=r22,r23		// number of strides - 1
  40	shl	r24=r23,r20		// r24: addresses for "fc.i" =
  41					//	"start" rounded down to stride boundary
  42	.save	ar.lc,r3
  43	mov	r3=ar.lc		// save ar.lc (clobbered by the counted loop below)
  44	;;
  45
  46	.body
  47	mov	ar.lc=r8		// counted-loop trip count = strides - 1
  48	;;
  49	/*
  50	 * 32 byte aligned loop, even number of (actually 2) bundles
  51	 */
  52.Loop:	fc.i	r24			// issuable on M0 only
  53	add	r24=r21,r24		// we flush "stride size" bytes per iteration
  54	nop.i	0
  55	br.cloop.sptk.few .Loop		// decrement ar.lc, loop while non-zero
  56	;;
  57	sync.i				// ensure the fc.i flushes have completed
  58	;;
  59	srlz.i				// serialize: make the flushes visible to instruction fetch
  60	;;
  61	mov	ar.lc=r3		// restore ar.lc
  62	br.ret.sptk.many rp		// in0/in1 left untouched for debugging (see header)
  63END(flush_icache_range)
  64EXPORT_SYMBOL_GPL(flush_icache_range)
 65
 66	/*
 67	 * clflush_cache_range(start,size)
 68	 *
 69	 *	Flush cache lines from start to start+size-1.
 70	 *
 71	 *	Must deal with range from start to start+size-1 but nothing else
 72	 *	(need to be careful not to touch addresses that may be
 73	 *	unmapped).
 74	 *
 75	 *	Note: "in0" and "in1" are preserved for debugging purposes.
 76	 */
 77	.section .kprobes.text,"ax"
  78GLOBAL_ENTRY(clflush_cache_range)
  79
  80	.prologue
  81	alloc	r2=ar.pfs,2,0,0,0	// frame: 2 inputs (in0=start, in1=size), no locals/outputs
  82	movl	r3=ia64_cache_stride_shift	// r3 = &ia64_cache_stride_shift
  83	mov	r21=1			// seed for stride size (becomes 1 << shift)
  84	add     r22=in1,in0		// r22 = start + size (one past the range)
  85	;;
  86	ld8	r20=[r3]		// r20: stride shift
  87	sub	r22=r22,r0,1		// last byte address (= start + size - 1)
  88	;;
  89	shr.u	r23=in0,r20		// start / (stride size)
  90	shr.u	r22=r22,r20		// (last byte address) / (stride size)
  91	shl	r21=r21,r20		// r21: cache stride size (comment fixed: this is the d-side flush, not i-cache)
  92	;;
  93	sub	r8=r22,r23		// number of strides - 1
  94	shl	r24=r23,r20		// r24: addresses for "fc" =
  95					//	"start" rounded down to stride
  96					//	boundary
  97	.save	ar.lc,r3
  98	mov	r3=ar.lc		// save ar.lc (clobbered by the counted loop below)
  99	;;
 100
 101	.body
 102	mov	ar.lc=r8		// counted-loop trip count = strides - 1
 103	;;
 104	/*
 105	 * 32 byte aligned loop, even number of (actually 2) bundles
 106	 */
 107.Loop_fc:
 108	fc	r24		// issuable on M0 only
 109	add	r24=r21,r24	// we flush "stride size" bytes per iteration
 110	nop.i	0
 111	br.cloop.sptk.few .Loop_fc	// decrement ar.lc, loop while non-zero
 112	;;
 113	sync.i			// ensure the fc flushes have completed
 114	;;
 115	srlz.i			// serialize the flushes
 116	;;
 117	mov	ar.lc=r3		// restore ar.lc
 118	br.ret.sptk.many rp		// in0/in1 left untouched for debugging (see header)
 119END(clflush_cache_range)