Linux Audio

Check our new training course

Loading...
v3.15
 
  1#ifndef _ASM_IA64_TLBFLUSH_H
  2#define _ASM_IA64_TLBFLUSH_H
  3
  4/*
  5 * Copyright (C) 2002 Hewlett-Packard Co
  6 *	David Mosberger-Tang <davidm@hpl.hp.com>
  7 */
  8
  9
 10#include <linux/mm.h>
 11
 12#include <asm/intrinsics.h>
 13#include <asm/mmu_context.h>
 14#include <asm/page.h>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 15
 16/*
 17 * Now for some TLB flushing routines.  This is the kind of stuff that
 18 * can be very expensive, so try to avoid them whenever possible.
 19 */
 20extern void setup_ptcg_sem(int max_purges, int from_palo);
 21
 22/*
 23 * Flush everything (kernel mapping may also have changed due to
 24 * vmalloc/vfree).
 25 */
 26extern void local_flush_tlb_all (void);
 27
 28#ifdef CONFIG_SMP
 29  extern void smp_flush_tlb_all (void);
 30  extern void smp_flush_tlb_mm (struct mm_struct *mm);
 31  extern void smp_flush_tlb_cpumask (cpumask_t xcpumask);
 32# define flush_tlb_all()	smp_flush_tlb_all()
 33#else
 34# define flush_tlb_all()	local_flush_tlb_all()
 35# define smp_flush_tlb_cpumask(m) local_flush_tlb_all()
 36#endif
 37
 38static inline void
 39local_finish_flush_tlb_mm (struct mm_struct *mm)
 40{
 41	if (mm == current->active_mm)
 42		activate_context(mm);
 43}
 44
 45/*
 46 * Flush a specified user mapping.  This is called, e.g., as a result of fork() and
 47 * exit().  fork() ends up here because the copy-on-write mechanism needs to write-protect
 48 * the PTEs of the parent task.
 49 */
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
	if (!mm)
		return;

	/*
	 * Record this mm's context number in the global flush bitmap and
	 * clear the mm's context, presumably so a fresh context is
	 * allocated the next time the mm is activated (machinery lives in
	 * <asm/mmu_context.h> — confirm against activate_context()).
	 * NOTE(review): the order (set_bit before clearing mm->context)
	 * looks deliberate; do not reorder.
	 */
	set_bit(mm->context, ia64_ctx.flushmap);
	mm->context = 0;

	/* No remaining users: exit_mmap() path, nothing left to flush. */
	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

#ifdef CONFIG_SMP
	/* Propagate the flush to every CPU that may cache this mm. */
	smp_flush_tlb_mm(mm);
#else
	/* UP: only the local CPU can hold stale translations. */
	local_finish_flush_tlb_mm(mm);
#endif
}
 68
 69extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);
 70
 71/*
 72 * Page-granular tlb flush.
 73 */
 74static inline void
 75flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
 76{
 77#ifdef CONFIG_SMP
 78	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
 79#else
 80	if (vma->vm_mm == current->active_mm)
 81		ia64_ptcl(addr, (PAGE_SHIFT << 2));
 82	else
 83		vma->vm_mm->context = 0;
 84#endif
 85}
 86
 87/*
 88 * Flush the local TLB. Invoked from another cpu using an IPI.
 89 */
 90#ifdef CONFIG_SMP
 91void smp_local_flush_tlb(void);
 92#else
 93#define smp_local_flush_tlb()
 94#endif
 95
 96static inline void flush_tlb_kernel_range(unsigned long start,
 97					  unsigned long end)
 98{
 99	flush_tlb_all();	/* XXX fix me */
100}
101
102#endif /* _ASM_IA64_TLBFLUSH_H */
v6.2
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef _ASM_IA64_TLBFLUSH_H
  3#define _ASM_IA64_TLBFLUSH_H
  4
  5/*
  6 * Copyright (C) 2002 Hewlett-Packard Co
  7 *	David Mosberger-Tang <davidm@hpl.hp.com>
  8 */
  9
 10
 11#include <linux/mm.h>
 12
 13#include <asm/intrinsics.h>
 14#include <asm/mmu_context.h>
 15#include <asm/page.h>
 16
/*
 * Saved state for one translation register (TR) entry; see
 * ia64_itr_entry()/ia64_ptr_entry() below.  Field names mirror the
 * ia64 registers involved in a TR insertion — exact semantics live in
 * the architecture manual (TODO confirm per-field meanings):
 */
struct ia64_tr_entry {
	u64 ifa;	/* presumably the virtual address the TR maps */
	u64 itir;	/* presumably page size/attribute bits (itir) */
	u64 pte;	/* presumably the inserted translation (pte) */
	u64 rr;		/* presumably the region register value used */
}; /* Record for one saved TR entry. */
 23
 24extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
 25extern void ia64_ptr_entry(u64 target_mask, int slot);
 26extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 27
/*
 * Region register (RR) field accessor macros.
 */
 31#define RR_TO_VE(val)   (((val) >> 0) & 0x0000000000000001)
 32#define RR_VE(val)     (((val) & 0x0000000000000001) << 0)
 33#define RR_VE_MASK     0x0000000000000001L
 34#define RR_VE_SHIFT    0
 35#define RR_TO_PS(val)  (((val) >> 2) & 0x000000000000003f)
 36#define RR_PS(val)     (((val) & 0x000000000000003f) << 2)
 37#define RR_PS_MASK     0x00000000000000fcL
 38#define RR_PS_SHIFT    2
 39#define RR_RID_MASK    0x00000000ffffff00L
 40#define RR_TO_RID(val)         ((val >> 8) & 0xffffff)
 41
 42/*
 43 * Now for some TLB flushing routines.  This is the kind of stuff that
 44 * can be very expensive, so try to avoid them whenever possible.
 45 */
 46extern void setup_ptcg_sem(int max_purges, int from_palo);
 47
 48/*
 49 * Flush everything (kernel mapping may also have changed due to
 50 * vmalloc/vfree).
 51 */
 52extern void local_flush_tlb_all (void);
 53
 54#ifdef CONFIG_SMP
 55  extern void smp_flush_tlb_all (void);
 56  extern void smp_flush_tlb_mm (struct mm_struct *mm);
 57  extern void smp_flush_tlb_cpumask (cpumask_t xcpumask);
 58# define flush_tlb_all()	smp_flush_tlb_all()
 59#else
 60# define flush_tlb_all()	local_flush_tlb_all()
 61# define smp_flush_tlb_cpumask(m) local_flush_tlb_all()
 62#endif
 63
 64static inline void
 65local_finish_flush_tlb_mm (struct mm_struct *mm)
 66{
 67	if (mm == current->active_mm)
 68		activate_context(mm);
 69}
 70
 71/*
 72 * Flush a specified user mapping.  This is called, e.g., as a result of fork() and
 73 * exit().  fork() ends up here because the copy-on-write mechanism needs to write-protect
 74 * the PTEs of the parent task.
 75 */
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
	if (!mm)
		return;

	/*
	 * Record this mm's context number in the global flush bitmap and
	 * clear the mm's context, presumably so a fresh context is
	 * allocated the next time the mm is activated (machinery lives in
	 * <asm/mmu_context.h> — confirm against activate_context()).
	 * NOTE(review): the order (set_bit before clearing mm->context)
	 * looks deliberate; do not reorder.
	 */
	set_bit(mm->context, ia64_ctx.flushmap);
	mm->context = 0;

	/* No remaining users: exit_mmap() path, nothing left to flush. */
	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

#ifdef CONFIG_SMP
	/* Propagate the flush to every CPU that may cache this mm. */
	smp_flush_tlb_mm(mm);
#else
	/* UP: only the local CPU can hold stale translations. */
	local_finish_flush_tlb_mm(mm);
#endif
}
 94
 95extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);
 96
 97/*
 98 * Page-granular tlb flush.
 99 */
100static inline void
101flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
102{
103#ifdef CONFIG_SMP
104	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
105#else
106	if (vma->vm_mm == current->active_mm)
107		ia64_ptcl(addr, (PAGE_SHIFT << 2));
108	else
109		vma->vm_mm->context = 0;
110#endif
111}
112
113/*
114 * Flush the local TLB. Invoked from another cpu using an IPI.
115 */
116#ifdef CONFIG_SMP
117void smp_local_flush_tlb(void);
118#else
119#define smp_local_flush_tlb()
120#endif
121
/*
 * Flush kernel mappings in [start, end).  No page-granular purge of the
 * kernel mapping is implemented here, so fall back to flushing
 * everything.
 */
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_all();	/* XXX fix me */
}
127
128#endif /* _ASM_IA64_TLBFLUSH_H */