arch/s390/include/asm/tlbflush.h at v6.2
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	unsigned long opt;

	opt = IDTE_PTOA;
	if (MACHINE_HAS_TLB_GUEST)
		opt |= IDTE_GUEST_ASCE;
	/* Global TLB flush for the mm */
	asm volatile("idte 0,%1,%0" : : "a" (opt), "a" (asce) : "cc");
}

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	unsigned int dummy = 0;

	/* The always-equal compare on the dummy word triggers the purge */
	csp(&dummy, 0, 0);
}

/*
 * Flush TLB entries for a specific mm on all CPUs (if a gmap is in use
 * this implies multiple ASCEs!).
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	unsigned long gmap_asce;

	/*
	 * If the machine has IDTE, we prefer a per-mm flush on all CPUs
	 * over a local flush, even if the mm only ran on the local CPU.
	 */
	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	/* Reset TLB flush mask */
	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	barrier();
	gmap_asce = READ_ONCE(mm->context.gmap_asce);
	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
		if (gmap_asce)
			__tlb_flush_idte(gmap_asce);
		__tlb_flush_idte(mm->context.asce);
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}

static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(init_mm.context.asce);
	else
		__tlb_flush_global();
}

static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	spin_lock(&mm->context.lock);
	if (mm->context.flush_mm) {
		mm->context.flush_mm = 0;
		__tlb_flush_mm(mm);
	}
	spin_unlock(&mm->context.lock);
}

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context's TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation, and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}

#endif /* _S390_TLBFLUSH_H */
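The interesting decision in __tlb_flush_mm() above is driven by mm->context.gmap_asce: 0 means no guest (gmap) ASCE is attached, a real ASCE value means the guest ASCE must be flushed in addition to the mm's own ASCE, and -1UL makes the code fall back to a global CSP flush, presumably because the set of guest ASCEs can no longer be enumerated. Below is a minimal user-space sketch of that decision only; pick_flush(), enum flush_kind and the has_idte flag are hypothetical names used for illustration, standing in for MACHINE_HAS_IDTE and the real flush primitives.

#include <stdbool.h>
#include <stdio.h>

enum flush_kind {
	FLUSH_GLOBAL,           /* csp(): flush all TLB entries on all CPUs      */
	FLUSH_IDTE_MM,          /* idte on the mm's own ASCE only                */
	FLUSH_IDTE_GMAP_AND_MM, /* idte on the guest ASCE, then on the mm's ASCE */
};

/* Mirrors the if/else cascade in __tlb_flush_mm() */
static enum flush_kind pick_flush(bool has_idte, unsigned long gmap_asce)
{
	/* -1UL: guest ASCEs unknown, only a global flush is safe */
	if (!has_idte || gmap_asce == -1UL)
		return FLUSH_GLOBAL;
	/* 0: no guest ASCE attached; anything else is flushed as well */
	return gmap_asce ? FLUSH_IDTE_GMAP_AND_MM : FLUSH_IDTE_MM;
}

int main(void)
{
	printf("%d\n", pick_flush(true, 0));      /* 1: FLUSH_IDTE_MM          */
	printf("%d\n", pick_flush(true, 0x2000)); /* 2: FLUSH_IDTE_GMAP_AND_MM */
	printf("%d\n", pick_flush(true, -1UL));   /* 0: FLUSH_GLOBAL           */
	printf("%d\n", pick_flush(false, 0));     /* 0: FLUSH_GLOBAL           */
	return 0;
}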
arch/s390/include/asm/tlbflush.h at v6.13.7
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	unsigned long opt;

	opt = IDTE_PTOA;
	if (MACHINE_HAS_TLB_GUEST)
		opt |= IDTE_GUEST_ASCE;
	/* Global TLB flush for the mm */
	asm volatile("idte 0,%1,%0" : : "a" (opt), "a" (asce) : "cc");
}

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	unsigned int dummy = 0;

	/* The always-equal compare on the dummy word triggers the purge */
	csp(&dummy, 0, 0);
}

/*
 * Flush TLB entries for a specific mm on all CPUs (if a gmap is in use
 * this implies multiple ASCEs!).
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	unsigned long gmap_asce;

	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	/* Reset TLB flush mask */
	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	barrier();
	gmap_asce = READ_ONCE(mm->context.gmap_asce);
	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
		if (gmap_asce)
			__tlb_flush_idte(gmap_asce);
		__tlb_flush_idte(mm->context.asce);
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}

static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(init_mm.context.asce);
	else
		__tlb_flush_global();
}

static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	spin_lock(&mm->context.lock);
	if (mm->context.flush_mm) {
		mm->context.flush_mm = 0;
		__tlb_flush_mm(mm);
	}
	spin_unlock(&mm->context.lock);
}

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context's TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation, and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}

#endif /* _S390_TLBFLUSH_H */
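Note that flush_tlb_mm() and flush_tlb_range() above do not flush unconditionally: they go through __tlb_flush_mm_lazy(), which only issues the expensive flush if an earlier PTE update (ptep_set_wrprotect() or ptep_get_and_clear(), per the comment in the listing) deferred its flush, presumably by setting mm->context.flush_mm, which is the flag __tlb_flush_mm_lazy() checks. A tiny stand-alone model of that deferral pattern is sketched below; mm_model, defer_flush() and flush_mm_lazy() are hypothetical names, and the counter merely stands in for a real __tlb_flush_mm() call.

#include <stdbool.h>
#include <stdio.h>

struct mm_model {
	bool flush_mm;    /* a PTE update skipped its TLB flush          */
	int  tlb_flushes; /* number of (expensive) flushes actually done */
};

/* Called by a PTE updater that chose not to flush immediately. */
static void defer_flush(struct mm_model *mm)
{
	mm->flush_mm = true;
}

/* Mirrors __tlb_flush_mm_lazy(): flush only if a flush is pending. */
static void flush_mm_lazy(struct mm_model *mm)
{
	if (mm->flush_mm) {
		mm->flush_mm = false;
		mm->tlb_flushes++; /* stands in for __tlb_flush_mm(mm) */
	}
}

int main(void)
{
	struct mm_model mm = { 0 };

	defer_flush(&mm);
	defer_flush(&mm);
	flush_mm_lazy(&mm); /* one real flush covers both deferred updates */
	flush_mm_lazy(&mm); /* nothing pending, nothing flushed            */
	printf("flushes performed: %d\n", mm.tlb_flushes); /* prints 1 */
	return 0;
}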