/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * TLB shootdown specifics for powerpc
 *
 * Copyright (C) 2002 Anton Blanchard, IBM Corp.
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 */
#ifndef _ASM_POWERPC_TLB_H
#define _ASM_POWERPC_TLB_H
#ifdef __KERNEL__

#ifndef __powerpc64__
#include <linux/pgtable.h>
#endif
#ifndef __powerpc64__
#include <asm/page.h>
#include <asm/mmu.h>
#endif

#include <linux/pagemap.h>

static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					   unsigned long address);
#define __tlb_remove_tlb_entry	__tlb_remove_tlb_entry

#define tlb_flush tlb_flush
extern void tlb_flush(struct mmu_gather *tlb);
/*
 * book3s:
 * Hash does not use the linux page-tables, so we can avoid
 * the TLB invalidate for page-table freeing; Radix, on the other hand,
 * does use the page-tables and needs the TLBI.
 *
 * nohash:
 * We still do a TLB invalidate in the __pte_free_tlb routine before we
 * add the page-table pages to the mmu gather table batch.
 */
#define tlb_needs_table_invalidate()	radix_enabled()
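
/*
 * Illustrative sketch (paraphrased from <asm-generic/tlb.h>, not part of
 * this header): the generic mmu_gather code consumes the hook above
 * roughly as
 *
 *	static inline void tlb_table_invalidate(struct mmu_gather *tlb)
 *	{
 *		if (tlb_needs_table_invalidate())
 *			tlb_flush_mmu_tlbonly(tlb);
 *	}
 *
 * so with a hash MMU (radix_enabled() == false) the extra flush when
 * freeing page-table pages is skipped, while radix still gets the TLBI.
 */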

/* Get the generic bits... */
#include <asm-generic/tlb.h>

static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					   unsigned long address)
{
#ifdef CONFIG_PPC_BOOK3S_32
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(tlb->mm, ptep, address);
#endif
}
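
/*
 * Illustrative sketch (paraphrased from <asm-generic/tlb.h>, not part of
 * this header): the generic tlb_remove_tlb_entry() wrapper invokes the
 * hook above after recording the flushed range, roughly
 *
 *	#define tlb_remove_tlb_entry(tlb, ptep, address)		\
 *		do {							\
 *			tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
 *			__tlb_remove_tlb_entry(tlb, ptep, address);	\
 *		} while (0)
 *
 * which on 32-bit book3s gives the hash code a chance to invalidate the
 * hash-table entry shadowing *ptep via flush_hash_entry().
 */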

#ifdef CONFIG_SMP
static inline int mm_is_core_local(struct mm_struct *mm)
{
	return cpumask_subset(mm_cpumask(mm),
			      topology_sibling_cpumask(smp_processor_id()));
}
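/*
 * Commentary: mm_is_core_local() is true when every CPU that has used this
 * mm is a hardware thread of the current core, so the TLB flush code can
 * use core-scoped invalidations (e.g. local tlbiel instead of broadcast
 * tlbie).
 */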

#ifdef CONFIG_PPC_BOOK3S_64
static inline int mm_is_thread_local(struct mm_struct *mm)
{
	if (atomic_read(&mm->context.active_cpus) > 1)
		return false;
	return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
}
#else /* CONFIG_PPC_BOOK3S_64 */
static inline int mm_is_thread_local(struct mm_struct *mm)
{
	return cpumask_equal(mm_cpumask(mm),
			     cpumask_of(smp_processor_id()));
}
#endif /* !CONFIG_PPC_BOOK3S_64 */
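/*
 * Commentary: book3s64 keeps an active_cpus count in the mm context, so
 * "thread local" can be decided without comparing full cpumasks; other
 * platforms fall back to checking that mm_cpumask() is exactly the current
 * CPU. Callers use this to pick a CPU-local TLB invalidation over a
 * broadcast one.
 */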

#else /* CONFIG_SMP */
static inline int mm_is_core_local(struct mm_struct *mm)
{
	return 1;
}

static inline int mm_is_thread_local(struct mm_struct *mm)
{
	return 1;
}
#endif

#define arch_supports_page_table_move arch_supports_page_table_move
static inline bool arch_supports_page_table_move(void)
{
	return radix_enabled();
}
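/*
 * Commentary: mm/mremap.c checks arch_supports_page_table_move() before
 * taking the optimised move_normal_pmd()/move_normal_pud() paths that
 * relocate whole page-table pages during mremap(); on powerpc only radix
 * opts in to that optimisation.
 */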

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TLB_H */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * TLB shootdown specifics for powerpc
 *
 * Copyright (C) 2002 Anton Blanchard, IBM Corp.
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 */
#ifndef _ASM_POWERPC_TLB_H
#define _ASM_POWERPC_TLB_H
#ifdef __KERNEL__

#ifndef __powerpc64__
#include <asm/pgtable.h>
#endif
#include <asm/pgalloc.h>
#ifndef __powerpc64__
#include <asm/page.h>
#include <asm/mmu.h>
#endif

#include <linux/pagemap.h>

#define tlb_start_vma(tlb, vma)	do { } while (0)
#define tlb_end_vma(tlb, vma)	do { } while (0)
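/*
 * Commentary: leaving these empty tells the generic mmu_gather code to
 * skip its default per-VMA cache/TLB range flushes; powerpc performs the
 * required flushing itself from tlb_flush() and the hash/radix specific
 * flush routines.
 */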
#define __tlb_remove_tlb_entry	__tlb_remove_tlb_entry

#define tlb_flush tlb_flush
extern void tlb_flush(struct mmu_gather *tlb);

/* Get the generic bits... */
#include <asm-generic/tlb.h>

extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					   unsigned long address)
{
#ifdef CONFIG_PPC_BOOK3S_32
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(tlb->mm, ptep, address);
#endif
}

#ifdef CONFIG_SMP
static inline int mm_is_core_local(struct mm_struct *mm)
{
	return cpumask_subset(mm_cpumask(mm),
			      topology_sibling_cpumask(smp_processor_id()));
}

#ifdef CONFIG_PPC_BOOK3S_64
static inline int mm_is_thread_local(struct mm_struct *mm)
{
	if (atomic_read(&mm->context.active_cpus) > 1)
		return false;
	return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
}
static inline void mm_reset_thread_local(struct mm_struct *mm)
{
	WARN_ON(atomic_read(&mm->context.copros) > 0);
	/*
	 * It's possible for mm_access to take a reference on mm_users to
	 * access the remote mm from another thread, but it's not allowed
	 * to set mm_cpumask, so mm_users may be > 1 here.
	 */
	WARN_ON(current->mm != mm);
	atomic_set(&mm->context.active_cpus, 1);
	cpumask_clear(mm_cpumask(mm));
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}
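/*
 * Commentary: the radix TLB flush code calls this after flushing an mm on
 * all CPUs for a now single-threaded process, shrinking mm_cpumask() and
 * active_cpus back to the current CPU so that later flushes can take the
 * cheaper thread-local path above.
 */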
#else /* CONFIG_PPC_BOOK3S_64 */
static inline int mm_is_thread_local(struct mm_struct *mm)
{
	return cpumask_equal(mm_cpumask(mm),
			     cpumask_of(smp_processor_id()));
}
#endif /* !CONFIG_PPC_BOOK3S_64 */

#else /* CONFIG_SMP */
static inline int mm_is_core_local(struct mm_struct *mm)
{
	return 1;
}

static inline int mm_is_thread_local(struct mm_struct *mm)
{
	return 1;
}
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TLB_H */