arch/parisc/mm/hugetlbpage.c, v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * PARISC64 Huge TLB page support.
 *
 * This parisc implementation is heavily based on the SPARC and x86 code.
 *
 * Copyright (C) 2015 Helge Deller <deller@gmx.de>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>


pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_offset(pgd, addr);
	pud = pud_alloc(mm, p4d, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_huge(mm, pmd, addr);
	}
	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					pte = pte_offset_huge(pmd, addr);
			}
		}
	}
	return pte;
}

/* Purge data and instruction TLB entries.  Must be called holding
 * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
 * machines since the purge must be broadcast to all CPUs.
 */
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
{
	int i;

	/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
	 * Linux standard huge pages (e.g. 2 MB) */
	BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);

	addr &= HPAGE_MASK;
	addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;

	for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
		purge_tlb_entries(mm, addr);
		addr += (1UL << REAL_HPAGE_SHIFT);
	}
}

/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long addr_start;
	int i;

	addr &= HPAGE_MASK;
	addr_start = addr;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte(ptep, entry);
		ptep++;

		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}

	purge_tlb_entries_huge(mm, addr_start);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry, unsigned long sz)
{
	__set_huge_pte_at(mm, addr, ptep, entry);
}


pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, unsigned long sz)
{
	pte_t entry;

	entry = *ptep;
	__set_huge_pte_at(mm, addr, ptep, __pte(0));

	return entry;
}


void huge_ptep_set_wrprotect(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	pte_t old_pte;

	old_pte = *ptep;
	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
}

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t pte, int dirty)
{
	int changed;
	struct mm_struct *mm = vma->vm_mm;

	changed = !pte_same(*ptep, pte);
	if (changed) {
		__set_huge_pte_at(mm, addr, ptep, pte);
	}
	return changed;
}
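
To make the arithmetic in __set_huge_pte_at() and purge_tlb_entries_huge() concrete, here is a minimal user-space sketch (not kernel code). The constants are illustrative assumptions taken from the in-code comment (4 KB base pages, 1 MB hardware "real" huge pages, 2 MB Linux huge pages); the actual values live in the parisc headers.

#include <stdio.h>

/* Assumed, illustrative values; the real ones come from asm/page.h. */
#define PAGE_SHIFT		12	/* 4 KB base page */
#define REAL_HPAGE_SHIFT	20	/* 1 MB hardware huge page */
#define HPAGE_SHIFT		21	/* 2 MB Linux huge page */
#define HPAGE_MASK		(~((1UL << HPAGE_SHIFT) - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

int main(void)
{
	unsigned long addr = 0x40123456UL;

	/* __set_huge_pte_at() writes one sub-PTE per base page ... */
	printf("sub-PTEs written per huge page: %d\n",
	       1 << HUGETLB_PAGE_ORDER);

	/* ... while the purge loop steps in hardware huge-page units,
	 * so one 2 MB Linux huge page costs two 1 MB TLB purges here. */
	printf("TLB purges per huge page:       %d\n",
	       1 << (HPAGE_SHIFT - REAL_HPAGE_SHIFT));

	/* Both helpers first round the address down to the huge page. */
	printf("aligned addr: %#lx\n", addr & HPAGE_MASK);
	return 0;
}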
arch/parisc/mm/hugetlbpage.c, v4.17
// SPDX-License-Identifier: GPL-2.0
/*
 * PARISC64 Huge TLB page support.
 *
 * This parisc implementation is heavily based on the SPARC and x86 code.
 *
 * Copyright (C) 2015 Helge Deller <deller@gmx.de>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>


unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;

	if (addr)
		addr = ALIGN(addr, huge_page_size(h));

	/* we need to make sure the colouring is OK */
	return arch_get_unmapped_area(file, addr, len, pgoff, flags);
}


pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, addr);
	}
	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}

/* Purge data and instruction TLB entries.  Must be called holding
 * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
 * machines since the purge must be broadcast to all CPUs.
 */
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
{
	int i;

	/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
	 * Linux standard huge pages (e.g. 2 MB) */
	BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);

	addr &= HPAGE_MASK;
	addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;

	for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
		purge_tlb_entries(mm, addr);
		addr += (1UL << REAL_HPAGE_SHIFT);
	}
}

/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long addr_start;
	int i;

	addr &= HPAGE_MASK;
	addr_start = addr;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte(ptep, entry);
		ptep++;

		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}

	purge_tlb_entries_huge(mm, addr_start);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long flags;

	purge_tlb_start(flags);
	__set_huge_pte_at(mm, addr, ptep, entry);
	purge_tlb_end(flags);
}


pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long flags;
	pte_t entry;

	purge_tlb_start(flags);
	entry = *ptep;
	__set_huge_pte_at(mm, addr, ptep, __pte(0));
	purge_tlb_end(flags);

	return entry;
}


void huge_ptep_set_wrprotect(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	unsigned long flags;
	pte_t old_pte;

	purge_tlb_start(flags);
	old_pte = *ptep;
	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
	purge_tlb_end(flags);
}

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t pte, int dirty)
{
	unsigned long flags;
	int changed;

	purge_tlb_start(flags);
	changed = !pte_same(*ptep, pte);
	if (changed) {
		__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
	}
	purge_tlb_end(flags);
	return changed;
}


int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}
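
Both versions are driven by the generic hugetlb fault path. As a hedged illustration of how this code gets exercised, the sketch below maps and touches one hugetlb page from user space with mmap(MAP_HUGETLB); the first write fault is what takes the kernel through huge_pte_alloc() and set_huge_pte_at() above. It assumes a 2 MB default huge page size and that huge pages have been reserved beforehand (e.g. via /proc/sys/vm/nr_hugepages).

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define LEN (2UL * 1024 * 1024)	/* assumed default huge page size: 2 MB */

int main(void)
{
	void *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");	/* likely no huge pages reserved */
		return 1;
	}
	memset(p, 0, LEN);	/* first touch faults in the huge mapping */
	munmap(p, LEN);
	return 0;
}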