v4.6 (arch/parisc/mm/hugetlbpage.c)
/*
 * PARISC64 Huge TLB page support.
 *
 * This parisc implementation is heavily based on the SPARC and x86 code.
 *
 * Copyright (C) 2015 Helge Deller <deller@gmx.de>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>


unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;

	if (addr)
		addr = ALIGN(addr, huge_page_size(h));

	/* we need to make sure the colouring is OK */
	return arch_get_unmapped_area(file, addr, len, pgoff, flags);
}
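
For orientation, a minimal user-space sketch of the path that ends up in hugetlb_get_unmapped_area(): an anonymous mmap() with MAP_HUGETLB and no fixed address lets the kernel pick an aligned slot. The 2 MB length is an assumption for illustration; use the huge page size actually configured, and reserve pages first (e.g. via /proc/sys/vm/nr_hugepages).

#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	size_t len = 2 * 1024 * 1024;	/* assumed huge page size: 2 MB */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");	/* fails if no huge pages are reserved */
		return 1;
	}
	/* The kernel chose a suitably aligned, colour-correct address. */
	munmap(p, len);
	return 0;
}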


pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, addr);
	}
	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}
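
A hedged sketch of how a kernel-side caller could consume this lookup; has_huge_mapping() is a hypothetical helper written for illustration, not a function in this file, and a real caller would hold the appropriate page table lock:

/* Hypothetical helper: report whether addr is currently backed by a
 * huge mapping in mm.  Illustration only; locking elided. */
static bool has_huge_mapping(struct mm_struct *mm, unsigned long addr)
{
	pte_t *ptep = huge_pte_offset(mm, addr);

	return ptep && !pte_none(*ptep);
}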

/* Purge data and instruction TLB entries.  Must be called holding
 * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
 * machines since the purge must be broadcast to all CPUs.
 */
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
{
	int i;

	/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
	 * Linux standard huge pages (e.g. 2 MB) */
	BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);

	addr &= HPAGE_MASK;
	addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;

	for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
		purge_tlb_entries(mm, addr);
		addr += (1UL << REAL_HPAGE_SHIFT);
	}
}
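
The loop bound is easier to see with numbers plugged in. Assuming HPAGE_SHIFT = 21 (2 MB Linux huge page) and REAL_HPAGE_SHIFT = 20 (1 MB hardware page), values chosen here purely for illustration:

/* Worked example with assumed shifts (illustration only):
 *   1 << (HPAGE_SHIFT - REAL_HPAGE_SHIFT) = 1 << (21 - 20) = 2
 * so the loop issues two purges, at addr and addr + 1 MB --
 * one per hardware huge page emulating the Linux huge page. */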

/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long addr_start;
	int i;

	addr &= HPAGE_MASK;
	addr_start = addr;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte(ptep, entry);
		ptep++;

		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}

	purge_tlb_entries_huge(mm, addr_start);
}
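
Likewise for the fill loop above, under assumed values PAGE_SIZE = 4 KB and HUGETLB_PAGE_ORDER = 9 (again just for illustration):

/* Worked example with assumed constants (illustration only):
 *   1 << HUGETLB_PAGE_ORDER = 1 << 9 = 512 sub-ptes
 * Each iteration bumps the physical address encoded in 'entry'
 * by PAGE_SIZE, so 512 x 4 KB ptes cover one contiguous 2 MB
 * huge page, after which the stale TLB entries are purged. */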

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long flags;

	purge_tlb_start(flags);
	__set_huge_pte_at(mm, addr, ptep, entry);
	purge_tlb_end(flags);
}


pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long flags;
	pte_t entry;

	purge_tlb_start(flags);
	entry = *ptep;
	__set_huge_pte_at(mm, addr, ptep, __pte(0));
	purge_tlb_end(flags);

	return entry;
}


void huge_ptep_set_wrprotect(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	unsigned long flags;
	pte_t old_pte;

	purge_tlb_start(flags);
	old_pte = *ptep;
	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
	purge_tlb_end(flags);
}

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t pte, int dirty)
{
	unsigned long flags;
	int changed;

	purge_tlb_start(flags);
	changed = !pte_same(*ptep, pte);
	if (changed) {
		__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
	}
	purge_tlb_end(flags);
	return changed;
}


int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}
v6.13.7 (arch/parisc/mm/hugetlbpage.c)
// SPDX-License-Identifier: GPL-2.0
/*
 * PARISC64 Huge TLB page support.
 *
 * This parisc implementation is heavily based on the SPARC and x86 code.
 *
 * Copyright (C) 2015 Helge Deller <deller@gmx.de>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_offset(pgd, addr);
	pud = pud_alloc(mm, p4d, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_huge(mm, pmd, addr);
	}
	return pte;
}
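
A hedged sketch of the allocate-then-populate pairing a generic hugetlb fault path performs with the v6.13.7 signatures; map_one_hugepage() and its lack of error handling are illustrative assumptions, not code from this file:

/* Hypothetical illustration of pairing huge_pte_alloc() with
 * set_huge_pte_at(); 'entry' is a pre-built huge pte and error
 * handling is elided. */
static void map_one_hugepage(struct mm_struct *mm, struct vm_area_struct *vma,
			     unsigned long addr, pte_t entry)
{
	pte_t *ptep = huge_pte_alloc(mm, vma, addr & HPAGE_MASK, HPAGE_SIZE);

	if (ptep)
		set_huge_pte_at(mm, addr & HPAGE_MASK, ptep, entry, HPAGE_SIZE);
}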

pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					pte = pte_offset_huge(pmd, addr);
			}
		}
	}
	return pte;
}

/* Purge data and instruction TLB entries.  Must be called holding
 * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
 * machines since the purge must be broadcast to all CPUs.
 */
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
{
	int i;

	/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
	 * Linux standard huge pages (e.g. 2 MB) */
	BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);

	addr &= HPAGE_MASK;
	addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;

	for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
		purge_tlb_entries(mm, addr);
		addr += (1UL << REAL_HPAGE_SHIFT);
	}
}

/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long addr_start;
	int i;

	addr &= HPAGE_MASK;
	addr_start = addr;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte(ptep, entry);
		ptep++;

		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}

	purge_tlb_entries_huge(mm, addr_start);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry, unsigned long sz)
{
	__set_huge_pte_at(mm, addr, ptep, entry);
}


pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, unsigned long sz)
{
	pte_t entry;

	entry = *ptep;
	__set_huge_pte_at(mm, addr, ptep, __pte(0));

	return entry;
}


void huge_ptep_set_wrprotect(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	pte_t old_pte;

	old_pte = *ptep;
	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
}

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t pte, int dirty)
{
	int changed;
	struct mm_struct *mm = vma->vm_mm;

	changed = !pte_same(*ptep, pte);
	if (changed) {
		__set_huge_pte_at(mm, addr, ptep, pte);
	}
	return changed;
}
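
Finally, a hedged sketch of how a fault path consumes the return value of huge_ptep_set_access_flags(); the snippet is illustrative and assumes ptep and new_pte were obtained beforehand:

/* Illustration only: after a write fault on an existing huge
 * mapping, update dirty/access bits and refresh the MMU state
 * only when the pte actually changed. */
if (huge_ptep_set_access_flags(vma, addr, ptep, new_pte, 1))
	update_mmu_cache(vma, addr, ptep);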