Linux Audio

Check our new training course

Loading...
Linux v6.13.7 — arch/arm/mm/pgd.c
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *  linux/arch/arm/mm/pgd.c
  4 *
  5 *  Copyright (C) 1998-2005 Russell King
  6 */
  7#include <linux/mm.h>
  8#include <linux/gfp.h>
  9#include <linux/highmem.h>
 10#include <linux/slab.h>
 11
 12#include <asm/cp15.h>
 13#include <asm/pgalloc.h>
 14#include <asm/page.h>
 15#include <asm/tlbflush.h>
 16
 17#include "mm.h"
 18
  19#ifdef CONFIG_ARM_LPAE
/*
 * LPAE: the first level table holds only PTRS_PER_PGD eight-byte entries,
 * so a small kmalloc is enough.
 */
  20#define __pgd_alloc()	kmalloc_array(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL)
  21#define __pgd_free(pgd)	kfree(pgd)
  22#else
/*
 * Classic ARM: the first level table is 16K ("need to get a 16k page for
 * level 1" below), hence an order-2 (four page) allocation.
 */
  23#define __pgd_alloc()	(pgd_t *)__get_free_pages(GFP_KERNEL, 2)
  24#define __pgd_free(pgd)	free_pages((unsigned long)pgd, 2)
  25#endif
 26
/*
 * pgd_alloc - allocate and populate a first level (pgd) table for @mm.
 *
 * Need to get a 16k page for level 1 on classic ARM (see __pgd_alloc()).
 * The user portion starts out clear; the kernel and IO entries are copied
 * from the init tables.  When the vectors page lives at virtual address 0
 * (!vectors_high()), the low vectors mapping is also set up here.
 *
 * Returns the new table, or NULL on any allocation failure (partial
 * allocations are unwound via the goto ladder at the end).
 */
  30pgd_t *pgd_alloc(struct mm_struct *mm)
  31{
  32	pgd_t *new_pgd, *init_pgd;
  33	p4d_t *new_p4d, *init_p4d;
  34	pud_t *new_pud, *init_pud;
  35	pmd_t *new_pmd, *init_pmd;
  36	pte_t *new_pte, *init_pte;
  37
  38	new_pgd = __pgd_alloc();
  39	if (!new_pgd)
  40		goto no_pgd;
  41
	/* User mappings start out empty. */
  42	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
  43
  44	/*
  45	 * Copy over the kernel and IO PGD entries
  46	 */
  47	init_pgd = pgd_offset_k(0);
  48	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
  49		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
  50
	/*
	 * Clean the new table out of the dcache so the hardware table
	 * walker sees the copied entries (NOTE(review): presumably for
	 * CPUs whose walker does not snoop the dcache — confirm).
	 */
  51	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
  52
  53#ifdef CONFIG_ARM_LPAE
  54	/*
  55	 * Allocate PMD table for modules and pkmap mappings.
  56	 */
  57	new_p4d = p4d_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
  58			    MODULES_VADDR);
  59	if (!new_p4d)
  60		goto no_p4d;
  61
  62	new_pud = pud_alloc(mm, new_p4d, MODULES_VADDR);
  63	if (!new_pud)
  64		goto no_pud;
  65
  66	new_pmd = pmd_alloc(mm, new_pud, 0);
  67	if (!new_pmd)
  68		goto no_pmd;
  69#ifdef CONFIG_KASAN
  70	/*
  71	 * Copy PMD table for KASAN shadow mappings.
  72	 */
  73	init_pgd = pgd_offset_k(TASK_SIZE);
  74	init_p4d = p4d_offset(init_pgd, TASK_SIZE);
  75	init_pud = pud_offset(init_p4d, TASK_SIZE);
  76	init_pmd = pmd_offset(init_pud, TASK_SIZE);
	/* new_pmd is repointed here; the table allocated above stays in place. */
  77	new_pmd = pmd_offset(new_pud, TASK_SIZE);
  78	memcpy(new_pmd, init_pmd,
  79	       (pmd_index(MODULES_VADDR) - pmd_index(TASK_SIZE))
  80	       * sizeof(pmd_t));
  81	clean_dcache_area(new_pmd, PTRS_PER_PMD * sizeof(pmd_t));
  82#endif /* CONFIG_KASAN */
  83#endif /* CONFIG_LPAE */
  84
  85	if (!vectors_high()) {
  86		/*
  87		 * On ARM, first page must always be allocated since it
  88		 * contains the machine vectors. The vectors are always high
  89		 * with LPAE.
  90		 */
  91		new_p4d = p4d_alloc(mm, new_pgd, 0);
  92		if (!new_p4d)
  93			goto no_p4d;
  94
  95		new_pud = pud_alloc(mm, new_p4d, 0);
  96		if (!new_pud)
  97			goto no_pud;
  98
  99		new_pmd = pmd_alloc(mm, new_pud, 0);
 100		if (!new_pmd)
 101			goto no_pmd;
 102
 103		new_pte = pte_alloc_map(mm, new_pmd, 0);
 104		if (!new_pte)
 105			goto no_pte;
 106
 107#ifndef CONFIG_ARM_LPAE
 108		/*
 109		 * Modify the PTE pointer to have the correct domain.  This
 110		 * needs to be the vectors domain to avoid the low vectors
 111		 * being unmapped.
 112		 */
 113		pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
 114		pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
 115#endif
 116
		/*
		 * Duplicate the first two ptes of the init tables into the
		 * new mm: the vectors page at address 0 (and, presumably,
		 * the page right after it — confirm against vectors setup).
		 */
 117		init_p4d = p4d_offset(init_pgd, 0);
 118		init_pud = pud_offset(init_p4d, 0);
 119		init_pmd = pmd_offset(init_pud, 0);
 120		init_pte = pte_offset_map(init_pmd, 0);
 121		set_pte_ext(new_pte + 0, init_pte[0], 0);
 122		set_pte_ext(new_pte + 1, init_pte[1], 0);
 123		pte_unmap(init_pte);
 124		pte_unmap(new_pte);
 125	}
 126
 127	return new_pgd;
 128
	/* Error unwind: free in reverse order of allocation. */
 129no_pte:
 130	pmd_free(mm, new_pmd);
 131	mm_dec_nr_pmds(mm);
 132no_pmd:
 133	pud_free(mm, new_pud);
 134no_pud:
 135	p4d_free(mm, new_p4d);
 136no_p4d:
 137	__pgd_free(new_pgd);
 138no_pgd:
 139	return NULL;
 140}
141
/*
 * pgd_free - release a first level table allocated by pgd_alloc(),
 * together with any pte/pmd/pud tables that pgd_alloc() hung off entry 0
 * for the low vectors mapping, and (LPAE) any pmd tables allocated for
 * the modules/pkmap area.
 */
 142void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
 143{
 144	pgd_t *pgd;
 145	p4d_t *p4d;
 146	pud_t *pud;
 147	pmd_t *pmd;
 148	pgtable_t pte;
 149
 150	if (!pgd_base)
 151		return;
 152
	/*
	 * Walk down from entry 0 and tear down the tables backing the low
	 * vectors mapping, if pgd_alloc() installed one.  Each label frees
	 * from the point the walk stopped.
	 */
 153	pgd = pgd_base + pgd_index(0);
 154	if (pgd_none_or_clear_bad(pgd))
 155		goto no_pgd;
 156
 157	p4d = p4d_offset(pgd, 0);
 158	if (p4d_none_or_clear_bad(p4d))
 159		goto no_p4d;
 160
 161	pud = pud_offset(p4d, 0);
 162	if (pud_none_or_clear_bad(pud))
 163		goto no_pud;
 164
 165	pmd = pmd_offset(pud, 0);
 166	if (pmd_none_or_clear_bad(pmd))
 167		goto no_pmd;
 168
 169	pte = pmd_pgtable(*pmd);
 170	pmd_clear(pmd);
 171	pte_free(mm, pte);
 172	mm_dec_nr_ptes(mm);
 173no_pmd:
 174	pud_clear(pud);
 175	pmd_free(mm, pmd);
 176	mm_dec_nr_pmds(mm);
 177no_pud:
 178	p4d_clear(p4d);
 179	pud_free(mm, pud);
 180no_p4d:
 181	pgd_clear(pgd);
 182	p4d_free(mm, p4d);
 183no_pgd:
 184#ifdef CONFIG_ARM_LPAE
 185	/*
 186	 * Free modules/pkmap or identity pmd tables.
 187	 */
 188	for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
 189		if (pgd_none_or_clear_bad(pgd))
 190			continue;
		/*
		 * NOTE(review): L_PGD_SWAPPER presumably marks entries copied
		 * from the init tables by pgd_alloc(); those tables are shared
		 * with the kernel and must not be freed here.
		 */
 191		if (pgd_val(*pgd) & L_PGD_SWAPPER)
 192			continue;
 193		p4d = p4d_offset(pgd, 0);
 194		if (p4d_none_or_clear_bad(p4d))
 195			continue;
 196		pud = pud_offset(p4d, 0);
 197		if (pud_none_or_clear_bad(pud))
 198			continue;
 199		pmd = pmd_offset(pud, 0);
 200		pud_clear(pud);
 201		pmd_free(mm, pmd);
 202		mm_dec_nr_pmds(mm);
 203		p4d_clear(p4d);
 204		pud_free(mm, pud);
 205		mm_dec_nr_puds(mm);
 206		pgd_clear(pgd);
 207		p4d_free(mm, p4d);
 208	}
 209#endif
 210	__pgd_free(pgd_base);
 211}
Linux v5.4 — arch/arm/mm/pgd.c (older version of the same file, before the p4d page-table level was introduced)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *  linux/arch/arm/mm/pgd.c
  4 *
  5 *  Copyright (C) 1998-2005 Russell King
  6 */
  7#include <linux/mm.h>
  8#include <linux/gfp.h>
  9#include <linux/highmem.h>
 10#include <linux/slab.h>
 11
 12#include <asm/cp15.h>
 13#include <asm/pgalloc.h>
 14#include <asm/page.h>
 15#include <asm/tlbflush.h>
 16
 17#include "mm.h"
 18
  19#ifdef CONFIG_ARM_LPAE
/*
 * LPAE: the first level table holds only PTRS_PER_PGD eight-byte entries,
 * so a small kmalloc is enough.
 */
  20#define __pgd_alloc()	kmalloc_array(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL)
  21#define __pgd_free(pgd)	kfree(pgd)
  22#else
/*
 * Classic ARM: the first level table is 16K ("need to get a 16k page for
 * level 1" below), hence an order-2 (four page) allocation.
 */
  23#define __pgd_alloc()	(pgd_t *)__get_free_pages(GFP_KERNEL, 2)
  24#define __pgd_free(pgd)	free_pages((unsigned long)pgd, 2)
  25#endif
 26
/*
 * pgd_alloc - allocate and populate a first level (pgd) table for @mm.
 *
 * Need to get a 16k page for level 1 on classic ARM (see __pgd_alloc()).
 * This pre-p4d version walks pgd -> pud -> pmd -> pte directly.  The user
 * portion starts out clear; kernel and IO entries are copied from the init
 * tables, and when the vectors page lives at virtual address 0
 * (!vectors_high()) the low vectors mapping is set up here too.
 *
 * Returns the new table, or NULL on any allocation failure.
 */
  30pgd_t *pgd_alloc(struct mm_struct *mm)
  31{
  32	pgd_t *new_pgd, *init_pgd;

  33	pud_t *new_pud, *init_pud;
  34	pmd_t *new_pmd, *init_pmd;
  35	pte_t *new_pte, *init_pte;
  36
  37	new_pgd = __pgd_alloc();
  38	if (!new_pgd)
  39		goto no_pgd;
  40
	/* User mappings start out empty. */
  41	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
  42
  43	/*
  44	 * Copy over the kernel and IO PGD entries
  45	 */
  46	init_pgd = pgd_offset_k(0);
  47	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
  48		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
  49
	/*
	 * Clean the new table out of the dcache so the hardware table
	 * walker sees the copied entries (NOTE(review): presumably for
	 * CPUs whose walker does not snoop the dcache — confirm).
	 */
  50	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
  51
  52#ifdef CONFIG_ARM_LPAE
  53	/*
  54	 * Allocate PMD table for modules and pkmap mappings.
  55	 */
  56	new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
  57			    MODULES_VADDR);
  58	if (!new_pud)
  59		goto no_pud;
  60
  61	new_pmd = pmd_alloc(mm, new_pud, 0);
  62	if (!new_pmd)
  63		goto no_pmd;
  64#endif
  65
  66	if (!vectors_high()) {
  67		/*
  68		 * On ARM, first page must always be allocated since it
  69		 * contains the machine vectors. The vectors are always high
  70		 * with LPAE.
  71		 */
  72		new_pud = pud_alloc(mm, new_pgd, 0);
  73		if (!new_pud)
  74			goto no_pud;
  75
  76		new_pmd = pmd_alloc(mm, new_pud, 0);
  77		if (!new_pmd)
  78			goto no_pmd;
  79
  80		new_pte = pte_alloc_map(mm, new_pmd, 0);
  81		if (!new_pte)
  82			goto no_pte;
  83
  84#ifndef CONFIG_ARM_LPAE
  85		/*
  86		 * Modify the PTE pointer to have the correct domain.  This
  87		 * needs to be the vectors domain to avoid the low vectors
  88		 * being unmapped.
  89		 */
  90		pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
  91		pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
  92#endif
  93
		/*
		 * Duplicate the first two ptes of the init tables into the
		 * new mm: the vectors page at address 0 (and, presumably,
		 * the page right after it — confirm against vectors setup).
		 */
  94		init_pud = pud_offset(init_pgd, 0);

  95		init_pmd = pmd_offset(init_pud, 0);
  96		init_pte = pte_offset_map(init_pmd, 0);
  97		set_pte_ext(new_pte + 0, init_pte[0], 0);
  98		set_pte_ext(new_pte + 1, init_pte[1], 0);
  99		pte_unmap(init_pte);
 100		pte_unmap(new_pte);
 101	}
 102
 103	return new_pgd;
 104
	/* Error unwind: free in reverse order of allocation. */
 105no_pte:
 106	pmd_free(mm, new_pmd);
 107	mm_dec_nr_pmds(mm);
 108no_pmd:
 109	pud_free(mm, new_pud);
 110no_pud:

 111	__pgd_free(new_pgd);
 112no_pgd:
 113	return NULL;
 114}
115
/*
 * pgd_free - release a first level table allocated by pgd_alloc(),
 * together with any pte/pmd/pud tables that pgd_alloc() hung off entry 0
 * for the low vectors mapping, and (LPAE) any pmd tables allocated for
 * the modules/pkmap area.  Pre-p4d version: walks pgd -> pud -> pmd.
 */
 116void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
 117{
 118	pgd_t *pgd;

 119	pud_t *pud;
 120	pmd_t *pmd;
 121	pgtable_t pte;
 122
 123	if (!pgd_base)
 124		return;
 125
	/*
	 * Walk down from entry 0 and tear down the tables backing the low
	 * vectors mapping, if pgd_alloc() installed one.  Each label frees
	 * from the point the walk stopped.
	 */
 126	pgd = pgd_base + pgd_index(0);
 127	if (pgd_none_or_clear_bad(pgd))
 128		goto no_pgd;
 129
 130	pud = pud_offset(pgd, 0);

 131	if (pud_none_or_clear_bad(pud))
 132		goto no_pud;
 133
 134	pmd = pmd_offset(pud, 0);
 135	if (pmd_none_or_clear_bad(pmd))
 136		goto no_pmd;
 137
 138	pte = pmd_pgtable(*pmd);
 139	pmd_clear(pmd);
 140	pte_free(mm, pte);
 141	mm_dec_nr_ptes(mm);
 142no_pmd:
 143	pud_clear(pud);
 144	pmd_free(mm, pmd);
 145	mm_dec_nr_pmds(mm);
 146no_pud:

 147	pgd_clear(pgd);
 148	pud_free(mm, pud);
 149no_pgd:
 150#ifdef CONFIG_ARM_LPAE
 151	/*
 152	 * Free modules/pkmap or identity pmd tables.
 153	 */
 154	for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
 155		if (pgd_none_or_clear_bad(pgd))
 156			continue;
		/*
		 * NOTE(review): L_PGD_SWAPPER presumably marks entries copied
		 * from the init tables by pgd_alloc(); those tables are shared
		 * with the kernel and must not be freed here.
		 */
 157		if (pgd_val(*pgd) & L_PGD_SWAPPER)
 158			continue;
 159		pud = pud_offset(pgd, 0);

 160		if (pud_none_or_clear_bad(pud))
 161			continue;
 162		pmd = pmd_offset(pud, 0);
 163		pud_clear(pud);
 164		pmd_free(mm, pmd);
 165		mm_dec_nr_pmds(mm);

 166		pgd_clear(pgd);
 167		pud_free(mm, pud);
 168	}
 169#endif
 170	__pgd_free(pgd_base);
 171}