v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  Page table allocation functions
  4 *
  5 *    Copyright IBM Corp. 2016
  6 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  7 */
  8
  9#include <linux/sysctl.h>
 10#include <linux/slab.h>
 11#include <linux/mm.h>
 12#include <asm/mmu_context.h>
 13#include <asm/page-states.h>
 14#include <asm/pgalloc.h>
 15#include <asm/gmap.h>
 16#include <asm/tlb.h>
 17#include <asm/tlbflush.h>
 18
 19#ifdef CONFIG_PGSTE
 20
 21int page_table_allocate_pgste = 0;
 22EXPORT_SYMBOL(page_table_allocate_pgste);
 23
 24static struct ctl_table page_table_sysctl[] = {
 25	{
 26		.procname	= "allocate_pgste",
 27		.data		= &page_table_allocate_pgste,
 28		.maxlen		= sizeof(int),
 29		.mode		= S_IRUGO | S_IWUSR,
 30		.proc_handler	= proc_dointvec_minmax,
 31		.extra1		= SYSCTL_ZERO,
 32		.extra2		= SYSCTL_ONE,
 33	},
 34};
 35
 36static int __init page_table_register_sysctl(void)
 37{
 38	return register_sysctl("vm", page_table_sysctl) ? 0 : -ENOMEM;
 39}
 40__initcall(page_table_register_sysctl);
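The sysctl above is the knob that makes newly created address spaces allocate their page tables with the extra PGSTE area (a prerequisite for running KVM guests on s390). It shows up as /proc/sys/vm/allocate_pgste, writable by root and clamped to 0 or 1 by proc_dointvec_minmax, and only affects processes started after the write. A minimal user-space sketch (not part of this file) of flipping it before launching something that needs PGSTEs:

	/* user-space sketch: enable PGSTE allocation for processes started later */
	#include <fcntl.h>
	#include <unistd.h>

	static int enable_pgste(void)
	{
		int fd = open("/proc/sys/vm/allocate_pgste", O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, "1", 1) != 1) {
			close(fd);
			return -1;
		}
		return close(fd);
	}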
 41
 42#endif /* CONFIG_PGSTE */
 43
 44unsigned long *crst_table_alloc(struct mm_struct *mm)
 45{
 46	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
 47	unsigned long *table;
 48
 49	if (!ptdesc)
 50		return NULL;
 51	table = ptdesc_to_virt(ptdesc);
 52	__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
 53	return table;
 54}
 55
 56void crst_table_free(struct mm_struct *mm, unsigned long *table)
 57{
 58	pagetable_free(virt_to_ptdesc(table));
 59}
 60
 61static void __crst_table_upgrade(void *arg)
 62{
 63	struct mm_struct *mm = arg;
 64
 65	/* change all active ASCEs to avoid the creation of new TLBs */
 66	if (current->active_mm == mm) {
 67		S390_lowcore.user_asce.val = mm->context.asce;
 68		local_ctl_load(7, &S390_lowcore.user_asce);
 69	}
 70	__tlb_flush_local();
 71}
 72
 73int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
 74{
 75	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
 76	unsigned long asce_limit = mm->context.asce_limit;
 77
 78	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
 79	VM_BUG_ON(asce_limit < _REGION2_SIZE);
 80
 81	if (end <= asce_limit)
 82		return 0;
 83
 84	if (asce_limit == _REGION2_SIZE) {
 85		p4d = crst_table_alloc(mm);
 86		if (unlikely(!p4d))
 87			goto err_p4d;
 88		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
 89	}
 90	if (end > _REGION1_SIZE) {
 91		pgd = crst_table_alloc(mm);
 92		if (unlikely(!pgd))
 93			goto err_pgd;
 94		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
 95	}
 96
 97	spin_lock_bh(&mm->page_table_lock);
 98
  99	/*
 100	 * This routine gets called with mmap_lock held, and there is no
 101	 * reason to optimize for the case where it is not. However, if
 102	 * that would ever change, the below check will let us know.
 103	 */
104	VM_BUG_ON(asce_limit != mm->context.asce_limit);
105
106	if (p4d) {
107		__pgd = (unsigned long *) mm->pgd;
108		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
109		mm->pgd = (pgd_t *) p4d;
110		mm->context.asce_limit = _REGION1_SIZE;
111		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
112			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
113		mm_inc_nr_puds(mm);
114	}
115	if (pgd) {
116		__pgd = (unsigned long *) mm->pgd;
117		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
118		mm->pgd = (pgd_t *) pgd;
119		mm->context.asce_limit = TASK_SIZE_MAX;
120		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
121			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
122	}
123
124	spin_unlock_bh(&mm->page_table_lock);
125
126	on_each_cpu(__crst_table_upgrade, mm, 0);
127
128	return 0;
129
130err_pgd:
131	crst_table_free(mm, p4d);
132err_p4d:
133	return -ENOMEM;
134}
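For context, the upgrade is driven by address-space growth: when a mapping request no longer fits below the current ASCE limit, the s390 mmap code asks for more translation levels and only then retries the search. A condensed sketch of the caller side (simplified from arch/s390/mm/mmap.c):

	/* grow the user address space on demand before mapping at 'addr' */
	if (addr + len > mm->context.asce_limit &&
	    addr + len <= TASK_SIZE) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}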
135
136#ifdef CONFIG_PGSTE
137
138struct page *page_table_alloc_pgste(struct mm_struct *mm)
139{
140	struct ptdesc *ptdesc;
141	u64 *table;
142
143	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
144	if (ptdesc) {
145		table = (u64 *)ptdesc_to_virt(ptdesc);
146		__arch_set_page_dat(table, 1);
147		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
148		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
149	}
150	return ptdesc_page(ptdesc);
151}
152
153void page_table_free_pgste(struct page *page)
154{
155	pagetable_free(page_ptdesc(page));
156}
157
158#endif /* CONFIG_PGSTE */
159
160unsigned long *page_table_alloc(struct mm_struct *mm)
161{
162	struct ptdesc *ptdesc;
163	unsigned long *table;
164
165	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
166	if (!ptdesc)
167		return NULL;
168	if (!pagetable_pte_ctor(ptdesc)) {
169		pagetable_free(ptdesc);
170		return NULL;
171	}
172	table = ptdesc_to_virt(ptdesc);
173	__arch_set_page_dat(table, 1);
174	/* pt_list is used by gmap only */
175	INIT_LIST_HEAD(&ptdesc->pt_list);
176	memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
177	memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
178	return table;
179}
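The function always hands out a full 4K frame of 512 eight-byte entries; only the lower half is the hardware page table. A sketch of the resulting layout:

	/*
	 * table[0]   ... table[255]   PTEs, preset to _PAGE_INVALID
	 * table[256] ... table[511]   cleared; used for the per-PTE PGSTE
	 *                             entries when the mm was created with
	 *                             allocate_pgste, otherwise unused
	 */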
180
181static void pagetable_pte_dtor_free(struct ptdesc *ptdesc)
182{
183	pagetable_pte_dtor(ptdesc);
184	pagetable_free(ptdesc);
185}
186
187void page_table_free(struct mm_struct *mm, unsigned long *table)
188{
189	struct ptdesc *ptdesc = virt_to_ptdesc(table);
190
191	pagetable_pte_dtor_free(ptdesc);
192}
193
194void __tlb_remove_table(void *table)
195{
196	struct ptdesc *ptdesc = virt_to_ptdesc(table);
197	struct page *page = ptdesc_page(ptdesc);
198
199	if (compound_order(page) == CRST_ALLOC_ORDER) {
200		/* pmd, pud, or p4d */
201		pagetable_free(ptdesc);
202		return;
203	}
204	pagetable_pte_dtor_free(ptdesc);
205}
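The callback has to free two very different kinds of tables through a single pointer and tells them apart purely by allocation order, roughly:

	/*
	 * compound_order == CRST_ALLOC_ORDER (2, a 16K block): pmd/pud/p4d,
	 *         simply return it to the page allocator
	 * compound_order == 0 (a single 4K page): PTE table, run
	 *         pagetable_pte_dtor() before freeing
	 */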
206
207#ifdef CONFIG_TRANSPARENT_HUGEPAGE
208static void pte_free_now(struct rcu_head *head)
209{
210	struct ptdesc *ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
211
212	pagetable_pte_dtor_free(ptdesc);
213}
214
215void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
216{
217	struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);
218
219	call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
220	/*
221	 * THPs are not allowed for KVM guests. Warn if pgste ever reaches here.
222	 * Turn to the generic pte_free_defer() version once gmap is removed.
223	 */
224	WARN_ON_ONCE(mm_has_pgste(mm));
225}
226#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
227
228/*
229 * Base infrastructure required to generate basic asces, region, segment,
230 * and page tables that do not make use of enhanced features like EDAT1.
231 */
232
233static struct kmem_cache *base_pgt_cache;
234
235static unsigned long *base_pgt_alloc(void)
236{
237	unsigned long *table;
238
239	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
240	if (table)
241		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
242	return table;
243}
244
245static void base_pgt_free(unsigned long *table)
246{
247	kmem_cache_free(base_pgt_cache, table);
248}
249
250static unsigned long *base_crst_alloc(unsigned long val)
251{
252	unsigned long *table;
253	struct ptdesc *ptdesc;
254
255	ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
256	if (!ptdesc)
257		return NULL;
258	table = ptdesc_address(ptdesc);
259	crst_table_init(table, val);
260	return table;
261}
262
263static void base_crst_free(unsigned long *table)
264{
265	pagetable_free(virt_to_ptdesc(table));
266}
267
268#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
269static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
270						   unsigned long end)	\
271{									\
272	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
273									\
274	return (next - 1) < (end - 1) ? next : end;			\
275}
276
277BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
278BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
279BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
280BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
281BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)
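Each generated helper rounds addr up to the next SIZE boundary and clamps the result at end; comparing next - 1 with end - 1 keeps the test correct even when next or end wraps to 0 at the very top of the address space. Two worked values for the segment variant (SIZE = _SEGMENT_SIZE = 1 MB):

	/* base_segment_addr_end(0x00180000, 0x00300000) == 0x00200000  (next 1 MB boundary) */
	/* base_segment_addr_end(0x00180000, 0x001c0000) == 0x001c0000  (clamped to end)     */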
282
283static inline unsigned long base_lra(unsigned long address)
284{
285	unsigned long real;
286
287	asm volatile(
288		"	lra	%0,0(%1)\n"
289		: "=d" (real) : "a" (address) : "cc");
290	return real;
291}
292
293static int base_page_walk(unsigned long *origin, unsigned long addr,
294			  unsigned long end, int alloc)
295{
296	unsigned long *pte, next;
297
298	if (!alloc)
299		return 0;
300	pte = origin;
301	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
302	do {
303		next = base_page_addr_end(addr, end);
304		*pte = base_lra(addr);
305	} while (pte++, addr = next, addr < end);
306	return 0;
307}
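The page-level walk never allocates anything: for every 4K page it stores the real (physical) address currently backing the kernel virtual address, obtained with the LRA (Load Real Address) instruction via base_lra(), so the generated tables resolve each page to the same real frame as the regular kernel mapping.

	/* effectively, per 4K step:  *pte++ = base_lra(addr); addr += _PAGE_SIZE; */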
308
309static int base_segment_walk(unsigned long *origin, unsigned long addr,
310			     unsigned long end, int alloc)
311{
312	unsigned long *ste, next, *table;
313	int rc;
314
315	ste = origin;
316	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
317	do {
318		next = base_segment_addr_end(addr, end);
319		if (*ste & _SEGMENT_ENTRY_INVALID) {
320			if (!alloc)
321				continue;
322			table = base_pgt_alloc();
323			if (!table)
324				return -ENOMEM;
325			*ste = __pa(table) | _SEGMENT_ENTRY;
326		}
327		table = __va(*ste & _SEGMENT_ENTRY_ORIGIN);
328		rc = base_page_walk(table, addr, next, alloc);
329		if (rc)
330			return rc;
331		if (!alloc)
332			base_pgt_free(table);
333		cond_resched();
334	} while (ste++, addr = next, addr < end);
335	return 0;
336}
337
338static int base_region3_walk(unsigned long *origin, unsigned long addr,
339			     unsigned long end, int alloc)
340{
341	unsigned long *rtte, next, *table;
342	int rc;
343
344	rtte = origin;
345	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
346	do {
347		next = base_region3_addr_end(addr, end);
348		if (*rtte & _REGION_ENTRY_INVALID) {
349			if (!alloc)
350				continue;
351			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
352			if (!table)
353				return -ENOMEM;
354			*rtte = __pa(table) | _REGION3_ENTRY;
355		}
356		table = __va(*rtte & _REGION_ENTRY_ORIGIN);
357		rc = base_segment_walk(table, addr, next, alloc);
358		if (rc)
359			return rc;
360		if (!alloc)
361			base_crst_free(table);
362	} while (rtte++, addr = next, addr < end);
363	return 0;
364}
365
366static int base_region2_walk(unsigned long *origin, unsigned long addr,
367			     unsigned long end, int alloc)
368{
369	unsigned long *rste, next, *table;
370	int rc;
371
372	rste = origin;
373	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
374	do {
375		next = base_region2_addr_end(addr, end);
376		if (*rste & _REGION_ENTRY_INVALID) {
377			if (!alloc)
378				continue;
379			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
380			if (!table)
381				return -ENOMEM;
382			*rste = __pa(table) | _REGION2_ENTRY;
383		}
384		table = __va(*rste & _REGION_ENTRY_ORIGIN);
385		rc = base_region3_walk(table, addr, next, alloc);
386		if (rc)
387			return rc;
388		if (!alloc)
389			base_crst_free(table);
390	} while (rste++, addr = next, addr < end);
391	return 0;
392}
393
394static int base_region1_walk(unsigned long *origin, unsigned long addr,
395			     unsigned long end, int alloc)
396{
397	unsigned long *rfte, next, *table;
398	int rc;
399
400	rfte = origin;
401	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
402	do {
403		next = base_region1_addr_end(addr, end);
404		if (*rfte & _REGION_ENTRY_INVALID) {
405			if (!alloc)
406				continue;
407			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
408			if (!table)
409				return -ENOMEM;
410			*rfte = __pa(table) | _REGION1_ENTRY;
411		}
412		table = __va(*rfte & _REGION_ENTRY_ORIGIN);
413		rc = base_region2_walk(table, addr, next, alloc);
414		if (rc)
415			return rc;
416		if (!alloc)
417			base_crst_free(table);
418	} while (rfte++, addr = next, addr < end);
419	return 0;
420}
421
422/**
423 * base_asce_free - free asce and tables returned from base_asce_alloc()
424 * @asce: asce to be freed
425 *
426 * Frees all region, segment, and page tables that were allocated with a
427 * corresponding base_asce_alloc() call.
428 */
429void base_asce_free(unsigned long asce)
430{
431	unsigned long *table = __va(asce & _ASCE_ORIGIN);
432
433	if (!asce)
434		return;
435	switch (asce & _ASCE_TYPE_MASK) {
436	case _ASCE_TYPE_SEGMENT:
437		base_segment_walk(table, 0, _REGION3_SIZE, 0);
438		break;
439	case _ASCE_TYPE_REGION3:
440		base_region3_walk(table, 0, _REGION2_SIZE, 0);
441		break;
442	case _ASCE_TYPE_REGION2:
443		base_region2_walk(table, 0, _REGION1_SIZE, 0);
444		break;
445	case _ASCE_TYPE_REGION1:
446		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
447		break;
448	}
449	base_crst_free(table);
450}
451
452static int base_pgt_cache_init(void)
453{
454	static DEFINE_MUTEX(base_pgt_cache_mutex);
455	unsigned long sz = _PAGE_TABLE_SIZE;
456
457	if (base_pgt_cache)
458		return 0;
459	mutex_lock(&base_pgt_cache_mutex);
460	if (!base_pgt_cache)
461		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
462	mutex_unlock(&base_pgt_cache_mutex);
463	return base_pgt_cache ? 0 : -ENOMEM;
464}
465
466/**
467 * base_asce_alloc - create kernel mapping without enhanced DAT features
468 * @addr: virtual start address of kernel mapping
469 * @num_pages: number of consecutive pages
470 *
471 * Generate an asce, including all required region, segment and page tables,
472 * that can be used to access the virtual kernel mapping. The difference is
473 * that the returned asce does not make use of any enhanced DAT features like
474 * e.g. large pages. This is required for some I/O functions that pass an
475 * asce, like e.g. some service call requests.
476 *
477 * Note: the returned asce may NEVER be attached to any cpu. It may only be
478 *	 used for I/O requests. tlb entries that might result because the
479 *	 asce was attached to a cpu won't be cleared.
480 */
481unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
482{
483	unsigned long asce, *table, end;
484	int rc;
485
486	if (base_pgt_cache_init())
487		return 0;
488	end = addr + num_pages * PAGE_SIZE;
489	if (end <= _REGION3_SIZE) {
490		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
491		if (!table)
492			return 0;
493		rc = base_segment_walk(table, addr, end, 1);
494		asce = __pa(table) | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
495	} else if (end <= _REGION2_SIZE) {
496		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
497		if (!table)
498			return 0;
499		rc = base_region3_walk(table, addr, end, 1);
500		asce = __pa(table) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
501	} else if (end <= _REGION1_SIZE) {
502		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
503		if (!table)
504			return 0;
505		rc = base_region2_walk(table, addr, end, 1);
506		asce = __pa(table) | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
507	} else {
508		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
509		if (!table)
510			return 0;
511		rc = base_region1_walk(table, addr, end, 1);
512		asce = __pa(table) | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
513	}
514	if (rc) {
515		base_asce_free(asce);
516		asce = 0;
517	}
518	return asce;
519}
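A sketch of the intended usage from kernel code that needs such an address space control element: build it over an existing kernel buffer, pass it along with the I/O or service call request, then free it. buf and npages are hypothetical placeholders; only base_asce_alloc() and base_asce_free() come from this file.

	unsigned long asce;

	asce = base_asce_alloc((unsigned long) buf, npages);
	if (!asce)
		return -ENOMEM;
	/* hand 'asce' to the request that requires a non-EDAT asce */
	base_asce_free(asce);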
v4.10.11
  1/*
  2 *  Page table allocation functions
  3 *
  4 *    Copyright IBM Corp. 2016
  5 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  6 */
  7
  8#include <linux/mm.h>
  9#include <linux/sysctl.h>
 10#include <asm/mmu_context.h>
 11#include <asm/pgalloc.h>
 12#include <asm/gmap.h>
 13#include <asm/tlb.h>
 14#include <asm/tlbflush.h>
 15
 16#ifdef CONFIG_PGSTE
 17
 18static int page_table_allocate_pgste_min = 0;
 19static int page_table_allocate_pgste_max = 1;
 20int page_table_allocate_pgste = 0;
 21EXPORT_SYMBOL(page_table_allocate_pgste);
 22
 23static struct ctl_table page_table_sysctl[] = {
 24	{
 25		.procname	= "allocate_pgste",
 26		.data		= &page_table_allocate_pgste,
 27		.maxlen		= sizeof(int),
 28		.mode		= S_IRUGO | S_IWUSR,
 29		.proc_handler	= proc_dointvec,
 30		.extra1		= &page_table_allocate_pgste_min,
 31		.extra2		= &page_table_allocate_pgste_max,
 32	},
 33	{ }
 34};
 35
 36static struct ctl_table page_table_sysctl_dir[] = {
 37	{
 38		.procname	= "vm",
 39		.maxlen		= 0,
 40		.mode		= 0555,
 41		.child		= page_table_sysctl,
 42	},
 43	{ }
 44};
 45
 46static int __init page_table_register_sysctl(void)
 47{
 48	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
 49}
 50__initcall(page_table_register_sysctl);
 51
 52#endif /* CONFIG_PGSTE */
 53
 54unsigned long *crst_table_alloc(struct mm_struct *mm)
 55{
 56	struct page *page = alloc_pages(GFP_KERNEL, 2);
 57
 58	if (!page)
 59		return NULL;
 60	return (unsigned long *) page_to_phys(page);
 61}
 62
 63void crst_table_free(struct mm_struct *mm, unsigned long *table)
 64{
 65	free_pages((unsigned long) table, 2);
 66}
 67
 68static void __crst_table_upgrade(void *arg)
 69{
 70	struct mm_struct *mm = arg;
 71
 72	if (current->active_mm == mm) {
 73		clear_user_asce();
 74		set_user_asce(mm);
 75	}
 76	__tlb_flush_local();
 77}
 78
 79int crst_table_upgrade(struct mm_struct *mm)
 80{
 81	unsigned long *table, *pgd;
 82
 83	/* upgrade should only happen from 3 to 4 levels */
 84	BUG_ON(mm->context.asce_limit != (1UL << 42));
 85
 86	table = crst_table_alloc(mm);
 87	if (!table)
 88		return -ENOMEM;
 89
 90	spin_lock_bh(&mm->page_table_lock);
 91	pgd = (unsigned long *) mm->pgd;
 92	crst_table_init(table, _REGION2_ENTRY_EMPTY);
 93	pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
 94	mm->pgd = (pgd_t *) table;
 95	mm->context.asce_limit = 1UL << 53;
 96	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
 97			   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
 98	mm->task_size = mm->context.asce_limit;
 99	spin_unlock_bh(&mm->page_table_lock);
100
101	on_each_cpu(__crst_table_upgrade, mm, 0);
102	return 0;
103}
104
105void crst_table_downgrade(struct mm_struct *mm)
106{
107	pgd_t *pgd;
108
109	/* downgrade should only happen from 3 to 2 levels (compat only) */
110	BUG_ON(mm->context.asce_limit != (1UL << 42));
111
112	if (current->active_mm == mm) {
113		clear_user_asce();
114		__tlb_flush_mm(mm);
115	}
116
117	pgd = mm->pgd;
118	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
119	mm->context.asce_limit = 1UL << 31;
120	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
121			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
122	mm->task_size = mm->context.asce_limit;
123	crst_table_free(mm, (unsigned long *) pgd);
124
125	if (current->active_mm == mm)
126		set_user_asce(mm);
127}
128
129static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
130{
131	unsigned int old, new;
132
133	do {
134		old = atomic_read(v);
135		new = old ^ bits;
136	} while (atomic_cmpxchg(v, old, new) != old);
137	return new;
138}
139
140#ifdef CONFIG_PGSTE
141
142struct page *page_table_alloc_pgste(struct mm_struct *mm)
143{
144	struct page *page;
145	unsigned long *table;
146
147	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
148	if (page) {
149		table = (unsigned long *) page_to_phys(page);
150		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
151		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
152	}
153	return page;
154}
155
156void page_table_free_pgste(struct page *page)
157{
158	__free_page(page);
159}
160
161#endif /* CONFIG_PGSTE */
162
163/*
164 * page table entry allocation/free routines.
165 */
166unsigned long *page_table_alloc(struct mm_struct *mm)
167{
168	unsigned long *table;
169	struct page *page;
170	unsigned int mask, bit;
171
172	/* Try to get a fragment of a 4K page as a 2K page table */
173	if (!mm_alloc_pgste(mm)) {
174		table = NULL;
175		spin_lock_bh(&mm->context.pgtable_lock);
176		if (!list_empty(&mm->context.pgtable_list)) {
177			page = list_first_entry(&mm->context.pgtable_list,
178						struct page, lru);
179			mask = atomic_read(&page->_mapcount);
180			mask = (mask | (mask >> 4)) & 3;
181			if (mask != 3) {
182				table = (unsigned long *) page_to_phys(page);
183				bit = mask & 1;		/* =1 -> second 2K */
184				if (bit)
185					table += PTRS_PER_PTE;
186				atomic_xor_bits(&page->_mapcount, 1U << bit);
187				list_del(&page->lru);
188			}
189		}
190		spin_unlock_bh(&mm->context.pgtable_lock);
191		if (table)
192			return table;
193	}
194	/* Allocate a fresh page */
195	page = alloc_page(GFP_KERNEL);
196	if (!page)
197		return NULL;
198	if (!pgtable_page_ctor(page)) {
199		__free_page(page);
200		return NULL;
201	}
202	/* Initialize page table */
203	table = (unsigned long *) page_to_phys(page);
204	if (mm_alloc_pgste(mm)) {
205		/* Return 4K page table with PGSTEs */
206		atomic_set(&page->_mapcount, 3);
207		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
208		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
209	} else {
210		/* Return the first 2K fragment of the page */
211		atomic_set(&page->_mapcount, 1);
212		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
213		spin_lock_bh(&mm->context.pgtable_lock);
214		list_add(&page->lru, &mm->context.pgtable_list);
215		spin_unlock_bh(&mm->context.pgtable_lock);
216	}
217	return table;
218}
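In this older version a 4K page is split into two 2K page tables and page->_mapcount doubles as a tiny bitmap: bits 0-1 say which halves are handed out, while bits 4-5 (set by page_table_free_rcu() below) mark halves whose free is still deferred behind RCU. The mask arithmetic at the top of page_table_alloc() then works out as follows:

	/*
	 * mask = (_mapcount | _mapcount >> 4) & 3, bit = mask & 1:
	 *
	 *   _mapcount 0x01 (lower 2K in use)             -> mask 1, hand out upper 2K
	 *   _mapcount 0x02 (upper 2K in use)             -> mask 2, hand out lower 2K
	 *   _mapcount 0x03 (both in use)                 -> mask 3, page is skipped
	 *   _mapcount 0x21 (lower in use, upper pending
	 *                   RCU free)                    -> mask 3, also skipped
	 */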
219
220void page_table_free(struct mm_struct *mm, unsigned long *table)
221{
222	struct page *page;
223	unsigned int bit, mask;
224
225	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
226	if (!mm_alloc_pgste(mm)) {
227		/* Free 2K page table fragment of a 4K page */
228		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
229		spin_lock_bh(&mm->context.pgtable_lock);
230		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
231		if (mask & 3)
232			list_add(&page->lru, &mm->context.pgtable_list);
233		else
234			list_del(&page->lru);
235		spin_unlock_bh(&mm->context.pgtable_lock);
236		if (mask != 0)
237			return;
238	}
239
240	pgtable_page_dtor(page);
241	atomic_set(&page->_mapcount, -1);
242	__free_page(page);
243}
244
245void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
246			 unsigned long vmaddr)
247{
248	struct mm_struct *mm;
249	struct page *page;
250	unsigned int bit, mask;
251
252	mm = tlb->mm;
253	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
254	if (mm_alloc_pgste(mm)) {
255		gmap_unlink(mm, table, vmaddr);
256		table = (unsigned long *) (__pa(table) | 3);
257		tlb_remove_table(tlb, table);
258		return;
259	}
260	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
261	spin_lock_bh(&mm->context.pgtable_lock);
262	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
263	if (mask & 3)
264		list_add_tail(&page->lru, &mm->context.pgtable_list);
265	else
266		list_del(&page->lru);
267	spin_unlock_bh(&mm->context.pgtable_lock);
268	table = (unsigned long *) (__pa(table) | (1U << bit));
269	tlb_remove_table(tlb, table);
270}
271
272static void __tlb_remove_table(void *_table)
273{
274	unsigned int mask = (unsigned long) _table & 3;
275	void *table = (void *)((unsigned long) _table ^ mask);
276	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
277
278	switch (mask) {
279	case 0:		/* pmd or pud */
280		free_pages((unsigned long) table, 2);
281		break;
282	case 1:		/* lower 2K of a 4K page table */
283	case 2:		/* higher 2K of a 4K page table */
284		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
285			break;
286		/* fallthrough */
287	case 3:		/* 4K page table with pgstes */
288		pgtable_page_dtor(page);
289		atomic_set(&page->_mapcount, -1);
290		__free_page(page);
291		break;
292	}
293}
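Since only a single pointer reaches tlb_remove_table(), the table's type is smuggled into its low two bits (the table is at least 2K aligned, so those bits are otherwise zero): 0 for a 16K CRST table, 1 or 2 for the lower/upper 2K fragment, 3 for a 4K table with PGSTEs. A worked example for a hypothetical lower 2K fragment at 0x12345000:

	/* encode in page_table_free_rcu():  0x12345000 | (1U << 0) = 0x12345001    */
	/* decode in __tlb_remove_table():   mask  = 0x12345001 & 3 = 1 (lower 2K)  */
	/*                                   table = 0x12345001 ^ 1 = 0x12345000    */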
294
295static void tlb_remove_table_smp_sync(void *arg)
296{
297	/* Simply deliver the interrupt */
298}
299
300static void tlb_remove_table_one(void *table)
301{
302	/*
303	 * This isn't an RCU grace period and hence the page-tables cannot be
304	 * assumed to be actually RCU-freed.
305	 *
306	 * It is however sufficient for software page-table walkers that rely
307	 * on IRQ disabling. See the comment near struct mmu_table_batch.
308	 */
309	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
310	__tlb_remove_table(table);
311}
312
313static void tlb_remove_table_rcu(struct rcu_head *head)
314{
315	struct mmu_table_batch *batch;
316	int i;
317
318	batch = container_of(head, struct mmu_table_batch, rcu);
319
320	for (i = 0; i < batch->nr; i++)
321		__tlb_remove_table(batch->tables[i]);
322
323	free_page((unsigned long)batch);
324}
325
326void tlb_table_flush(struct mmu_gather *tlb)
327{
328	struct mmu_table_batch **batch = &tlb->batch;
329
330	if (*batch) {
331		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
332		*batch = NULL;
333	}
334}
335
336void tlb_remove_table(struct mmu_gather *tlb, void *table)
337{
338	struct mmu_table_batch **batch = &tlb->batch;
339
340	tlb->mm->context.flush_mm = 1;
341	if (*batch == NULL) {
342		*batch = (struct mmu_table_batch *)
343			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
344		if (*batch == NULL) {
345			__tlb_flush_mm_lazy(tlb->mm);
346			tlb_remove_table_one(table);
347			return;
348		}
349		(*batch)->nr = 0;
350	}
351	(*batch)->tables[(*batch)->nr++] = table;
352	if ((*batch)->nr == MAX_TABLE_BATCH)
353		tlb_flush_mmu(tlb);
354}
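Taken together, the old batching works like this: freed tables, with their type tag, are collected into a per-mmu_gather batch page by tlb_remove_table(); tlb_table_flush() later hands the batch to call_rcu_sched(), and only after the grace period does tlb_remove_table_rcu() run __tlb_remove_table() on each entry. If no batch page can be allocated, tlb_remove_table_one() falls back to an IPI-synchronized immediate free. A flow sketch (editorial, not upstream text):

	/*
	 *   page_table_free_rcu() and friends
	 *       -> tlb_remove_table()        collect tagged pointer in *batch
	 *       -> tlb_table_flush()         call_rcu_sched() on the batch
	 *       -> tlb_remove_table_rcu()    after a grace period, call
	 *                                    __tlb_remove_table() per entry
	 */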