v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  Page table allocation functions
  4 *
  5 *    Copyright IBM Corp. 2016
  6 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  7 */
  8
  9#include <linux/sysctl.h>
 10#include <linux/slab.h>
 11#include <linux/mm.h>
 12#include <asm/mmu_context.h>
 13#include <asm/pgalloc.h>
 14#include <asm/gmap.h>
 15#include <asm/tlb.h>
 16#include <asm/tlbflush.h>
 17
 18#ifdef CONFIG_PGSTE
 19
 20int page_table_allocate_pgste = 0;
 21EXPORT_SYMBOL(page_table_allocate_pgste);
 22
 23static struct ctl_table page_table_sysctl[] = {
 24	{
 25		.procname	= "allocate_pgste",
 26		.data		= &page_table_allocate_pgste,
 27		.maxlen		= sizeof(int),
 28		.mode		= S_IRUGO | S_IWUSR,
 29		.proc_handler	= proc_dointvec_minmax,
 30		.extra1		= SYSCTL_ZERO,
 31		.extra2		= SYSCTL_ONE,
 32	},
 33	{ }
 34};
 35
 36static struct ctl_table page_table_sysctl_dir[] = {
 37	{
 38		.procname	= "vm",
 39		.maxlen		= 0,
 40		.mode		= 0555,
 41		.child		= page_table_sysctl,
 42	},
 43	{ }
 44};
 45
 46static int __init page_table_register_sysctl(void)
 47{
 48	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
 49}
 50__initcall(page_table_register_sysctl);
 51
 52#endif /* CONFIG_PGSTE */
 53
 54unsigned long *crst_table_alloc(struct mm_struct *mm)
 55{
 56	struct page *page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
 57
 58	if (!page)
 59		return NULL;
 60	arch_set_page_dat(page, CRST_ALLOC_ORDER);
 61	return (unsigned long *) page_to_virt(page);
 62}
 63
 64void crst_table_free(struct mm_struct *mm, unsigned long *table)
 65{
 66	free_pages((unsigned long)table, CRST_ALLOC_ORDER);
 67}
 68
 69static void __crst_table_upgrade(void *arg)
 70{
 71	struct mm_struct *mm = arg;
 72
 73	/* change all active ASCEs to avoid the creation of new TLBs */
 74	if (current->active_mm == mm) {
 75		S390_lowcore.user_asce = mm->context.asce;
 76		__ctl_load(S390_lowcore.user_asce, 7, 7);
 77	}
 78	__tlb_flush_local();
 79}
 80
 81int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
 82{
 83	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
 84	unsigned long asce_limit = mm->context.asce_limit;
 85
 86	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
 87	VM_BUG_ON(asce_limit < _REGION2_SIZE);
 88
 89	if (end <= asce_limit)
 90		return 0;
 91
 92	if (asce_limit == _REGION2_SIZE) {
 93		p4d = crst_table_alloc(mm);
 94		if (unlikely(!p4d))
 95			goto err_p4d;
 96		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
 97	}
 98	if (end > _REGION1_SIZE) {
 99		pgd = crst_table_alloc(mm);
100		if (unlikely(!pgd))
101			goto err_pgd;
102		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
103	}
104
105	spin_lock_bh(&mm->page_table_lock);
106
107	/*
108	 * This routine gets called with mmap_lock held and there is no
109	 * reason to optimize for the other case. However, if that would
110	 * ever change, the below check will let us know.
111	 */
112	VM_BUG_ON(asce_limit != mm->context.asce_limit);
113
114	if (p4d) {
115		__pgd = (unsigned long *) mm->pgd;
116		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
117		mm->pgd = (pgd_t *) p4d;
118		mm->context.asce_limit = _REGION1_SIZE;
119		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
120			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
121		mm_inc_nr_puds(mm);
122	}
123	if (pgd) {
124		__pgd = (unsigned long *) mm->pgd;
125		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
126		mm->pgd = (pgd_t *) pgd;
127		mm->context.asce_limit = TASK_SIZE_MAX;
128		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
129			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
130	}
131
132	spin_unlock_bh(&mm->page_table_lock);
133
134	on_each_cpu(__crst_table_upgrade, mm, 0);
135
136	return 0;
137
138err_pgd:
139	crst_table_free(mm, p4d);
140err_p4d:
141	return -ENOMEM;
142}
143
144static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
145{
146	unsigned int old, new;
147
148	do {
149		old = atomic_read(v);
150		new = old ^ bits;
151	} while (atomic_cmpxchg(v, old, new) != old);
152	return new;
153}
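atomic_xor_bits() above is a plain compare-and-swap loop: it retries until it flips exactly the requested bits and returns the resulting value. As a minimal standalone sketch (userspace C11 atomics rather than the kernel's atomic_t API, with hypothetical names), the same pattern looks like this:

#include <stdatomic.h>
#include <stdio.h>

static unsigned int xor_bits(atomic_uint *v, unsigned int bits)
{
	unsigned int old = atomic_load(v);
	unsigned int new;

	do {
		new = old ^ bits;
		/* on failure, 'old' is refreshed and the loop retries */
	} while (!atomic_compare_exchange_weak(v, &old, new));
	return new;
}

int main(void)
{
	atomic_uint byte = 0;

	printf("0x%02x\n", xor_bits(&byte, 0x01U));	/* 0x01: allocate the lower 2K fragment */
	printf("0x%02x\n", xor_bits(&byte, 0x11U));	/* 0x10: free it and mark it pending */
	return 0;
}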
154
155#ifdef CONFIG_PGSTE
156
157struct page *page_table_alloc_pgste(struct mm_struct *mm)
158{
159	struct page *page;
160	u64 *table;
161
162	page = alloc_page(GFP_KERNEL);
163	if (page) {
164		table = (u64 *)page_to_virt(page);
165		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
166		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
167	}
168	return page;
169}
170
171void page_table_free_pgste(struct page *page)
172{
173	__free_page(page);
174}
175
176#endif /* CONFIG_PGSTE */
177
178/*
179 * A 2KB-pgtable is either the upper or the lower half of a normal page.
180 * The second half of the page may be unused or used as another
181 * 2KB-pgtable.
182 *
183 * Whenever possible the parent page for a new 2KB-pgtable is picked
184 * from the list of partially allocated pages mm_context_t::pgtable_list.
185 * In case the list is empty a new parent page is allocated and added to
186 * the list.
187 *
188 * When a parent page gets fully allocated it contains 2KB-pgtables in both
189 * upper and lower halves and is removed from mm_context_t::pgtable_list.
190 *
191 * When a 2KB-pgtable is freed from a fully allocated parent page, that
192 * page turns partially allocated and is added to mm_context_t::pgtable_list.
193 *
194 * If a 2KB-pgtable is freed from a partially allocated parent page, that
195 * page turns unused and gets removed from mm_context_t::pgtable_list.
196 * Furthermore, the unused parent page is released.
197 *
198 * As follows from the above, no unallocated or fully allocated parent
199 * pages are contained in mm_context_t::pgtable_list.
200 *
201 * The upper byte (bits 24-31) of the parent page _refcount is used
202 * for tracking contained 2KB-pgtables and has the following format:
203 *
204 *   PP  AA
205 * 01234567    upper byte (bits 24-31) of struct page::_refcount
206 *   ||  ||
207 *   ||  |+--- upper 2KB-pgtable is allocated
208 *   ||  +---- lower 2KB-pgtable is allocated
209 *   |+------- upper 2KB-pgtable is pending for removal
210 *   +-------- lower 2KB-pgtable is pending for removal
211 *
212 * (See commit 620b4e903179 ("s390: use _refcount for pgtables") on why
213 * using _refcount is possible).
214 *
215 * When a 2KB-pgtable is allocated, the corresponding AA bit is set to 1.
216 * The parent page is either:
217 *   - added to mm_context_t::pgtable_list in case the second half of the
218 *     parent page is still unallocated;
219 *   - removed from mm_context_t::pgtable_list in case both halves of the
220 *     parent page are allocated;
221 * These operations are protected with mm_context_t::lock.
222 *
223 * When a 2KB-pgtable is deallocated, the corresponding AA bit is set to 0
224 * and the corresponding PP bit is set to 1 in a single atomic operation.
225 * Thus, PP and AA bits corresponding to the same 2KB-pgtable are mutually
226 * exclusive and may never be both set to 1!
227 * The parent page is either:
228 *   - added to mm_context_t::pgtable_list in case the second half of the
229 *     parent page is still allocated;
230 *   - removed from mm_context_t::pgtable_list in case the second half of
231 *     the parent page is unallocated;
232 * These operations are protected with mm_context_t::lock.
233 *
234 * It is important to understand that mm_context_t::lock only protects
235 * mm_context_t::pgtable_list and AA bits, but not the parent page itself
236 * and PP bits.
237 *
238 * Releasing the parent page happens whenever the PP bit turns from 1 to 0,
239 * while both AA bits and the second PP bit are already unset. Then the
240 * parent page does not contain any 2KB-pgtable fragment anymore, and it has
241 * also been removed from mm_context_t::pgtable_list. It is therefore safe
242 * to release the page.
243 *
244 * PGSTE memory spaces use full 4KB-pgtables and do not need most of the
245 * logic described above. Both AA bits are set to 1 to denote a 4KB-pgtable
246 * while the PP bits are never used, nor is such a page ever added to or
247 * removed from mm_context_t::pgtable_list.
248 */
249unsigned long *page_table_alloc(struct mm_struct *mm)
250{
251	unsigned long *table;
252	struct page *page;
253	unsigned int mask, bit;
254
255	/* Try to get a fragment of a 4K page as a 2K page table */
256	if (!mm_alloc_pgste(mm)) {
257		table = NULL;
258		spin_lock_bh(&mm->context.lock);
259		if (!list_empty(&mm->context.pgtable_list)) {
260			page = list_first_entry(&mm->context.pgtable_list,
261						struct page, lru);
262			mask = atomic_read(&page->_refcount) >> 24;
263			/*
264			 * The pending removal bits must also be checked.
265			 * Failure to do so might lead to an impossible
266			 * value (e.g. 0x13 or 0x23) being written to _refcount.
267			 * Such values violate the assumption that pending and
268			 * allocation bits are mutually exclusive, and the rest
269			 * of the code goes off the rails as a result. That
270			 * could lead to a whole bunch of races and corruptions.
271			 */
272			mask = (mask | (mask >> 4)) & 0x03U;
273			if (mask != 0x03U) {
274				table = (unsigned long *) page_to_virt(page);
275				bit = mask & 1;		/* =1 -> second 2K */
276				if (bit)
277					table += PTRS_PER_PTE;
278				atomic_xor_bits(&page->_refcount,
279							0x01U << (bit + 24));
280				list_del(&page->lru);
281			}
282		}
283		spin_unlock_bh(&mm->context.lock);
284		if (table)
285			return table;
286	}
287	/* Allocate a fresh page */
288	page = alloc_page(GFP_KERNEL);
289	if (!page)
290		return NULL;
291	if (!pgtable_pte_page_ctor(page)) {
292		__free_page(page);
293		return NULL;
294	}
295	arch_set_page_dat(page, 0);
296	/* Initialize page table */
297	table = (unsigned long *) page_to_virt(page);
298	if (mm_alloc_pgste(mm)) {
299		/* Return 4K page table with PGSTEs */
300		atomic_xor_bits(&page->_refcount, 0x03U << 24);
301		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
302		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
303	} else {
304		/* Return the first 2K fragment of the page */
305		atomic_xor_bits(&page->_refcount, 0x01U << 24);
306		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
307		spin_lock_bh(&mm->context.lock);
308		list_add(&page->lru, &mm->context.pgtable_list);
309		spin_unlock_bh(&mm->context.lock);
310	}
311	return table;
312}
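The mask arithmetic at the top of page_table_alloc() folds the PP (pending removal) nibble onto the AA (allocated) nibble before testing the two halves, so a fragment that is pending removal counts as busy. A minimal standalone sketch of just that check, with hypothetical names and none of the real struct page machinery:

#include <stdio.h>

#define AA_LOWER 0x01U	/* lower 2K-pgtable allocated */
#define AA_UPPER 0x02U	/* upper 2K-pgtable allocated */
#define PP_LOWER 0x10U	/* lower 2K-pgtable pending removal */
#define PP_UPPER 0x20U	/* upper 2K-pgtable pending removal */

static int has_free_fragment(unsigned int byte)
{
	/* same folding as page_table_alloc(): pending counts as allocated */
	unsigned int mask = (byte | (byte >> 4)) & 0x03U;

	return mask != 0x03U;
}

int main(void)
{
	printf("%d\n", has_free_fragment(AA_LOWER));			/* 1: upper half still free */
	printf("%d\n", has_free_fragment(AA_LOWER | AA_UPPER));	/* 0: both halves in use */
	printf("%d\n", has_free_fragment(AA_LOWER | PP_UPPER));	/* 0: upper half pending removal */
	return 0;
}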
313
314static void page_table_release_check(struct page *page, void *table,
315				     unsigned int half, unsigned int mask)
316{
317	char msg[128];
318
319	if (!IS_ENABLED(CONFIG_DEBUG_VM) || !mask)
320		return;
321	snprintf(msg, sizeof(msg),
322		 "Invalid pgtable %p release half 0x%02x mask 0x%02x",
323		 table, half, mask);
324	dump_page(page, msg);
325}
326
327void page_table_free(struct mm_struct *mm, unsigned long *table)
328{
329	unsigned int mask, bit, half;
330	struct page *page;
331
332	page = virt_to_page(table);
333	if (!mm_alloc_pgste(mm)) {
334		/* Free 2K page table fragment of a 4K page */
335		bit = ((unsigned long) table & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
336		spin_lock_bh(&mm->context.lock);
337		/*
338		 * Mark the page for delayed release. The actual release
339		 * will happen outside of the critical section from this
340		 * function or from __tlb_remove_table()
341		 */
342		mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
343		mask >>= 24;
344		if (mask & 0x03U)
345			list_add(&page->lru, &mm->context.pgtable_list);
346		else
347			list_del(&page->lru);
348		spin_unlock_bh(&mm->context.lock);
349		mask = atomic_xor_bits(&page->_refcount, 0x10U << (bit + 24));
350		mask >>= 24;
351		if (mask != 0x00U)
352			return;
353		half = 0x01U << bit;
354	} else {
355		half = 0x03U;
356		mask = atomic_xor_bits(&page->_refcount, 0x03U << 24);
357		mask >>= 24;
358	}
359
360	page_table_release_check(page, table, half, mask);
361	pgtable_pte_page_dtor(page);
362	__free_page(page);
363}
364
365void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
366			 unsigned long vmaddr)
367{
368	struct mm_struct *mm;
369	struct page *page;
370	unsigned int bit, mask;
371
372	mm = tlb->mm;
373	page = virt_to_page(table);
374	if (mm_alloc_pgste(mm)) {
375		gmap_unlink(mm, table, vmaddr);
376		table = (unsigned long *) ((unsigned long)table | 0x03U);
377		tlb_remove_table(tlb, table);
378		return;
379	}
380	bit = ((unsigned long) table & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
381	spin_lock_bh(&mm->context.lock);
382	/*
383	 * Mark the page for delayed release. The actual release will happen
384	 * outside of the critical section from __tlb_remove_table() or from
385	 * page_table_free()
386	 */
387	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
388	mask >>= 24;
389	if (mask & 0x03U)
390		list_add_tail(&page->lru, &mm->context.pgtable_list);
391	else
392		list_del(&page->lru);
393	spin_unlock_bh(&mm->context.lock);
394	table = (unsigned long *) ((unsigned long) table | (0x01U << bit));
395	tlb_remove_table(tlb, table);
396}
397
398void __tlb_remove_table(void *_table)
399{
400	unsigned int mask = (unsigned long) _table & 0x03U, half = mask;
401	void *table = (void *)((unsigned long) _table ^ mask);
402	struct page *page = virt_to_page(table);
403
404	switch (half) {
405	case 0x00U:	/* pmd, pud, or p4d */
406		free_pages((unsigned long)table, CRST_ALLOC_ORDER);
407		return;
408	case 0x01U:	/* lower 2K of a 4K page table */
409	case 0x02U:	/* higher 2K of a 4K page table */
410		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
411		mask >>= 24;
412		if (mask != 0x00U)
413			return;
414		break;
415	case 0x03U:	/* 4K page table with pgstes */
416		mask = atomic_xor_bits(&page->_refcount, 0x03U << 24);
417		mask >>= 24;
418		break;
419	}
420
421	page_table_release_check(page, table, half, mask);
422	pgtable_pte_page_dtor(page);
423	__free_page(page);
424}
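page_table_free_rcu() passes the fragment type down to __tlb_remove_table() in the two low bits of the table address, which are guaranteed to be zero because 2K-pgtables are at least 2 KB aligned. A small standalone sketch of that pointer-tagging idea (ordinary userspace C, hypothetical names, aligned_alloc() standing in for the page allocator):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* stand-in for a 4K-aligned page of page-table memory */
	void *table = aligned_alloc(4096, 4096);
	unsigned int tag = 0x01U;	/* "lower 2K of a 4K page table" */

	if (!table)
		return 1;

	/* encode: fold the tag into the otherwise-zero low bits */
	void *tagged = (void *)((uintptr_t)table | tag);

	/* decode: recover tag and original pointer, as __tlb_remove_table() does */
	unsigned int mask = (uintptr_t)tagged & 0x03U;
	void *orig = (void *)((uintptr_t)tagged ^ mask);

	printf("tag=%u, pointer restored=%d\n", mask, orig == table);
	free(table);
	return 0;
}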
425
426/*
427 * Base infrastructure required to generate basic asces, region, segment,
428 * and page tables that do not make use of enhanced features like EDAT1.
429 */
430
431static struct kmem_cache *base_pgt_cache;
432
433static unsigned long *base_pgt_alloc(void)
434{
435	unsigned long *table;
436
437	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
438	if (table)
439		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
440	return table;
441}
442
443static void base_pgt_free(unsigned long *table)
444{
445	kmem_cache_free(base_pgt_cache, table);
446}
447
448static unsigned long *base_crst_alloc(unsigned long val)
449{
450	unsigned long *table;
451
452	table =	(unsigned long *)__get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
453	if (table)
454		crst_table_init(table, val);
455	return table;
456}
457
458static void base_crst_free(unsigned long *table)
459{
460	free_pages((unsigned long)table, CRST_ALLOC_ORDER);
461}
462
463#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
464static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
465						   unsigned long end)	\
466{									\
467	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
468									\
469	return (next - 1) < (end - 1) ? next : end;			\
470}
471
472BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
473BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
474BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
475BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
476BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)
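Each BASE_ADDR_END_FUNC() instance above rounds the address up to the next boundary of the given size and clamps it to end; comparing (next - 1) < (end - 1) keeps the clamp correct even when end is 0, meaning "wraps past the top of the address space". A standalone expansion of the page-size case, assuming a 4 KB page size for illustration:

#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 0x1000UL	/* assumed 4 KB page size */

static unsigned long example_page_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long next = (addr + EXAMPLE_PAGE_SIZE) & ~(EXAMPLE_PAGE_SIZE - 1);

	return (next - 1) < (end - 1) ? next : end;
}

int main(void)
{
	printf("%#lx\n", example_page_addr_end(0x1234, 0x5000));	/* 0x2000: next page boundary */
	printf("%#lx\n", example_page_addr_end(0x1234, 0x1800));	/* 0x1800: clamped to end */
	return 0;
}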
477
478static inline unsigned long base_lra(unsigned long address)
479{
480	unsigned long real;
481
482	asm volatile(
483		"	lra	%0,0(%1)\n"
484		: "=d" (real) : "a" (address) : "cc");
485	return real;
486}
487
488static int base_page_walk(unsigned long *origin, unsigned long addr,
489			  unsigned long end, int alloc)
490{
491	unsigned long *pte, next;
492
493	if (!alloc)
494		return 0;
495	pte = origin;
496	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
497	do {
498		next = base_page_addr_end(addr, end);
499		*pte = base_lra(addr);
500	} while (pte++, addr = next, addr < end);
501	return 0;
502}
503
504static int base_segment_walk(unsigned long *origin, unsigned long addr,
505			     unsigned long end, int alloc)
506{
507	unsigned long *ste, next, *table;
508	int rc;
509
510	ste = origin;
511	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
512	do {
513		next = base_segment_addr_end(addr, end);
514		if (*ste & _SEGMENT_ENTRY_INVALID) {
515			if (!alloc)
516				continue;
517			table = base_pgt_alloc();
518			if (!table)
519				return -ENOMEM;
520			*ste = __pa(table) | _SEGMENT_ENTRY;
521		}
522		table = __va(*ste & _SEGMENT_ENTRY_ORIGIN);
523		rc = base_page_walk(table, addr, next, alloc);
524		if (rc)
525			return rc;
526		if (!alloc)
527			base_pgt_free(table);
528		cond_resched();
529	} while (ste++, addr = next, addr < end);
530	return 0;
531}
532
533static int base_region3_walk(unsigned long *origin, unsigned long addr,
534			     unsigned long end, int alloc)
535{
536	unsigned long *rtte, next, *table;
537	int rc;
538
539	rtte = origin;
540	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
541	do {
542		next = base_region3_addr_end(addr, end);
543		if (*rtte & _REGION_ENTRY_INVALID) {
544			if (!alloc)
545				continue;
546			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
547			if (!table)
548				return -ENOMEM;
549			*rtte = __pa(table) | _REGION3_ENTRY;
550		}
551		table = __va(*rtte & _REGION_ENTRY_ORIGIN);
552		rc = base_segment_walk(table, addr, next, alloc);
553		if (rc)
554			return rc;
555		if (!alloc)
556			base_crst_free(table);
557	} while (rtte++, addr = next, addr < end);
558	return 0;
559}
560
561static int base_region2_walk(unsigned long *origin, unsigned long addr,
562			     unsigned long end, int alloc)
563{
564	unsigned long *rste, next, *table;
565	int rc;
566
567	rste = origin;
568	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
569	do {
570		next = base_region2_addr_end(addr, end);
571		if (*rste & _REGION_ENTRY_INVALID) {
572			if (!alloc)
573				continue;
574			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
575			if (!table)
576				return -ENOMEM;
577			*rste = __pa(table) | _REGION2_ENTRY;
578		}
579		table = __va(*rste & _REGION_ENTRY_ORIGIN);
580		rc = base_region3_walk(table, addr, next, alloc);
581		if (rc)
582			return rc;
583		if (!alloc)
584			base_crst_free(table);
585	} while (rste++, addr = next, addr < end);
586	return 0;
587}
588
589static int base_region1_walk(unsigned long *origin, unsigned long addr,
590			     unsigned long end, int alloc)
591{
592	unsigned long *rfte, next, *table;
593	int rc;
594
595	rfte = origin;
596	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
597	do {
598		next = base_region1_addr_end(addr, end);
599		if (*rfte & _REGION_ENTRY_INVALID) {
600			if (!alloc)
601				continue;
602			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
603			if (!table)
604				return -ENOMEM;
605			*rfte = __pa(table) | _REGION1_ENTRY;
606		}
607		table = __va(*rfte & _REGION_ENTRY_ORIGIN);
608		rc = base_region2_walk(table, addr, next, alloc);
609		if (rc)
610			return rc;
611		if (!alloc)
612			base_crst_free(table);
613	} while (rfte++, addr = next, addr < end);
614	return 0;
615}
616
617/**
618 * base_asce_free - free asce and tables returned from base_asce_alloc()
619 * @asce: asce to be freed
620 *
621 * Frees all region, segment, and page tables that were allocated with a
622 * corresponding base_asce_alloc() call.
623 */
624void base_asce_free(unsigned long asce)
625{
626	unsigned long *table = __va(asce & _ASCE_ORIGIN);
627
628	if (!asce)
629		return;
630	switch (asce & _ASCE_TYPE_MASK) {
631	case _ASCE_TYPE_SEGMENT:
632		base_segment_walk(table, 0, _REGION3_SIZE, 0);
633		break;
634	case _ASCE_TYPE_REGION3:
635		base_region3_walk(table, 0, _REGION2_SIZE, 0);
636		break;
637	case _ASCE_TYPE_REGION2:
638		base_region2_walk(table, 0, _REGION1_SIZE, 0);
639		break;
640	case _ASCE_TYPE_REGION1:
641		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
642		break;
643	}
644	base_crst_free(table);
645}
646
647static int base_pgt_cache_init(void)
648{
649	static DEFINE_MUTEX(base_pgt_cache_mutex);
650	unsigned long sz = _PAGE_TABLE_SIZE;
651
652	if (base_pgt_cache)
653		return 0;
654	mutex_lock(&base_pgt_cache_mutex);
655	if (!base_pgt_cache)
656		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
657	mutex_unlock(&base_pgt_cache_mutex);
658	return base_pgt_cache ? 0 : -ENOMEM;
659}
660
661/**
662 * base_asce_alloc - create kernel mapping without enhanced DAT features
663 * @addr: virtual start address of kernel mapping
664 * @num_pages: number of consecutive pages
665 *
666 * Generate an asce, including all required region, segment and page tables,
667 * that can be used to access the virtual kernel mapping. The difference is
668 * that the returned asce does not make use of any enhanced DAT features like
669 * e.g. large pages. This is required for some I/O functions that pass an
670 * asce, like e.g. some service call requests.
671 *
672 * Note: the returned asce may NEVER be attached to any cpu. It may only be
673 *	 used for I/O requests. tlb entries that might result because the
674 *	 asce was attached to a cpu won't be cleared.
675 */
676unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
677{
678	unsigned long asce, *table, end;
679	int rc;
680
681	if (base_pgt_cache_init())
682		return 0;
683	end = addr + num_pages * PAGE_SIZE;
684	if (end <= _REGION3_SIZE) {
685		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
686		if (!table)
687			return 0;
688		rc = base_segment_walk(table, addr, end, 1);
689		asce = __pa(table) | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
690	} else if (end <= _REGION2_SIZE) {
691		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
692		if (!table)
693			return 0;
694		rc = base_region3_walk(table, addr, end, 1);
695		asce = __pa(table) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
696	} else if (end <= _REGION1_SIZE) {
697		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
698		if (!table)
699			return 0;
700		rc = base_region2_walk(table, addr, end, 1);
701		asce = __pa(table) | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
702	} else {
703		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
704		if (!table)
705			return 0;
706		rc = base_region1_walk(table, addr, end, 1);
707		asce = __pa(table) | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
708	}
709	if (rc) {
710		base_asce_free(asce);
711		asce = 0;
712	}
713	return asce;
714}
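base_asce_alloc() picks the shallowest table type that can still cover [addr, addr + num_pages * PAGE_SIZE): a bare segment table for mappings up to 2 GB, then region-third, region-second and region-first tables as the mapping grows. A standalone sketch of just that dispatch; the size constants below mirror the s390 _REGION*_SIZE values but are hard-coded here as assumptions:

#include <stdio.h>

#define EX_PAGE_SIZE	4096UL
#define EX_REGION3_SIZE	(1UL << 31)	/* 2 GB: a segment table suffices */
#define EX_REGION2_SIZE	(1UL << 42)	/* 4 TB: needs a region-third table */
#define EX_REGION1_SIZE	(1UL << 53)	/* 8 PB: needs a region-second table */

static const char *asce_type_for(unsigned long addr, unsigned long num_pages)
{
	unsigned long end = addr + num_pages * EX_PAGE_SIZE;

	if (end <= EX_REGION3_SIZE)
		return "segment table";
	if (end <= EX_REGION2_SIZE)
		return "region-third table";
	if (end <= EX_REGION1_SIZE)
		return "region-second table";
	return "region-first table";
}

int main(void)
{
	printf("%s\n", asce_type_for(0x10000, 16));	/* tiny mapping -> segment table */
	printf("%s\n", asce_type_for(0, 1UL << 20));	/* 4 GB mapping -> region-third table */
	return 0;
}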
v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  Page table allocation functions
  4 *
  5 *    Copyright IBM Corp. 2016
  6 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  7 */
  8
  9#include <linux/sysctl.h>
 10#include <linux/slab.h>
 11#include <linux/mm.h>
 12#include <asm/mmu_context.h>
 13#include <asm/pgalloc.h>
 14#include <asm/gmap.h>
 15#include <asm/tlb.h>
 16#include <asm/tlbflush.h>
 17
 18#ifdef CONFIG_PGSTE
 19
 20static int page_table_allocate_pgste_min = 0;
 21static int page_table_allocate_pgste_max = 1;
 22int page_table_allocate_pgste = 0;
 23EXPORT_SYMBOL(page_table_allocate_pgste);
 24
 25static struct ctl_table page_table_sysctl[] = {
 26	{
 27		.procname	= "allocate_pgste",
 28		.data		= &page_table_allocate_pgste,
 29		.maxlen		= sizeof(int),
 30		.mode		= S_IRUGO | S_IWUSR,
 31		.proc_handler	= proc_dointvec,
 32		.extra1		= &page_table_allocate_pgste_min,
 33		.extra2		= &page_table_allocate_pgste_max,
 34	},
 35	{ }
 36};
 37
 38static struct ctl_table page_table_sysctl_dir[] = {
 39	{
 40		.procname	= "vm",
 41		.maxlen		= 0,
 42		.mode		= 0555,
 43		.child		= page_table_sysctl,
 44	},
 45	{ }
 46};
 47
 48static int __init page_table_register_sysctl(void)
 49{
 50	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
 51}
 52__initcall(page_table_register_sysctl);
 53
 54#endif /* CONFIG_PGSTE */
 55
 56unsigned long *crst_table_alloc(struct mm_struct *mm)
 57{
 58	struct page *page = alloc_pages(GFP_KERNEL, 2);
 59
 60	if (!page)
 61		return NULL;
 62	arch_set_page_dat(page, 2);
 63	return (unsigned long *) page_to_phys(page);
 64}
 65
 66void crst_table_free(struct mm_struct *mm, unsigned long *table)
 67{
 68	free_pages((unsigned long) table, 2);
 69}
 70
 71static void __crst_table_upgrade(void *arg)
 72{
 73	struct mm_struct *mm = arg;
 74
 75	if (current->active_mm == mm)
 76		set_user_asce(mm);
 77	__tlb_flush_local();
 78}
 79
 80int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
 81{
 82	unsigned long *table, *pgd;
 83	int rc, notify;
 84
 85	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
 86	VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
 87	rc = 0;
 88	notify = 0;
 89	while (mm->context.asce_limit < end) {
 90		table = crst_table_alloc(mm);
 91		if (!table) {
 92			rc = -ENOMEM;
 93			break;
 94		}
 95		spin_lock_bh(&mm->page_table_lock);
 96		pgd = (unsigned long *) mm->pgd;
 97		if (mm->context.asce_limit == _REGION2_SIZE) {
 98			crst_table_init(table, _REGION2_ENTRY_EMPTY);
 99			p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
100			mm->pgd = (pgd_t *) table;
101			mm->context.asce_limit = _REGION1_SIZE;
102			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
103				_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
104		} else {
105			crst_table_init(table, _REGION1_ENTRY_EMPTY);
106			pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
107			mm->pgd = (pgd_t *) table;
108			mm->context.asce_limit = -PAGE_SIZE;
109			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
110				_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
111		}
112		notify = 1;
113		spin_unlock_bh(&mm->page_table_lock);
114	}
115	if (notify)
116		on_each_cpu(__crst_table_upgrade, mm, 0);
117	return rc;
118}
119
120void crst_table_downgrade(struct mm_struct *mm)
121{
122	pgd_t *pgd;
123
124	/* downgrade should only happen from 3 to 2 levels (compat only) */
125	VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);
126
127	if (current->active_mm == mm) {
128		clear_user_asce();
129		__tlb_flush_mm(mm);
130	}
131
132	pgd = mm->pgd;
133	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
134	mm->context.asce_limit = _REGION3_SIZE;
135	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
136			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
137	crst_table_free(mm, (unsigned long *) pgd);
138
139	if (current->active_mm == mm)
140		set_user_asce(mm);
141}
142
143static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
144{
145	unsigned int old, new;
146
147	do {
148		old = atomic_read(v);
149		new = old ^ bits;
150	} while (atomic_cmpxchg(v, old, new) != old);
151	return new;
152}
153
154#ifdef CONFIG_PGSTE
155
156struct page *page_table_alloc_pgste(struct mm_struct *mm)
157{
158	struct page *page;
159	u64 *table;
160
161	page = alloc_page(GFP_KERNEL);
162	if (page) {
163		table = (u64 *)page_to_phys(page);
164		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
165		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
166	}
167	return page;
168}
169
170void page_table_free_pgste(struct page *page)
171{
172	__free_page(page);
173}
174
175#endif /* CONFIG_PGSTE */
176
177/*
178 * page table entry allocation/free routines.
179 */
180unsigned long *page_table_alloc(struct mm_struct *mm)
181{
182	unsigned long *table;
183	struct page *page;
184	unsigned int mask, bit;
185
186	/* Try to get a fragment of a 4K page as a 2K page table */
187	if (!mm_alloc_pgste(mm)) {
188		table = NULL;
189		spin_lock_bh(&mm->context.lock);
190		if (!list_empty(&mm->context.pgtable_list)) {
191			page = list_first_entry(&mm->context.pgtable_list,
192						struct page, lru);
193			mask = atomic_read(&page->_mapcount);
194			mask = (mask | (mask >> 4)) & 3;
195			if (mask != 3) {
196				table = (unsigned long *) page_to_phys(page);
197				bit = mask & 1;		/* =1 -> second 2K */
198				if (bit)
199					table += PTRS_PER_PTE;
200				atomic_xor_bits(&page->_mapcount, 1U << bit);
201				list_del(&page->lru);
202			}
203		}
204		spin_unlock_bh(&mm->context.lock);
205		if (table)
206			return table;
207	}
208	/* Allocate a fresh page */
209	page = alloc_page(GFP_KERNEL);
210	if (!page)
211		return NULL;
212	if (!pgtable_page_ctor(page)) {
213		__free_page(page);
214		return NULL;
215	}
216	arch_set_page_dat(page, 0);
217	/* Initialize page table */
218	table = (unsigned long *) page_to_phys(page);
219	if (mm_alloc_pgste(mm)) {
220		/* Return 4K page table with PGSTEs */
221		atomic_set(&page->_mapcount, 3);
222		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
223		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
224	} else {
225		/* Return the first 2K fragment of the page */
226		atomic_set(&page->_mapcount, 1);
227		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
228		spin_lock_bh(&mm->context.lock);
229		list_add(&page->lru, &mm->context.pgtable_list);
230		spin_unlock_bh(&mm->context.lock);
231	}
232	return table;
233}
234
235void page_table_free(struct mm_struct *mm, unsigned long *table)
236{
237	struct page *page;
238	unsigned int bit, mask;
239
240	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
241	if (!mm_alloc_pgste(mm)) {
242		/* Free 2K page table fragment of a 4K page */
243		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
244		spin_lock_bh(&mm->context.lock);
245		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
246		if (mask & 3)
247			list_add(&page->lru, &mm->context.pgtable_list);
248		else
249			list_del(&page->lru);
250		spin_unlock_bh(&mm->context.lock);
251		if (mask != 0)
252			return;
253	}
254
255	pgtable_page_dtor(page);
256	atomic_set(&page->_mapcount, -1);
257	__free_page(page);
258}
259
260void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
261			 unsigned long vmaddr)
262{
263	struct mm_struct *mm;
264	struct page *page;
265	unsigned int bit, mask;
266
267	mm = tlb->mm;
268	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
269	if (mm_alloc_pgste(mm)) {
270		gmap_unlink(mm, table, vmaddr);
271		table = (unsigned long *) (__pa(table) | 3);
272		tlb_remove_table(tlb, table);
273		return;
274	}
275	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
276	spin_lock_bh(&mm->context.lock);
277	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
278	if (mask & 3)
279		list_add_tail(&page->lru, &mm->context.pgtable_list);
280	else
281		list_del(&page->lru);
282	spin_unlock_bh(&mm->context.lock);
283	table = (unsigned long *) (__pa(table) | (1U << bit));
284	tlb_remove_table(tlb, table);
285}
286
287static void __tlb_remove_table(void *_table)
288{
289	unsigned int mask = (unsigned long) _table & 3;
290	void *table = (void *)((unsigned long) _table ^ mask);
291	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
292
293	switch (mask) {
294	case 0:		/* pmd, pud, or p4d */
295		free_pages((unsigned long) table, 2);
296		break;
297	case 1:		/* lower 2K of a 4K page table */
298	case 2:		/* higher 2K of a 4K page table */
299		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
300			break;
301		/* fallthrough */
302	case 3:		/* 4K page table with pgstes */
303		pgtable_page_dtor(page);
304		atomic_set(&page->_mapcount, -1);
305		__free_page(page);
306		break;
307	}
308}
309
310static void tlb_remove_table_smp_sync(void *arg)
311{
312	/* Simply deliver the interrupt */
313}
314
315static void tlb_remove_table_one(void *table)
316{
317	/*
318	 * This isn't an RCU grace period and hence the page-tables cannot be
319	 * assumed to be actually RCU-freed.
320	 *
321	 * It is however sufficient for software page-table walkers that rely
322	 * on IRQ disabling. See the comment near struct mmu_table_batch.
323	 */
324	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
325	__tlb_remove_table(table);
326}
327
328static void tlb_remove_table_rcu(struct rcu_head *head)
329{
330	struct mmu_table_batch *batch;
331	int i;
332
333	batch = container_of(head, struct mmu_table_batch, rcu);
334
335	for (i = 0; i < batch->nr; i++)
336		__tlb_remove_table(batch->tables[i]);
337
338	free_page((unsigned long)batch);
339}
340
341void tlb_table_flush(struct mmu_gather *tlb)
342{
343	struct mmu_table_batch **batch = &tlb->batch;
344
345	if (*batch) {
346		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
347		*batch = NULL;
348	}
349}
350
351void tlb_remove_table(struct mmu_gather *tlb, void *table)
352{
353	struct mmu_table_batch **batch = &tlb->batch;
354
355	tlb->mm->context.flush_mm = 1;
356	if (*batch == NULL) {
357		*batch = (struct mmu_table_batch *)
358			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
359		if (*batch == NULL) {
360			__tlb_flush_mm_lazy(tlb->mm);
361			tlb_remove_table_one(table);
362			return;
363		}
364		(*batch)->nr = 0;
365	}
366	(*batch)->tables[(*batch)->nr++] = table;
367	if ((*batch)->nr == MAX_TABLE_BATCH)
368		tlb_flush_mmu(tlb);
369}
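tlb_remove_table() above queues table pointers into a page-sized mmu_table_batch and tlb_table_flush() hands the full batch to call_rcu_sched(), so the actual freeing only happens after a grace period; if no batch page can be allocated, tlb_remove_table_one() falls back to an IPI synchronisation followed by an immediate free. A heavily reduced standalone model of just the batching part, with invented names and a synchronous flush standing in for the RCU callback:

#include <stdio.h>
#include <stdlib.h>

#define EX_MAX_BATCH 4	/* the real MAX_TABLE_BATCH is derived from PAGE_SIZE */

struct ex_batch {
	unsigned int nr;
	void *tables[EX_MAX_BATCH];
};

static void ex_flush(struct ex_batch *b)
{
	/* the kernel defers this through call_rcu_sched(); here it is immediate */
	for (unsigned int i = 0; i < b->nr; i++)
		free(b->tables[i]);
	b->nr = 0;
}

static void ex_remove_table(struct ex_batch *b, void *table)
{
	b->tables[b->nr++] = table;
	if (b->nr == EX_MAX_BATCH)	/* batch full: flush, like tlb_remove_table() */
		ex_flush(b);
}

int main(void)
{
	struct ex_batch batch = { .nr = 0 };

	for (int i = 0; i < 6; i++)
		ex_remove_table(&batch, malloc(32));
	ex_flush(&batch);	/* final flush, like tlb_table_flush() */
	return 0;
}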
370
371/*
372 * Base infrastructure required to generate basic asces, region, segment,
373 * and page tables that do not make use of enhanced features like EDAT1.
374 */
375
376static struct kmem_cache *base_pgt_cache;
377
378static unsigned long base_pgt_alloc(void)
379{
380	u64 *table;
381
382	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
383	if (table)
384		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
385	return (unsigned long) table;
386}
387
388static void base_pgt_free(unsigned long table)
389{
390	kmem_cache_free(base_pgt_cache, (void *) table);
391}
392
393static unsigned long base_crst_alloc(unsigned long val)
394{
395	unsigned long table;
396
397	table =	 __get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
398	if (table)
399		crst_table_init((unsigned long *)table, val);
400	return table;
401}
402
403static void base_crst_free(unsigned long table)
404{
405	free_pages(table, CRST_ALLOC_ORDER);
406}
407
408#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
409static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
410						   unsigned long end)	\
411{									\
412	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
413									\
414	return (next - 1) < (end - 1) ? next : end;			\
415}
416
417BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
418BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
419BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
420BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
421BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)
422
423static inline unsigned long base_lra(unsigned long address)
424{
425	unsigned long real;
426
427	asm volatile(
428		"	lra	%0,0(%1)\n"
429		: "=d" (real) : "a" (address) : "cc");
430	return real;
431}
432
433static int base_page_walk(unsigned long origin, unsigned long addr,
434			  unsigned long end, int alloc)
435{
436	unsigned long *pte, next;
437
438	if (!alloc)
439		return 0;
440	pte = (unsigned long *) origin;
441	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
442	do {
443		next = base_page_addr_end(addr, end);
444		*pte = base_lra(addr);
445	} while (pte++, addr = next, addr < end);
446	return 0;
447}
448
449static int base_segment_walk(unsigned long origin, unsigned long addr,
450			     unsigned long end, int alloc)
451{
452	unsigned long *ste, next, table;
453	int rc;
454
455	ste = (unsigned long *) origin;
456	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
457	do {
458		next = base_segment_addr_end(addr, end);
459		if (*ste & _SEGMENT_ENTRY_INVALID) {
460			if (!alloc)
461				continue;
462			table = base_pgt_alloc();
463			if (!table)
464				return -ENOMEM;
465			*ste = table | _SEGMENT_ENTRY;
466		}
467		table = *ste & _SEGMENT_ENTRY_ORIGIN;
468		rc = base_page_walk(table, addr, next, alloc);
469		if (rc)
470			return rc;
471		if (!alloc)
472			base_pgt_free(table);
473		cond_resched();
474	} while (ste++, addr = next, addr < end);
475	return 0;
476}
477
478static int base_region3_walk(unsigned long origin, unsigned long addr,
479			     unsigned long end, int alloc)
480{
481	unsigned long *rtte, next, table;
482	int rc;
483
484	rtte = (unsigned long *) origin;
485	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
486	do {
487		next = base_region3_addr_end(addr, end);
488		if (*rtte & _REGION_ENTRY_INVALID) {
489			if (!alloc)
490				continue;
491			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
492			if (!table)
493				return -ENOMEM;
494			*rtte = table | _REGION3_ENTRY;
495		}
496		table = *rtte & _REGION_ENTRY_ORIGIN;
497		rc = base_segment_walk(table, addr, next, alloc);
498		if (rc)
499			return rc;
500		if (!alloc)
501			base_crst_free(table);
502	} while (rtte++, addr = next, addr < end);
503	return 0;
504}
505
506static int base_region2_walk(unsigned long origin, unsigned long addr,
507			     unsigned long end, int alloc)
508{
509	unsigned long *rste, next, table;
510	int rc;
511
512	rste = (unsigned long *) origin;
513	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
514	do {
515		next = base_region2_addr_end(addr, end);
516		if (*rste & _REGION_ENTRY_INVALID) {
517			if (!alloc)
518				continue;
519			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
520			if (!table)
521				return -ENOMEM;
522			*rste = table | _REGION2_ENTRY;
523		}
524		table = *rste & _REGION_ENTRY_ORIGIN;
525		rc = base_region3_walk(table, addr, next, alloc);
526		if (rc)
527			return rc;
528		if (!alloc)
529			base_crst_free(table);
530	} while (rste++, addr = next, addr < end);
531	return 0;
532}
533
534static int base_region1_walk(unsigned long origin, unsigned long addr,
535			     unsigned long end, int alloc)
536{
537	unsigned long *rfte, next, table;
538	int rc;
539
540	rfte = (unsigned long *) origin;
541	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
542	do {
543		next = base_region1_addr_end(addr, end);
544		if (*rfte & _REGION_ENTRY_INVALID) {
545			if (!alloc)
546				continue;
547			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
548			if (!table)
549				return -ENOMEM;
550			*rfte = table | _REGION1_ENTRY;
551		}
552		table = *rfte & _REGION_ENTRY_ORIGIN;
553		rc = base_region2_walk(table, addr, next, alloc);
554		if (rc)
555			return rc;
556		if (!alloc)
557			base_crst_free(table);
558	} while (rfte++, addr = next, addr < end);
559	return 0;
560}
561
562/**
563 * base_asce_free - free asce and tables returned from base_asce_alloc()
564 * @asce: asce to be freed
565 *
566 * Frees all region, segment, and page tables that were allocated with a
567 * corresponding base_asce_alloc() call.
568 */
569void base_asce_free(unsigned long asce)
570{
571	unsigned long table = asce & _ASCE_ORIGIN;
572
573	if (!asce)
574		return;
575	switch (asce & _ASCE_TYPE_MASK) {
576	case _ASCE_TYPE_SEGMENT:
577		base_segment_walk(table, 0, _REGION3_SIZE, 0);
578		break;
579	case _ASCE_TYPE_REGION3:
580		base_region3_walk(table, 0, _REGION2_SIZE, 0);
581		break;
582	case _ASCE_TYPE_REGION2:
583		base_region2_walk(table, 0, _REGION1_SIZE, 0);
584		break;
585	case _ASCE_TYPE_REGION1:
586		base_region1_walk(table, 0, -_PAGE_SIZE, 0);
587		break;
588	}
589	base_crst_free(table);
590}
591
592static int base_pgt_cache_init(void)
593{
594	static DEFINE_MUTEX(base_pgt_cache_mutex);
595	unsigned long sz = _PAGE_TABLE_SIZE;
596
597	if (base_pgt_cache)
598		return 0;
599	mutex_lock(&base_pgt_cache_mutex);
600	if (!base_pgt_cache)
601		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
602	mutex_unlock(&base_pgt_cache_mutex);
603	return base_pgt_cache ? 0 : -ENOMEM;
604}
605
606/**
607 * base_asce_alloc - create kernel mapping without enhanced DAT features
608 * @addr: virtual start address of kernel mapping
609 * @num_pages: number of consecutive pages
610 *
611 * Generate an asce, including all required region, segment and page tables,
612 * that can be used to access the virtual kernel mapping. The difference is
613 * that the returned asce does not make use of any enhanced DAT features like
614 * e.g. large pages. This is required for some I/O functions that pass an
615 * asce, like e.g. some service call requests.
616 *
617 * Note: the returned asce may NEVER be attached to any cpu. It may only be
618 *	 used for I/O requests. tlb entries that might result because the
619 *	 asce was attached to a cpu won't be cleared.
620 */
621unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
622{
623	unsigned long asce, table, end;
624	int rc;
625
626	if (base_pgt_cache_init())
627		return 0;
628	end = addr + num_pages * PAGE_SIZE;
629	if (end <= _REGION3_SIZE) {
630		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
631		if (!table)
632			return 0;
633		rc = base_segment_walk(table, addr, end, 1);
634		asce = table | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
635	} else if (end <= _REGION2_SIZE) {
636		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
637		if (!table)
638			return 0;
639		rc = base_region3_walk(table, addr, end, 1);
640		asce = table | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
641	} else if (end <= _REGION1_SIZE) {
642		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
643		if (!table)
644			return 0;
645		rc = base_region2_walk(table, addr, end, 1);
646		asce = table | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
647	} else {
648		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
649		if (!table)
650			return 0;
651		rc = base_region1_walk(table, addr, end, 1);
652		asce = table | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
653	}
654	if (rc) {
655		base_asce_free(asce);
656		asce = 0;
657	}
658	return asce;
659}