v4.6
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/sysctl.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

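/*
 * The vm.allocate_pgste sysctl: when set, new page tables are allocated
 * as full 4K pages with page status table extensions (PGSTEs) in the
 * second half, which is needed to run KVM guests.
 */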
static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

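/*
 * Region and segment (crst) tables are 16K: 2048 entries of 8 bytes
 * each, hence the order-2 page allocation.
 */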
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm) {
		clear_user_asce();
		set_user_asce(mm);
	}
	__tlb_flush_local();
}

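/*
 * Extend the address space from 3 to 4 translation levels: a new
 * region-second table is placed on top of the old pgd, raising the
 * limit from 4TB (1UL << 42) to 8PB (1UL << 53), and every CPU running
 * this mm reloads the user ASCE.
 */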
int crst_table_upgrade(struct mm_struct *mm)
{
	unsigned long *table, *pgd;

	/* upgrade should only happen from 3 to 4 levels */
	BUG_ON(mm->context.asce_limit != (1UL << 42));

	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;

	spin_lock_bh(&mm->page_table_lock);
	pgd = (unsigned long *) mm->pgd;
	crst_table_init(table, _REGION2_ENTRY_EMPTY);
	pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
	mm->pgd = (pgd_t *) table;
	mm->context.asce_limit = 1UL << 53;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
	mm->task_size = mm->context.asce_limit;
	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);
	return 0;
}

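/*
 * Shrink a 31-bit compat address space from 3 to 2 translation levels:
 * the segment table below the pgd becomes the new top-level table and
 * the limit drops to 2GB (1UL << 31).
 */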
void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	BUG_ON(mm->context.asce_limit != (1UL << 42));

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}

	pgd = mm->pgd;
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = 1UL << 31;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	mm->task_size = mm->context.asce_limit;
	crst_table_free(mm, (unsigned long *) pgd);

	if (current->active_mm == mm)
		set_user_asce(mm);
}

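/*
 * Atomically toggle the given bits in *v using a compare-and-swap loop
 * and return the new value. Used to flip the allocation and
 * pending-free bits kept in page->_mapcount.
 */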
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

/*
 * page table entry allocation/free routines.
 */
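/*
 * An s390 page table has 256 entries of 8 bytes, i.e. 2K, so a 4K page
 * holds two page tables, unless PGSTEs are required: then the PGSTEs
 * occupy the second 2K half. page->_mapcount tracks the state of the
 * page: bits 0-1 mark which 2K fragments are in use, bits 4-5 mark
 * fragments that still await an RCU grace period before they may be
 * freed. Pages with a free fragment stay on mm->context.pgtable_list.
 */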
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.list_lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_mapcount);
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_mapcount, 1U << bit);
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.list_lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_set(&page->_mapcount, 3);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_set(&page->_mapcount, 1);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.list_lock);
	}
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.list_lock);
		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.list_lock);
		if (mask != 0)
			return;
	}

	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	__free_page(page);
}

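/*
 * Free a page table that concurrent lockless page table walkers may
 * still see: the actual release is deferred via tlb_remove_table().
 * The low two bits of the table pointer encode what to free: 1 or 2
 * for the first or second 2K fragment, 3 for a full 4K page table with
 * PGSTEs.
 */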
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.list_lock);
	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}

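/* Decode the low tag bits of the table pointer and free accordingly. */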
static void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd or pud */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
		break;
	}
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

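/*
 * Queue a table for deferred freeing. Tables are collected in a
 * page-sized mmu_table_batch that is released after an RCU-sched grace
 * period; if no batch page can be allocated, fall back to an IPI
 * broadcast to synchronize with all CPUs and free the table directly.
 */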
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}
v4.17
// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm)
		set_user_asce(mm);
	__tlb_flush_local();
}

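/*
 * Extend the address space to at least end, one translation level at a
 * time: from 3 to 4 levels (region-second table, 8PB) and from 4 to 5
 * levels (region-first table, asce_limit = -PAGE_SIZE, i.e. the full
 * 64-bit address space). CPUs running this mm then reload the user
 * ASCE.
 */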
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *table, *pgd;
	int rc, notify;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
	rc = 0;
	notify = 0;
	while (mm->context.asce_limit < end) {
		table = crst_table_alloc(mm);
		if (!table) {
			rc = -ENOMEM;
			break;
		}
		spin_lock_bh(&mm->page_table_lock);
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit == _REGION2_SIZE) {
			crst_table_init(table, _REGION2_ENTRY_EMPTY);
			p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = _REGION1_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		} else {
			crst_table_init(table, _REGION1_ENTRY_EMPTY);
			pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = -PAGE_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
		}
		notify = 1;
		spin_unlock_bh(&mm->page_table_lock);
	}
	if (notify)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return rc;
}

void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}

	pgd = mm->pgd;
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = _REGION3_SIZE;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	crst_table_free(mm, (unsigned long *) pgd);

	if (current->active_mm == mm)
		set_user_asce(mm);
}

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

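/*
 * Allocate and free full 4K page tables with PGSTEs for the guest
 * address space (gmap) code: PTEs fill the first 2K half and the
 * matching PGSTEs the second.
 */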
#ifdef CONFIG_PGSTE

struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (u64 *)page_to_phys(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_mapcount);
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_mapcount, 1U << bit);
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_set(&page->_mapcount, 3);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_set(&page->_mapcount, 1);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		if (mask != 0)
			return;
	}

	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	__free_page(page);
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}

static void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
		break;
	}
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

static unsigned long base_pgt_alloc(void)
{
	u64 *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
	return (unsigned long) table;
}

static void base_pgt_free(unsigned long table)
{
	kmem_cache_free(base_pgt_cache, (void *) table);
}

static unsigned long base_crst_alloc(unsigned long val)
{
	unsigned long table;

	table = __get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (table)
		crst_table_init((unsigned long *)table, val);
	return table;
}

static void base_crst_free(unsigned long table)
{
	free_pages(table, CRST_ALLOC_ORDER);
}

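/*
 * Generate boundary helpers in the style of pgd_addr_end() and
 * friends: round addr up to the next SIZE boundary without moving past
 * end. Comparing (next - 1) with (end - 1) keeps the result correct
 * when end is 0, i.e. when the range wraps at the top of the address
 * space.
 */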
#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)

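/*
 * LRA (load real address) translates a virtual address through the
 * currently attached ASCE. It is used here to copy the kernel's
 * virtual-to-real mapping into the base tables, one page at a time.
 */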
static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}

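/*
 * Each base_*_walk() handles one translation level. With alloc set,
 * missing lower-level tables are allocated and the page-table entries
 * are filled with the real addresses returned by base_lra(); with
 * alloc clear, the same walk tears the lower-level tables down again.
 */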
static int base_page_walk(unsigned long origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = (unsigned long *) origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

static int base_segment_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, table;
	int rc;

	ste = (unsigned long *) origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = table | _SEGMENT_ENTRY;
		}
		table = *ste & _SEGMENT_ENTRY_ORIGIN;
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, table;
	int rc;

	rtte = (unsigned long *) origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = table | _REGION3_ENTRY;
		}
		table = *rtte & _REGION_ENTRY_ORIGIN;
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, table;
	int rc;

	rste = (unsigned long *) origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = table | _REGION2_ENTRY;
		}
		table = *rste & _REGION_ENTRY_ORIGIN;
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, table;
	int rc;

	rfte = (unsigned long *) origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = table | _REGION1_ENTRY;
		}
		table = *rfte & _REGION_ENTRY_ORIGIN;
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long table = asce & _ASCE_ORIGIN;

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, -_PAGE_SIZE, 0);
		break;
	}
	base_crst_free(table);
}

static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. Unlike the regular
 * kernel page tables, the returned asce does not make use of any enhanced
 * DAT features like large pages. This is required for some I/O functions
 * that pass an asce, e.g. some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. tlb entries that might result because the
 *	 asce was attached to a cpu won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}