v5.4
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <linux/security.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>
#include <asm/mmu_context.h>

static DEFINE_SPINLOCK(slice_convert_lock);

#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, const struct slice_mask *mask)
{
	if (!_slice_debug)
		return;
	pr_devel("%s low_slice: %*pbl\n", label,
			(int)SLICE_NUM_LOW, &mask->low_slices);
	pr_devel("%s high_slice: %*pbl\n", label,
			(int)SLICE_NUM_HIGH, mask->high_slices);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)

#else

static void slice_print_mask(const char *label, const struct slice_mask *mask) {}
#define slice_dbg(fmt...)

#endif

static inline bool slice_addr_is_low(unsigned long addr)
{
	u64 tmp = (u64)addr;

	return tmp < SLICE_LOW_TOP;
}

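/*
 * Added note (an assumption, not in the original source): the u64 cast
 * matters on 32-bit builds that use slices (e.g. 8xx), where SLICE_LOW_TOP
 * can be 0x100000000ull and would not fit in a 32-bit unsigned long, so
 * the comparison is done in 64 bits.
 */
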
static void slice_range_to_mask(unsigned long start, unsigned long len,
				struct slice_mask *ret)
{
	unsigned long end = start + len - 1;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	if (slice_addr_is_low(start)) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(start));
	}

	if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

		bitmap_set(ret->high_slices, start_index, count);
	}
}

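/*
 * Added worked example (assuming 256MB low slices, SLICE_LOW_SHIFT == 28):
 * for start = 0x10000000 and len = 0x20000000, end is 0x2fffffff, so the
 * range covers low slices 1 and 2 and the expression above computes
 * (1u << 3) - (1u << 1) == 0x6.  Subtracting two powers of two is what
 * produces the contiguous run of set bits between the first and the last
 * slice of the range.
 */
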
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vm_start_gap(vma));
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

	/* Hack, so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = (unsigned long)SLICE_LOW_TOP;

	return !slice_area_is_free(mm, start, end - start);
}

static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
				unsigned long high_limit)
{
	unsigned long i;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret->low_slices |= 1u << i;

	if (slice_addr_is_low(high_limit - 1))
		return;

	for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
		if (!slice_high_has_vma(mm, i))
			__set_bit(i, ret->high_slices);
}

static bool slice_check_range_fits(struct mm_struct *mm,
			   const struct slice_mask *available,
			   unsigned long start, unsigned long len)
{
	unsigned long end = start + len - 1;
	u64 low_slices = 0;

	if (slice_addr_is_low(start)) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
				- (1u << GET_LOW_SLICE_INDEX(start));
	}
	if ((low_slices & available->low_slices) != low_slices)
		return false;

	if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
		unsigned long i;

		for (i = start_index; i < start_index + count; i++) {
			if (!test_bit(i, available->high_slices))
				return false;
		}
	}

	return true;
}

static void slice_flush_segments(void *parm)
{
#ifdef CONFIG_PPC64
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	copy_mm_to_paca(current->active_mm);

	local_irq_save(flags);
	slb_flush_and_restore_bolted();
	local_irq_restore(flags);
#endif
}

static void slice_convert(struct mm_struct *mm,
				const struct slice_mask *mask, int psize)
{
	int index, mask_index;
	/* Write the new slice psize bits */
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *psize_mask, *old_mask;
	unsigned long i, flags;
	int old_psize;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	psize_mask = slice_mask_for_size(&mm->context, psize);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm_ctx_low_slices(&mm->context);
	for (i = 0; i < SLICE_NUM_LOW; i++) {
		if (!(mask->low_slices & (1u << i)))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(&mm->context, old_psize);
		old_mask->low_slices &= ~(1u << i);
		psize_mask->low_slices |= 1u << i;

		/* Update the sizes array */
		lpsizes[index] = (lpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	hpsizes = mm_ctx_high_slices(&mm->context);
	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm_ctx_slb_addr_limit(&mm->context)); i++) {
		if (!test_bit(i, mask->high_slices))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(&mm->context, old_psize);
		__clear_bit(i, old_mask->high_slices);
		__set_bit(i, psize_mask->high_slices);

		/* Update the sizes array */
		hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  (unsigned long)mm_ctx_low_slices(&mm->context),
		  (unsigned long)mm_ctx_high_slices(&mm->context));

	spin_unlock_irqrestore(&slice_convert_lock, flags);

	copro_flush_all_slbs(mm);
}

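/*
 * Added illustration of the packed psize layout used above: the low/high
 * slice arrays store one 4-bit MMU_PAGE_* index per slice, two per byte,
 * so slice i lives in nibble (i & 0x1) of byte (i >> 1).  Converting
 * slice 3 to psize 5, for instance, rewrites the high nibble of byte 1:
 *
 *	index = 3 >> 1;				// byte 1
 *	mask_index = 3 & 0x1;			// high nibble
 *	lpsizes[1] = (lpsizes[1] & ~0xf0) | (5 << 4);
 */
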
/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on the 'end' parameter);
 * return a boolean indicating whether the slice is marked as available
 * in the 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
				 const struct slice_mask *available,
				 int end, unsigned long *boundary_addr)
{
	unsigned long slice;
	if (slice_addr_is_low(addr)) {
		slice = GET_LOW_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
		return !!(available->low_slices & (1u << slice));
	} else {
		slice = GET_HIGH_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) ?
			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
		return !!test_bit(slice, available->high_slices);
	}
}

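/*
 * Added worked example: with 'end' == 1 the function reports the boundary
 * *after* the slice containing addr, with 'end' == 0 the boundary at its
 * start.  Assuming 256MB low slices, addr = 0x18000000 is in slice 1, so
 * end == 1 yields *boundary_addr == 0x20000000 while end == 0 yields
 * 0x10000000.
 */
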
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      const struct slice_mask *available,
					      int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, next_end;
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = TASK_UNMAPPED_BASE;
	/*
	 * Search up to the allowed maximum address for this mmap request.
	 */
	while (addr < high_limit) {
		info.low_limit = addr;
		if (!slice_scan_available(addr, available, 1, &addr))
			continue;

 next_slice:
		/*
		 * At this point [info.low_limit; addr) covers
		 * available slices only and ends at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the next available slice.
		 */
		if (addr >= high_limit)
			addr = high_limit;
		else if (slice_scan_available(addr, available, 1, &next_end)) {
			addr = next_end;
			goto next_slice;
		}
		info.high_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	return -ENOMEM;
}

static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     const struct slice_mask *available,
					     int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, prev;
	struct vm_unmapped_area_info info;
	unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = mm->mmap_base;
	/*
	 * If we are trying to allocate above DEFAULT_MAP_WINDOW,
	 * add the difference to the mmap_base.  Only apply this
	 * for requests whose high_limit is above DEFAULT_MAP_WINDOW.
	 */
	if (high_limit > DEFAULT_MAP_WINDOW)
		addr += mm_ctx_slb_addr_limit(&mm->context) - DEFAULT_MAP_WINDOW;

	while (addr > min_addr) {
		info.high_limit = addr;
		if (!slice_scan_available(addr - 1, available, 0, &addr))
			continue;

 prev_slice:
		/*
		 * At this point [addr; info.high_limit) covers
		 * available slices only and starts at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the previous available slice.
		 */
		if (addr < min_addr)
			addr = min_addr;
		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
			addr = prev;
			goto prev_slice;
		}
		info.low_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return slice_find_area_bottomup(mm, len, available, psize, high_limit);
}


static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     const struct slice_mask *mask, int psize,
				     int topdown, unsigned long high_limit)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize, high_limit);
	else
		return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
}

static inline void slice_copy_mask(struct slice_mask *dst,
					const struct slice_mask *src)
{
	dst->low_slices = src->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_copy(dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_or_mask(struct slice_mask *dst,
					const struct slice_mask *src1,
					const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices | src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_or(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_andnot_mask(struct slice_mask *dst,
					const struct slice_mask *src1,
					const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices & ~src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_andnot(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif

unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown)
{
	struct slice_mask good_mask;
	struct slice_mask potential_mask;
	const struct slice_mask *maskp;
	const struct slice_mask *compat_maskp = NULL;
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long page_size = 1UL << pshift;
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;
	unsigned long high_limit;

	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len > high_limit)
		return -ENOMEM;
	if (len & (page_size - 1))
		return -EINVAL;
	if (fixed) {
		if (addr & (page_size - 1))
			return -EINVAL;
		if (addr > high_limit - len)
			return -ENOMEM;
	}

	if (high_limit > mm_ctx_slb_addr_limit(&mm->context)) {
		/*
		 * Increasing the slb_addr_limit does not require
		 * slice mask cache to be recalculated because it should
		 * be already initialised beyond the old address limit.
		 */
		mm_ctx_set_slb_addr_limit(&mm->context, high_limit);

		on_each_cpu(slice_flush_segments, mm, 1);
	}

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);
	BUG_ON(mm_ctx_slb_addr_limit(&mm->context) == 0);
	VM_BUG_ON(radix_enabled());

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
		  addr, len, flags, topdown);

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = _ALIGN_UP(addr, page_size);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > high_limit - len || addr < mmap_min_addr ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	maskp = slice_mask_for_size(&mm->context, psize);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */

	/*
	 * If we support combo pages, we can allow 64k pages in 4k slices
	 * The mask copies could be avoided in most cases here if we had
	 * a pointer to good mask for the next code to use.
	 */
	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
		if (fixed)
			slice_or_mask(&good_mask, maskp, compat_maskp);
		else
			slice_copy_mask(&good_mask, maskp);
	} else {
		slice_copy_mask(&good_mask, maskp);
	}

	slice_print_mask(" good_mask", &good_mask);
	if (compat_maskp)
		slice_print_mask(" compat_mask", compat_maskp);

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_range_fits(mm, &good_mask, addr, len)) {
			slice_dbg(" fits good !\n");
			newaddr = addr;
			goto return_addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, we don't have to setup,
			 * we thus return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}
	/*
	 * We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	slice_mask_for_free(mm, &potential_mask, high_limit);
	slice_or_mask(&potential_mask, &potential_mask, &good_mask);
	slice_print_mask(" potential", &potential_mask);

	if (addr != 0 || fixed) {
		if (slice_check_range_fits(mm, &potential_mask, addr, len)) {
			slice_dbg(" fits potential !\n");
			newaddr = addr;
			goto convert;
		}
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	newaddr = slice_find_area(mm, len, &potential_mask,
				  psize, topdown, high_limit);

	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && newaddr == -ENOMEM &&
	    psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		slice_or_mask(&potential_mask, &potential_mask, compat_maskp);
		newaddr = slice_find_area(mm, len, &potential_mask,
					  psize, topdown, high_limit);
	}

	if (newaddr == -ENOMEM)
		return -ENOMEM;

	slice_range_to_mask(newaddr, len, &potential_mask);
	slice_dbg(" found potential area at 0x%lx\n", newaddr);
	slice_print_mask(" mask", &potential_mask);

 convert:
	/*
	 * Try to allocate the context before we do slice convert
	 * so that we handle the context allocation failure gracefully.
	 */
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}

	slice_andnot_mask(&potential_mask, &potential_mask, &good_mask);
	if (compat_maskp && !fixed)
		slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp);
	if (potential_mask.low_slices ||
		(SLICE_NUM_HIGH &&
		 !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) {
		slice_convert(mm, &potential_mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return newaddr;

return_addr:
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}
	return newaddr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       mm_ctx_user_psize(&current->mm->context), 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       mm_ctx_user_psize(&current->mm->context), 1);
}

unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	unsigned char *psizes;
	int index, mask_index;

	VM_BUG_ON(radix_enabled());

	if (slice_addr_is_low(addr)) {
		psizes = mm_ctx_low_slices(&mm->context);
		index = GET_LOW_SLICE_INDEX(addr);
	} else {
		psizes = mm_ctx_high_slices(&mm->context);
		index = GET_HIGH_SLICE_INDEX(addr);
	}
	mask_index = index & 0x1;
	return (psizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

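/*
 * Added worked example (assuming 256MB low slices): addr = 0x30000000 is
 * in low slice 3, so the lookup above reads the high nibble of byte 1:
 *
 *	index = 3;			// GET_LOW_SLICE_INDEX(addr)
 *	mask_index = 3 & 0x1;		// == 1, high nibble
 *	return (psizes[3 >> 1] >> 4) & 0xf;
 */
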
void slice_init_new_context_exec(struct mm_struct *mm)
{
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *mask;
	unsigned int psize = mmu_virtual_psize;

	slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm);

	/*
	 * In the case of exec, use the default limit. In the
	 * case of fork it is just inherited from the mm being
	 * duplicated.
	 */
	mm_ctx_set_slb_addr_limit(&mm->context, SLB_ADDR_LIMIT_DEFAULT);
	mm_ctx_set_user_psize(&mm->context, psize);

	/*
	 * Set all slice psizes to the default.
	 */
	lpsizes = mm_ctx_low_slices(&mm->context);
	memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);

	hpsizes = mm_ctx_high_slices(&mm->context);
	memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);

	/*
	 * Slice mask cache starts zeroed, fill the default size cache.
	 */
	mask = slice_mask_for_size(&mm->context, psize);
	mask->low_slices = ~0UL;
	if (SLICE_NUM_HIGH)
		bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
}

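/*
 * Added note on the memset() pattern above: (psize << 4) | psize
 * replicates the 4-bit page-size index into both nibbles of the fill
 * byte, so a single memset() stamps the default psize into every slice;
 * e.g. psize == 5 produces the fill byte 0x55.
 */
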
#ifdef CONFIG_PPC_BOOK3S_64
void slice_setup_new_exec(void)
{
	struct mm_struct *mm = current->mm;

	slice_dbg("slice_setup_new_exec(mm=%p)\n", mm);

	if (!is_32bit_task())
		return;

	mm_ctx_set_slb_addr_limit(&mm->context, DEFAULT_MAP_WINDOW);
}
#endif

void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask;

	VM_BUG_ON(radix_enabled());

	slice_range_to_mask(start, len, &mask);
	slice_convert(mm, &mask, psize);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * because of that generic code limitation, MAP_FIXED mappings cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is ok
 * for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len)
{
	const struct slice_mask *maskp;
	unsigned int psize = mm_ctx_user_psize(&mm->context);

	VM_BUG_ON(radix_enabled());

	maskp = slice_mask_for_size(&mm->context, psize);

	/* We need to account for 4k slices too */
	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
		const struct slice_mask *compat_maskp;
		struct slice_mask available;

		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
		slice_or_mask(&available, maskp, compat_maskp);
		return !slice_check_range_fits(mm, &available, addr, len);
	}

	return !slice_check_range_fits(mm, maskp, addr, len);
}
#endif
v3.1
 
/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/spu.h>

static DEFINE_SPINLOCK(slice_convert_lock);


#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, struct slice_mask mask)
{
	char	*p, buf[16 + 3 + 16 + 1];
	int	i;

	if (!_slice_debug)
		return;
	p = buf;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		*(p++) = (mask.low_slices & (1 << i)) ? '1' : '0';
	*(p++) = ' ';
	*(p++) = '-';
	*(p++) = ' ';
	for (i = 0; i < SLICE_NUM_HIGH; i++)
		*(p++) = (mask.high_slices & (1 << i)) ? '1' : '0';
	*(p++) = 0;

	printk(KERN_DEBUG "%s:%s\n", label, buf);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_debug(fmt); } while(0)

#else

static void slice_print_mask(const char *label, struct slice_mask mask) {}
#define slice_dbg(fmt...)

#endif

static struct slice_mask slice_range_to_mask(unsigned long start,
					     unsigned long len)
{
	unsigned long end = start + len - 1;
	struct slice_mask ret = { 0, 0 };

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = min(end, SLICE_LOW_TOP);
		unsigned long mstart = min(start, SLICE_LOW_TOP);

		ret.low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(mstart));
	}

	if ((start + len) > SLICE_LOW_TOP)
		ret.high_slices = (1u << (GET_HIGH_SLICE_INDEX(end) + 1))
			- (1u << GET_HIGH_SLICE_INDEX(start));

	return ret;
}

static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm->task_size - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vma->vm_start);
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

	/* Hack, so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = SLICE_LOW_TOP;

	return !slice_area_is_free(mm, start, end - start);
}

static struct slice_mask slice_mask_for_free(struct mm_struct *mm)
{
	struct slice_mask ret = { 0, 0 };
	unsigned long i;

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret.low_slices |= 1u << i;

	if (mm->task_size <= SLICE_LOW_TOP)
		return ret;

	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (!slice_high_has_vma(mm, i))
			ret.high_slices |= 1u << i;

	return ret;
}

static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)
{
	struct slice_mask ret = { 0, 0 };
	unsigned long i;
	u64 psizes;

	psizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((psizes >> (i * 4)) & 0xf) == psize)
			ret.low_slices |= 1u << i;

	psizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (((psizes >> (i * 4)) & 0xf) == psize)
			ret.high_slices |= 1u << i;

	return ret;
}

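/*
 * Added worked example for this older layout: all 16 low-slice page-size
 * indices are packed into a single u64, nibble i for slice i.  If
 * low_slices_psize ends in ...0x50 (slice 0 holds psize 0, slice 1 holds
 * psize 5), a scan for psize == 5 sets bit 1 of ret.low_slices because
 * (psizes >> (1 * 4)) & 0xf == 5.
 */
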
static int slice_check_fit(struct slice_mask mask, struct slice_mask available)
{
	return (mask.low_slices & available.low_slices) == mask.low_slices &&
		(mask.high_slices & available.high_slices) == mask.high_slices;
}

static void slice_flush_segments(void *parm)
{
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	/* update the paca copy of the context struct */
	get_paca()->context = current->active_mm->context;

	local_irq_save(flags);
	slb_flush_and_rebolt();
	local_irq_restore(flags);
}

static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
{
	/* Write the new slice psize bits */
	u64 lpsizes, hpsizes;
	unsigned long i, flags;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (mask.low_slices & (1u << i))
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (mask.high_slices & (1u << i))
			hpsizes = (hpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	mm->context.low_slices_psize = lpsizes;
	mm->context.high_slices_psize = hpsizes;

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

	spin_unlock_irqrestore(&slice_convert_lock, flags);

#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
}

static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      struct slice_mask available,
					      int psize, int use_cache)
{
	struct vm_area_struct *vma;
	unsigned long start_addr, addr;
	struct slice_mask mask;
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);

	if (use_cache) {
		if (len <= mm->cached_hole_size) {
			start_addr = addr = TASK_UNMAPPED_BASE;
			mm->cached_hole_size = 0;
		} else
			start_addr = addr = mm->free_area_cache;
	} else
		start_addr = addr = TASK_UNMAPPED_BASE;

full_search:
	for (;;) {
		addr = _ALIGN_UP(addr, 1ul << pshift);
		if ((TASK_SIZE - len) < addr)
			break;
		vma = find_vma(mm, addr);
		BUG_ON(vma && (addr >= vma->vm_end));

		mask = slice_range_to_mask(addr, len);
		if (!slice_check_fit(mask, available)) {
			if (addr < SLICE_LOW_TOP)
				addr = _ALIGN_UP(addr + 1,  1ul << SLICE_LOW_SHIFT);
			else
				addr = _ALIGN_UP(addr + 1,  1ul << SLICE_HIGH_SHIFT);
			continue;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			if (use_cache)
				mm->free_area_cache = addr + len;
			return addr;
		}
		if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
		        mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
	}

	/* Make sure we didn't miss any holes */
	if (use_cache && start_addr != TASK_UNMAPPED_BASE) {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
		goto full_search;
	}
	return -ENOMEM;
}

static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     struct slice_mask available,
					     int psize, int use_cache)
{
	struct vm_area_struct *vma;
	unsigned long addr;
	struct slice_mask mask;
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);

	/* check if free_area_cache is useful for us */
	if (use_cache) {
		if (len <= mm->cached_hole_size) {
			mm->cached_hole_size = 0;
			mm->free_area_cache = mm->mmap_base;
		}

		/* either no address requested or can't fit in requested
		 * address hole
		 */
		addr = mm->free_area_cache;

		/* make sure it can fit in the remaining address space */
		if (addr > len) {
			addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
			mask = slice_range_to_mask(addr, len);
			if (slice_check_fit(mask, available) &&
			    slice_area_is_free(mm, addr, len))
					/* remember the address as a hint for
					 * next time
					 */
					return (mm->free_area_cache = addr);
		}
	}

	addr = mm->mmap_base;
	while (addr > len) {
		/* Go down by chunk size */
		addr = _ALIGN_DOWN(addr - len, 1ul << pshift);

		/* Check for hit with different page size */
		mask = slice_range_to_mask(addr, len);
		if (!slice_check_fit(mask, available)) {
			if (addr < SLICE_LOW_TOP)
				addr = _ALIGN_DOWN(addr, 1ul << SLICE_LOW_SHIFT);
			else if (addr < (1ul << SLICE_HIGH_SHIFT))
				addr = SLICE_LOW_TOP;
			else
				addr = _ALIGN_DOWN(addr, 1ul << SLICE_HIGH_SHIFT);
			continue;
		}

		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (!vma || (addr + len) <= vma->vm_start) {
			/* remember the address as a hint for next time */
			if (use_cache)
				mm->free_area_cache = addr;
			return addr;
		}

		/* remember the largest hole we saw so far */
		if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
		        mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	addr = slice_find_area_bottomup(mm, len, available, psize, 0);

	/*
	 * Restore the topdown base:
	 */
	if (use_cache) {
		mm->free_area_cache = mm->mmap_base;
		mm->cached_hole_size = ~0UL;
	}

	return addr;
}


static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     struct slice_mask mask, int psize,
				     int topdown, int use_cache)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize, use_cache);
	else
		return slice_find_area_bottomup(mm, len, mask, psize, use_cache);
}

#define or_mask(dst, src)	do {			\
	(dst).low_slices |= (src).low_slices;		\
	(dst).high_slices |= (src).high_slices;		\
} while (0)

#define andnot_mask(dst, src)	do {			\
	(dst).low_slices &= ~(src).low_slices;		\
	(dst).high_slices &= ~(src).high_slices;	\
} while (0)

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif

unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown, int use_cache)
{
	struct slice_mask mask = {0, 0};
	struct slice_mask good_mask;
	struct slice_mask potential_mask = {0,0} /* silence stupid warning */;
	struct slice_mask compat_mask = {0, 0};
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d, use_cache=%d\n",
		  addr, len, flags, topdown, use_cache);

	if (len > mm->task_size)
		return -ENOMEM;
	if (len & ((1ul << pshift) - 1))
		return -EINVAL;
	if (fixed && (addr & ((1ul << pshift) - 1)))
		return -EINVAL;
	if (fixed && addr > (mm->task_size - len))
		return -EINVAL;

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = _ALIGN_UP(addr, 1ul << pshift);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > mm->task_size - len ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	good_mask = slice_mask_for_size(mm, psize);
	slice_print_mask(" good_mask", good_mask);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */

#ifdef CONFIG_PPC_64K_PAGES
	/* If we support combo pages, we can allow 64k pages in 4k slices */
	if (psize == MMU_PAGE_64K) {
		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
		if (fixed)
			or_mask(good_mask, compat_mask);
	}
#endif

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Build a mask for the requested range */
		mask = slice_range_to_mask(addr, len);
		slice_print_mask(" mask", mask);

		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_fit(mask, good_mask)) {
			slice_dbg(" fits good !\n");
			return addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, good_mask, psize, topdown,
					  use_cache);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, we don't have to setup,
			 * we thus return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			return newaddr;
		}
	}

	/* We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	potential_mask = slice_mask_for_free(mm);
	or_mask(potential_mask, good_mask);
	slice_print_mask(" potential", potential_mask);

	if ((addr != 0 || fixed) && slice_check_fit(mask, potential_mask)) {
		slice_dbg(" fits potential !\n");
		goto convert;
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		addr = slice_find_area(mm, len, good_mask, psize, topdown,
				       use_cache);
		if (addr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", addr);
			return addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	addr = slice_find_area(mm, len, potential_mask, psize, topdown,
			       use_cache);

#ifdef CONFIG_PPC_64K_PAGES
	if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		or_mask(potential_mask, compat_mask);
		addr = slice_find_area(mm, len, potential_mask, psize,
				       topdown, use_cache);
	}
#endif

	if (addr == -ENOMEM)
		return -ENOMEM;

	mask = slice_range_to_mask(addr, len);
	slice_dbg(" found potential area at 0x%lx\n", addr);
	slice_print_mask(" mask", mask);

 convert:
	andnot_mask(mask, good_mask);
	andnot_mask(mask, compat_mask);
	if (mask.low_slices || mask.high_slices) {
		slice_convert(mm, mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return addr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       current->mm->context.user_psize,
				       0, 1);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       current->mm->context.user_psize,
				       1, 1);
}

unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	u64 psizes;
	int index;

	if (addr < SLICE_LOW_TOP) {
		psizes = mm->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
	} else {
		psizes = mm->context.high_slices_psize;
		index = GET_HIGH_SLICE_INDEX(addr);
	}

	return (psizes >> (index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

/*
 * This is called by hash_page when it needs to do a lazy conversion of
 * an address space from real 64K pages to combo 4K pages (typically
 * when hitting a non cacheable mapping on a processor or hypervisor
 * that won't allow them for 64K pages).
 *
 * This is also called in init_new_context() to change back the user
 * psize from whatever the parent context had it set to.
 * N.B. This may be called before mm->context.id has been set.
 *
 * This function will only change the content of the {low,high}_slices_psize
 * masks, it will not flush SLBs as this shall be handled lazily by the
 * caller.
 */
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
	unsigned long flags, lpsizes, hpsizes;
	unsigned int old_psize;
	int i;

	slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);

	spin_lock_irqsave(&slice_convert_lock, flags);

	old_psize = mm->context.user_psize;
	slice_dbg(" old_psize=%d\n", old_psize);
	if (old_psize == psize)
		goto bail;

	mm->context.user_psize = psize;
	wmb();

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (((hpsizes >> (i * 4)) & 0xf) == old_psize)
			hpsizes = (hpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	mm->context.low_slices_psize = lpsizes;
	mm->context.high_slices_psize = hpsizes;

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

 bail:
	spin_unlock_irqrestore(&slice_convert_lock, flags);
}

void slice_set_psize(struct mm_struct *mm, unsigned long address,
		     unsigned int psize)
{
	unsigned long i, flags;
	u64 *p;

	spin_lock_irqsave(&slice_convert_lock, flags);
	if (address < SLICE_LOW_TOP) {
		i = GET_LOW_SLICE_INDEX(address);
		p = &mm->context.low_slices_psize;
	} else {
		i = GET_HIGH_SLICE_INDEX(address);
		p = &mm->context.high_slices_psize;
	}
	*p = (*p & ~(0xful << (i * 4))) | ((unsigned long) psize << (i * 4));
	spin_unlock_irqrestore(&slice_convert_lock, flags);

#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
}

void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask = slice_range_to_mask(start, len);

	slice_convert(mm, mask, psize);
}

/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * because of that generic code limitation, MAP_FIXED mappings cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is ok
 * for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len)
{
	struct slice_mask mask, available;
	unsigned int psize = mm->context.user_psize;

	mask = slice_range_to_mask(addr, len);
	available = slice_mask_for_size(mm, psize);
#ifdef CONFIG_PPC_64K_PAGES
	/* We need to account for 4k slices too */
	if (psize == MMU_PAGE_64K) {
		struct slice_mask compat_mask;
		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
		or_mask(available, compat_mask);
	}
#endif

#if 0 /* too verbose */
	slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
		 mm, addr, len);
	slice_print_mask(" mask", mask);
	slice_print_mask(" available", available);
#endif
	return !slice_check_fit(mask, available);
}