v5.4
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * address space "slices" (meta-segments) support
  4 *
  5 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
  6 *
  7 * Based on hugetlb implementation
  8 *
  9 * Copyright (C) 2003 David Gibson, IBM Corporation.
 10 */
 11
 12#undef DEBUG
 13
 14#include <linux/kernel.h>
 15#include <linux/mm.h>
 16#include <linux/pagemap.h>
 17#include <linux/err.h>
 18#include <linux/spinlock.h>
 19#include <linux/export.h>
 20#include <linux/hugetlb.h>
 21#include <linux/sched/mm.h>
 22#include <linux/security.h>
 23#include <asm/mman.h>
 24#include <asm/mmu.h>
 25#include <asm/copro.h>
 26#include <asm/hugetlb.h>
 27#include <asm/mmu_context.h>
 28
 29static DEFINE_SPINLOCK(slice_convert_lock);
 30
 31#ifdef DEBUG
 32int _slice_debug = 1;
 33
 34static void slice_print_mask(const char *label, const struct slice_mask *mask)
 35{
 36	if (!_slice_debug)
 37		return;
 38	pr_devel("%s low_slice: %*pbl\n", label,
 39			(int)SLICE_NUM_LOW, &mask->low_slices);
 40	pr_devel("%s high_slice: %*pbl\n", label,
 41			(int)SLICE_NUM_HIGH, mask->high_slices);
 42}
 43
 44#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)
 45
 46#else
 47
 48static void slice_print_mask(const char *label, const struct slice_mask *mask) {}
 49#define slice_dbg(fmt...)
 50
 51#endif
 52
 53static inline bool slice_addr_is_low(unsigned long addr)
 54{
 55	u64 tmp = (u64)addr;
 56
 57	return tmp < SLICE_LOW_TOP;
 58}
 59
 60static void slice_range_to_mask(unsigned long start, unsigned long len,
 61				struct slice_mask *ret)
 62{
 63	unsigned long end = start + len - 1;
 64
 65	ret->low_slices = 0;
 66	if (SLICE_NUM_HIGH)
 67		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
 68
 69	if (slice_addr_is_low(start)) {
 70		unsigned long mend = min(end,
 71					 (unsigned long)(SLICE_LOW_TOP - 1));
 72
 73		ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
 74			- (1u << GET_LOW_SLICE_INDEX(start));
 75	}
 76
 77	if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
 78		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
 79		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
 80		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
 81
 82		bitmap_set(ret->high_slices, start_index, count);
 83	}
 84}
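
The low-slice arithmetic above sets a contiguous run of bits in a single
expression: (1u << (hi + 1)) - (1u << lo) produces a mask with bits lo..hi
set. A minimal userspace sketch of the same trick (assuming 256MB low
slices, i.e. SLICE_LOW_SHIFT == 28; illustrative only, not kernel code):

#include <stdio.h>

#define SLICE_LOW_SHIFT 28
#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)

int main(void)
{
	unsigned long start = 0x10000000UL;	/* falls in slice 1 */
	unsigned long end   = 0x4fffffffUL;	/* falls in slice 4 */
	unsigned int lo = GET_LOW_SLICE_INDEX(start);
	unsigned int hi = GET_LOW_SLICE_INDEX(end);
	unsigned int mask = (1u << (hi + 1)) - (1u << lo);

	/* Prints 0x1e: bits 1..4 set, one per covered slice. */
	printf("low_slices mask = 0x%x\n", mask);
	return 0;
}
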
 85
 86static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
 87			      unsigned long len)
 88{
 89	struct vm_area_struct *vma;
 90
 91	if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr)
 92		return 0;
 93	vma = find_vma(mm, addr);
 94	return (!vma || (addr + len) <= vm_start_gap(vma));
 95}
 96
 97static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
 98{
 99	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
100				   1ul << SLICE_LOW_SHIFT);
101}
102
103static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
104{
105	unsigned long start = slice << SLICE_HIGH_SHIFT;
106	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);
107
108	/* Hack, so that each address is controlled by exactly one
109	 * of the high or low area bitmaps, the first high area starts
110	 * at 4GB, not 0 */
111	if (start == 0)
112		start = (unsigned long)SLICE_LOW_TOP;
113
114	return !slice_area_is_free(mm, start, end - start);
115}
116
117static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
118				unsigned long high_limit)
119{
120	unsigned long i;
121
122	ret->low_slices = 0;
123	if (SLICE_NUM_HIGH)
124		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
125
126	for (i = 0; i < SLICE_NUM_LOW; i++)
127		if (!slice_low_has_vma(mm, i))
128			ret->low_slices |= 1u << i;
129
130	if (slice_addr_is_low(high_limit - 1))
131		return;
132
133	for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
134		if (!slice_high_has_vma(mm, i))
135			__set_bit(i, ret->high_slices);
136}
137
138static bool slice_check_range_fits(struct mm_struct *mm,
139			   const struct slice_mask *available,
140			   unsigned long start, unsigned long len)
141{
142	unsigned long end = start + len - 1;
143	u64 low_slices = 0;
144
145	if (slice_addr_is_low(start)) {
146		unsigned long mend = min(end,
147					 (unsigned long)(SLICE_LOW_TOP - 1));
148
149		low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
150				- (1u << GET_LOW_SLICE_INDEX(start));
151	}
152	if ((low_slices & available->low_slices) != low_slices)
153		return false;
154
155	if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
156		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
157		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
158		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
159		unsigned long i;
160
161		for (i = start_index; i < start_index + count; i++) {
162			if (!test_bit(i, available->high_slices))
163				return false;
164		}
165	}
166
167	return true;
168}
169
170static void slice_flush_segments(void *parm)
171{
172#ifdef CONFIG_PPC64
173	struct mm_struct *mm = parm;
174	unsigned long flags;
175
176	if (mm != current->active_mm)
177		return;
178
179	copy_mm_to_paca(current->active_mm);
180
181	local_irq_save(flags);
182	slb_flush_and_restore_bolted();
183	local_irq_restore(flags);
184#endif
185}
186
187static void slice_convert(struct mm_struct *mm,
188				const struct slice_mask *mask, int psize)
189{
190	int index, mask_index;
191	/* Write the new slice psize bits */
192	unsigned char *hpsizes, *lpsizes;
193	struct slice_mask *psize_mask, *old_mask;
194	unsigned long i, flags;
195	int old_psize;
196
197	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
198	slice_print_mask(" mask", mask);
199
200	psize_mask = slice_mask_for_size(&mm->context, psize);
201
202	/* We need to use a spinlock here to protect against
203	 * concurrent 64k -> 4k demotion ...
204	 */
205	spin_lock_irqsave(&slice_convert_lock, flags);
206
207	lpsizes = mm_ctx_low_slices(&mm->context);
208	for (i = 0; i < SLICE_NUM_LOW; i++) {
209		if (!(mask->low_slices & (1u << i)))
210			continue;
211
212		mask_index = i & 0x1;
213		index = i >> 1;
214
215		/* Update the slice_mask */
216		old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf;
217		old_mask = slice_mask_for_size(&mm->context, old_psize);
218		old_mask->low_slices &= ~(1u << i);
219		psize_mask->low_slices |= 1u << i;
220
221		/* Update the sizes array */
222		lpsizes[index] = (lpsizes[index] & ~(0xf << (mask_index * 4))) |
223				(((unsigned long)psize) << (mask_index * 4));
224	}
225
226	hpsizes = mm_ctx_high_slices(&mm->context);
227	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm_ctx_slb_addr_limit(&mm->context)); i++) {
228		if (!test_bit(i, mask->high_slices))
229			continue;
230
231		mask_index = i & 0x1;
232		index = i >> 1;
233
234		/* Update the slice_mask */
235		old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf;
236		old_mask = slice_mask_for_size(&mm->context, old_psize);
237		__clear_bit(i, old_mask->high_slices);
238		__set_bit(i, psize_mask->high_slices);
239
240		/* Update the sizes array */
241		hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) |
242				(((unsigned long)psize) << (mask_index * 4));
243	}
244
245	slice_dbg(" lsps=%lx, hsps=%lx\n",
246		  (unsigned long)mm_ctx_low_slices(&mm->context),
247		  (unsigned long)mm_ctx_high_slices(&mm->context));
248
249	spin_unlock_irqrestore(&slice_convert_lock, flags);
250
251	copro_flush_all_slbs(mm);
252}
253
254/*
255 * Compute which slice addr is part of;
256 * set *boundary_addr to the start or end boundary of that slice
257 * (depending on the 'end' parameter);
258 * return a boolean indicating whether the slice is marked as
259 * available in the 'available' slice_mask.
260 */
261static bool slice_scan_available(unsigned long addr,
262				 const struct slice_mask *available,
263				 int end, unsigned long *boundary_addr)
264{
265	unsigned long slice;
266	if (slice_addr_is_low(addr)) {
267		slice = GET_LOW_SLICE_INDEX(addr);
268		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
269		return !!(available->low_slices & (1u << slice));
270	} else {
271		slice = GET_HIGH_SLICE_INDEX(addr);
272		*boundary_addr = (slice + end) ?
273			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
274		return !!test_bit(slice, available->high_slices);
275	}
276}
277
278static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
279					      unsigned long len,
280					      const struct slice_mask *available,
281					      int psize, unsigned long high_limit)
282{
283	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
284	unsigned long addr, found, next_end;
285	struct vm_unmapped_area_info info;
286
287	info.flags = 0;
288	info.length = len;
289	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
290	info.align_offset = 0;
291
292	addr = TASK_UNMAPPED_BASE;
293	/*
294	 * Check up to the maximum allowed value for this mmap request
295	 */
296	while (addr < high_limit) {
297		info.low_limit = addr;
298		if (!slice_scan_available(addr, available, 1, &addr))
299			continue;
300
301 next_slice:
302		/*
303		 * At this point [info.low_limit; addr) covers
304		 * available slices only and ends at a slice boundary.
305		 * Check if we need to reduce the range, or if we can
306		 * extend it to cover the next available slice.
307		 */
308		if (addr >= high_limit)
309			addr = high_limit;
310		else if (slice_scan_available(addr, available, 1, &next_end)) {
311			addr = next_end;
312			goto next_slice;
313		}
314		info.high_limit = addr;
315
316		found = vm_unmapped_area(&info);
317		if (!(found & ~PAGE_MASK))
318			return found;
319	}
320
321	return -ENOMEM;
322}
323
324static unsigned long slice_find_area_topdown(struct mm_struct *mm,
325					     unsigned long len,
326					     const struct slice_mask *available,
327					     int psize, unsigned long high_limit)
328{
329	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
330	unsigned long addr, found, prev;
331	struct vm_unmapped_area_info info;
332	unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);
333
334	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
335	info.length = len;
336	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
337	info.align_offset = 0;
338
339	addr = mm->mmap_base;
340	/*
341	 * If we are trying to allocate above DEFAULT_MAP_WINDOW,
342	 * add the difference to mmap_base. This only applies to
343	 * requests for which high_limit is above
344	 * DEFAULT_MAP_WINDOW.
345	 */
346	if (high_limit > DEFAULT_MAP_WINDOW)
347		addr += mm_ctx_slb_addr_limit(&mm->context) - DEFAULT_MAP_WINDOW;
348
349	while (addr > min_addr) {
350		info.high_limit = addr;
351		if (!slice_scan_available(addr - 1, available, 0, &addr))
352			continue;
353
354 prev_slice:
355		/*
356		 * At this point [addr; info.high_limit) covers
357		 * available slices only and starts at a slice boundary.
358		 * Check if we need to reduce the range, or if we can
359		 * extend it to cover the previous available slice.
360		 */
361		if (addr < min_addr)
362			addr = min_addr;
363		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
364			addr = prev;
365			goto prev_slice;
366		}
367		info.low_limit = addr;
368
369		found = vm_unmapped_area(&info);
370		if (!(found & ~PAGE_MASK))
371			return found;
372	}
373
374	/*
375	 * A failed mmap() very likely causes application failure,
376	 * so fall back to the bottom-up function here. This scenario
377	 * can happen with large stack limits and large mmap()
378	 * allocations.
379	 */
380	return slice_find_area_bottomup(mm, len, available, psize, high_limit);
381}
382
383
384static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
385				     const struct slice_mask *mask, int psize,
386				     int topdown, unsigned long high_limit)
387{
388	if (topdown)
389		return slice_find_area_topdown(mm, len, mask, psize, high_limit);
390	else
391		return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
392}
393
394static inline void slice_copy_mask(struct slice_mask *dst,
395					const struct slice_mask *src)
396{
397	dst->low_slices = src->low_slices;
398	if (!SLICE_NUM_HIGH)
399		return;
400	bitmap_copy(dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
401}
402
403static inline void slice_or_mask(struct slice_mask *dst,
404					const struct slice_mask *src1,
405					const struct slice_mask *src2)
406{
407	dst->low_slices = src1->low_slices | src2->low_slices;
408	if (!SLICE_NUM_HIGH)
409		return;
410	bitmap_or(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
411}
412
413static inline void slice_andnot_mask(struct slice_mask *dst,
414					const struct slice_mask *src1,
415					const struct slice_mask *src2)
416{
417	dst->low_slices = src1->low_slices & ~src2->low_slices;
418	if (!SLICE_NUM_HIGH)
419		return;
420	bitmap_andnot(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
421}
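
Taken together, slice_copy_mask(), slice_or_mask() and slice_andnot_mask()
implement simple set algebra over slice masks. The allocator below
composes them roughly like this (a paraphrase of the flow in
slice_get_unmapped_area(), not additional kernel code):

	potential = free | good;	/* every slice we may place in */
	/* ... find an area inside potential ... */
	potential &= ~good;		/* keep only slices that still */
	potential &= ~compat;		/* need converting; the compat */
					/* step applies to the         */
					/* non-MAP_FIXED 64k-on-4k case*/
	slice_convert(mm, &potential, psize);
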
422
423#ifdef CONFIG_PPC_64K_PAGES
424#define MMU_PAGE_BASE	MMU_PAGE_64K
425#else
426#define MMU_PAGE_BASE	MMU_PAGE_4K
427#endif
428
429unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
430				      unsigned long flags, unsigned int psize,
431				      int topdown)
432{
433	struct slice_mask good_mask;
434	struct slice_mask potential_mask;
435	const struct slice_mask *maskp;
436	const struct slice_mask *compat_maskp = NULL;
437	int fixed = (flags & MAP_FIXED);
438	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
439	unsigned long page_size = 1UL << pshift;
440	struct mm_struct *mm = current->mm;
441	unsigned long newaddr;
442	unsigned long high_limit;
443
444	high_limit = DEFAULT_MAP_WINDOW;
445	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
446		high_limit = TASK_SIZE;
447
448	if (len > high_limit)
449		return -ENOMEM;
450	if (len & (page_size - 1))
451		return -EINVAL;
452	if (fixed) {
453		if (addr & (page_size - 1))
454			return -EINVAL;
455		if (addr > high_limit - len)
456			return -ENOMEM;
457	}
458
459	if (high_limit > mm_ctx_slb_addr_limit(&mm->context)) {
460		/*
461		 * Increasing the slb_addr_limit does not require
462		 * slice mask cache to be recalculated because it should
463		 * be already initialised beyond the old address limit.
464		 */
465		mm_ctx_set_slb_addr_limit(&mm->context, high_limit);
466
467		on_each_cpu(slice_flush_segments, mm, 1);
468	}
469
470	/* Sanity checks */
471	BUG_ON(mm->task_size == 0);
472	BUG_ON(mm_ctx_slb_addr_limit(&mm->context) == 0);
473	VM_BUG_ON(radix_enabled());
474
475	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
476	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
477		  addr, len, flags, topdown);
478
479	/* If hint, make sure it matches our alignment restrictions */
480	if (!fixed && addr) {
481		addr = _ALIGN_UP(addr, page_size);
482		slice_dbg(" aligned addr=%lx\n", addr);
483		/* Ignore hint if it's too large or overlaps a VMA */
484		if (addr > high_limit - len || addr < mmap_min_addr ||
485		    !slice_area_is_free(mm, addr, len))
486			addr = 0;
487	}
488
489	/* First make up a "good" mask of slices that have the right size
490	 * already
491	 */
492	maskp = slice_mask_for_size(&mm->context, psize);
493
494	/*
495	 * Here "good" means slices that are already the right page size,
496	 * "compat" means slices that have a compatible page size (i.e.
497	 * 4k in a 64k pagesize kernel), and "free" means slices without
498	 * any VMAs.
499	 *
500	 * If MAP_FIXED:
501	 *	check if fits in good | compat => OK
502	 *	check if fits in good | compat | free => convert free
503	 *	else bad
504	 * If have hint:
505	 *	check if hint fits in good => OK
506	 *	check if hint fits in good | free => convert free
507	 * Otherwise:
508	 *	search in good, found => OK
509	 *	search in good | free, found => convert free
510	 *	search in good | compat | free, found => convert free.
511	 */
512
513	/*
 514	 * If we support combo pages, we can allow 64k pages in 4k slices.
515	 * The mask copies could be avoided in most cases here if we had
516	 * a pointer to good mask for the next code to use.
517	 */
518	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
519		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
520		if (fixed)
521			slice_or_mask(&good_mask, maskp, compat_maskp);
522		else
523			slice_copy_mask(&good_mask, maskp);
524	} else {
525		slice_copy_mask(&good_mask, maskp);
526	}
527
528	slice_print_mask(" good_mask", &good_mask);
529	if (compat_maskp)
530		slice_print_mask(" compat_mask", compat_maskp);
531
532	/* First check hint if it's valid or if we have MAP_FIXED */
533	if (addr != 0 || fixed) {
534		/* Check if we fit in the good mask. If we do, we just return,
535		 * nothing else to do
536		 */
537		if (slice_check_range_fits(mm, &good_mask, addr, len)) {
538			slice_dbg(" fits good !\n");
539			newaddr = addr;
540			goto return_addr;
541		}
542	} else {
543		/* Now let's see if we can find something in the existing
544		 * slices for that size
545		 */
546		newaddr = slice_find_area(mm, len, &good_mask,
547					  psize, topdown, high_limit);
548		if (newaddr != -ENOMEM) {
549			/* Found within the good mask, we don't have to setup,
550			 * we thus return directly
551			 */
552			slice_dbg(" found area at 0x%lx\n", newaddr);
553			goto return_addr;
554		}
555	}
556	/*
557	 * We don't fit in the good mask, check what other slices are
558	 * empty and thus can be converted
559	 */
560	slice_mask_for_free(mm, &potential_mask, high_limit);
561	slice_or_mask(&potential_mask, &potential_mask, &good_mask);
562	slice_print_mask(" potential", &potential_mask);
563
564	if (addr != 0 || fixed) {
565		if (slice_check_range_fits(mm, &potential_mask, addr, len)) {
566			slice_dbg(" fits potential !\n");
567			newaddr = addr;
568			goto convert;
569		}
570	}
571
572	/* If we have MAP_FIXED and failed the above steps, then error out */
573	if (fixed)
574		return -EBUSY;
575
576	slice_dbg(" search...\n");
577
578	/* If we had a hint that didn't work out, see if we can fit
579	 * anywhere in the good area.
580	 */
581	if (addr) {
582		newaddr = slice_find_area(mm, len, &good_mask,
583					  psize, topdown, high_limit);
584		if (newaddr != -ENOMEM) {
585			slice_dbg(" found area at 0x%lx\n", newaddr);
586			goto return_addr;
587		}
588	}
589
590	/* Now let's see if we can find something in the existing slices
591	 * for that size plus free slices
592	 */
593	newaddr = slice_find_area(mm, len, &potential_mask,
594				  psize, topdown, high_limit);
595
596	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && newaddr == -ENOMEM &&
597	    psize == MMU_PAGE_64K) {
598		/* retry the search with 4k-page slices included */
599		slice_or_mask(&potential_mask, &potential_mask, compat_maskp);
600		newaddr = slice_find_area(mm, len, &potential_mask,
601					  psize, topdown, high_limit);
602	}
603
604	if (newaddr == -ENOMEM)
605		return -ENOMEM;
606
607	slice_range_to_mask(newaddr, len, &potential_mask);
608	slice_dbg(" found potential area at 0x%lx\n", newaddr);
609	slice_print_mask(" mask", &potential_mask);
610
611 convert:
612	/*
613	 * Try to allocate the context before we do slice convert
614	 * so that we handle the context allocation failure gracefully.
615	 */
616	if (need_extra_context(mm, newaddr)) {
617		if (alloc_extended_context(mm, newaddr) < 0)
618			return -ENOMEM;
619	}
620
621	slice_andnot_mask(&potential_mask, &potential_mask, &good_mask);
622	if (compat_maskp && !fixed)
623		slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp);
624	if (potential_mask.low_slices ||
625		(SLICE_NUM_HIGH &&
626		 !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) {
627		slice_convert(mm, &potential_mask, psize);
628		if (psize > MMU_PAGE_BASE)
629			on_each_cpu(slice_flush_segments, mm, 1);
630	}
631	return newaddr;
632
633return_addr:
634	if (need_extra_context(mm, newaddr)) {
635		if (alloc_extended_context(mm, newaddr) < 0)
636			return -ENOMEM;
637	}
638	return newaddr;
639}
640EXPORT_SYMBOL_GPL(slice_get_unmapped_area);
641
642unsigned long arch_get_unmapped_area(struct file *filp,
643				     unsigned long addr,
644				     unsigned long len,
645				     unsigned long pgoff,
646				     unsigned long flags)
647{
648	return slice_get_unmapped_area(addr, len, flags,
649				       mm_ctx_user_psize(&current->mm->context), 0);
650}
651
652unsigned long arch_get_unmapped_area_topdown(struct file *filp,
653					     const unsigned long addr0,
654					     const unsigned long len,
655					     const unsigned long pgoff,
656					     const unsigned long flags)
657{
658	return slice_get_unmapped_area(addr0, len, flags,
659				       mm_ctx_user_psize(&current->mm->context), 1);
660}
661
662unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
663{
664	unsigned char *psizes;
665	int index, mask_index;
666
667	VM_BUG_ON(radix_enabled());
668
669	if (slice_addr_is_low(addr)) {
670		psizes = mm_ctx_low_slices(&mm->context);
671		index = GET_LOW_SLICE_INDEX(addr);
672	} else {
673		psizes = mm_ctx_high_slices(&mm->context);
674		index = GET_HIGH_SLICE_INDEX(addr);
675	}
676	mask_index = index & 0x1;
677	return (psizes[index >> 1] >> (mask_index * 4)) & 0xf;
678}
679EXPORT_SYMBOL_GPL(get_slice_psize);
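
The psize arrays read by get_slice_psize() pack one 4-bit page-size index
per slice, two slices per byte; slice_convert() above rewrites the same
nibbles. A self-contained sketch of that packing scheme (the helpers
slice_psize_get()/slice_psize_set() are hypothetical names, for
illustration only):

#include <stdio.h>

static unsigned int slice_psize_get(const unsigned char *psizes, int index)
{
	int mask_index = index & 0x1;	/* even slice: low nibble, odd: high */

	return (psizes[index >> 1] >> (mask_index * 4)) & 0xf;
}

static void slice_psize_set(unsigned char *psizes, int index,
			    unsigned int psize)
{
	int mask_index = index & 0x1;

	psizes[index >> 1] = (psizes[index >> 1] & ~(0xf << (mask_index * 4))) |
			     (psize << (mask_index * 4));
}

int main(void)
{
	unsigned char lpsizes[8] = { 0 };

	slice_psize_set(lpsizes, 5, 0x4);	/* 0x4: arbitrary size index */
	printf("slice 5 psize = %u\n", slice_psize_get(lpsizes, 5));
	return 0;
}
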
680
681void slice_init_new_context_exec(struct mm_struct *mm)
682{
683	unsigned char *hpsizes, *lpsizes;
684	struct slice_mask *mask;
685	unsigned int psize = mmu_virtual_psize;
686
687	slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm);
688
689	/*
690	 * In the case of exec, use the default limit. In the
691	 * case of fork it is just inherited from the mm being
692	 * duplicated.
693	 */
694	mm_ctx_set_slb_addr_limit(&mm->context, SLB_ADDR_LIMIT_DEFAULT);
695	mm_ctx_set_user_psize(&mm->context, psize);
696
697	/*
698	 * Set all slice psizes to the default.
699	 */
700	lpsizes = mm_ctx_low_slices(&mm->context);
701	memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);
702
703	hpsizes = mm_ctx_high_slices(&mm->context);
704	memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);
705
706	/*
707	 * Slice mask cache starts zeroed, fill the default size cache.
708	 */
709	mask = slice_mask_for_size(&mm->context, psize);
710	mask->low_slices = ~0UL;
711	if (SLICE_NUM_HIGH)
712		bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
713}
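
A note on the memset() calls above: (psize << 4) | psize replicates the
4-bit page-size index into both nibbles of each byte, so a single memset()
assigns the default size to both slices stored per byte. For example, with
psize == 3 every byte becomes 0x33, and both nibbles decode back to 3.
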
714
715#ifdef CONFIG_PPC_BOOK3S_64
716void slice_setup_new_exec(void)
717{
718	struct mm_struct *mm = current->mm;
719
720	slice_dbg("slice_setup_new_exec(mm=%p)\n", mm);
721
722	if (!is_32bit_task())
723		return;
724
725	mm_ctx_set_slb_addr_limit(&mm->context, DEFAULT_MAP_WINDOW);
726}
727#endif
728
729void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
730			   unsigned long len, unsigned int psize)
731{
732	struct slice_mask mask;
733
734	VM_BUG_ON(radix_enabled());
735
736	slice_range_to_mask(start, len, &mask);
737	slice_convert(mm, &mask, psize);
738}
739
740#ifdef CONFIG_HUGETLB_PAGE
741/*
742 * is_hugepage_only_range() is used by generic code to verify whether
743 * a normal mmap mapping (non-hugetlbfs) is valid on a given area.
744 *
745 * Until the generic code provides a more generic hook and/or starts
746 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
747 * here knows how to deal with), we hijack it to keep standard mappings
748 * away from us.
749 *
750 * Because of that generic code limitation, MAP_FIXED mapping cannot
751 * "convert" back a slice with no VMAs to the standard page size, only
752 * get_unmapped_area() can. It would be possible to fix it here but I
753 * prefer working on fixing the generic code instead.
754 *
755 * WARNING: This will not work if hugetlbfs isn't enabled since the
756 * generic code will redefine that function as 0 in that case. This is
757 * ok for now as we only use slices with hugetlbfs enabled. This
758 * should be fixed as the generic code gets fixed.
759 */
760int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
761			   unsigned long len)
762{
763	const struct slice_mask *maskp;
764	unsigned int psize = mm_ctx_user_psize(&mm->context);
765
766	VM_BUG_ON(radix_enabled());
767
768	maskp = slice_mask_for_size(&mm->context, psize);
769
770	/* We need to account for 4k slices too */
771	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
772		const struct slice_mask *compat_maskp;
773		struct slice_mask available;
774
775		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
776		slice_or_mask(&available, maskp, compat_maskp);
777		return !slice_check_range_fits(mm, &available, addr, len);
778	}
779
780	return !slice_check_range_fits(mm, maskp, addr, len);
781}
782#endif
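
For context, the two arch_get_unmapped_area*() hooks above sit behind the
generic mmap() path, so any ordinary mapping exercises
slice_get_unmapped_area() on a hash-MMU powerpc task. A minimal userspace
sketch (plain Linux API, nothing powerpc-specific assumed):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16 * 1024 * 1024;
	/* The kernel picks the address via arch_get_unmapped_area()
	 * (or the topdown variant, depending on the mm layout). */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("mapped at %p\n", p);
	munmap(p, len);
	return 0;
}
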
v4.17
 
  1/*
  2 * address space "slices" (meta-segments) support
  3 *
  4 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
  5 *
  6 * Based on hugetlb implementation
  7 *
  8 * Copyright (C) 2003 David Gibson, IBM Corporation.
  9 *
 10 * This program is free software; you can redistribute it and/or modify
 11 * it under the terms of the GNU General Public License as published by
 12 * the Free Software Foundation; either version 2 of the License, or
 13 * (at your option) any later version.
 14 *
 15 * This program is distributed in the hope that it will be useful,
 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 18 * GNU General Public License for more details.
 19 *
 20 * You should have received a copy of the GNU General Public License
 21 * along with this program; if not, write to the Free Software
 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 23 */
 24
 25#undef DEBUG
 26
 27#include <linux/kernel.h>
 28#include <linux/mm.h>
 29#include <linux/pagemap.h>
 30#include <linux/err.h>
 31#include <linux/spinlock.h>
 32#include <linux/export.h>
 33#include <linux/hugetlb.h>
 34#include <asm/mman.h>
 35#include <asm/mmu.h>
 36#include <asm/copro.h>
 37#include <asm/hugetlb.h>
 38#include <asm/mmu_context.h>
 39
 40static DEFINE_SPINLOCK(slice_convert_lock);
 41
 42#ifdef DEBUG
 43int _slice_debug = 1;
 44
 45static void slice_print_mask(const char *label, const struct slice_mask *mask)
 46{
 47	if (!_slice_debug)
 48		return;
 49	pr_devel("%s low_slice: %*pbl\n", label,
 50			(int)SLICE_NUM_LOW, &mask->low_slices);
 51	pr_devel("%s high_slice: %*pbl\n", label,
 52			(int)SLICE_NUM_HIGH, mask->high_slices);
 53}
 54
 55#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)
 56
 57#else
 58
 59static void slice_print_mask(const char *label, const struct slice_mask *mask) {}
 60#define slice_dbg(fmt...)
 61
 62#endif
 63
 64static void slice_range_to_mask(unsigned long start, unsigned long len,
 65				struct slice_mask *ret)
 66{
 67	unsigned long end = start + len - 1;
 68
 69	ret->low_slices = 0;
 70	if (SLICE_NUM_HIGH)
 71		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
 72
 73	if (start < SLICE_LOW_TOP) {
 74		unsigned long mend = min(end,
 75					 (unsigned long)(SLICE_LOW_TOP - 1));
 76
 77		ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
 78			- (1u << GET_LOW_SLICE_INDEX(start));
 79	}
 80
 81	if ((start + len) > SLICE_LOW_TOP) {
 82		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
 83		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
 84		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
 85
 86		bitmap_set(ret->high_slices, start_index, count);
 87	}
 88}
 89
 90static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
 91			      unsigned long len)
 92{
 93	struct vm_area_struct *vma;
 94
 95	if ((mm->context.slb_addr_limit - len) < addr)
 96		return 0;
 97	vma = find_vma(mm, addr);
 98	return (!vma || (addr + len) <= vm_start_gap(vma));
 99}
100
101static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
102{
103	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
104				   1ul << SLICE_LOW_SHIFT);
105}
106
107static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
108{
109	unsigned long start = slice << SLICE_HIGH_SHIFT;
110	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);
111
112#ifdef CONFIG_PPC64
113	/* Hack, so that each address is controlled by exactly one
114	 * of the high or low area bitmaps, the first high area starts
115	 * at 4GB, not 0 */
116	if (start == 0)
117		start = SLICE_LOW_TOP;
118#endif
119
120	return !slice_area_is_free(mm, start, end - start);
121}
122
123static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
124				unsigned long high_limit)
125{
126	unsigned long i;
127
128	ret->low_slices = 0;
129	if (SLICE_NUM_HIGH)
130		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
131
132	for (i = 0; i < SLICE_NUM_LOW; i++)
133		if (!slice_low_has_vma(mm, i))
134			ret->low_slices |= 1u << i;
135
136	if (high_limit <= SLICE_LOW_TOP)
137		return;
138
139	for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
140		if (!slice_high_has_vma(mm, i))
141			__set_bit(i, ret->high_slices);
142}
143
144#ifdef CONFIG_PPC_BOOK3S_64
145static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize)
146{
147#ifdef CONFIG_PPC_64K_PAGES
148	if (psize == MMU_PAGE_64K)
149		return &mm->context.mask_64k;
150#endif
151	if (psize == MMU_PAGE_4K)
152		return &mm->context.mask_4k;
153#ifdef CONFIG_HUGETLB_PAGE
154	if (psize == MMU_PAGE_16M)
155		return &mm->context.mask_16m;
156	if (psize == MMU_PAGE_16G)
157		return &mm->context.mask_16g;
158#endif
159	BUG();
160}
161#elif defined(CONFIG_PPC_8xx)
162static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize)
163{
164	if (psize == mmu_virtual_psize)
165		return &mm->context.mask_base_psize;
166#ifdef CONFIG_HUGETLB_PAGE
167	if (psize == MMU_PAGE_512K)
168		return &mm->context.mask_512k;
169	if (psize == MMU_PAGE_8M)
170		return &mm->context.mask_8m;
171#endif
172	BUG();
173}
174#else
175#error "Must define the slice masks for page sizes supported by the platform"
176#endif
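
Unlike the v5.4 code above, which hands an mm_context_t to
slice_mask_for_size(), this v4.17 variant selects the cached per-size mask
straight from mm->context, and an unsupported page size is a hard BUG().
The cache is initialised in slice_init_new_context_exec() and kept up to
date by slice_convert() further down.
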
177
178static bool slice_check_range_fits(struct mm_struct *mm,
179			   const struct slice_mask *available,
180			   unsigned long start, unsigned long len)
181{
182	unsigned long end = start + len - 1;
183	u64 low_slices = 0;
184
185	if (start < SLICE_LOW_TOP) {
186		unsigned long mend = min(end,
187					 (unsigned long)(SLICE_LOW_TOP - 1));
188
189		low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
190				- (1u << GET_LOW_SLICE_INDEX(start));
191	}
192	if ((low_slices & available->low_slices) != low_slices)
193		return false;
194
195	if (SLICE_NUM_HIGH && ((start + len) > SLICE_LOW_TOP)) {
196		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
197		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
198		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
199		unsigned long i;
200
201		for (i = start_index; i < start_index + count; i++) {
202			if (!test_bit(i, available->high_slices))
203				return false;
204		}
205	}
206
207	return true;
208}
209
210static void slice_flush_segments(void *parm)
211{
212#ifdef CONFIG_PPC64
213	struct mm_struct *mm = parm;
214	unsigned long flags;
215
216	if (mm != current->active_mm)
217		return;
218
219	copy_mm_to_paca(current->active_mm);
220
221	local_irq_save(flags);
222	slb_flush_and_rebolt();
223	local_irq_restore(flags);
224#endif
225}
226
227static void slice_convert(struct mm_struct *mm,
228				const struct slice_mask *mask, int psize)
229{
230	int index, mask_index;
231	/* Write the new slice psize bits */
232	unsigned char *hpsizes, *lpsizes;
233	struct slice_mask *psize_mask, *old_mask;
234	unsigned long i, flags;
235	int old_psize;
236
237	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
238	slice_print_mask(" mask", mask);
239
240	psize_mask = slice_mask_for_size(mm, psize);
241
242	/* We need to use a spinlock here to protect against
243	 * concurrent 64k -> 4k demotion ...
244	 */
245	spin_lock_irqsave(&slice_convert_lock, flags);
246
247	lpsizes = mm->context.low_slices_psize;
248	for (i = 0; i < SLICE_NUM_LOW; i++) {
249		if (!(mask->low_slices & (1u << i)))
250			continue;
251
252		mask_index = i & 0x1;
253		index = i >> 1;
254
255		/* Update the slice_mask */
256		old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf;
257		old_mask = slice_mask_for_size(mm, old_psize);
258		old_mask->low_slices &= ~(1u << i);
259		psize_mask->low_slices |= 1u << i;
260
261		/* Update the sizes array */
262		lpsizes[index] = (lpsizes[index] & ~(0xf << (mask_index * 4))) |
263				(((unsigned long)psize) << (mask_index * 4));
264	}
265
266	hpsizes = mm->context.high_slices_psize;
267	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
268		if (!test_bit(i, mask->high_slices))
269			continue;
270
271		mask_index = i & 0x1;
272		index = i >> 1;
273
274		/* Update the slice_mask */
275		old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf;
276		old_mask = slice_mask_for_size(mm, old_psize);
277		__clear_bit(i, old_mask->high_slices);
278		__set_bit(i, psize_mask->high_slices);
279
280		/* Update the sizes array */
281		hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) |
282				(((unsigned long)psize) << (mask_index * 4));
283	}
284
285	slice_dbg(" lsps=%lx, hsps=%lx\n",
286		  (unsigned long)mm->context.low_slices_psize,
287		  (unsigned long)mm->context.high_slices_psize);
288
289	spin_unlock_irqrestore(&slice_convert_lock, flags);
290
291	copro_flush_all_slbs(mm);
292}
293
294/*
295 * Compute which slice addr is part of;
296 * set *boundary_addr to the start or end boundary of that slice
297 * (depending on the 'end' parameter);
298 * return a boolean indicating whether the slice is marked as
299 * available in the 'available' slice_mask.
300 */
301static bool slice_scan_available(unsigned long addr,
302				 const struct slice_mask *available,
303				 int end, unsigned long *boundary_addr)
304{
305	unsigned long slice;
306	if (addr < SLICE_LOW_TOP) {
307		slice = GET_LOW_SLICE_INDEX(addr);
308		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
309		return !!(available->low_slices & (1u << slice));
310	} else {
311		slice = GET_HIGH_SLICE_INDEX(addr);
312		*boundary_addr = (slice + end) ?
313			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
314		return !!test_bit(slice, available->high_slices);
315	}
316}
317
318static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
319					      unsigned long len,
320					      const struct slice_mask *available,
321					      int psize, unsigned long high_limit)
322{
323	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
324	unsigned long addr, found, next_end;
325	struct vm_unmapped_area_info info;
326
327	info.flags = 0;
328	info.length = len;
329	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
330	info.align_offset = 0;
331
332	addr = TASK_UNMAPPED_BASE;
333	/*
334	 * Check up to the maximum allowed value for this mmap request
335	 */
336	while (addr < high_limit) {
337		info.low_limit = addr;
338		if (!slice_scan_available(addr, available, 1, &addr))
339			continue;
340
341 next_slice:
342		/*
343		 * At this point [info.low_limit; addr) covers
344		 * available slices only and ends at a slice boundary.
345		 * Check if we need to reduce the range, or if we can
346		 * extend it to cover the next available slice.
347		 */
348		if (addr >= high_limit)
349			addr = high_limit;
350		else if (slice_scan_available(addr, available, 1, &next_end)) {
351			addr = next_end;
352			goto next_slice;
353		}
354		info.high_limit = addr;
355
356		found = vm_unmapped_area(&info);
357		if (!(found & ~PAGE_MASK))
358			return found;
359	}
360
361	return -ENOMEM;
362}
363
364static unsigned long slice_find_area_topdown(struct mm_struct *mm,
365					     unsigned long len,
366					     const struct slice_mask *available,
367					     int psize, unsigned long high_limit)
368{
369	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
370	unsigned long addr, found, prev;
371	struct vm_unmapped_area_info info;
372
373	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
374	info.length = len;
375	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
376	info.align_offset = 0;
377
378	addr = mm->mmap_base;
379	/*
380	 * If we are trying to allocate above DEFAULT_MAP_WINDOW,
381	 * add the difference to mmap_base. This only applies to
382	 * requests for which high_limit is above
383	 * DEFAULT_MAP_WINDOW.
384	 */
385	if (high_limit > DEFAULT_MAP_WINDOW)
386		addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW;
387
388	while (addr > PAGE_SIZE) {
389		info.high_limit = addr;
390		if (!slice_scan_available(addr - 1, available, 0, &addr))
391			continue;
392
393 prev_slice:
394		/*
395		 * At this point [addr; info.high_limit) covers
396		 * available slices only and starts at a slice boundary.
397		 * Check if we need to reduce the range, or if we can
398		 * extend it to cover the previous available slice.
399		 */
400		if (addr < PAGE_SIZE)
401			addr = PAGE_SIZE;
402		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
403			addr = prev;
404			goto prev_slice;
405		}
406		info.low_limit = addr;
407
408		found = vm_unmapped_area(&info);
409		if (!(found & ~PAGE_MASK))
410			return found;
411	}
412
413	/*
414	 * A failed mmap() very likely causes application failure,
415	 * so fall back to the bottom-up function here. This scenario
416	 * can happen with large stack limits and large mmap()
417	 * allocations.
418	 */
419	return slice_find_area_bottomup(mm, len, available, psize, high_limit);
420}
421
422
423static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
424				     const struct slice_mask *mask, int psize,
425				     int topdown, unsigned long high_limit)
426{
427	if (topdown)
428		return slice_find_area_topdown(mm, len, mask, psize, high_limit);
429	else
430		return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
431}
432
433static inline void slice_copy_mask(struct slice_mask *dst,
434					const struct slice_mask *src)
435{
436	dst->low_slices = src->low_slices;
437	if (!SLICE_NUM_HIGH)
438		return;
439	bitmap_copy(dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
440}
441
442static inline void slice_or_mask(struct slice_mask *dst,
443					const struct slice_mask *src1,
444					const struct slice_mask *src2)
445{
446	dst->low_slices = src1->low_slices | src2->low_slices;
447	if (!SLICE_NUM_HIGH)
448		return;
449	bitmap_or(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
450}
451
452static inline void slice_andnot_mask(struct slice_mask *dst,
453					const struct slice_mask *src1,
454					const struct slice_mask *src2)
455{
456	dst->low_slices = src1->low_slices & ~src2->low_slices;
457	if (!SLICE_NUM_HIGH)
458		return;
459	bitmap_andnot(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
460}
461
462#ifdef CONFIG_PPC_64K_PAGES
463#define MMU_PAGE_BASE	MMU_PAGE_64K
464#else
465#define MMU_PAGE_BASE	MMU_PAGE_4K
466#endif
467
468unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
469				      unsigned long flags, unsigned int psize,
470				      int topdown)
471{
472	struct slice_mask good_mask;
473	struct slice_mask potential_mask;
474	const struct slice_mask *maskp;
475	const struct slice_mask *compat_maskp = NULL;
476	int fixed = (flags & MAP_FIXED);
477	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
478	unsigned long page_size = 1UL << pshift;
479	struct mm_struct *mm = current->mm;
480	unsigned long newaddr;
481	unsigned long high_limit;
482
483	high_limit = DEFAULT_MAP_WINDOW;
484	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
485		high_limit = TASK_SIZE;
486
487	if (len > high_limit)
488		return -ENOMEM;
489	if (len & (page_size - 1))
490		return -EINVAL;
491	if (fixed) {
492		if (addr & (page_size - 1))
493			return -EINVAL;
494		if (addr > high_limit - len)
495			return -ENOMEM;
496	}
497
498	if (high_limit > mm->context.slb_addr_limit) {
499		/*
500		 * Increasing the slb_addr_limit does not require
501		 * slice mask cache to be recalculated because it should
502		 * be already initialised beyond the old address limit.
503		 */
504		mm->context.slb_addr_limit = high_limit;
505
506		on_each_cpu(slice_flush_segments, mm, 1);
507	}
508
509	/* Sanity checks */
510	BUG_ON(mm->task_size == 0);
511	BUG_ON(mm->context.slb_addr_limit == 0);
512	VM_BUG_ON(radix_enabled());
513
514	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
515	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
516		  addr, len, flags, topdown);
517
518	/* If hint, make sure it matches our alignment restrictions */
519	if (!fixed && addr) {
520		addr = _ALIGN_UP(addr, page_size);
521		slice_dbg(" aligned addr=%lx\n", addr);
522		/* Ignore hint if it's too large or overlaps a VMA */
523		if (addr > high_limit - len ||
524		    !slice_area_is_free(mm, addr, len))
525			addr = 0;
526	}
527
528	/* First make up a "good" mask of slices that have the right size
529	 * already
530	 */
531	maskp = slice_mask_for_size(mm, psize);
532
533	/*
534	 * Here "good" means slices that are already the right page size,
535	 * "compat" means slices that have a compatible page size (i.e.
536	 * 4k in a 64k pagesize kernel), and "free" means slices without
537	 * any VMAs.
538	 *
539	 * If MAP_FIXED:
540	 *	check if fits in good | compat => OK
541	 *	check if fits in good | compat | free => convert free
542	 *	else bad
543	 * If have hint:
544	 *	check if hint fits in good => OK
545	 *	check if hint fits in good | free => convert free
546	 * Otherwise:
547	 *	search in good, found => OK
548	 *	search in good | free, found => convert free
549	 *	search in good | compat | free, found => convert free.
550	 */
551
552	/*
 553	 * If we support combo pages, we can allow 64k pages in 4k slices.
554	 * The mask copies could be avoided in most cases here if we had
555	 * a pointer to good mask for the next code to use.
556	 */
557	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
558		compat_maskp = slice_mask_for_size(mm, MMU_PAGE_4K);
559		if (fixed)
560			slice_or_mask(&good_mask, maskp, compat_maskp);
561		else
562			slice_copy_mask(&good_mask, maskp);
563	} else {
564		slice_copy_mask(&good_mask, maskp);
565	}
566
567	slice_print_mask(" good_mask", &good_mask);
568	if (compat_maskp)
569		slice_print_mask(" compat_mask", compat_maskp);
570
571	/* First check hint if it's valid or if we have MAP_FIXED */
572	if (addr != 0 || fixed) {
573		/* Check if we fit in the good mask. If we do, we just return,
574		 * nothing else to do
575		 */
576		if (slice_check_range_fits(mm, &good_mask, addr, len)) {
577			slice_dbg(" fits good !\n");
578			newaddr = addr;
579			goto return_addr;
580		}
581	} else {
582		/* Now let's see if we can find something in the existing
583		 * slices for that size
584		 */
585		newaddr = slice_find_area(mm, len, &good_mask,
586					  psize, topdown, high_limit);
587		if (newaddr != -ENOMEM) {
588			/* Found within the good mask, we don't have to setup,
589			 * we thus return directly
590			 */
591			slice_dbg(" found area at 0x%lx\n", newaddr);
592			goto return_addr;
593		}
594	}
595	/*
596	 * We don't fit in the good mask, check what other slices are
597	 * empty and thus can be converted
598	 */
599	slice_mask_for_free(mm, &potential_mask, high_limit);
600	slice_or_mask(&potential_mask, &potential_mask, &good_mask);
601	slice_print_mask(" potential", &potential_mask);
602
603	if (addr != 0 || fixed) {
604		if (slice_check_range_fits(mm, &potential_mask, addr, len)) {
605			slice_dbg(" fits potential !\n");
606			newaddr = addr;
607			goto convert;
608		}
609	}
610
611	/* If we have MAP_FIXED and failed the above steps, then error out */
612	if (fixed)
613		return -EBUSY;
614
615	slice_dbg(" search...\n");
616
617	/* If we had a hint that didn't work out, see if we can fit
618	 * anywhere in the good area.
619	 */
620	if (addr) {
621		newaddr = slice_find_area(mm, len, &good_mask,
622					  psize, topdown, high_limit);
623		if (newaddr != -ENOMEM) {
624			slice_dbg(" found area at 0x%lx\n", newaddr);
625			goto return_addr;
626		}
627	}
628
629	/* Now let's see if we can find something in the existing slices
630	 * for that size plus free slices
631	 */
632	newaddr = slice_find_area(mm, len, &potential_mask,
633				  psize, topdown, high_limit);
634
635#ifdef CONFIG_PPC_64K_PAGES
636	if (newaddr == -ENOMEM && psize == MMU_PAGE_64K) {
637		/* retry the search with 4k-page slices included */
638		slice_or_mask(&potential_mask, &potential_mask, compat_maskp);
639		newaddr = slice_find_area(mm, len, &potential_mask,
640					  psize, topdown, high_limit);
641	}
642#endif
643
644	if (newaddr == -ENOMEM)
645		return -ENOMEM;
646
647	slice_range_to_mask(newaddr, len, &potential_mask);
648	slice_dbg(" found potential area at 0x%lx\n", newaddr);
649	slice_print_mask(" mask", &potential_mask);
650
651 convert:
652	/*
653	 * Try to allocate the context before we do slice convert
654	 * so that we handle the context allocation failure gracefully.
655	 */
656	if (need_extra_context(mm, newaddr)) {
657		if (alloc_extended_context(mm, newaddr) < 0)
658			return -ENOMEM;
659	}
660
661	slice_andnot_mask(&potential_mask, &potential_mask, &good_mask);
662	if (compat_maskp && !fixed)
663		slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp);
664	if (potential_mask.low_slices ||
665		(SLICE_NUM_HIGH &&
666		 !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) {
667		slice_convert(mm, &potential_mask, psize);
668		if (psize > MMU_PAGE_BASE)
669			on_each_cpu(slice_flush_segments, mm, 1);
670	}
671	return newaddr;
672
673return_addr:
674	if (need_extra_context(mm, newaddr)) {
675		if (alloc_extended_context(mm, newaddr) < 0)
676			return -ENOMEM;
677	}
678	return newaddr;
679}
680EXPORT_SYMBOL_GPL(slice_get_unmapped_area);
681
682unsigned long arch_get_unmapped_area(struct file *filp,
683				     unsigned long addr,
684				     unsigned long len,
685				     unsigned long pgoff,
686				     unsigned long flags)
687{
688	return slice_get_unmapped_area(addr, len, flags,
689				       current->mm->context.user_psize, 0);
690}
691
692unsigned long arch_get_unmapped_area_topdown(struct file *filp,
693					     const unsigned long addr0,
694					     const unsigned long len,
695					     const unsigned long pgoff,
696					     const unsigned long flags)
697{
698	return slice_get_unmapped_area(addr0, len, flags,
699				       current->mm->context.user_psize, 1);
700}
701
702unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
703{
704	unsigned char *psizes;
705	int index, mask_index;
706
707	VM_BUG_ON(radix_enabled());
708
709	if (addr < SLICE_LOW_TOP) {
710		psizes = mm->context.low_slices_psize;
711		index = GET_LOW_SLICE_INDEX(addr);
712	} else {
713		psizes = mm->context.high_slices_psize;
714		index = GET_HIGH_SLICE_INDEX(addr);
715	}
716	mask_index = index & 0x1;
717	return (psizes[index >> 1] >> (mask_index * 4)) & 0xf;
718}
719EXPORT_SYMBOL_GPL(get_slice_psize);
720
721void slice_init_new_context_exec(struct mm_struct *mm)
722{
723	unsigned char *hpsizes, *lpsizes;
724	struct slice_mask *mask;
725	unsigned int psize = mmu_virtual_psize;
726
727	slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm);
728
729	/*
730	 * In the case of exec, use the default limit. In the
731	 * case of fork it is just inherited from the mm being
732	 * duplicated.
733	 */
734#ifdef CONFIG_PPC64
735	mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
736#else
737	mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW;
738#endif
739
740	mm->context.user_psize = psize;
741
742	/*
743	 * Set all slice psizes to the default.
744	 */
745	lpsizes = mm->context.low_slices_psize;
746	memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);
747
748	hpsizes = mm->context.high_slices_psize;
749	memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);
750
751	/*
752	 * Slice mask cache starts zeroed, fill the default size cache.
753	 */
754	mask = slice_mask_for_size(mm, psize);
755	mask->low_slices = ~0UL;
756	if (SLICE_NUM_HIGH)
757		bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
758}
759
760void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
761			   unsigned long len, unsigned int psize)
762{
763	struct slice_mask mask;
764
765	VM_BUG_ON(radix_enabled());
766
767	slice_range_to_mask(start, len, &mask);
768	slice_convert(mm, &mask, psize);
769}
770
771#ifdef CONFIG_HUGETLB_PAGE
772/*
773 * is_hugepage_only_range() is used by generic code to verify whether
774 * a normal mmap mapping (non-hugetlbfs) is valid on a given area.
775 *
776 * Until the generic code provides a more generic hook and/or starts
777 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
778 * here knows how to deal with), we hijack it to keep standard mappings
779 * away from us.
780 *
781 * Because of that generic code limitation, MAP_FIXED mapping cannot
782 * "convert" back a slice with no VMAs to the standard page size, only
783 * get_unmapped_area() can. It would be possible to fix it here but I
784 * prefer working on fixing the generic code instead.
785 *
786 * WARNING: This will not work if hugetlbfs isn't enabled since the
787 * generic code will redefine that function as 0 in that case. This is
788 * ok for now as we only use slices with hugetlbfs enabled. This
789 * should be fixed as the generic code gets fixed.
790 */
791int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
792			   unsigned long len)
793{
794	const struct slice_mask *maskp;
795	unsigned int psize = mm->context.user_psize;
796
797	VM_BUG_ON(radix_enabled());
798
799	maskp = slice_mask_for_size(mm, psize);
800#ifdef CONFIG_PPC_64K_PAGES
801	/* We need to account for 4k slices too */
802	if (psize == MMU_PAGE_64K) {
803		const struct slice_mask *compat_maskp;
804		struct slice_mask available;
805
806		compat_maskp = slice_mask_for_size(mm, MMU_PAGE_4K);
807		slice_or_mask(&available, maskp, compat_maskp);
808		return !slice_check_range_fits(mm, &available, addr, len);
809	}
810#endif
811
812	return !slice_check_range_fits(mm, maskp, addr, len);
813}
814#endif