/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>

/* some sanity checks */
#if (PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE
#error PGTABLE_RANGE exceeds slice_mask high_slices size
#endif
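
/*
 * For orientation: assuming the usual book3s64 definitions from
 * asm/page_64.h (SLICE_LOW_SHIFT == 28, SLICE_HIGH_SHIFT == 40,
 * SLICE_LOW_TOP == 0x100000000ul), the address space is carved into
 * sixteen 256MB "low" slices covering 0..4GB plus 1TB "high" slices
 * covering everything above, so a slice_mask is simply a pair of
 * bitmaps:
 *
 *	struct slice_mask {
 *		u16 low_slices;		// one bit per 256MB slice below 4GB
 *		u64 high_slices;	// one bit per 1TB slice above 4GB
 *	};
 */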

static DEFINE_SPINLOCK(slice_convert_lock);


#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, struct slice_mask mask)
{
	char	*p, buf[16 + 3 + 64 + 1];
	int	i;

	if (!_slice_debug)
		return;
	p = buf;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		*(p++) = (mask.low_slices & (1 << i)) ? '1' : '0';
	*(p++) = ' ';
	*(p++) = '-';
	*(p++) = ' ';
	for (i = 0; i < SLICE_NUM_HIGH; i++)
		*(p++) = (mask.high_slices & (1ul << i)) ? '1' : '0';
	*(p++) = 0;

	printk(KERN_DEBUG "%s:%s\n", label, buf);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_debug(fmt); } while (0)

#else

static void slice_print_mask(const char *label, struct slice_mask mask) {}
#define slice_dbg(fmt...)

#endif

static struct slice_mask slice_range_to_mask(unsigned long start,
					     unsigned long len)
{
	unsigned long end = start + len - 1;
	struct slice_mask ret = { 0, 0 };

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = min(end, SLICE_LOW_TOP);
		unsigned long mstart = min(start, SLICE_LOW_TOP);

		ret.low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(mstart));
	}

	if ((start + len) > SLICE_LOW_TOP)
		ret.high_slices = (1ul << (GET_HIGH_SLICE_INDEX(end) + 1))
			- (1ul << GET_HIGH_SLICE_INDEX(start));

	return ret;
}
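
/*
 * Worked example for slice_range_to_mask() above, under the assumed
 * geometry: start = 0x10000000 (256MB), len = 0x20000000 (512MB) gives
 * end = 0x2fffffff, GET_LOW_SLICE_INDEX(start) == 1 and
 * GET_LOW_SLICE_INDEX(end) == 2, hence
 * low_slices = (1u << 3) - (1u << 1) = 0b0110 (slices 1 and 2), while
 * high_slices stays 0 because start + len <= SLICE_LOW_TOP.
 */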

static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm->task_size - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vma->vm_start);
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

	/* Hack, so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = SLICE_LOW_TOP;

	return !slice_area_is_free(mm, start, end - start);
}

static struct slice_mask slice_mask_for_free(struct mm_struct *mm)
{
	struct slice_mask ret = { 0, 0 };
	unsigned long i;

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret.low_slices |= 1u << i;

	if (mm->task_size <= SLICE_LOW_TOP)
		return ret;

	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (!slice_high_has_vma(mm, i))
			ret.high_slices |= 1ul << i;

	return ret;
}

static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)
{
	unsigned char *hpsizes;
	int index, mask_index;
	struct slice_mask ret = { 0, 0 };
	unsigned long i;
	u64 lpsizes;

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == psize)
			ret.low_slices |= 1u << i;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
			ret.high_slices |= 1ul << i;
	}

	return ret;
}
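
/*
 * The psize arrays read above pack one 4-bit MMU page size index per
 * slice: low_slices_psize is a u64 holding 16 nibbles (one per low
 * slice) and high_slices_psize is a byte array holding two nibbles per
 * byte (one per high slice), which is what the index/mask_index
 * arithmetic unpacks. E.g. setting the two lowest nibbles of
 * low_slices_psize to MMU_PAGE_64K marks slices 0 and 1 as 64K, while
 * nibbles left at 0 mean MMU_PAGE_4K.
 */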

static int slice_check_fit(struct slice_mask mask, struct slice_mask available)
{
	return (mask.low_slices & available.low_slices) == mask.low_slices &&
		(mask.high_slices & available.high_slices) == mask.high_slices;
}

static void slice_flush_segments(void *parm)
{
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	copy_mm_to_paca(&current->active_mm->context);

	local_irq_save(flags);
	slb_flush_and_rebolt();
	local_irq_restore(flags);
}

static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
{
	int index, mask_index;
	/* Write the new slice psize bits */
	unsigned char *hpsizes;
	u64 lpsizes;
	unsigned long i, flags;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (mask.low_slices & (1u << i))
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	/* Assign the value back */
	mm->context.low_slices_psize = lpsizes;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (mask.high_slices & (1ul << i))
			hpsizes[index] = (hpsizes[index] &
					  ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

	spin_unlock_irqrestore(&slice_convert_lock, flags);

	copro_flush_all_slbs(mm);
}

/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on 'end' parameter);
 * return boolean indicating if the slice is marked as available in the
 * 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
				 struct slice_mask available,
				 int end,
				 unsigned long *boundary_addr)
{
	unsigned long slice;
	if (addr < SLICE_LOW_TOP) {
		slice = GET_LOW_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
		return !!(available.low_slices & (1u << slice));
	} else {
		slice = GET_HIGH_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) ?
			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
		return !!(available.high_slices & (1ul << slice));
	}
}
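
/*
 * The 'end' argument is used arithmetically: with end == 1 the reported
 * boundary is the end of the slice containing addr ((index + 1) shifted
 * up), with end == 0 it is the start. E.g. addr == 0x18000000 and
 * end == 1 give slice == 1 and *boundary_addr == 2ul << 28 ==
 * 0x20000000, assuming SLICE_LOW_SHIFT == 28 as before.
 */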

static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      struct slice_mask available,
					      int psize)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, next_end;
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = TASK_UNMAPPED_BASE;
	while (addr < TASK_SIZE) {
		info.low_limit = addr;
		if (!slice_scan_available(addr, available, 1, &addr))
			continue;

 next_slice:
		/*
		 * At this point [info.low_limit; addr) covers
		 * available slices only and ends at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the next available slice.
		 */
		if (addr >= TASK_SIZE)
			addr = TASK_SIZE;
		else if (slice_scan_available(addr, available, 1, &next_end)) {
			addr = next_end;
			goto next_slice;
		}
		info.high_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	return -ENOMEM;
}

static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     struct slice_mask available,
					     int psize)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, prev;
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = mm->mmap_base;
	while (addr > PAGE_SIZE) {
		info.high_limit = addr;
		if (!slice_scan_available(addr - 1, available, 0, &addr))
			continue;

 prev_slice:
		/*
		 * At this point [addr; info.high_limit) covers
		 * available slices only and starts at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the previous available slice.
		 */
		if (addr < PAGE_SIZE)
			addr = PAGE_SIZE;
		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
			addr = prev;
			goto prev_slice;
		}
		info.low_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return slice_find_area_bottomup(mm, len, available, psize);
}


static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     struct slice_mask mask, int psize,
				     int topdown)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize);
	else
		return slice_find_area_bottomup(mm, len, mask, psize);
}

#define or_mask(dst, src)	do {			\
	(dst).low_slices |= (src).low_slices;		\
	(dst).high_slices |= (src).high_slices;		\
} while (0)

#define andnot_mask(dst, src)	do {			\
	(dst).low_slices &= ~(src).low_slices;		\
	(dst).high_slices &= ~(src).high_slices;	\
} while (0)

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif
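
/*
 * MMU_PAGE_BASE is the kernel's base page size. slice_get_unmapped_area()
 * below only forces an immediate segment flush (slice_flush_segments())
 * after converting slices to a psize above the base size; conversions
 * down to the base size are presumably left to the lazy demotion path
 * in the hash fault code.
 */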

unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown)
{
	struct slice_mask mask = {0, 0};
	struct slice_mask good_mask;
	struct slice_mask potential_mask = {0,0} /* silence stupid warning */;
	struct slice_mask compat_mask = {0, 0};
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
		  addr, len, flags, topdown);

	if (len > mm->task_size)
		return -ENOMEM;
	if (len & ((1ul << pshift) - 1))
		return -EINVAL;
	if (fixed && (addr & ((1ul << pshift) - 1)))
		return -EINVAL;
	if (fixed && addr > (mm->task_size - len))
		return -ENOMEM;

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = _ALIGN_UP(addr, 1ul << pshift);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > mm->task_size - len ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	good_mask = slice_mask_for_size(mm, psize);
	slice_print_mask(" good_mask", good_mask);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */

#ifdef CONFIG_PPC_64K_PAGES
	/* If we support combo pages, we can allow 64k pages in 4k slices */
	if (psize == MMU_PAGE_64K) {
		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
		if (fixed)
			or_mask(good_mask, compat_mask);
	}
#endif

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Build a mask for the requested range */
		mask = slice_range_to_mask(addr, len);
		slice_print_mask(" mask", mask);

		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_fit(mask, good_mask)) {
			slice_dbg(" fits good !\n");
			return addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, good_mask, psize, topdown);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, we don't have to setup,
			 * we thus return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			return newaddr;
		}
	}

	/* We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	potential_mask = slice_mask_for_free(mm);
	or_mask(potential_mask, good_mask);
	slice_print_mask(" potential", potential_mask);

	if ((addr != 0 || fixed) && slice_check_fit(mask, potential_mask)) {
		slice_dbg(" fits potential !\n");
		goto convert;
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		addr = slice_find_area(mm, len, good_mask, psize, topdown);
		if (addr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", addr);
			return addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	addr = slice_find_area(mm, len, potential_mask, psize, topdown);

#ifdef CONFIG_PPC_64K_PAGES
	if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		or_mask(potential_mask, compat_mask);
		addr = slice_find_area(mm, len, potential_mask, psize,
				       topdown);
	}
#endif

	if (addr == -ENOMEM)
		return -ENOMEM;

	mask = slice_range_to_mask(addr, len);
	slice_dbg(" found potential area at 0x%lx\n", addr);
	slice_print_mask(" mask", mask);

 convert:
	andnot_mask(mask, good_mask);
	andnot_mask(mask, compat_mask);
	if (mask.low_slices || mask.high_slices) {
		slice_convert(mm, mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return addr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);
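
/*
 * A sketch of a typical external caller, modelled on the powerpc hugetlb
 * code of this vintage (hugetlb_get_unmapped_area(); details may differ
 * between trees):
 *
 *	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
 *
 *	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
 *
 * i.e. hugetlb searches top-down, with psize taken from the huge page
 * size rather than from mm->context.user_psize as the plain
 * arch_get_unmapped_area() wrappers below do.
 */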

unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       current->mm->context.user_psize, 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       current->mm->context.user_psize, 1);
}

unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	unsigned char *hpsizes;
	int index, mask_index;

	if (addr < SLICE_LOW_TOP) {
		u64 lpsizes;
		lpsizes = mm->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
		return (lpsizes >> (index * 4)) & 0xf;
	}
	hpsizes = mm->context.high_slices_psize;
	index = GET_HIGH_SLICE_INDEX(addr);
	mask_index = index & 0x1;
	return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);
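
/*
 * Lookup example for get_slice_psize(), assuming 1TB high slices:
 * addr == 0x30000000000 (3TB) gives GET_HIGH_SLICE_INDEX(addr) == 3,
 * so index >> 1 == 1 and mask_index == 1, i.e. the psize is the upper
 * nibble of hpsizes[1].
 */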

/*
 * This is called by hash_page when it needs to do a lazy conversion of
 * an address space from real 64K pages to combo 4K pages (typically
 * when hitting a non-cacheable mapping on a processor or hypervisor
 * that won't allow them for 64K pages).
 *
 * This is also called in init_new_context() to change back the user
 * psize from whatever the parent context had it set to.
 * N.B. This may be called before mm->context.id has been set.
 *
 * This function will only change the content of the {low,high}_slice_psize
 * masks, it will not flush SLBs as this shall be handled lazily by the
 * caller.
 */
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
	int index, mask_index;
	unsigned char *hpsizes;
	unsigned long flags, lpsizes;
	unsigned int old_psize;
	int i;

	slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);

	spin_lock_irqsave(&slice_convert_lock, flags);

	old_psize = mm->context.user_psize;
	slice_dbg(" old_psize=%d\n", old_psize);
	if (old_psize == psize)
		goto bail;

	mm->context.user_psize = psize;
	wmb();

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));
	/* Assign the value back */
	mm->context.low_slices_psize = lpsizes;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize)
			hpsizes[index] = (hpsizes[index] &
					  ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

 bail:
	spin_unlock_irqrestore(&slice_convert_lock, flags);
}

void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask = slice_range_to_mask(start, len);

	slice_convert(mm, mask, psize);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non-hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, MAP_FIXED mappings cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is OK
 * for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len)
{
	struct slice_mask mask, available;
	unsigned int psize = mm->context.user_psize;

	mask = slice_range_to_mask(addr, len);
	available = slice_mask_for_size(mm, psize);
#ifdef CONFIG_PPC_64K_PAGES
	/* We need to account for 4k slices too */
	if (psize == MMU_PAGE_64K) {
		struct slice_mask compat_mask;
		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
		or_mask(available, compat_mask);
	}
#endif

#if 0 /* too verbose */
	slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
		 mm, addr, len);
	slice_print_mask(" mask", mask);
	slice_print_mask(" available", available);
#endif
	return !slice_check_fit(mask, available);
}
#endif