// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "internal.h"
#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name;
}

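/*
 * Bitmap-granularity alignment mask for an allocation of the given page
 * order; used as the align_mask argument when searching the bitmap.
 */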
static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bit units.
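 * For example (hypothetical numbers): with base_pfn == 0x2100,
 * align_order == 10 and order_per_bit == 0, the offset is
 * 0x2100 & 0x3ff == 0x100 bitmap bits.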
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

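/*
 * Convert a page count into a bitmap bit count, rounding up to whole
 * order_per_bit chunks.
 */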
static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

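/* Mark a pfn range as free again in the area's allocation bitmap. */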
static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned long count)
{
	unsigned long bitmap_no, bitmap_count;
	unsigned long flags;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	spin_lock_irqsave(&cma->lock, flags);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	spin_unlock_irqrestore(&cma->lock, flags);
}

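/*
 * Late activation of a reserved area: allocate the bitmap, verify the
 * whole range sits in a single zone, and hand the pageblocks to the
 * buddy allocator as MIGRATE_CMA.
 */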
static void __init cma_activate_area(struct cma *cma)
{
	unsigned long base_pfn = cma->base_pfn, pfn;
	struct zone *zone;

	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
	if (!cma->bitmap)
		goto out_error;

	/*
	 * alloc_contig_range() requires the pfn range specified to be in the
	 * same zone. Simplify by forcing the entire CMA reserved range to be
	 * in the same zone.
	 */
	WARN_ON_ONCE(!pfn_valid(base_pfn));
	zone = page_zone(pfn_to_page(base_pfn));
	for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {
		WARN_ON_ONCE(!pfn_valid(pfn));
		if (page_zone(pfn_to_page(pfn)) != zone)
			goto not_in_zone;
	}

	for (pfn = base_pfn; pfn < base_pfn + cma->count;
	     pfn += pageblock_nr_pages)
		init_cma_reserved_pageblock(pfn_to_page(pfn));

	spin_lock_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return;

not_in_zone:
	bitmap_free(cma->bitmap);
out_error:
	/* Expose all pages to the buddy allocator; they are unusable for CMA. */
	if (!cma->reserve_pages_on_error) {
		for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
			free_reserved_page(pfn_to_page(pfn));
	}
	totalcma_pages -= cma->count;
	cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++)
		cma_activate_area(&cma_areas[i]);

	return 0;
}
core_initcall(cma_init_reserved_areas);

void __init cma_reserve_pages_on_error(struct cma *cma)
{
	cma->reserve_pages_on_error = true;
}

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area.
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit in the bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
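 *
 * A minimal caller sketch (hypothetical names; assumes @base was reserved
 * through memblock and satisfies CMA_MIN_ALIGNMENT_BYTES):
 *
 *	struct cma *my_cma;
 *	int err = cma_init_reserved_mem(base, SZ_64M, 0, "my-cma", &my_cma);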
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* the minimal CMA alignment must cover order_per_bit granularity */
	if (!IS_ALIGNED(CMA_MIN_ALIGNMENT_PAGES, 1 << order_per_bit))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];

	if (name)
		snprintf(cma->name, CMA_MAX_NAME, "%s", name);
	else
		snprintf(cma->name, CMA_MAX_NAME, "cma%d", cma_area_count);

	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}

/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, must be a power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit in the bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem().
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
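 *
 * A typical call from arch setup code might look like this (hypothetical
 * size; any base, any node):
 *
 *	struct cma *cma;
 *	int err = cma_declare_contiguous_nid(0, SZ_128M, 0, 0, 0, false,
 *					     "my-cma", &cma, NUMA_NO_NODE);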
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	if (!IS_ENABLED(CONFIG_NUMA))
		nid = NUMA_NO_NODE;

	/* Sanitise input arguments. */
	alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base, the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * If there is enough memory, try a bottom-up allocation first.
		 * This places the new CMA area close to the start of the node
		 * and guarantees that compaction moves pages out of the CMA
		 * area rather than into it.
		 * Avoid the first 4GB so as not to interfere with constrained
		 * zones such as DMA/DMA32.
		 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
		if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
			memblock_set_bottom_up(true);
			addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
							limit, nid, true);
			memblock_set_bottom_up(false);
		}
#endif

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (!addr && base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range_nid(size, alignment,
					highmem_start, limit, nid, true);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range_nid(size, alignment, base,
					limit, nid, true);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects, but this address is neither mapped nor accessible.
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa on node %d\n", (unsigned long)size / SZ_1M,
		&base, nid);
	return 0;

free_mem:
	memblock_phys_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB on node %d\n", (unsigned long)size / SZ_1M,
	       nid);
	return ret;
}

#ifdef CONFIG_CMA_DEBUG
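/*
 * Dump the free runs of the area's bitmap as "<pages>@<bit offset>" terms;
 * called below when an allocation fails.
 */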
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	spin_lock_irq(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	spin_unlock_irq(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation
 *
 * This function allocates contiguous pages from the given contiguous
 * memory area.
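 *
 * A hedged usage sketch (hypothetical caller; my_cma set up earlier via
 * cma_declare_contiguous_nid() or cma_init_reserved_mem()):
 *
 *	struct page *pages = cma_alloc(my_cma, 16, 0, false);
 *
 *	if (pages)
 *		cma_release(my_cma, pages, 16);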
 */
struct page *cma_alloc(struct cma *cma, unsigned long count,
		       unsigned int align, bool no_warn)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	unsigned long i;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count || !cma->bitmap)
		goto out;

	pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__,
		 (void *)cma, cma->name, count, align);

	if (!count)
		goto out;

	trace_cma_alloc_start(cma->name, count, align);

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		goto out;

	for (;;) {
		spin_lock_irq(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			spin_unlock_irq(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		spin_unlock_irq(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
					 GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at pfn 0x%lx %p is busy, retrying\n",
			 __func__, pfn, pfn_to_page(pfn));

		trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
					   count, align);
		/* try again with a slightly different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc_finish(cma->name, pfn, page, count, align, ret);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(nth_page(page, i));
	}

	if (ret && !no_warn) {
		pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
				   __func__, cma->name, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
out:
	if (page) {
		count_vm_event(CMA_ALLOC_SUCCESS);
		cma_sysfs_account_success_pages(cma, count);
	} else {
		count_vm_event(CMA_ALLOC_FAIL);
		if (cma)
			cma_sysfs_account_fail_pages(cma, count);
	}

	return page;
}

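/*
 * Check whether @pages lies within the given CMA area. Note that only the
 * starting pfn is range-checked here; @count is used for the debug message.
 */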
bool cma_pages_valid(struct cma *cma, const struct page *pages,
		     unsigned long count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) {
		pr_debug("%s(page %p, count %lu)\n", __func__,
			 (void *)pages, count);
		return false;
	}

	return true;
}

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region from which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area, and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages,
		 unsigned long count)
{
	unsigned long pfn;

	if (!cma_pages_valid(cma, pages, count))
		return false;

	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);

	pfn = page_to_pfn(pages);

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(cma->name, pfn, pages, count);

	return true;
}

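/*
 * Call @it for every registered CMA area, stopping early and propagating
 * the return value if @it returns non-zero.
 */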
int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}