1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Contiguous Memory Allocator
4 *
5 * Copyright (c) 2010-2011 by Samsung Electronics.
6 * Copyright IBM Corporation, 2013
7 * Copyright LG Electronics Inc., 2014
8 * Written by:
9 * Marek Szyprowski <m.szyprowski@samsung.com>
10 * Michal Nazarewicz <mina86@mina86.com>
11 * Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
12 * Joonsoo Kim <iamjoonsoo.kim@lge.com>
13 */
14
15#define pr_fmt(fmt) "cma: " fmt
16
17#define CREATE_TRACE_POINTS
18
19#include <linux/memblock.h>
20#include <linux/err.h>
21#include <linux/mm.h>
22#include <linux/sizes.h>
23#include <linux/slab.h>
24#include <linux/log2.h>
25#include <linux/cma.h>
26#include <linux/highmem.h>
27#include <linux/io.h>
28#include <linux/kmemleak.h>
29#include <trace/events/cma.h>
30
31#include "internal.h"
32#include "cma.h"
33
34struct cma cma_areas[MAX_CMA_AREAS];
35unsigned int cma_area_count;
36static DEFINE_MUTEX(cma_mutex);
37
38phys_addr_t cma_get_base(const struct cma *cma)
39{
40 return PFN_PHYS(cma->base_pfn);
41}
42
43unsigned long cma_get_size(const struct cma *cma)
44{
45 return cma->count << PAGE_SHIFT;
46}
47
48const char *cma_get_name(const struct cma *cma)
49{
50 return cma->name;
51}
52
53static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
54 unsigned int align_order)
55{
56 if (align_order <= cma->order_per_bit)
57 return 0;
58 return (1UL << (align_order - cma->order_per_bit)) - 1;
59}
60
61/*
62 * Find the offset of the base PFN from the specified align_order.
63 * The value returned is represented in order_per_bits.
64 */
65static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
66 unsigned int align_order)
67{
68 return (cma->base_pfn & ((1UL << align_order) - 1))
69 >> cma->order_per_bit;
70}
71
72static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
73 unsigned long pages)
74{
75 return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
76}
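
/*
 * Worked example (editorial note, not part of the original source): with
 * order_per_bit = 2 each bitmap bit covers 2^2 = 4 pages. A request for
 * count = 10 pages is rounded up by cma_bitmap_pages_to_bits() to
 * ALIGN(10, 4) = 12 pages, i.e. 12 >> 2 = 3 bits. For align_order = 4
 * (16-page alignment), cma_bitmap_aligned_mask() yields
 * (1 << (4 - 2)) - 1 = 3, and for base_pfn = 0x12348,
 * cma_bitmap_aligned_offset() yields (0x12348 & 0xf) >> 2 = 2, i.e. the
 * area begins two bits into a 16-page-aligned block.
 */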
77
78static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
79 unsigned long count)
80{
81 unsigned long bitmap_no, bitmap_count;
82 unsigned long flags;
83
84 bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
85 bitmap_count = cma_bitmap_pages_to_bits(cma, count);
86
87 spin_lock_irqsave(&cma->lock, flags);
88 bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
89 spin_unlock_irqrestore(&cma->lock, flags);
90}
91
92static void __init cma_activate_area(struct cma *cma)
93{
94 unsigned long base_pfn = cma->base_pfn, pfn;
95 struct zone *zone;
96
97 cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
98 if (!cma->bitmap)
99 goto out_error;
100
101 /*
102 * alloc_contig_range() requires the pfn range specified to be in the
103 * same zone. Simplify by forcing the entire CMA resv range to be in the
104 * same zone.
105 */
106 WARN_ON_ONCE(!pfn_valid(base_pfn));
107 zone = page_zone(pfn_to_page(base_pfn));
108 for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {
109 WARN_ON_ONCE(!pfn_valid(pfn));
110 if (page_zone(pfn_to_page(pfn)) != zone)
111 goto not_in_zone;
112 }
113
114 for (pfn = base_pfn; pfn < base_pfn + cma->count;
115 pfn += pageblock_nr_pages)
116 init_cma_reserved_pageblock(pfn_to_page(pfn));
117
118 spin_lock_init(&cma->lock);
119
120#ifdef CONFIG_CMA_DEBUGFS
121 INIT_HLIST_HEAD(&cma->mem_head);
122 spin_lock_init(&cma->mem_head_lock);
123#endif
124
125 return;
126
127not_in_zone:
128 bitmap_free(cma->bitmap);
129out_error:
130 /* Expose all pages to the buddy, they are useless for CMA. */
131 if (!cma->reserve_pages_on_error) {
132 for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
133 free_reserved_page(pfn_to_page(pfn));
134 }
135 totalcma_pages -= cma->count;
136 cma->count = 0;
137 pr_err("CMA area %s could not be activated\n", cma->name);
138}
139
140static int __init cma_init_reserved_areas(void)
141{
142 int i;
143
144 for (i = 0; i < cma_area_count; i++)
145 cma_activate_area(&cma_areas[i]);
146
147 return 0;
148}
149core_initcall(cma_init_reserved_areas);
150
151void __init cma_reserve_pages_on_error(struct cma *cma)
152{
153 cma->reserve_pages_on_error = true;
154}
155
156/**
157 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
158 * @base: Base address of the reserved area
159 * @size: Size of the reserved area (in bytes).
160 * @order_per_bit: Order of pages represented by one bit on bitmap.
161 * @name: The name of the area. If this parameter is NULL, the name of
162 * the area will be set to "cmaN", where N is a running counter of
163 * used areas.
164 * @res_cma: Pointer to store the created cma region.
165 *
166 * This function creates a custom contiguous area from already-reserved memory.
167 */
168int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
169 unsigned int order_per_bit,
170 const char *name,
171 struct cma **res_cma)
172{
173 struct cma *cma;
174
175 /* Sanity checks */
176 if (cma_area_count == ARRAY_SIZE(cma_areas)) {
177 pr_err("Not enough slots for CMA reserved regions!\n");
178 return -ENOSPC;
179 }
180
181 if (!size || !memblock_is_region_reserved(base, size))
182 return -EINVAL;
183
184 /*
185 * CMA uses CMA_MIN_ALIGNMENT_BYTES as its alignment requirement, which
186 * needs pageblock_order to be initialized. Let's enforce it.
187 */
188 if (!pageblock_order) {
189 pr_err("pageblock_order not yet initialized. Called during early boot?\n");
190 return -EINVAL;
191 }
192
193 /* ensure minimal alignment required by mm core */
194 if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
195 return -EINVAL;
196
197 /*
198 * Each reserved area must be initialised later, when more kernel
199 * subsystems (like slab allocator) are available.
200 */
201 cma = &cma_areas[cma_area_count];
202
203 if (name)
204 snprintf(cma->name, CMA_MAX_NAME, name);
205 else
206 snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count);
207
208 cma->base_pfn = PFN_DOWN(base);
209 cma->count = size >> PAGE_SHIFT;
210 cma->order_per_bit = order_per_bit;
211 *res_cma = cma;
212 cma_area_count++;
213 totalcma_pages += cma->count;
214
215 return 0;
216}
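
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * early boot code that has already carved a suitably aligned region out of
 * memblock might register it as a CMA area as below. The function name and
 * region name are hypothetical.
 */
static int __init __maybe_unused example_register_reserved_cma(phys_addr_t base,
								phys_addr_t size,
								struct cma **res)
{
	/*
	 * The region must already be memblock-reserved and aligned to
	 * CMA_MIN_ALIGNMENT_BYTES; one page per bitmap bit.
	 */
	return cma_init_reserved_mem(base, size, 0, "example-region", res);
}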
217
218/**
219 * cma_declare_contiguous_nid() - reserve custom contiguous area
220 * @base: Base address of the reserved area (optional, use 0 for any)
221 * @size: Size of the reserved area (in bytes).
222 * @limit: End address of the reserved memory (optional, 0 for any).
223 * @alignment: Alignment for the CMA area, should be power of 2 or zero
224 * @order_per_bit: Order of pages represented by one bit on bitmap.
225 * @fixed: hint about where to place the reserved area
226 * @name: The name of the area. See function cma_init_reserved_mem()
227 * @res_cma: Pointer to store the created cma region.
228 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
229 *
230 * This function reserves memory from the early allocator. It should be
231 * called by arch-specific code once the early allocator (memblock or bootmem)
232 * has been activated and all other subsystems have already allocated/reserved
233 * memory. This function allows the creation of custom reserved areas.
234 *
235 * If @fixed is true, reserve the contiguous area at exactly @base. If false,
236 * reserve in the range from @base to @limit.
237 */
238int __init cma_declare_contiguous_nid(phys_addr_t base,
239 phys_addr_t size, phys_addr_t limit,
240 phys_addr_t alignment, unsigned int order_per_bit,
241 bool fixed, const char *name, struct cma **res_cma,
242 int nid)
243{
244 phys_addr_t memblock_end = memblock_end_of_DRAM();
245 phys_addr_t highmem_start;
246 int ret;
247
248 /*
249 * We can't use __pa(high_memory) directly, since high_memory
250 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
251 * complain. Find the boundary by adding one to the last valid
252 * address.
253 */
254 highmem_start = __pa(high_memory - 1) + 1;
255 pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
256 __func__, &size, &base, &limit, &alignment);
257
258 if (cma_area_count == ARRAY_SIZE(cma_areas)) {
259 pr_err("Not enough slots for CMA reserved regions!\n");
260 return -ENOSPC;
261 }
262
263 if (!size)
264 return -EINVAL;
265
266 if (alignment && !is_power_of_2(alignment))
267 return -EINVAL;
268
269 if (!IS_ENABLED(CONFIG_NUMA))
270 nid = NUMA_NO_NODE;
271
272 /* Sanitise input arguments. */
273 alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
274 if (fixed && base & (alignment - 1)) {
275 ret = -EINVAL;
276 pr_err("Region at %pa must be aligned to %pa bytes\n",
277 &base, &alignment);
278 goto err;
279 }
280 base = ALIGN(base, alignment);
281 size = ALIGN(size, alignment);
282 limit &= ~(alignment - 1);
283
284 if (!base)
285 fixed = false;
286
287 /* size should be aligned with order_per_bit */
288 if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
289 return -EINVAL;
290
291 /*
292 * If allocating at a fixed base, the requested region must not cross the
293 * low/high memory boundary.
294 */
295 if (fixed && base < highmem_start && base + size > highmem_start) {
296 ret = -EINVAL;
297 pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
298 &base, &highmem_start);
299 goto err;
300 }
301
302 /*
303 * If the limit is unspecified or above the memblock end, its effective
304 * value will be the memblock end. Set it explicitly to simplify further
305 * checks.
306 */
307 if (limit == 0 || limit > memblock_end)
308 limit = memblock_end;
309
310 if (base + size > limit) {
311 ret = -EINVAL;
312 pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
313 &size, &base, &limit);
314 goto err;
315 }
316
317 /* Reserve memory */
318 if (fixed) {
319 if (memblock_is_region_reserved(base, size) ||
320 memblock_reserve(base, size) < 0) {
321 ret = -EBUSY;
322 goto err;
323 }
324 } else {
325 phys_addr_t addr = 0;
326
327 /*
328 * If there is enough memory, try a bottom-up allocation first.
329 * It will place the new cma area close to the start of the node
330 * and guarantee that compaction moves pages out of the cma area
331 * and not into it.
332 * Avoid using the first 4GB so as not to interfere with constrained
333 * zones like DMA/DMA32.
334 */
335#ifdef CONFIG_PHYS_ADDR_T_64BIT
336 if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
337 memblock_set_bottom_up(true);
338 addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
339 limit, nid, true);
340 memblock_set_bottom_up(false);
341 }
342#endif
343
344 /*
345 * All pages in the reserved area must come from the same zone.
346 * If the requested region crosses the low/high memory boundary,
347 * try allocating from high memory first and fall back to low
348 * memory in case of failure.
349 */
350 if (!addr && base < highmem_start && limit > highmem_start) {
351 addr = memblock_alloc_range_nid(size, alignment,
352 highmem_start, limit, nid, true);
353 limit = highmem_start;
354 }
355
356 if (!addr) {
357 addr = memblock_alloc_range_nid(size, alignment, base,
358 limit, nid, true);
359 if (!addr) {
360 ret = -ENOMEM;
361 goto err;
362 }
363 }
364
365 /*
366 * kmemleak scans/reads tracked objects for pointers to other
367 * objects, but this address is neither mapped nor accessible.
368 */
369 kmemleak_ignore_phys(addr);
370 base = addr;
371 }
372
373 ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
374 if (ret)
375 goto free_mem;
376
377 pr_info("Reserved %ld MiB at %pa on node %d\n", (unsigned long)size / SZ_1M,
378 &base, nid);
379 return 0;
380
381free_mem:
382 memblock_phys_free(base, size);
383err:
384 pr_err("Failed to reserve %ld MiB on node %d\n", (unsigned long)size / SZ_1M,
385 nid);
386 return ret;
387}
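
/*
 * Illustrative sketch (editorial addition): a typical caller reserves its
 * area from arch or driver early-boot code once memblock is up. Here a
 * hypothetical 64 MiB area is placed anywhere in DRAM on any node, with the
 * default alignment and one page per bitmap bit.
 */
static int __init __maybe_unused example_reserve_cma(struct cma **res)
{
	return cma_declare_contiguous_nid(0, SZ_64M, 0, 0, 0, false,
					  "example", res, NUMA_NO_NODE);
}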
388
389static void cma_debug_show_areas(struct cma *cma)
390{
391 unsigned long next_zero_bit, next_set_bit, nr_zero;
392 unsigned long start = 0;
393 unsigned long nr_part, nr_total = 0;
394 unsigned long nbits = cma_bitmap_maxno(cma);
395
396 spin_lock_irq(&cma->lock);
397 pr_info("number of available pages: ");
398 for (;;) {
399 next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
400 if (next_zero_bit >= nbits)
401 break;
402 next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
403 nr_zero = next_set_bit - next_zero_bit;
404 nr_part = nr_zero << cma->order_per_bit;
405 pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
406 next_zero_bit);
407 nr_total += nr_part;
408 start = next_zero_bit + nr_zero;
409 }
410 pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
411 spin_unlock_irq(&cma->lock);
412}
413
414static struct page *__cma_alloc(struct cma *cma, unsigned long count,
415 unsigned int align, gfp_t gfp)
416{
417 unsigned long mask, offset;
418 unsigned long pfn = -1;
419 unsigned long start = 0;
420 unsigned long bitmap_maxno, bitmap_no, bitmap_count;
421 unsigned long i;
422 struct page *page = NULL;
423 int ret = -ENOMEM;
424 const char *name = cma ? cma->name : NULL;
425
426 trace_cma_alloc_start(name, count, align);
427
428 if (!cma || !cma->count || !cma->bitmap)
429 return page;
430
431 pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__,
432 (void *)cma, cma->name, count, align);
433
434 if (!count)
435 return page;
436
437 mask = cma_bitmap_aligned_mask(cma, align);
438 offset = cma_bitmap_aligned_offset(cma, align);
439 bitmap_maxno = cma_bitmap_maxno(cma);
440 bitmap_count = cma_bitmap_pages_to_bits(cma, count);
441
442 if (bitmap_count > bitmap_maxno)
443 return page;
444
445 for (;;) {
446 spin_lock_irq(&cma->lock);
447 bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
448 bitmap_maxno, start, bitmap_count, mask,
449 offset);
450 if (bitmap_no >= bitmap_maxno) {
451 spin_unlock_irq(&cma->lock);
452 break;
453 }
454 bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
455 /*
456 * It's safe to drop the lock here. We've marked this region for
457 * our exclusive use. If the migration fails we will take the
458 * lock again and unmark it.
459 */
460 spin_unlock_irq(&cma->lock);
461
462 pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
463 mutex_lock(&cma_mutex);
464 ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp);
465 mutex_unlock(&cma_mutex);
466 if (ret == 0) {
467 page = pfn_to_page(pfn);
468 break;
469 }
470
471 cma_clear_bitmap(cma, pfn, count);
472 if (ret != -EBUSY)
473 break;
474
475 pr_debug("%s(): memory range at pfn 0x%lx %p is busy, retrying\n",
476 __func__, pfn, pfn_to_page(pfn));
477
478 trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
479 count, align);
480 /* try again with a bit different memory target */
481 start = bitmap_no + mask + 1;
482 }
483
484 /*
485 * CMA can allocate multiple page blocks, which results in different
486 * blocks being marked with different tags. Reset the tags to ignore
487 * those page blocks.
488 */
489 if (page) {
490 for (i = 0; i < count; i++)
491 page_kasan_tag_reset(nth_page(page, i));
492 }
493
494 if (ret && !(gfp & __GFP_NOWARN)) {
495 pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
496 __func__, cma->name, count, ret);
497 cma_debug_show_areas(cma);
498 }
499
500 pr_debug("%s(): returned %p\n", __func__, page);
501 trace_cma_alloc_finish(name, pfn, page, count, align, ret);
502 if (page) {
503 count_vm_event(CMA_ALLOC_SUCCESS);
504 cma_sysfs_account_success_pages(cma, count);
505 } else {
506 count_vm_event(CMA_ALLOC_FAIL);
507 cma_sysfs_account_fail_pages(cma, count);
508 }
509
510 return page;
511}
512
513/**
514 * cma_alloc() - allocate pages from contiguous area
515 * @cma: Contiguous memory region for which the allocation is performed.
516 * @count: Requested number of pages.
517 * @align: Requested alignment of pages (in PAGE_SIZE order).
518 * @no_warn: Avoid printing a message about a failed allocation
519 *
520 * This function allocates pages from a specific contiguous memory
521 * area.
522 */
523struct page *cma_alloc(struct cma *cma, unsigned long count,
524 unsigned int align, bool no_warn)
525{
526 return __cma_alloc(cma, count, align, GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
527}
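
/*
 * Illustrative sketch (editorial addition): a driver holding a CMA region
 * might allocate a physically contiguous 16-page buffer and hand it back
 * again as below; the function name is hypothetical.
 */
static int __maybe_unused example_use_cma(struct cma *cma)
{
	struct page *pages;

	/* 16 pages, no extra alignment (order 0), warn on failure */
	pages = cma_alloc(cma, 16, 0, false);
	if (!pages)
		return -ENOMEM;

	/* ... map and use the buffer ... */

	cma_release(cma, pages, 16);
	return 0;
}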
528
529struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
530{
531 struct page *page;
532
533 if (WARN_ON(!order || !(gfp & __GFP_COMP)))
534 return NULL;
535
536 page = __cma_alloc(cma, 1 << order, order, gfp);
537
538 return page ? page_folio(page) : NULL;
539}
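
/*
 * Illustrative sketch (editorial addition): callers that want a large folio
 * (here order 4, i.e. 16 pages) must pass __GFP_COMP and release it with
 * cma_free_folio(); the function name is hypothetical.
 */
static int __maybe_unused example_use_cma_folio(struct cma *cma)
{
	struct folio *folio;

	folio = cma_alloc_folio(cma, 4, GFP_KERNEL | __GFP_COMP);
	if (!folio)
		return -ENOMEM;

	/* ... use folio_address(folio) ... */

	cma_free_folio(cma, folio);
	return 0;
}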
540
541bool cma_pages_valid(struct cma *cma, const struct page *pages,
542 unsigned long count)
543{
544 unsigned long pfn;
545
546 if (!cma || !pages)
547 return false;
548
549 pfn = page_to_pfn(pages);
550
551 if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) {
552 pr_debug("%s(page %p, count %lu)\n", __func__,
553 (void *)pages, count);
554 return false;
555 }
556
557 return true;
558}
559
560/**
561 * cma_release() - release allocated pages
562 * @cma: Contiguous memory region for which the allocation is performed.
563 * @pages: Allocated pages.
564 * @count: Number of allocated pages.
565 *
566 * This function releases memory allocated by cma_alloc().
567 * It returns false when the provided pages do not belong to the contiguous
568 * area, and true otherwise.
569 */
570bool cma_release(struct cma *cma, const struct page *pages,
571 unsigned long count)
572{
573 unsigned long pfn;
574
575 if (!cma_pages_valid(cma, pages, count))
576 return false;
577
578 pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
579
580 pfn = page_to_pfn(pages);
581
582 VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
583
584 free_contig_range(pfn, count);
585 cma_clear_bitmap(cma, pfn, count);
586 cma_sysfs_account_release_pages(cma, count);
587 trace_cma_release(cma->name, pfn, pages, count);
588
589 return true;
590}
591
592bool cma_free_folio(struct cma *cma, const struct folio *folio)
593{
594 if (WARN_ON(!folio_test_large(folio)))
595 return false;
596
597 return cma_release(cma, &folio->page, folio_nr_pages(folio));
598}
599
600int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
601{
602 int i;
603
604 for (i = 0; i < cma_area_count; i++) {
605 int ret = it(&cma_areas[i], data);
606
607 if (ret)
608 return ret;
609 }
610
611 return 0;
612}
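
/*
 * Illustrative sketch (editorial addition): cma_for_each_area() walks every
 * registered area and stops at the first non-zero return from the callback.
 * A hypothetical caller could log each area like this:
 *
 *	cma_for_each_area(example_show_area, NULL);
 */
static int __maybe_unused example_show_area(struct cma *cma, void *data)
{
	phys_addr_t base = cma_get_base(cma);

	pr_info("area %s: base %pa, %lu bytes\n",
		cma_get_name(cma), &base, cma_get_size(cma));

	return 0;	/* non-zero would stop the walk */
}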