// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name ? cma->name : "(undefined)";
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
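
/*
 * Worked example (illustrative numbers, not from any particular platform):
 * with order_per_bit = 1 each bitmap bit covers 2 pages, so a request for
 * count = 5 pages needs cma_bitmap_pages_to_bits() = ALIGN(5, 2) >> 1 = 3
 * bits.  For align_order = 3 (an 8-page alignment),
 * cma_bitmap_aligned_mask() returns (1 << (3 - 1)) - 1 = 3, and an area with
 * base_pfn = 0x12344 gives cma_bitmap_aligned_offset() =
 * (0x12344 & 7) >> 1 = 2, the correction cma_alloc() passes to
 * bitmap_find_next_zero_area_off() so that the pages it picks are aligned in
 * physical address space rather than relative to base_pfn.
 */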

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap) {
		cma->count = 0;
		return -ENOMEM;
	}

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto not_in_zone;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

not_in_zone:
	pr_err("CMA area %s could not be activated\n", cma->name);
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes)
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	if (name) {
		cma->name = name;
	} else {
		cma->name = kasprintf(GFP_KERNEL, "cma%d", cma_area_count);
		if (!cma->name)
			return -ENOMEM;
	}
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
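
/*
 * Usage sketch (illustrative only; the "foo" names are hypothetical, while
 * one in-tree caller of this path is the reserved-memory handling in
 * kernel/dma/contiguous.c): boot code that already holds a suitably aligned,
 * memblock-reserved block can turn it into a CMA area like this:
 *
 *	static struct cma *foo_cma;
 *
 *	static int __init foo_cma_init(phys_addr_t base, phys_addr_t size)
 *	{
 *		int err;
 *
 *		err = cma_init_reserved_mem(base, size, 0, "foo", &foo_cma);
 *		if (err)
 *			pr_err("foo: CMA init failed: %d\n", err);
 *		return err;
 *	}
 */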

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any)
 * @size: Size of the reserved area (in bytes)
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable migratetype pages by the page allocator's buddy algorithm.
	 * In that case you couldn't get contiguous memory, which is not what
	 * we want.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_phys_alloc_range(size, alignment,
							 highmem_start, limit);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_phys_alloc_range(size, alignment, base,
							 limit);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
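
/*
 * Usage sketch (illustrative only; "foo" and the 64 MiB size are
 * hypothetical): arch or early driver code, running after memblock is up but
 * before the buddy allocator takes over, could reserve an area anywhere in
 * memory like this:
 *
 *	static struct cma *foo_cma;
 *
 *	void __init foo_reserve(void)
 *	{
 *		int err;
 *
 *		err = cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false,
 *					     "foo", &foo_cma);
 *		if (err)
 *			pr_warn("foo: CMA reservation failed: %d\n", err);
 *	}
 */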

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif
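
/*
 * With CONFIG_CMA_DEBUG the helper above prints one summary line per failed
 * allocation; with hypothetical numbers (order_per_bit == 0, a 1024-page
 * area with two free runs) it looks like:
 *
 *	cma: number of available pages: 64@0+192@320=> 256 free of 1024 total pages
 *
 * i.e. "<pages>@<bitmap offset>" chunks joined by '+', followed by totals.
 */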

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation
 *
 * This function allocates pages from a specific contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       bool no_warn)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	size_t i;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
				GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(page + i);
	}

	if (ret && !no_warn) {
		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
			__func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}
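
/*
 * Usage sketch for the cma_alloc()/cma_release() pair (illustrative only;
 * "foo_cma" is assumed to have been set up by one of the reservation paths
 * above, and the 16-page request is hypothetical):
 *
 *	struct page *buf;
 *
 *	buf = cma_alloc(foo_cma, 16, 4, false);	/* 16 pages, 2^4-page aligned *\/
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	cma_release(foo_cma, buf, 16);
 */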

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
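
/*
 * Usage sketch for cma_for_each_area() (illustrative; the callback name is
 * hypothetical): the iterator visits every registered area and stops early
 * if the callback returns non-zero, e.g. to log all areas during boot:
 *
 *	static int __init foo_log_area(struct cma *cma, void *data)
 *	{
 *		phys_addr_t base = cma_get_base(cma);
 *
 *		pr_info("%s: base %pa, size %lu bytes\n",
 *			cma_get_name(cma), &base, cma_get_size(cma));
 *		return 0;
 *	}
 *
 *	...
 *	cma_for_each_area(foo_log_area, NULL);
 */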