/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

struct cma *dma_contiguous_default_area;

/*
 * Default global CMA area size can be defined in kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their system
 * should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes = (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;
static phys_addr_t base_cmdline;
static phys_addr_t limit_cmdline;

static int __init early_cma(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	size_cmdline = memparse(p, &p);
	if (*p != '@')
		return 0;
	base_cmdline = memparse(p + 1, &p);
	if (*p != '-') {
		limit_cmdline = base_cmdline + size_cmdline;
		return 0;
	}
	limit_cmdline = memparse(p + 1, &p);

	return 0;
}
early_param("cma", early_cma);
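
/*
 * Illustrative command-line forms accepted by the early_cma() parser above
 * (editor's sketch, not part of the original file; sizes and addresses are
 * arbitrary):
 *
 *	cma=64M				64 MiB, placed anywhere below the limit
 *	cma=64M@0x20000000		64 MiB fixed at base 0x20000000
 *	cma=64M@0x20000000-0x40000000	64 MiB somewhere in [0x20000000, 0x40000000)
 */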

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif
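
/*
 * Worked example (editor's note, assuming 4 KiB pages): with 1 GiB of
 * memory, total_pages is 262144; CONFIG_CMA_SIZE_PERCENTAGE=10 yields
 * 262144 * 10 / 100 = 26214 pages, i.e. 26214 << 12, roughly 102 MiB.
 */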

/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;
	phys_addr_t selected_base = 0;
	phys_addr_t selected_limit = limit;
	bool fixed = false;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
		selected_base = base_cmdline;
		selected_limit = min_not_zero(limit_cmdline, limit);
		if (base_cmdline + size_cmdline == limit_cmdline)
			fixed = true;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size && !dma_contiguous_default_area) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);

		dma_contiguous_reserve_area(selected_size, selected_base,
					    selected_limit,
					    &dma_contiguous_default_area,
					    fixed);
	}
}
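
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * architecture setup code is expected to call dma_contiguous_reserve()
 * exactly once, after memblock is up and other early reservations are done.
 * The function and limit names below are made up; an architecture would
 * pass the highest physical address its DMA hardware can reach, or 0.
 *
 *	void __init arch_reserve_memory(void)
 *	{
 *		...
 *		dma_contiguous_reserve(arch_dma_zone_limit);
 *	}
 */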

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creation of custom reserved areas for specific
 * devices.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	int ret;

	ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, res_cma);
	if (ret)
		return ret;

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(cma_get_base(*res_cma),
				cma_get_size(*res_cma));

	return 0;
}
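
/*
 * Usage sketch (editor's illustration; names and sizes are made up): board
 * or SoC code can reserve a private area for a single device and later
 * attach it with the dev_set_cma_area() helper:
 *
 *	static struct cma *camera_cma_area;
 *
 *	void __init board_reserve_memory(void)
 *	{
 *		dma_contiguous_reserve_area(SZ_32M, 0, 0, &camera_cma_area,
 *					    false);
 *	}
 *
 *	...and once the device is available:
 *	dev_set_cma_area(&camera_pdev->dev, camera_cma_area);
 */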

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device-specific contiguous memory area if available, or the default
 * global one. Requires the architecture-specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int align)
{
	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	return cma_alloc(dev_get_cma_area(dev), count, align);
}
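
/*
 * Allocation sketch (editor's illustration): a dma_map_ops backend would
 * typically convert a byte size into a page count and an order before
 * calling this; the variable names below are made up.
 *
 *	size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 *	unsigned int order = get_order(size);
 *	struct page *page = dma_alloc_from_contiguous(dev, count, order);
 *	if (!page)
 *		... fall back to another allocator ...
 */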

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	return cma_release(dev_get_cma_area(dev), pages, count);
}
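
/*
 * Release sketch (editor's illustration), matching the allocation sketch
 * above: pages that did not come from the CMA area must be freed another way.
 *
 *	if (!dma_release_from_contiguous(dev, page, count))
 *		__free_pages(page, get_order(size));
 */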

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#undef pr_fmt
#define pr_fmt(fmt) fmt

static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	dev_set_cma_area(dev, rmem->priv);
	return 0;
}

static void rmem_cma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	dev_set_cma_area(dev, NULL);
}

static const struct reserved_mem_ops rmem_cma_ops = {
	.device_init	= rmem_cma_device_init,
	.device_release = rmem_cma_device_release,
};

static int __init rmem_cma_setup(struct reserved_mem *rmem)
{
	phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	phys_addr_t mask = align - 1;
	unsigned long node = rmem->fdt_node;
	struct cma *cma;
	int err;

	if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if ((rmem->base & mask) || (rmem->size & mask)) {
		pr_err("Reserved memory: incorrect alignment of CMA region\n");
		return -EINVAL;
	}

	err = cma_init_reserved_mem(rmem->base, rmem->size, 0, &cma);
	if (err) {
		pr_err("Reserved memory: unable to setup CMA region\n");
		return err;
	}
	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(rmem->base, rmem->size);

	if (of_get_flat_dt_prop(node, "linux,cma-default", NULL))
		dma_contiguous_set_default(cma);

	rmem->ops = &rmem_cma_ops;
	rmem->priv = cma;

	pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);

	return 0;
}
RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
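
/*
 * Device-tree sketch (editor's illustration): a reserved-memory node that
 * rmem_cma_setup() accepts looks roughly like the following. The node name,
 * size and alignment values are made up; the code above requires the
 * "shared-dma-pool" compatible, the "reusable" property and the absence of
 * "no-map".
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		linux,cma {
 *			compatible = "shared-dma-pool";
 *			reusable;
 *			size = <0x4000000>;
 *			alignment = <0x400000>;
 *			linux,cma-default;
 *		};
 *	};
 */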
#endif