v5.9
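Both listings below are kernel/dma/pool.c, which implements the atomic DMA pools: preallocated (and, where needed, decrypted) coherent memory that the dma-direct code hands out when a caller needs DMA memory in a context that cannot sleep. One pool is kept per zone (DMA, DMA32, and normal kernel memory), each backed by a genalloc gen_pool and refilled in the background by a workqueue whenever its free space drops below atomic_pool_size.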
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2020 Google LLC
 */
#include <linux/cma.h>
#include <linux/debugfs.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct gen_pool *atomic_pool_dma __ro_after_init;
static unsigned long pool_size_dma;
static struct gen_pool *atomic_pool_dma32 __ro_after_init;
static unsigned long pool_size_dma32;
static struct gen_pool *atomic_pool_kernel __ro_after_init;
static unsigned long pool_size_kernel;

/* Size can be defined by the coherent_pool command line */
static size_t atomic_pool_size;

/* Dynamic background expansion when the atomic pool is near capacity */
static struct work_struct atomic_pool_work;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

static void __init dma_atomic_pool_debugfs_init(void)
{
	struct dentry *root;

	root = debugfs_create_dir("dma_pools", NULL);
	if (IS_ERR_OR_NULL(root))
		return;

	debugfs_create_ulong("pool_size_dma", 0400, root, &pool_size_dma);
	debugfs_create_ulong("pool_size_dma32", 0400, root, &pool_size_dma32);
	debugfs_create_ulong("pool_size_kernel", 0400, root, &pool_size_kernel);
}

static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
{
	if (gfp & __GFP_DMA)
		pool_size_dma += size;
	else if (gfp & __GFP_DMA32)
		pool_size_dma32 += size;
	else
		pool_size_kernel += size;
}

static bool cma_in_zone(gfp_t gfp)
{
	unsigned long size;
	phys_addr_t end;
	struct cma *cma;

	cma = dev_get_cma_area(NULL);
	if (!cma)
		return false;

	size = cma_get_size(cma);
	if (!size)
		return false;

	/* CMA can't cross zone boundaries, see cma_activate_area() */
	end = cma_get_base(cma) + size - 1;
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
		return end <= DMA_BIT_MASK(zone_dma_bits);
	if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
		return end <= DMA_BIT_MASK(32);
	return true;
}

static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
			      gfp_t gfp)
{
	unsigned int order;
	struct page *page = NULL;
	void *addr;
	int ret = -ENOMEM;

	/* Cannot allocate larger than MAX_ORDER-1 */
	order = min(get_order(pool_size), MAX_ORDER-1);

	do {
		pool_size = 1 << (PAGE_SHIFT + order);
		if (cma_in_zone(gfp))
			page = dma_alloc_from_contiguous(NULL, 1 << order,
							 order, false);
		if (!page)
			page = alloc_pages(gfp, order);
	} while (!page && order-- > 0);
	if (!page)
		goto out;

	arch_dma_prep_coherent(page, pool_size);

#ifdef CONFIG_DMA_DIRECT_REMAP
	addr = dma_common_contiguous_remap(page, pool_size,
					   pgprot_dmacoherent(PAGE_KERNEL),
					   __builtin_return_address(0));
	if (!addr)
		goto free_page;
#else
	addr = page_to_virt(page);
#endif
	/*
	 * Memory in the atomic DMA pools must be unencrypted, the pools do not
	 * shrink so no re-encryption occurs in dma_direct_free_pages().
	 */
	ret = set_memory_decrypted((unsigned long)page_to_virt(page),
				   1 << order);
	if (ret)
		goto remove_mapping;
	ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
				pool_size, NUMA_NO_NODE);
	if (ret)
		goto encrypt_mapping;

	dma_atomic_pool_size_add(gfp, pool_size);
	return 0;

encrypt_mapping:
	ret = set_memory_encrypted((unsigned long)page_to_virt(page),
				   1 << order);
	if (WARN_ON_ONCE(ret)) {
		/* Decrypt succeeded but encrypt failed, purposely leak */
		goto out;
	}
remove_mapping:
#ifdef CONFIG_DMA_DIRECT_REMAP
	dma_common_free_remap(addr, pool_size);
#endif
free_page: __maybe_unused
	__free_pages(page, order);
out:
	return ret;
}

static void atomic_pool_resize(struct gen_pool *pool, gfp_t gfp)
{
	if (pool && gen_pool_avail(pool) < atomic_pool_size)
		atomic_pool_expand(pool, gen_pool_size(pool), gfp);
}

static void atomic_pool_work_fn(struct work_struct *work)
{
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		atomic_pool_resize(atomic_pool_dma,
				   GFP_KERNEL | GFP_DMA);
	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		atomic_pool_resize(atomic_pool_dma32,
				   GFP_KERNEL | GFP_DMA32);
	atomic_pool_resize(atomic_pool_kernel, GFP_KERNEL);
}

static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
						      gfp_t gfp)
{
	struct gen_pool *pool;
	int ret;

	pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
	if (!pool)
		return NULL;

	gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);

	ret = atomic_pool_expand(pool, pool_size, gfp);
	if (ret) {
		gen_pool_destroy(pool);
		pr_err("DMA: failed to allocate %zu KiB %pGg pool for atomic allocation\n",
		       pool_size >> 10, &gfp);
		return NULL;
	}

	pr_info("DMA: preallocated %zu KiB %pGg pool for atomic allocations\n",
		gen_pool_size(pool) >> 10, &gfp);
	return pool;
}

static int __init dma_atomic_pool_init(void)
{
	int ret = 0;

	/*
	 * If coherent_pool was not used on the command line, default the pool
	 * sizes to 128KB per 1GB of memory, min 128KB, max MAX_ORDER-1.
	 */
	if (!atomic_pool_size) {
		unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);
		pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);
		atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K);
	}
	INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);

	atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
						    GFP_KERNEL);
	if (!atomic_pool_kernel)
		ret = -ENOMEM;
	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
		atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
						GFP_KERNEL | GFP_DMA);
		if (!atomic_pool_dma)
			ret = -ENOMEM;
	}
	if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
		atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
						GFP_KERNEL | GFP_DMA32);
		if (!atomic_pool_dma32)
			ret = -ENOMEM;
	}

	dma_atomic_pool_debugfs_init();
	return ret;
}
postcore_initcall(dma_atomic_pool_init);

static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
{
	if (prev == NULL) {
		if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
			return atomic_pool_dma32;
		if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
			return atomic_pool_dma;
		return atomic_pool_kernel;
	}
	if (prev == atomic_pool_kernel)
		return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
	if (prev == atomic_pool_dma32)
		return atomic_pool_dma;
	return NULL;
}

static struct page *__dma_alloc_from_pool(struct device *dev, size_t size,
		struct gen_pool *pool, void **cpu_addr,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
	unsigned long addr;
	phys_addr_t phys;

	addr = gen_pool_alloc(pool, size);
	if (!addr)
		return NULL;

	phys = gen_pool_virt_to_phys(pool, addr);
	if (phys_addr_ok && !phys_addr_ok(dev, phys, size)) {
		gen_pool_free(pool, addr, size);
		return NULL;
	}

	if (gen_pool_avail(pool) < atomic_pool_size)
		schedule_work(&atomic_pool_work);

	*cpu_addr = (void *)addr;
	memset(*cpu_addr, 0, size);
	return pfn_to_page(__phys_to_pfn(phys));
}

struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t gfp,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
	struct gen_pool *pool = NULL;
	struct page *page;

	while ((pool = dma_guess_pool(pool, gfp))) {
		page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
					     phys_addr_ok);
		if (page)
			return page;
	}

	WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
	return NULL;
}

bool dma_free_from_pool(struct device *dev, void *start, size_t size)
{
	struct gen_pool *pool = NULL;

	while ((pool = dma_guess_pool(pool, 0))) {
		if (!gen_pool_has_addr(pool, (unsigned long)start, size))
			continue;
		gen_pool_free(pool, (unsigned long)start, size);
		return true;
	}

	return false;
}
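The two exported entry points above, dma_alloc_from_pool() and dma_free_from_pool(), are called from the dma-direct allocator. Below is a minimal caller sketch under stated assumptions: my_alloc_coherent_atomic() is a hypothetical helper, not kernel API, and passing NULL for phys_addr_ok skips the addressability check that real callers (e.g. dma_coherent_ok() in kernel/dma/direct.c) supply.

/*
 * Hedged sketch of a non-blocking coherent allocation that draws from the
 * atomic pools above. my_alloc_coherent_atomic() is a hypothetical helper;
 * real callers live in kernel/dma/direct.c and pass a real addressability
 * predicate instead of NULL.
 */
static void *my_alloc_coherent_atomic(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;
	void *vaddr;

	/*
	 * dma_alloc_from_pool() picks a pool matching the zone flags in
	 * @gfp, zeroes the buffer, and kicks the background worker if the
	 * pool is running low.
	 */
	page = dma_alloc_from_pool(dev, size, &vaddr, gfp, NULL);
	if (!page)
		return NULL;

	*dma_handle = phys_to_dma(dev, page_to_phys(page));
	return vaddr;
}

Note that dma_alloc_from_pool() already zeroes the buffer and schedules background pool growth on success, so a wrapper like this has nothing else to do.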
v6.2
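The v6.2 version of the same file differs from v5.9 mainly in three places: the dma-contiguous.h and dma-noncoherent.h includes were folded into dma-map-ops.h; dma_atomic_pool_debugfs_init() no longer checks the debugfs_create_dir() return value, since the debugfs_create_*() helpers tolerate error pointers; and the GFP_DMA pool is created only when has_managed_dma() reports managed pages in ZONE_DMA, with dma_guess_pool() accordingly testing atomic_pool_dma itself rather than CONFIG_ZONE_DMA.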
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2020 Google LLC
 */
#include <linux/cma.h>
#include <linux/debugfs.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct gen_pool *atomic_pool_dma __ro_after_init;
static unsigned long pool_size_dma;
static struct gen_pool *atomic_pool_dma32 __ro_after_init;
static unsigned long pool_size_dma32;
static struct gen_pool *atomic_pool_kernel __ro_after_init;
static unsigned long pool_size_kernel;

/* Size can be defined by the coherent_pool command line */
static size_t atomic_pool_size;

/* Dynamic background expansion when the atomic pool is near capacity */
static struct work_struct atomic_pool_work;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

static void __init dma_atomic_pool_debugfs_init(void)
{
	struct dentry *root;

	root = debugfs_create_dir("dma_pools", NULL);

	debugfs_create_ulong("pool_size_dma", 0400, root, &pool_size_dma);
	debugfs_create_ulong("pool_size_dma32", 0400, root, &pool_size_dma32);
	debugfs_create_ulong("pool_size_kernel", 0400, root, &pool_size_kernel);
}

static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
{
	if (gfp & __GFP_DMA)
		pool_size_dma += size;
	else if (gfp & __GFP_DMA32)
		pool_size_dma32 += size;
	else
		pool_size_kernel += size;
}

static bool cma_in_zone(gfp_t gfp)
{
	unsigned long size;
	phys_addr_t end;
	struct cma *cma;

	cma = dev_get_cma_area(NULL);
	if (!cma)
		return false;

	size = cma_get_size(cma);
	if (!size)
		return false;

	/* CMA can't cross zone boundaries, see cma_activate_area() */
	end = cma_get_base(cma) + size - 1;
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
		return end <= DMA_BIT_MASK(zone_dma_bits);
	if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
		return end <= DMA_BIT_MASK(32);
	return true;
}

static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
			      gfp_t gfp)
{
	unsigned int order;
	struct page *page = NULL;
	void *addr;
	int ret = -ENOMEM;

	/* Cannot allocate larger than MAX_ORDER-1 */
	order = min(get_order(pool_size), MAX_ORDER-1);

	do {
		pool_size = 1 << (PAGE_SHIFT + order);
		if (cma_in_zone(gfp))
			page = dma_alloc_from_contiguous(NULL, 1 << order,
							 order, false);
		if (!page)
			page = alloc_pages(gfp, order);
	} while (!page && order-- > 0);
	if (!page)
		goto out;

	arch_dma_prep_coherent(page, pool_size);

#ifdef CONFIG_DMA_DIRECT_REMAP
	addr = dma_common_contiguous_remap(page, pool_size,
					   pgprot_dmacoherent(PAGE_KERNEL),
					   __builtin_return_address(0));
	if (!addr)
		goto free_page;
#else
	addr = page_to_virt(page);
#endif
	/*
	 * Memory in the atomic DMA pools must be unencrypted, the pools do not
	 * shrink so no re-encryption occurs in dma_direct_free().
	 */
	ret = set_memory_decrypted((unsigned long)page_to_virt(page),
				   1 << order);
	if (ret)
		goto remove_mapping;
	ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
				pool_size, NUMA_NO_NODE);
	if (ret)
		goto encrypt_mapping;

	dma_atomic_pool_size_add(gfp, pool_size);
	return 0;

encrypt_mapping:
	ret = set_memory_encrypted((unsigned long)page_to_virt(page),
				   1 << order);
	if (WARN_ON_ONCE(ret)) {
		/* Decrypt succeeded but encrypt failed, purposely leak */
		goto out;
	}
remove_mapping:
#ifdef CONFIG_DMA_DIRECT_REMAP
	dma_common_free_remap(addr, pool_size);
#endif
free_page: __maybe_unused
	__free_pages(page, order);
out:
	return ret;
}

static void atomic_pool_resize(struct gen_pool *pool, gfp_t gfp)
{
	if (pool && gen_pool_avail(pool) < atomic_pool_size)
		atomic_pool_expand(pool, gen_pool_size(pool), gfp);
}

static void atomic_pool_work_fn(struct work_struct *work)
{
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		atomic_pool_resize(atomic_pool_dma,
				   GFP_KERNEL | GFP_DMA);
	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		atomic_pool_resize(atomic_pool_dma32,
				   GFP_KERNEL | GFP_DMA32);
	atomic_pool_resize(atomic_pool_kernel, GFP_KERNEL);
}

static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
						      gfp_t gfp)
{
	struct gen_pool *pool;
	int ret;

	pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
	if (!pool)
		return NULL;

	gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);

	ret = atomic_pool_expand(pool, pool_size, gfp);
	if (ret) {
		gen_pool_destroy(pool);
		pr_err("DMA: failed to allocate %zu KiB %pGg pool for atomic allocation\n",
		       pool_size >> 10, &gfp);
		return NULL;
	}

	pr_info("DMA: preallocated %zu KiB %pGg pool for atomic allocations\n",
		gen_pool_size(pool) >> 10, &gfp);
	return pool;
}

static int __init dma_atomic_pool_init(void)
{
	int ret = 0;

	/*
	 * If coherent_pool was not used on the command line, default the pool
	 * sizes to 128KB per 1GB of memory, min 128KB, max MAX_ORDER-1.
	 */
	if (!atomic_pool_size) {
		unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);
		pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);
		atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K);
	}
	INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);

	atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
						    GFP_KERNEL);
	if (!atomic_pool_kernel)
		ret = -ENOMEM;
	if (has_managed_dma()) {
		atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
						GFP_KERNEL | GFP_DMA);
		if (!atomic_pool_dma)
			ret = -ENOMEM;
	}
	if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
		atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
						GFP_KERNEL | GFP_DMA32);
		if (!atomic_pool_dma32)
			ret = -ENOMEM;
	}

	dma_atomic_pool_debugfs_init();
	return ret;
}
postcore_initcall(dma_atomic_pool_init);

static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
{
	if (prev == NULL) {
		if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
			return atomic_pool_dma32;
		if (atomic_pool_dma && (gfp & GFP_DMA))
			return atomic_pool_dma;
		return atomic_pool_kernel;
	}
	if (prev == atomic_pool_kernel)
		return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
	if (prev == atomic_pool_dma32)
		return atomic_pool_dma;
	return NULL;
}

static struct page *__dma_alloc_from_pool(struct device *dev, size_t size,
		struct gen_pool *pool, void **cpu_addr,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
	unsigned long addr;
	phys_addr_t phys;

	addr = gen_pool_alloc(pool, size);
	if (!addr)
		return NULL;

	phys = gen_pool_virt_to_phys(pool, addr);
	if (phys_addr_ok && !phys_addr_ok(dev, phys, size)) {
		gen_pool_free(pool, addr, size);
		return NULL;
	}

	if (gen_pool_avail(pool) < atomic_pool_size)
		schedule_work(&atomic_pool_work);

	*cpu_addr = (void *)addr;
	memset(*cpu_addr, 0, size);
	return pfn_to_page(__phys_to_pfn(phys));
}

struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t gfp,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
	struct gen_pool *pool = NULL;
	struct page *page;

	while ((pool = dma_guess_pool(pool, gfp))) {
		page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
					     phys_addr_ok);
		if (page)
			return page;
	}

	WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
	return NULL;
}

bool dma_free_from_pool(struct device *dev, void *start, size_t size)
{
	struct gen_pool *pool = NULL;

	while ((pool = dma_guess_pool(pool, 0))) {
		if (!gen_pool_has_addr(pool, (unsigned long)start, size))
			continue;
		gen_pool_free(pool, (unsigned long)start, size);
		return true;
	}

	return false;
}
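Finally, a hedged sketch of the matching release path, valid against either version above; my_free_coherent() is a hypothetical wrapper, not kernel API.

/*
 * Hedged sketch: freeing a pool-backed buffer. @size must be the same value
 * passed at allocation time, since gen_pool_free() needs the chunk length.
 */
static void my_free_coherent(struct device *dev, void *vaddr, size_t size)
{
	/*
	 * dma_free_from_pool() returns false for addresses that are not
	 * pool-backed, letting the caller fall through to its regular path.
	 */
	if (dma_free_from_pool(dev, vaddr, size))
		return;

	/* ... otherwise fall back to the normal, blocking-safe free path. */
}

Since the pools never shrink, a successful free simply returns the chunk to the gen_pool for reuse; no re-encryption or unmapping happens here.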