// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;

static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return __phys_to_dma(dev, phys);
	return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

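/*
 * Report the smallest DMA mask that covers all of installed memory: take
 * the bus address of the highest physical page, keep only its top bit,
 * and widen that to an all-ones mask.  E.g. for a max_dma of 0x123456789
 * the top bit is bit 32, so the required mask becomes
 * (1ULL << 32) * 2 - 1 = 0x1ffffffff.
 */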
u64 dma_direct_get_required_mask(struct device *dev)
{
	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma_direct(dev, phys);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
				  u64 *phys_limit)
{
	u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);

	if (force_dma_unencrypted(dev))
		*phys_limit = __dma_to_phys(dev, dma_limit);
	else
		*phys_limit = dma_to_phys(dev, dma_limit);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first.  If that returns memory that isn't actually addressable
	 * we will fall back to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no-ops without the corresponding
	 * zones.
	 */
	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		return GFP_DMA;
	if (*phys_limit <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}

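/*
 * Check that the whole buffer [phys, phys + size) is reachable through
 * both the device's coherent DMA mask and any bus address limit.
 */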
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	return phys_to_dma_direct(dev, phys) + size - 1 <=
			min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

/*
 * Decrypting memory is allowed to block, so if this device requires
 * unencrypted memory it must come from atomic pools.
 */
static inline bool dma_should_alloc_from_pool(struct device *dev, gfp_t gfp,
					      unsigned long attrs)
{
	if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
		return false;
	if (gfpflags_allow_blocking(gfp))
		return false;
	if (force_dma_unencrypted(dev))
		return true;
	if (!IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
		return false;
	if (dma_alloc_need_uncached(dev, attrs))
		return true;
	return false;
}

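/*
 * On the free side we cannot tell from the attributes alone whether an
 * allocation came from the atomic pools, so with CONFIG_DMA_COHERENT_POOL
 * we always attempt a pool free first and rely on dma_free_from_pool()
 * rejecting addresses that are not pool-backed (see dma_direct_free_pages()).
 */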
static inline bool dma_should_free_from_pool(struct device *dev,
					     unsigned long attrs)
{
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
		return true;
	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev))
		return false;
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
		return true;
	return false;
}

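/*
 * Allocate raw pages for a coherent mapping: try CMA first, then the page
 * allocator in the zone suggested by dma_direct_optimal_gfp_mask().  If the
 * returned pages turn out not to be addressable by the device, retry in the
 * next lower zone (GFP_DMA32, then GFP_DMA).
 */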
static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp, unsigned long attrs)
{
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_limit;

	WARN_ON_ONCE(!PAGE_ALIGNED(size));

	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	/* we always manually zero the memory once we are done: */
	gfp &= ~__GFP_ZERO;
	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
					   &phys_limit);
	page = dma_alloc_contiguous(dev, size, gfp);
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}

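/*
 * Allocate and set up a coherent buffer.  Depending on the device and the
 * attributes this takes one of four paths: the atomic pools (when blocking
 * is not allowed), an opaque struct page cookie for
 * DMA_ATTR_NO_KERNEL_MAPPING, a vmap()ed uncached/highmem remapping, or a
 * plain page_address() of the allocated pages.
 */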
void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	void *ret;
	int err;

	size = PAGE_ALIGN(size);

	if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
		u64 phys_mask;

		gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
				&phys_mask);
		page = dma_alloc_from_pool(dev, size, &ret, gfp,
				dma_coherent_ok);
		if (!page)
			return NULL;
		goto done;
	}

	page = __dma_direct_alloc_pages(dev, size, gfp, attrs);
	if (!page)
		return NULL;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		/* remove any dirty cache lines on the kernel alias */
		if (!PageHighMem(page))
			arch_dma_prep_coherent(page, size);
		/* return the page pointer as the opaque cookie */
		ret = page;
		goto done;
	}

	if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	     dma_alloc_need_uncached(dev, attrs)) ||
	    (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
		/* remove any dirty cache lines on the kernel alias */
		arch_dma_prep_coherent(page, size);

		/* create a coherent mapping */
		ret = dma_common_contiguous_remap(page, size,
				dma_pgprot(dev, PAGE_KERNEL, attrs),
				__builtin_return_address(0));
		if (!ret)
			goto out_free_pages;
		if (force_dma_unencrypted(dev)) {
			err = set_memory_decrypted((unsigned long)ret,
						   1 << get_order(size));
			if (err)
				goto out_free_pages;
		}
		memset(ret, 0, size);
		goto done;
	}

	if (PageHighMem(page)) {
		/*
		 * Depending on the cma= arguments and per-arch setup
		 * dma_alloc_contiguous could return highmem pages.
		 * Without remapping there is no way to return them here,
		 * so log an error and fail.
		 */
		dev_info(dev, "Rejecting highmem page from CMA.\n");
		goto out_free_pages;
	}

	ret = page_address(page);
	if (force_dma_unencrypted(dev)) {
		err = set_memory_decrypted((unsigned long)ret,
					   1 << get_order(size));
		if (err)
			goto out_free_pages;
	}

	memset(ret, 0, size);

	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    dma_alloc_need_uncached(dev, attrs)) {
		arch_dma_prep_coherent(page, size);
		ret = arch_dma_set_uncached(ret, size);
		if (IS_ERR(ret))
			goto out_encrypt_pages;
	}
done:
	if (force_dma_unencrypted(dev))
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	else
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	return ret;

out_encrypt_pages:
	if (force_dma_unencrypted(dev)) {
		err = set_memory_encrypted((unsigned long)page_address(page),
					   1 << get_order(size));
		/* If memory cannot be re-encrypted, it must be leaked */
		if (err)
			return NULL;
	}
out_free_pages:
	dma_free_contiguous(dev, page, size);
	return NULL;
}

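/*
 * Undo dma_direct_alloc_pages(): try the atomic pools first, then unwind
 * whichever mapping flavour the allocation used before giving the pages
 * back to CMA or the page allocator.
 */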
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (dma_should_free_from_pool(dev, attrs) &&
	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
		return;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		dma_free_contiguous(dev, cpu_addr, size);
		return;
	}

	if (force_dma_unencrypted(dev))
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
		vunmap(cpu_addr);
	else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
		arch_dma_clear_uncached(cpu_addr, size);

	dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
}

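/*
 * Entry points used by the dma-mapping core.  Architectures that can neither
 * remap (CONFIG_DMA_DIRECT_REMAP) nor flip page attributes
 * (CONFIG_ARCH_HAS_DMA_SET_UNCACHED) must provide their own
 * arch_dma_alloc()/arch_dma_free() for non-coherent devices.
 */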
void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_alloc_need_uncached(dev, attrs))
		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}

void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_alloc_need_uncached(dev, attrs))
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
	else
		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
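/*
 * Transfer buffer ownership to the device: bounce each segment into the
 * swiotlb buffer first (if it was bounced), then perform the architecture
 * cache maintenance so the device observes up-to-date data.
 */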
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_tbl_sync_single(dev, paddr, sg->length,
					dir, SYNC_FOR_DEVICE);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length, dir);
	}
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
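/*
 * Transfer buffer ownership back to the CPU, mirroring the for_device path
 * in reverse: architecture cache maintenance first, then copy any bounced
 * segments back out of the swiotlb buffer.
 */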
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
					SYNC_FOR_CPU);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}

void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
			     attrs);
}
#endif

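/*
 * Map each scatterlist segment individually.  On failure everything mapped
 * so far is unwound with DMA_ATTR_SKIP_CPU_SYNC, since the device never
 * gained ownership of those buffers, and 0 is returned per the dma_map_sg()
 * contract.
 */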
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}

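/*
 * "Mapping" a physical resource (e.g. MMIO) is a pure addressability check
 * in dma-direct: there is no struct page and nothing to bounce, so the
 * physical address is used as the bus address or the mapping fails.
 */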
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
		dev_err_once(dev,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		WARN_ON_ONCE(1);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_direct_to_page(dev, dma_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

bool dma_direct_can_mmap(struct device *dev)
{
	return dev_is_dma_coherent(dev) ||
		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}

int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask >= DMA_BIT_MASK(32))
		return 1;

	/*
	 * This check needs to be against the actual bit mask value, so
	 * use __phys_to_dma() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
	return mask >= __phys_to_dma(dev, min_mask);
}

size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active() &&
	    (dma_addressing_limited(dev) || swiotlb_force == SWIOTLB_FORCE))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}

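/*
 * Syncs can only be elided when the device is cache-coherent and the
 * buffer was not bounced through swiotlb.
 */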
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return !dev_is_dma_coherent(dev) ||
		is_swiotlb_buffer(dma_to_phys(dev, dma_addr));
}
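
/*
 * Usage sketch (illustrative only, not part of this file): drivers never
 * call the dma_direct_* helpers above directly.  They go through the
 * generic DMA API, which dispatches here when the device has no
 * dma_map_ops attached.  Roughly, for a device limited to 32-bit bus
 * addresses:
 *
 *	dma_addr_t dma_handle;
 *	void *buf;
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 *	buf = dma_alloc_coherent(dev, SZ_64K, &dma_handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_64K, buf, dma_handle);
 *
 * where dma_alloc_coherent() ends up in dma_direct_alloc() and
 * dma_free_coherent() in dma_direct_free().
 */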