v4.17
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * busses using the iommu infrastructure
 */

#include <linux/export.h>
#include <asm/iommu.h>

/*
 * Generic iommu implementation
 */

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag,
				      unsigned long attrs)
{
	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
				    dma_handle, dev->coherent_dma_mask, flag,
				    dev_to_node(dev));
}
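
/*
 * Usage sketch (not part of the original file): drivers never call
 * dma_iommu_alloc_coherent() directly; they call dma_alloc_coherent(),
 * which dispatches here once dma_iommu_ops is installed on the device.
 * "pdev" and the 4 KiB size below are hypothetical.
 */
#if 0
	dma_addr_t bus_addr;
	void *cpu_addr = dma_alloc_coherent(&pdev->dev, SZ_4K, &bus_addr,
					    GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;
	/* ... device DMAs via bus_addr, CPU accesses cpu_addr ... */
	dma_free_coherent(&pdev->dev, SZ_4K, cpu_addr, bus_addr);
#endif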

static void dma_iommu_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle,
				    unsigned long attrs)
{
	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
			      size, device_to_mask(dev), direction, attrs);
}
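
/*
 * Usage sketch (illustrative, names hypothetical): callers reach
 * dma_iommu_map_page() through the generic dma_map_page() wrapper and
 * must check the result with dma_mapping_error() before use.
 */
#if 0
	dma_addr_t mapping = dma_map_page(dev, page, 0, PAGE_SIZE,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, mapping))
		return -EIO;
	/* ... device reads PAGE_SIZE bytes starting at "mapping" ... */
	dma_unmap_page(dev, mapping, PAGE_SIZE, DMA_TO_DEVICE);
#endif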


static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				 size_t size, enum dma_data_direction direction,
				 unsigned long attrs)
{
	iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
			 attrs);
}


static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
				device_to_mask(dev), direction, attrs);
}

static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction,
		unsigned long attrs)
{
	ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
			   direction, attrs);
}
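
/*
 * Usage sketch (illustrative, names hypothetical): the iommu may
 * coalesce scatterlist entries, so the device must be programmed with
 * the count returned by dma_map_sg(), while dma_unmap_sg() takes the
 * original nents.
 */
#if 0
	int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);

	if (!mapped)
		return -EIO;
	/* ... program "mapped" descriptors into the device ... */
	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
#endif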

/* We support DMA to/from any memory page via the iommu */
int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	if (!tbl) {
		dev_info(dev, "Warning: IOMMU dma not supported: mask 0x%08llx"
			", table unavailable\n", mask);
		return 0;
	}

	if (tbl->it_offset > (mask >> tbl->it_page_shift)) {
		dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
		dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
				mask, tbl->it_offset << tbl->it_page_shift);
		return 0;
	} else
		return 1;
}
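
/*
 * Worked example of the check above (numbers hypothetical): with
 * it_page_shift = 12 and it_offset = 0x100000, the DMA window starts
 * at bus address 0x100000 << 12 = 4 GiB.  A 32-bit mask (0xffffffff)
 * shifted right by 12 is 0xfffff < it_offset, so a device limited to
 * 32-bit DMA cannot reach the window at all and the check fails.
 */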

static u64 dma_iommu_get_required_mask(struct device *dev)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);
	u64 mask;
	if (!tbl)
		return 0;

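	/*
	 * Explanatory note (added): the highest TCE index this table can
	 * hand out is bounded by it_offset + it_size; take its top set
	 * bit, then fill in every bit below it.  E.g. (hypothetical)
	 * it_offset + it_size = 0x10000: fls_long() = 17, so
	 * mask = 1ULL << 16 = 0x10000, and mask += mask - 1 yields 0x1ffff.
	 */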
	mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) - 1);
	mask += mask - 1;

	return mask;
}

int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == IOMMU_MAPPING_ERROR;
}

struct dma_map_ops dma_iommu_ops = {
	.alloc			= dma_iommu_alloc_coherent,
	.free			= dma_iommu_free_coherent,
	.mmap			= dma_nommu_mmap_coherent,
	.map_sg			= dma_iommu_map_sg,
	.unmap_sg		= dma_iommu_unmap_sg,
	.dma_supported		= dma_iommu_dma_supported,
	.map_page		= dma_iommu_map_page,
	.unmap_page		= dma_iommu_unmap_page,
	.get_required_mask	= dma_iommu_get_required_mask,
	.mapping_error		= dma_iommu_mapping_error,
};
EXPORT_SYMBOL(dma_iommu_ops);
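
/*
 * Wiring sketch (illustrative, "dev" and "tbl" hypothetical): platform
 * or bus code attaches an iommu_table to the device and then installs
 * these ops, after which the generic DMA API routes through them.
 * set_iommu_table_base() is the powerpc helper assumed here.
 */
#if 0
	set_iommu_table_base(dev, tbl);
	set_dma_ops(dev, &dma_iommu_ops);
#endif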
v3.15
 
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * busses using the iommu infrastructure
 */

#include <linux/export.h>
#include <asm/iommu.h>

/*
 * Generic iommu implementation
 */

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag,
				      struct dma_attrs *attrs)
{
	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
				    dma_handle, dev->coherent_dma_mask, flag,
				    dev_to_node(dev));
}

static void dma_iommu_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle,
				    struct dma_attrs *attrs)
{
	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
			      size, device_to_mask(dev), direction, attrs);
}


static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
			 attrs);
}


static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	return iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
			    device_to_mask(dev), direction, attrs);
}
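
/*
 * Usage sketch (illustrative, names hypothetical): kernels of this
 * generation pass DMA attributes as a struct dma_attrs object (from
 * <linux/dma-attrs.h>) rather than the later unsigned long bitmask,
 * e.g. via dma_map_sg_attrs().
 */
#if 0
	struct dma_attrs attrs;

	init_dma_attrs(&attrs);
	dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
	if (!dma_map_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, &attrs))
		return -EIO;
#endif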

static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems, direction,
		       attrs);
}

/* We support DMA to/from any memory page via the iommu */
static int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	if (!tbl) {
		dev_info(dev, "Warning: IOMMU dma not supported: mask 0x%08llx"
			", table unavailable\n", mask);
		return 0;
	}

	if (tbl->it_offset > (mask >> tbl->it_page_shift)) {
		dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
		dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
				mask, tbl->it_offset << tbl->it_page_shift);
		return 0;
	} else
		return 1;
}

static u64 dma_iommu_get_required_mask(struct device *dev)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);
	u64 mask;
	if (!tbl)
		return 0;

	mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) - 1);
	mask += mask - 1;

	return mask;
}

struct dma_map_ops dma_iommu_ops = {
	.alloc			= dma_iommu_alloc_coherent,
	.free			= dma_iommu_free_coherent,
	.mmap			= dma_direct_mmap_coherent,
	.map_sg			= dma_iommu_map_sg,
	.unmap_sg		= dma_iommu_unmap_sg,
	.dma_supported		= dma_iommu_dma_supported,
	.map_page		= dma_iommu_map_page,
	.unmap_page		= dma_iommu_unmap_page,
	.get_required_mask	= dma_iommu_get_required_mask,
};
EXPORT_SYMBOL(dma_iommu_ops);