include/rdma/ib_umem.h at v3.1
 
/*
 * Copyright (c) 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

struct ib_ucontext;

struct ib_umem {
	struct ib_ucontext     *context;
	size_t			length;		/* length of the user buffer */
	int			offset;		/* offset within the first page */
	int			page_size;
	int                     writable;
	int                     hugetlb;
	struct list_head	chunk_list;	/* list of struct ib_umem_chunk */
	struct work_struct	work;
	struct mm_struct       *mm;
	unsigned long		diff;
};

struct ib_umem_chunk {
	struct list_head	list;
	int                     nents;	/* # of scatterlist entries */
	int                     nmap;	/* # of DMA-mapped entries */
	struct scatterlist      page_list[0];
};
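
In this era a registered region's pages live in a list of chunks, each holding a scatterlist. A minimal sketch of walking the DMA-mapped segments, assuming a umem returned by a successful ib_umem_get() call; the function name build_page_table() is hypothetical:

/* Illustrative only: walk a DMA-mapped umem in the chunk-list era. */
static void build_page_table(struct ib_umem *umem)
{
	struct ib_umem_chunk *chunk;
	struct scatterlist *sg;
	int i;

	list_for_each_entry(chunk, &umem->chunk_list, list)
		for_each_sg(chunk->page_list, sg, chunk->nmap, i) {
			dma_addr_t dma = sg_dma_address(sg);

			/* program dma / sg_dma_len(sg) into the HW page table */
			pr_debug("dma %pad len %u\n", &dma, sg_dma_len(sg));
		}
}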

#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
			    size_t size, int access, int dmasync);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_page_count(struct ib_umem *umem);
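
A hedged sketch of the usual calling pattern in a consumer driver (the surrounding function and names are illustrative, not part of this header; IS_ERR() comes from <linux/err.h>):

/* Illustrative only: pin a user buffer for a hypothetical reg_mr path. */
static struct ib_umem *pin_user_buffer(struct ib_ucontext *ctx, u64 start,
				       u64 length, int access)
{
	/* dmasync = 0: no DMA write-ordering barrier requested */
	struct ib_umem *umem = ib_umem_get(ctx, start, length, access, 0);

	if (IS_ERR(umem))
		return umem;	/* propagates -ENOMEM, -EFAULT, ... */

	pr_debug("pinned %d pages\n", ib_umem_page_count(umem));
	return umem;		/* caller must pair with ib_umem_release() */
}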

#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_ucontext *context,
					  unsigned long addr, size_t size,
					  int access, int dmasync)
{
	return ERR_PTR(-EINVAL);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_page_count(struct ib_umem *umem) { return 0; }

#endif /* CONFIG_INFINIBAND_USER_MEM */

#endif /* IB_UMEM_H */

include/rdma/ib_umem.h at v6.2

/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2020 Intel Corporation.  All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct ib_ucontext;
struct ib_umem_odp;
struct dma_buf_attach_ops;

struct ib_umem {
	struct ib_device       *ibdev;
	struct mm_struct       *owning_mm;
	u64 iova;			/* device virtual address of the region */
	size_t			length;
	unsigned long		address;	/* userspace virtual address */
	u32 writable : 1;
	u32 is_odp : 1;
	u32 is_dmabuf : 1;
	struct work_struct	work;
	struct sg_append_table sgt_append;
};

struct ib_umem_dmabuf {
	struct ib_umem umem;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *first_sg;
	struct scatterlist *last_sg;
	unsigned long first_sg_offset;
	unsigned long last_sg_trim;
	void *private;
	u8 pinned : 1;
};

static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_dmabuf, umem);
}

/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	return umem->address & ~PAGE_MASK;
}

static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
					       unsigned long pgsz)
{
	return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
	       (pgsz - 1);
}

static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
					    unsigned long pgsz)
{
	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
			 ALIGN_DOWN(umem->iova, pgsz))) /
	       pgsz;
}

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}
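
As a worked example of the arithmetic above (values chosen for illustration): with iova = 0x11000, length = 0x3000 and pgsz = 0x10000 (64 KiB), ALIGN(0x14000, 0x10000) = 0x20000 and ALIGN_DOWN(0x11000, 0x10000) = 0x10000, so the region occupies (0x20000 - 0x10000) / 0x10000 = 1 DMA block; the same range counted by ib_umem_num_pages() with PAGE_SIZE = 4 KiB is 3 pages.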

static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem,
						unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
				umem->sgt_append.sgt.nents, pgsz);
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: block iterator holding the current DMA block
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
	     __rdma_block_iter_next(biter);)
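
A minimal usage sketch for the iterator above (the surrounding function and the pte array are hypothetical; rdma_block_iter_dma_address() comes from <rdma/ib_verbs.h>):

/* Illustrative: translate a umem into device page-table entries. */
static void write_ptes(struct ib_umem *umem, u64 *pte, unsigned long pgsz)
{
	struct ib_block_iter biter;

	/* pgsz is assumed to come from ib_umem_find_best_pgsz() */
	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
		*pte++ = rdma_block_iter_dma_address(&biter);
}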

#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);

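Note how the signature differs from v3.1: ib_umem_get() now takes the ib_device rather than an ib_ucontext, and the dmasync parameter is gone. A hypothetical caller (names illustrative; IB_ACCESS_LOCAL_WRITE is from <rdma/ib_verbs.h>):

/* Illustrative v6.2-era call, mirroring the v3.1 sketch earlier. */
static struct ib_umem *pin_for_mr(struct ib_device *ibdev, u64 start,
				  u64 length)
{
	return ib_umem_get(ibdev, start, length, IB_ACCESS_LOCAL_WRITE);
}
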
/**
 * ib_umem_find_best_pgoff - Find best HW page size
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @pgoff_bitmask: Mask of bits that can be represented with an offset
 *
 * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
 * an IOVA it accepts a bitmask specifying what address bits can be represented
 * with a page offset.
 *
 * For instance if the HW has multiple page sizes, requires 64 byte alignment,
 * and can support aligned offsets up to 4032 then pgoff_bitmask would be
 * "111111000000".
 *
 * If the pgoff_bitmask requires either alignment in the low bit or an
 * unavailable page size for the high bits, this function returns 0.
 */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	struct scatterlist *sg = umem->sgt_append.sgt.sgl;
	dma_addr_t dma_addr;

	dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
	return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
				      dma_addr & pgoff_bitmask);
}

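For the example in the comment above (64-byte alignment, aligned offsets up to 4032), the mask "111111000000" is 0xfc0, so a caller could be sketched as (function name hypothetical):

/* Illustrative: HW that can absorb offsets in bits 6..11 of the address. */
static unsigned long pick_pgsz(struct ib_umem *umem,
			       unsigned long hw_pgsz_bitmap)
{
	return ib_umem_find_best_pgoff(umem, hw_pgsz_bitmap, 0xfc0);
}
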
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops);
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);

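A hedged sketch of importing a dma-buf with the pinned variant, which maps the pages up front so no move_notify attach ops are needed (the surrounding function is illustrative; ERR_CAST() is from <linux/err.h>):

/* Illustrative: import a pinned dma-buf fd as an ib_umem. */
static struct ib_umem *import_dmabuf(struct ib_device *dev, int fd,
				     size_t size, int access)
{
	struct ib_umem_dmabuf *umem_dmabuf;

	umem_dmabuf = ib_umem_dmabuf_get_pinned(dev, 0, size, fd, access);
	if (IS_ERR(umem_dmabuf))
		return ERR_CAST(umem_dmabuf);

	return &umem_dmabuf->umem;	/* pair with ib_umem_dmabuf_release() */
}
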
#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
					  unsigned long addr, size_t size,
					  int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
				    size_t length)
{
	return -EOPNOTSUPP;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	return 0;
}
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	return 0;
}
static inline
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset,
					  size_t size, int fd,
					  int access,
					  const struct dma_buf_attach_ops *ops)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
			  size_t size, int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }

#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */