/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct ib_ucontext;
struct ib_umem_odp;
struct dma_buf_attach_ops;

struct ib_umem {
	struct ib_device *ibdev;	/* device this umem belongs to */
	struct mm_struct *owning_mm;	/* mm that owns the pinned pages */
	u64 iova;			/* device virtual address of the region */
	size_t length;			/* length of the region in bytes */
	unsigned long address;		/* userspace start address */
	u32 writable : 1;
	u32 is_odp : 1;			/* on-demand paging umem */
	u32 is_dmabuf : 1;		/* dma-buf backed umem */
	struct sg_append_table sgt_append;	/* pinned pages as an sg table */
};

struct ib_umem_dmabuf {
	struct ib_umem umem;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *first_sg;
	struct scatterlist *last_sg;
	unsigned long first_sg_offset;
	unsigned long last_sg_trim;
	void *private;
	u8 pinned : 1;		/* pages pinned for the attachment's lifetime */
	u8 revoked : 1;		/* mapping revoked; DMA must not continue */
};

static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_dmabuf, umem);
}

/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	return umem->address & ~PAGE_MASK;
}

static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
					       unsigned long pgsz)
{
	return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
	       (pgsz - 1);
}

static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
					    unsigned long pgsz)
{
	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
			 ALIGN_DOWN(umem->iova, pgsz))) /
	       pgsz;
}
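
/*
 * Worked example (illustrative values): for iova = 0x1234,
 * length = 0x2000 and pgsz = 0x1000:
 *	ALIGN(0x1234 + 0x2000, 0x1000) = 0x4000
 *	ALIGN_DOWN(0x1234, 0x1000)     = 0x1000
 * so the umem spans (0x4000 - 0x1000) / 0x1000 = 3 DMA blocks.
 */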

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}

static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem,
						unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
				umem->sgt_append.sgt.nents, pgsz);
	biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
	biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
}

static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
{
	return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
	     __rdma_umem_block_iter_next(biter);)
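
/*
 * Illustrative sketch (not part of this header): a driver collecting the
 * DMA address of every pgsz-aligned block, e.g. to populate a device page
 * table. "pas" is a hypothetical caller-provided array sized by
 * ib_umem_num_dma_blocks(umem, pgsz); pgsz would normally come from
 * ib_umem_find_best_pgsz().
 */
static inline void example_umem_fill_pas(struct ib_umem *umem, u64 *pas,
					 unsigned long pgsz)
{
	struct ib_block_iter biter;
	size_t i = 0;

	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
		pas[i++] = rdma_block_iter_dma_address(&biter);
}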

#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);
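
/*
 * Usage sketch (illustrative only, not part of the API): the usual
 * lifecycle is to pin the user range at MR registration and release it at
 * deregistration. IB_ACCESS_LOCAL_WRITE comes from rdma/ib_verbs.h.
 */
static inline struct ib_umem *example_umem_pin(struct ib_device *ibdev,
					       unsigned long addr, size_t len)
{
	struct ib_umem *umem;

	umem = ib_umem_get(ibdev, addr, len, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem))
		return umem;
	/* ... pick a page size and program the HW ... */
	/* On teardown: ib_umem_release(umem); */
	return umem;
}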

/**
 * ib_umem_find_best_pgoff - Find best HW page size
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @pgoff_bitmask: Mask of bits that can be represented with an offset
 *
 * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
 * an IOVA it accepts a bitmask specifying what address bits can be represented
 * with a page offset.
 *
 * For instance if the HW has multiple page sizes, requires 64 byte alignment,
 * and can support aligned offsets up to 4032 then pgoff_bitmask would be
 * "111111000000".
 *
 * If the pgoff_bitmask requires either alignment in the low bit or an
 * unavailable page size for the high bits, this function returns 0.
 */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	struct scatterlist *sg = umem->sgt_append.sgt.sgl;
	dma_addr_t dma_addr;

	dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
	return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
				      dma_addr & pgoff_bitmask);
}
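
/*
 * Usage sketch (hypothetical HW values): supporting 4K/2M/1G pages with a
 * 64-byte-aligned offset of up to 4032 bytes, matching the "111111000000"
 * mask in the comment above (GENMASK(11, 6)). SZ_* and GENMASK() come
 * from linux/sizes.h and linux/bits.h.
 */
static inline unsigned long example_pick_pgsz(struct ib_umem *umem)
{
	return ib_umem_find_best_pgoff(umem, SZ_4K | SZ_2M | SZ_1G,
				       GENMASK(11, 6));
}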

struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops);
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access);
struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
					  struct device *dma_device,
					  unsigned long offset, size_t size,
					  int fd, int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf);
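
/*
 * Call-flow sketch (illustrative): a driver importing a pinned dma-buf
 * typically does
 *
 *	umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, 0, len, fd, access);
 *	if (IS_ERR(umem_dmabuf))
 *		return PTR_ERR(umem_dmabuf);
 *	...
 *	ib_umem_dmabuf_release(umem_dmabuf);
 *
 * The unpinned ib_umem_dmabuf_get() variant instead supplies
 * dma_buf_attach_ops with a move_notify callback and maps/unmaps pages
 * with ib_umem_dmabuf_map_pages()/ib_umem_dmabuf_unmap_pages() while
 * holding the dma-buf reservation lock.
 */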

#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
					  unsigned long addr, size_t size,
					  int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
				    size_t length)
{
	return -EOPNOTSUPP;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	return 0;
}
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	return 0;
}
static inline
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset,
					  size_t size, int fd,
					  int access,
					  const struct dma_buf_attach_ops *ops)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
			  size_t size, int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
					  struct device *dma_device,
					  unsigned long offset, size_t size,
					  int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf) { }

#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */