Note: this file does not exist in v5.14.15; the kernel's FMR API, on which it depends, was removed in later releases.
/*
 * Copyright (c) 2016 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ib_mr.h"

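/* Allocate an FMR-backed MR for a mapping of up to @npages pages. The
 * MR comes from the device's 8K pool for small messages or the 1M pool
 * for large ones; a pooled MR is reused when one is available.
 * Returns an ERR_PTR on failure.
 */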
struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
{
	struct rds_ib_mr_pool *pool;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_fmr *fmr;
	int err = 0;

	if (npages <= RDS_MR_8K_MSG_SIZE)
		pool = rds_ibdev->mr_8k_pool;
	else
		pool = rds_ibdev->mr_1m_pool;

	ibmr = rds_ib_try_reuse_ibmr(pool);
	if (ibmr)
		return ibmr;

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
			    rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	fmr = &ibmr->u.fmr;
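	/* Grant local write plus remote read/write/atomic access so a
	 * single MR can back any RDS RDMA operation.
	 */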
	fmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
			(IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE |
			 IB_ACCESS_REMOTE_ATOMIC),
			&pool->fmr_attr);
	if (IS_ERR(fmr->fmr)) {
		err = PTR_ERR(fmr->fmr);
		fmr->fmr = NULL;
		pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, err);
		goto out_no_cigar;
	}

	ibmr->pool = pool;
	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);

	return ibmr;

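/* rds_ib_try_reuse_ibmr() bumped the pool's item_count even though it
 * returned no MR; undo that on any failure path. kfree(NULL) is safe
 * for the !ibmr case.
 */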
out_no_cigar:
	kfree(ibmr);
	atomic_dec(&pool->item_count);

	return ERR_PTR(err);
}

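/* DMA-map @sg and program the FMR with the resulting page list.
 * Returns 0 on success; on failure the scatterlist is unmapped again
 * and a negative errno is returned.
 */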
static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev,
			  struct rds_ib_mr *ibmr, struct scatterlist *sg,
			  unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct rds_ib_fmr *fmr = &ibmr->u.fmr;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		pr_warn("RDS/IB: %s failed!\n", __func__);
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

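	/* An FMR maps a run of whole pages, so only the first segment may
	 * start and only the last segment may end off a page boundary;
	 * count those partial pages and reject anything else.
	 */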
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		if (dma_addr & ~PAGE_MASK) {
			if (i > 0) {
				ib_dma_unmap_sg(dev, sg, nents,
						DMA_BIDIRECTIONAL);
				return -EINVAL;
			} else {
				++page_cnt;
			}
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1) {
				ib_dma_unmap_sg(dev, sg, nents,
						DMA_BIDIRECTIONAL);
				return -EINVAL;
			} else {
				++page_cnt;
			}
		}

		len += dma_len;
	}

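	/* The full pages (len >> PAGE_SHIFT) plus the partial pages counted
	 * above give an upper bound on the page-list entries built below;
	 * use it for the FMR limit check and to size the array.
	 */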
	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > ibmr->pool->fmr_attr.max_pages) {
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		return -EINVAL;
	}

	dma_pages = kmalloc_array_node(page_cnt, sizeof(u64), GFP_ATOMIC,
				       rdsibdev_to_node(rds_ibdev));
	if (!dma_pages) {
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		return -ENOMEM;
	}

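	/* Build the page list: one entry per PAGE_SIZE chunk of each
	 * segment, each aligned down to a page boundary.
	 */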
	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

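	/* Hand the page list to the HCA, mapping it at I/O virtual
	 * address 0 (io_addr).
	 */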
	ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr);
	if (ret) {
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		goto out;
	}

	/* Success: the MR has been remapped, so the old mapping can be
	 * torn down safely.
	 */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_used);
	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}

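/* Allocate and map an FMR for @sg in one step. On success the MR's
 * rkey is returned through @key; on mapping failure the MR is returned
 * to its pool and an ERR_PTR is returned.
 */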
struct rds_ib_mr *rds_ib_reg_fmr(struct rds_ib_device *rds_ibdev,
				 struct scatterlist *sg,
				 unsigned long nents,
				 u32 *key)
{
	struct rds_ib_mr *ibmr;
	struct rds_ib_fmr *fmr;
	int ret;

	ibmr = rds_ib_alloc_fmr(rds_ibdev, nents);
	if (IS_ERR(ibmr))
		return ibmr;

	ibmr->device = rds_ibdev;
	fmr = &ibmr->u.fmr;
	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
	if (ret) {
		/* Don't hand an unmapped MR back to the caller: return it
		 * to the pool and propagate the mapping error instead.
		 */
		rds_ib_free_mr(ibmr, 0);
		return ERR_PTR(ret);
	}

	*key = fmr->fmr->rkey;
	return ibmr;
}

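/* Invalidate a batch of FMRs and unpin their pages. MRs past their
 * remap limit are always destroyed, and additional ones are destroyed
 * until @goal is reached; @nfreed and @unpinned are updated for the
 * caller's accounting.
 */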
void rds_ib_unreg_fmr(struct list_head *list, unsigned int *nfreed,
		      unsigned long *unpinned, unsigned int goal)
{
	struct rds_ib_mr *ibmr, *next;
	struct rds_ib_fmr *fmr;
	LIST_HEAD(fmr_list);
	int ret = 0;
	unsigned int freed = *nfreed;

	/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
	list_for_each_entry(ibmr, list, unmap_list) {
		fmr = &ibmr->u.fmr;
		list_add(&fmr->fmr->list, &fmr_list);
	}

	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		pr_warn("RDS/IB: FMR invalidation failed (err=%d)\n", ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, list, unmap_list) {
		fmr = &ibmr->u.fmr;
		*unpinned += ibmr->sg_len;
		__rds_ib_teardown_mr(ibmr);
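		/* Destroy the MR if we are still short of the freeing goal,
		 * or if it has hit its remap limit and cannot be reused.
		 */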
		if (freed < goal ||
		    ibmr->remap_count >= ibmr->pool->fmr_attr.max_maps) {
			if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_free);
			list_del(&ibmr->unmap_list);
			ib_dealloc_fmr(fmr->fmr);
			kfree(ibmr);
			freed++;
		}
	}
	*nfreed = freed;
}

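/* Return an MR to its pool: MRs that have reached the remap limit go
 * on the drop list to be destroyed, the rest on the free list for
 * reuse by rds_ib_alloc_fmr().
 */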
void rds_ib_free_fmr_list(struct rds_ib_mr *ibmr)
{
	struct rds_ib_mr_pool *pool = ibmr->pool;

	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);
}