/*
 * Copyright (c) 2016 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
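
/*
 * FMR (Fast Memory Region) based RDMA memory registration for RDS
 * over InfiniBand.  MRs come from two size-keyed pools (8K and 1M)
 * and are recycled through per-pool free/drop lists instead of being
 * destroyed on every unregistration.
 *
 * Note: the legacy FMR API has since been removed; this file does not
 * exist in v5.9.
 */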

#include "ib_mr.h"

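/*
 * Allocate an FMR-backed MR from the pool that matches @npages (the
 * 8K pool for small messages, the 1M pool otherwise), preferring to
 * reuse a cached MR over allocating a new one.
 */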
struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
{
	struct rds_ib_mr_pool *pool;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_fmr *fmr;
	int err = 0;

	if (npages <= RDS_MR_8K_MSG_SIZE)
		pool = rds_ibdev->mr_8k_pool;
	else
		pool = rds_ibdev->mr_1m_pool;

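	/* Schedule an asynchronous flush once a tenth of the pool is dirty */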
	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

	/* Switch pools if one of them is approaching its upper limit */
	if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			pool = rds_ibdev->mr_1m_pool;
		else
			pool = rds_ibdev->mr_8k_pool;
	}

	ibmr = rds_ib_try_reuse_ibmr(pool);
	if (ibmr)
		return ibmr;

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
			    rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

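	/* Grant the remote peer read, write, and atomic access to this MR */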
	fmr = &ibmr->u.fmr;
	fmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
			(IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE |
			 IB_ACCESS_REMOTE_ATOMIC),
			&pool->fmr_attr);
	if (IS_ERR(fmr->fmr)) {
		err = PTR_ERR(fmr->fmr);
		fmr->fmr = NULL;
		pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, err);
		goto out_no_cigar;
	}

	ibmr->pool = pool;
	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);

	return ibmr;

out_no_cigar:
	kfree(ibmr);
	atomic_dec(&pool->item_count);

	return ERR_PTR(err);
}

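/*
 * DMA-map @sg and hand the resulting list of pages to the HCA via
 * ib_map_phys_fmr().  Every entry must cover whole pages, except that
 * the first may start and the last may end at a sub-page offset.
 */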
static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev,
			  struct rds_ib_mr *ibmr, struct scatterlist *sg,
			  unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct rds_ib_fmr *fmr = &ibmr->u.fmr;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		pr_warn("RDS/IB: %s failed!\n", __func__);
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

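	/*
	 * An FMR maps a run of whole pages, so reject any entry that
	 * starts or ends off a page boundary unless it is the first
	 * or the last entry in the scatterlist.
	 */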
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = sg_dma_len(&scat[i]);
		u64 dma_addr = sg_dma_address(&scat[i]);

		if (dma_addr & ~PAGE_MASK) {
			if (i > 0) {
				ib_dma_unmap_sg(dev, sg, nents,
						DMA_BIDIRECTIONAL);
				return -EINVAL;
			} else {
				++page_cnt;
			}
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1) {
				ib_dma_unmap_sg(dev, sg, nents,
						DMA_BIDIRECTIONAL);
				return -EINVAL;
			} else {
				++page_cnt;
			}
		}

		len += dma_len;
	}

	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > ibmr->pool->fmr_attr.max_pages) {
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		return -EINVAL;
	}

	dma_pages = kmalloc_array_node(page_cnt, sizeof(u64), GFP_ATOMIC,
				       rdsibdev_to_node(rds_ibdev));
	if (!dma_pages) {
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		return -ENOMEM;
	}

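	/* Flatten the scatterlist into an array of page-aligned DMA addresses */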
	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = sg_dma_len(&scat[i]);
		u64 dma_addr = sg_dma_address(&scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr);
	if (ret) {
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		goto out;
	}

	/* Success: the MR is remapped, so we can safely tear down
	 * the old mapping.
	 */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_used);
	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}

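/*
 * Register @sg for RDMA: allocate an FMR-backed MR, map the pages,
 * and return through @key the rkey that the remote peer should use.
 */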
struct rds_ib_mr *rds_ib_reg_fmr(struct rds_ib_device *rds_ibdev,
				 struct scatterlist *sg,
				 unsigned long nents,
				 u32 *key)
{
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_fmr *fmr;
	int ret;

	ibmr = rds_ib_alloc_fmr(rds_ibdev, nents);
	if (IS_ERR(ibmr))
		return ibmr;

	ibmr->device = rds_ibdev;
	fmr = &ibmr->u.fmr;
	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
	if (ret == 0) {
		*key = fmr->fmr->rkey;
	} else {
		/* Mapping failed: release the MR back to its pool and
		 * report the error instead of handing out an unmapped MR.
		 */
		rds_ib_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}

	return ibmr;
}

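/*
 * Invalidate a batch of FMRs with a single ib_unmap_fmr() call, unpin
 * their pages, and destroy MRs until @goal is met (MRs that have hit
 * their remap limit are destroyed unconditionally).
 */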
void rds_ib_unreg_fmr(struct list_head *list, unsigned int *nfreed,
		      unsigned long *unpinned, unsigned int goal)
{
	struct rds_ib_mr *ibmr, *next;
	struct rds_ib_fmr *fmr;
	LIST_HEAD(fmr_list);
	int ret = 0;
	unsigned int freed = *nfreed;

	/* String all ib_mrs onto one list and hand them to ib_unmap_fmr */
	list_for_each_entry(ibmr, list, unmap_list) {
		fmr = &ibmr->u.fmr;
		list_add(&fmr->fmr->list, &fmr_list);
	}

	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		pr_warn("RDS/IB: FMR invalidation failed (err=%d)\n", ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, list, unmap_list) {
		fmr = &ibmr->u.fmr;
		*unpinned += ibmr->sg_len;
		__rds_ib_teardown_mr(ibmr);
		if (freed < goal ||
		    ibmr->remap_count >= ibmr->pool->fmr_attr.max_maps) {
			if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_free);
			list_del(&ibmr->unmap_list);
			ib_dealloc_fmr(fmr->fmr);
			kfree(ibmr);
			freed++;
		}
	}
	*nfreed = freed;
}

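/*
 * Return an MR to its pool: once it has been remapped max_maps times
 * it goes on the drop list to be destroyed, otherwise on the free
 * list for reuse.
 */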
void rds_ib_free_fmr_list(struct rds_ib_mr *ibmr)
{
	struct rds_ib_mr_pool *pool = ibmr->pool;

	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);
}