/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/kthread.h>

#include <rdma/ib_fmr_pool.h>

#include "core_priv.h"

#define PFX "fmr_pool: "

enum {
	IB_FMR_MAX_REMAPS = 32,

	IB_FMR_HASH_BITS  = 8,
	IB_FMR_HASH_SIZE  = 1 << IB_FMR_HASH_BITS,
	IB_FMR_HASH_MASK  = IB_FMR_HASH_SIZE - 1
};
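
/*
 * Note: IB_FMR_MAX_REMAPS is a conservative default used only when the
 * device does not report attrs.max_map_per_fmr (see
 * ib_create_fmr_pool() below); the value itself is a heuristic.
 */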

/*
 * If an FMR is not in use, then the list member will point to either
 * its pool's free_list (if the FMR can be mapped again; that is,
 * remap_count < pool->max_remaps) or its pool's dirty_list (if the
 * FMR needs to be unmapped before being remapped).  In either of
 * these cases it is a bug if the ref_count is not 0.  In other words,
 * if ref_count is > 0, then the list member must not be linked into
 * either free_list or dirty_list.
 *
 * The cache_node member is used to link the FMR into a cache bucket
 * (if caching is enabled).  This is independent of the reference
 * count of the FMR.  When a valid FMR is released, its ref_count is
 * decremented, and if ref_count reaches 0, the FMR is placed in
 * either free_list or dirty_list as appropriate.  However, it is not
 * removed from the cache and may be "revived" if a call to
 * ib_fmr_pool_map_phys() occurs before the FMR is remapped.  In
 * this case we just increment the ref_count and remove the FMR from
 * free_list/dirty_list.
 *
 * Before we remap an FMR from free_list, we remove it from the cache
 * (to prevent another user from obtaining a stale FMR).  When an FMR
 * is released, we add it to the tail of the free list, so that our
 * cache eviction policy is "least recently used."
 *
 * All manipulation of ref_count, list and cache_node is protected by
 * pool_lock to maintain consistency.
 */

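/*
 * In short, each pool FMR moves through these states (a summary of
 * the rules above):
 *
 *	free_list  --ib_fmr_pool_map_phys()-->  in use (ref_count > 0)
 *	in use     --ib_fmr_pool_unmap()----->  free_list
 *						(remap_count < max_remaps)
 *	in use     --ib_fmr_pool_unmap()----->  dirty_list
 *						(remap_count == max_remaps)
 *	dirty_list --ib_fmr_batch_release()-->  free_list (remap_count = 0)
 */
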
struct ib_fmr_pool {
	spinlock_t                pool_lock;	/* protects ref_count, list, cache_node */

	int                       pool_size;	/* number of FMRs allocated */
	int                       max_pages;	/* max pages per mapping */
	int			  max_remaps;	/* remaps allowed before unmap */
	int                       dirty_watermark;
	int                       dirty_len;	/* current dirty_list length */
	struct list_head          free_list;	/* unused, mappable FMRs */
	struct list_head          dirty_list;	/* FMRs awaiting ib_unmap_fmr() */
	struct hlist_head        *cache_bucket;	/* hash table; NULL if no cache */

	void                     (*flush_function)(struct ib_fmr_pool *pool,
						   void *              arg);
	void                     *flush_arg;

	struct kthread_worker	  *worker;	/* runs ib_fmr_cleanup_func() */
	struct kthread_work	  work;

	atomic_t                  req_ser;	/* flushes requested */
	atomic_t                  flush_ser;	/* flushes completed */

	wait_queue_head_t         force_wait;	/* woken when flush_ser advances */
};

/* Hash the first page address of a mapping into a cache bucket index. */
static inline u32 ib_fmr_hash(u64 first_page)
{
	return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
		IB_FMR_HASH_MASK;
}

/* Caller must hold pool_lock */
static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
						      u64 *page_list,
						      int  page_list_len,
						      u64  io_virtual_address)
{
	struct hlist_head *bucket;
	struct ib_pool_fmr *fmr;

	if (!pool->cache_bucket)
		return NULL;

	bucket = pool->cache_bucket + ib_fmr_hash(*page_list);

	hlist_for_each_entry(fmr, bucket, cache_node)
		if (io_virtual_address == fmr->io_virtual_address &&
		    page_list_len      == fmr->page_list_len      &&
		    !memcmp(page_list, fmr->page_list,
			    page_list_len * sizeof *page_list))
			return fmr;

	return NULL;
}

static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
{
	int                 ret;
	struct ib_pool_fmr *fmr;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);

	spin_lock_irq(&pool->pool_lock);

	/*
	 * Collect the dirty FMRs on two lists: fmr_list links the
	 * underlying struct ib_fmr entries for ib_unmap_fmr(), while
	 * unmap_list (spliced below) holds the pool wrappers so they
	 * can be returned to free_list afterwards.
	 */
	list_for_each_entry(fmr, &pool->dirty_list, list) {
		hlist_del_init(&fmr->cache_node);
		fmr->remap_count = 0;
		list_add_tail(&fmr->fmr->list, &fmr_list);
	}

	list_splice_init(&pool->dirty_list, &unmap_list);
	pool->dirty_len = 0;

	spin_unlock_irq(&pool->pool_lock);

	if (list_empty(&unmap_list))
		return;

	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		pr_warn(PFX "ib_unmap_fmr returned %d\n", ret);

	spin_lock_irq(&pool->pool_lock);
	list_splice(&unmap_list, &pool->free_list);
	spin_unlock_irq(&pool->pool_lock);
}

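/*
 * Flush bookkeeping: req_ser counts flush requests and flush_ser
 * counts completed flushes.  The cleanup worker re-queues itself while
 * flush_ser lags req_ser (compared via subtraction, so the counters
 * may safely wrap), and ib_flush_fmr_pool() sleeps until flush_ser
 * reaches the serial number it drew.
 */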
static void ib_fmr_cleanup_func(struct kthread_work *work)
{
	struct ib_fmr_pool *pool = container_of(work, struct ib_fmr_pool, work);

	ib_fmr_batch_release(pool);
	atomic_inc(&pool->flush_ser);
	wake_up_interruptible(&pool->force_wait);

	if (pool->flush_function)
		pool->flush_function(pool, pool->flush_arg);

	if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0)
		kthread_queue_work(pool->worker, &pool->work);
}

/**
 * ib_create_fmr_pool - Create an FMR pool
 * @pd: Protection domain for FMRs
 * @params: FMR pool parameters
 *
 * Create a pool of FMRs.  Return value is a pointer to the new pool on
 * success, or an ERR_PTR-encoded error code if creation failed.
 */
struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
				       struct ib_fmr_pool_param *params)
{
	struct ib_device   *device;
	struct ib_fmr_pool *pool;
	int i;
	int ret;
	int max_remaps;

	if (!params)
		return ERR_PTR(-EINVAL);

	device = pd->device;
	if (!device->ops.alloc_fmr    || !device->ops.dealloc_fmr  ||
	    !device->ops.map_phys_fmr || !device->ops.unmap_fmr) {
		dev_info(&device->dev, "Device does not support FMRs\n");
		return ERR_PTR(-ENOSYS);
	}

	if (!device->attrs.max_map_per_fmr)
		max_remaps = IB_FMR_MAX_REMAPS;
	else
		max_remaps = device->attrs.max_map_per_fmr;

	pool = kmalloc(sizeof *pool, GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->cache_bucket   = NULL;
	pool->flush_function = params->flush_function;
	pool->flush_arg      = params->flush_arg;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->dirty_list);

	if (params->cache) {
		pool->cache_bucket =
			kmalloc_array(IB_FMR_HASH_SIZE,
				      sizeof(*pool->cache_bucket),
				      GFP_KERNEL);
		if (!pool->cache_bucket) {
			ret = -ENOMEM;
			goto out_free_pool;
		}

		for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
			INIT_HLIST_HEAD(pool->cache_bucket + i);
	}

	pool->pool_size       = 0;
	pool->max_pages       = params->max_pages_per_fmr;
	pool->max_remaps      = max_remaps;
	pool->dirty_watermark = params->dirty_watermark;
	pool->dirty_len       = 0;
	spin_lock_init(&pool->pool_lock);
	atomic_set(&pool->req_ser,   0);
	atomic_set(&pool->flush_ser, 0);
	init_waitqueue_head(&pool->force_wait);

	pool->worker =
		kthread_create_worker(0, "ib_fmr(%s)", dev_name(&device->dev));
	if (IS_ERR(pool->worker)) {
		pr_warn(PFX "couldn't start cleanup kthread worker\n");
		ret = PTR_ERR(pool->worker);
		goto out_free_pool;
	}
	kthread_init_work(&pool->work, ib_fmr_cleanup_func);

	{
		struct ib_pool_fmr *fmr;
		struct ib_fmr_attr fmr_attr = {
			.max_pages  = params->max_pages_per_fmr,
			.max_maps   = pool->max_remaps,
			.page_shift = params->page_shift
		};
		int bytes_per_fmr = sizeof *fmr;

		/* When caching, the page list is stored inline after the FMR. */
		if (pool->cache_bucket)
			bytes_per_fmr += params->max_pages_per_fmr * sizeof(u64);

		for (i = 0; i < params->pool_size; ++i) {
			fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
			if (!fmr)
				goto out_fail;

			fmr->pool             = pool;
			fmr->remap_count      = 0;
			fmr->ref_count        = 0;
			INIT_HLIST_NODE(&fmr->cache_node);

			fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
			if (IS_ERR(fmr->fmr)) {
				pr_warn(PFX "fmr_create failed for FMR %d\n",
					i);
				kfree(fmr);
				goto out_fail;
			}

			list_add_tail(&fmr->list, &pool->free_list);
			++pool->pool_size;
		}
	}

	return pool;

 out_free_pool:
	kfree(pool->cache_bucket);
	kfree(pool);

	return ERR_PTR(ret);

 out_fail:
	/* Frees any FMRs allocated so far, the worker and the pool itself. */
	ib_destroy_fmr_pool(pool);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_fmr_pool);
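
/*
 * Example (illustrative only, not taken from a real consumer): a ULP
 * might create and later destroy a pool roughly like this; the
 * parameter values here are arbitrary.
 *
 *	struct ib_fmr_pool_param params = {
 *		.max_pages_per_fmr = 64,
 *		.page_shift        = PAGE_SHIFT,
 *		.access            = IB_ACCESS_LOCAL_WRITE |
 *				     IB_ACCESS_REMOTE_WRITE,
 *		.pool_size         = 32,
 *		.dirty_watermark   = 8,
 *		.cache             = 1,
 *	};
 *	struct ib_fmr_pool *pool;
 *
 *	pool = ib_create_fmr_pool(pd, &params);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *	...
 *	ib_destroy_fmr_pool(pool);
 */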

/**
 * ib_destroy_fmr_pool - Free FMR pool
 * @pool: FMR pool to free
 *
 * Destroy an FMR pool and free all associated resources.
 */
void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
{
	struct ib_pool_fmr *fmr;
	struct ib_pool_fmr *tmp;
	LIST_HEAD(fmr_list);
	int                 i;

	/* Stop the cleanup worker first, then reap any remaining dirty FMRs. */
	kthread_destroy_worker(pool->worker);
	ib_fmr_batch_release(pool);

	i = 0;
	list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
		if (fmr->remap_count) {
			INIT_LIST_HEAD(&fmr_list);
			list_add_tail(&fmr->fmr->list, &fmr_list);
			ib_unmap_fmr(&fmr_list);
		}
		ib_dealloc_fmr(fmr->fmr);
		list_del(&fmr->list);
		kfree(fmr);
		++i;
	}

	if (i < pool->pool_size)
		pr_warn(PFX "pool still has %d regions registered\n",
			pool->pool_size - i);

	kfree(pool->cache_bucket);
	kfree(pool);
}
EXPORT_SYMBOL(ib_destroy_fmr_pool);

/**
 * ib_flush_fmr_pool - Invalidate all unmapped FMRs
 * @pool: FMR pool to flush
 *
 * Ensure that all unmapped FMRs are fully invalidated.
 */
int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{
	int serial;
	struct ib_pool_fmr *fmr, *next;

	/*
	 * The free_list holds FMRs that may have been used
	 * but have not been remapped enough times to be dirty.
	 * Put them on the dirty list now so that the cleanup
	 * thread will reap them too.
	 */
	spin_lock_irq(&pool->pool_lock);
	list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
		if (fmr->remap_count > 0)
			list_move(&fmr->list, &pool->dirty_list);
	}
	spin_unlock_irq(&pool->pool_lock);

	serial = atomic_inc_return(&pool->req_ser);
	kthread_queue_work(pool->worker, &pool->work);

	if (wait_event_interruptible(pool->force_wait,
				     atomic_read(&pool->flush_ser) - serial >= 0))
		return -EINTR;

	return 0;
}
EXPORT_SYMBOL(ib_flush_fmr_pool);
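
/*
 * Example (illustrative only): a consumer that must guarantee stale
 * mappings are invalidated now, rather than whenever the FMRs are
 * reused, can force a flush:
 *
 *	int err = ib_flush_fmr_pool(pool);
 *	if (err)
 *		return err;	(-EINTR if interrupted by a signal)
 */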

/**
 * ib_fmr_pool_map_phys - Map an FMR from an FMR pool.
 * @pool_handle: FMR pool to allocate FMR from
 * @page_list: List of pages to map
 * @list_len: Number of pages in @page_list
 * @io_virtual_address: I/O virtual address for new FMR
 *
 * Return: the pooled FMR on success, or an ERR_PTR-encoded error code
 * on failure (-EAGAIN if no free FMR is available).
 */
struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
					 u64                *page_list,
					 int                 list_len,
					 u64                 io_virtual_address)
{
	struct ib_fmr_pool *pool = pool_handle;
	struct ib_pool_fmr *fmr;
	unsigned long       flags;
	int                 result;

	if (list_len < 1 || list_len > pool->max_pages)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&pool->pool_lock, flags);
	fmr = ib_fmr_cache_lookup(pool,
				  page_list,
				  list_len,
				  io_virtual_address);
	if (fmr) {
		/* Found in cache: revive the FMR without remapping it. */
		++fmr->ref_count;
		if (fmr->ref_count == 1)
			list_del(&fmr->list);

		spin_unlock_irqrestore(&pool->pool_lock, flags);

		return fmr;
	}

	if (list_empty(&pool->free_list)) {
		spin_unlock_irqrestore(&pool->pool_lock, flags);
		return ERR_PTR(-EAGAIN);
	}

	fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
	list_del(&fmr->list);
	hlist_del_init(&fmr->cache_node);
	spin_unlock_irqrestore(&pool->pool_lock, flags);

	result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
				 io_virtual_address);

	if (result) {
		spin_lock_irqsave(&pool->pool_lock, flags);
		list_add(&fmr->list, &pool->free_list);
		spin_unlock_irqrestore(&pool->pool_lock, flags);

		pr_warn(PFX "fmr_map returns %d\n", result);

		return ERR_PTR(result);
	}

	++fmr->remap_count;
	fmr->ref_count = 1;

	if (pool->cache_bucket) {
		fmr->io_virtual_address = io_virtual_address;
		fmr->page_list_len      = list_len;
		memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));

		spin_lock_irqsave(&pool->pool_lock, flags);
		hlist_add_head(&fmr->cache_node,
			       pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
		spin_unlock_irqrestore(&pool->pool_lock, flags);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_fmr_pool_map_phys);
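
/*
 * Example (illustrative only): mapping a DMA page list and releasing
 * the FMR when done; dma_pages, npages and iova are assumed to be
 * prepared by the caller.
 *
 *	struct ib_pool_fmr *pfmr;
 *
 *	pfmr = ib_fmr_pool_map_phys(pool, dma_pages, npages, iova);
 *	if (IS_ERR(pfmr))
 *		return PTR_ERR(pfmr);
 *
 *	... post work requests using pfmr->fmr->lkey / rkey ...
 *
 *	ib_fmr_pool_unmap(pfmr);
 */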

/**
 * ib_fmr_pool_unmap - Unmap FMR
 * @fmr: FMR to unmap
 *
 * Unmap an FMR.  The FMR mapping may remain valid until the FMR is
 * reused (or until ib_flush_fmr_pool() is called).
 */
void ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
{
	struct ib_fmr_pool *pool;
	unsigned long flags;

	pool = fmr->pool;

	spin_lock_irqsave(&pool->pool_lock, flags);

	--fmr->ref_count;
	if (!fmr->ref_count) {
		if (fmr->remap_count < pool->max_remaps) {
			list_add_tail(&fmr->list, &pool->free_list);
		} else {
			list_add_tail(&fmr->list, &pool->dirty_list);
			/* Enough dirty FMRs piled up: kick the cleanup worker. */
			if (++pool->dirty_len >= pool->dirty_watermark) {
				atomic_inc(&pool->req_ser);
				kthread_queue_work(pool->worker, &pool->work);
			}
		}
	}

	spin_unlock_irqrestore(&pool->pool_lock, flags);
}
EXPORT_SYMBOL(ib_fmr_pool_unmap);