/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>

#include "ib_mr.h"

struct workqueue_struct *rds_ib_mr_wq;

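/* Per-CPU flag marking CPUs that are currently popping an MR off of a
 * pool's clean_list in rds_ib_reuse_mr().  The pool flusher waits for all
 * of these flags to clear before re-adding nodes to the clean_list, so
 * llist_del_first() never races with a concurrent re-add.
 */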
static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0

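/* Find the rds_ib_device whose ipaddr_list contains @ipaddr.  On success a
 * reference is taken on the device; the caller must drop it with
 * rds_ib_dev_put().
 */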
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	rcu_read_lock();
	list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				atomic_inc(&rds_ibdev->refcount);
				rcu_read_unlock();
				return rds_ibdev;
			}
		}
	}
	rcu_read_unlock();

	return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;
	struct rds_ib_ipaddr *to_free = NULL;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del_rcu(&i_ipaddr->list);
			to_free = i_ipaddr;
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);

	if (to_free)
		kfree_rcu(to_free, rcu);
}

int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr);
	if (!rds_ibdev_old)
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr);

	if (rds_ibdev_old != rds_ibdev) {
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
		rds_ib_dev_put(rds_ibdev_old);
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
	}
	rds_ib_dev_put(rds_ibdev_old);

	return 0;
}

void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
	atomic_inc(&rds_ibdev->refcount);
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_destroy_nodev_conns(void)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(&ib_nodev_conns_lock);
	list_splice(&ib_nodev_conns, &tmp_list);
	spin_unlock_irq(&ib_nodev_conns_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

	iinfo->rdma_mr_max = pool_1m->max_items;
	iinfo->rdma_mr_size = pool_1m->fmr_attr.max_pages;
}

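/* Try to grab an MR off of the pool's clean_list for immediate reuse.  The
 * per-CPU CLEAN_LIST_BUSY_BIT is held across llist_del_first() so that
 * rds_ib_flush_mr_pool() can wait out concurrent consumers before it puts
 * entries back on the clean_list.
 */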
struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	struct llist_node *ret;
	unsigned long *flag;

	preempt_disable();
	flag = this_cpu_ptr(&clean_list_grace);
	set_bit(CLEAN_LIST_BUSY_BIT, flag);
	ret = llist_del_first(&pool->clean_list);
	if (ret) {
		ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			rds_ib_stats_inc(s_ib_rdma_mr_8k_reused);
		else
			rds_ib_stats_inc(s_ib_rdma_mr_1m_reused);
	}

	clear_bit(CLEAN_LIST_BUSY_BIT, flag);
	preempt_enable();
	return ibmr;
}

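/* Spin until no CPU is inside the clean_list critical section above. */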
static inline void wait_clean_list_grace(void)
{
	int cpu;
	unsigned long *flag;

	for_each_online_cpu(cpu) {
		flag = &per_cpu(clean_list_grace, cpu);
		while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
			cpu_relax();
	}
}

void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			WARN_ON(!page->mapping && irqs_disabled());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_mr_pool *pool = ibmr->pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * given an llist of mrs, put them all into the list_head for more processing
 */
static unsigned int llist_append_to_list(struct llist_head *llist,
					 struct list_head *list)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *node;
	struct llist_node *next;
	unsigned int count = 0;

	node = llist_del_all(llist);
	while (node) {
		next = node->next;
		ibmr = llist_entry(node, struct rds_ib_mr, llnode);
		list_add_tail(&ibmr->unmap_list, list);
		node = next;
		count++;
	}
	return count;
}

/*
 * this takes a list head of mrs and turns it into linked llist nodes
 * of clusters. Each cluster has linked llist nodes of
 * MR_CLUSTER_SIZE mrs that are ready for reuse.
 */
static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
				struct list_head *list,
				struct llist_node **nodes_head,
				struct llist_node **nodes_tail)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *cur = NULL;
	struct llist_node **next = nodes_head;

	list_for_each_entry(ibmr, list, unmap_list) {
		cur = &ibmr->llnode;
		*next = cur;
		next = &cur->next;
	}
	*next = NULL;
	*nodes_tail = cur;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
			 int free_all, struct rds_ib_mr **ibmr_ret)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *clean_nodes;
	struct llist_node *clean_tail;
	LIST_HEAD(unmap_list);
	unsigned long unpinned = 0;
	unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;

	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_flush);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_flush);

	if (ibmr_ret) {
		DEFINE_WAIT(wait);
		while (!mutex_trylock(&pool->flush_lock)) {
			ibmr = rds_ib_reuse_mr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}

			prepare_to_wait(&pool->flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (llist_empty(&pool->clean_list))
				schedule();

			ibmr = rds_ib_reuse_mr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}
		}
		finish_wait(&pool->flush_wait, &wait);
	} else
		mutex_lock(&pool->flush_lock);

	if (ibmr_ret) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr) {
			*ibmr_ret = ibmr;
			goto out;
		}
	}

	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list.
	 */
	dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
	dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
	if (free_all)
		llist_append_to_list(&pool->clean_list, &unmap_list);

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	if (pool->use_fastreg)
		rds_ib_unreg_frmr(&unmap_list, &nfreed, &unpinned, free_goal);
	else
		rds_ib_unreg_fmr(&unmap_list, &nfreed, &unpinned, free_goal);

	if (!list_empty(&unmap_list)) {
		/* we have to make sure that none of the things we're about
		 * to put on the clean list would race with other cpus trying
		 * to pull items off. The llist would explode if we managed to
		 * remove something from the clean list and then add it back again
		 * while another CPU was spinning on that same item in llist_del_first.
		 *
		 * This is pretty unlikely, but just in case wait for an llist grace period
		 * here before adding anything back into the clean list.
		 */
		wait_clean_list_grace();

		list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
		if (ibmr_ret)
			*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);

		/* more than one entry in llist nodes */
		if (clean_nodes->next)
			llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);

	}

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(dirty_to_clean, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	if (waitqueue_active(&pool->flush_wait))
		wake_up(&pool->flush_wait);
out_nolock:
	return 0;
}

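/* Grab an MR for a new mapping: reuse a clean one if possible, otherwise
 * claim a new allocation slot.  If the pool is over its limit, flush it
 * (up to two retries) and return ERR_PTR(-EAGAIN) once truly depleted.
 */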
struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	int iter = 0;

	if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

	while (1) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr)
			return ibmr;

		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			if (pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
		else
			rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);

		rds_ib_flush_mr_pool(pool, 0, &ibmr);
		if (ibmr)
			return ibmr;
	}

	return ibmr;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

	rds_ib_flush_mr_pool(pool, 0, NULL);
}

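/* Called when the application releases an MR: hand it back to its pool's
 * free or drop list and kick the flush worker if we are holding too many
 * pinned pages or dirty MRs.
 */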
void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_mr_pool *pool = ibmr->pool;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	/* Return it to the pool's free list */
	if (rds_ibdev->use_fastreg)
		rds_ib_free_frmr_list(ibmr);
	else
		rds_ib_free_fmr_list(ibmr);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0, NULL);
		} else {
			/* We get here if the user created a MR marked
			 * as use_once and invalidate at the same time.
			 */
			queue_delayed_work(rds_ib_mr_wq,
					   &pool->flush_worker, 10);
		}
	}

	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	down_read(&rds_ib_devices_lock);
	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		if (rds_ibdev->mr_8k_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_8k_pool, 0, NULL);

		if (rds_ibdev->mr_1m_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_1m_pool, 0, NULL);
	}
	up_read(&rds_ib_devices_lock);
}

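/* Map a userspace scatterlist into an MR for RDMA.  Looks up the device
 * from the socket's bound address and registers via the fast-registration
 * (FRMR) or FMR path depending on device support.  The returned handle is
 * used as trans_private by the RDS rdma code.
 */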
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_connection *ic = rs->rs_conn->c_transport_data;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
		ret = -ENODEV;
		goto out;
	}

	if (rds_ibdev->use_fastreg)
		ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
	else
		ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret);
	if (ibmr)
		rds_ibdev = NULL;

 out:
	if (!ibmr)
		pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret);

	if (rds_ibdev)
		rds_ib_dev_put(rds_ibdev);

	return ibmr;
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	cancel_delayed_work_sync(&pool->flush_worker);
	rds_ib_flush_mr_pool(pool, 1, NULL);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
					     int pool_type)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->pool_type = pool_type;
	init_llist_head(&pool->free_list);
	init_llist_head(&pool->drop_list);
	init_llist_head(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	init_waitqueue_head(&pool->flush_wait);
	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	if (pool_type == RDS_IB_MR_1M_POOL) {
		/* +1 allows for unaligned MRs */
		pool->fmr_attr.max_pages = RDS_MR_1M_MSG_SIZE + 1;
		pool->max_items = RDS_MR_1M_POOL_SIZE;
	} else {
		/* pool_type == RDS_IB_MR_8K_POOL */
		pool->fmr_attr.max_pages = RDS_MR_8K_MSG_SIZE + 1;
		pool->max_items = RDS_MR_8K_POOL_SIZE;
	}

	pool->max_free_pinned = pool->max_items * pool->fmr_attr.max_pages / 4;
	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
	pool->fmr_attr.page_shift = PAGE_SHIFT;
	pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;
	pool->use_fastreg = rds_ibdev->use_fastreg;

	return pool;
}

int rds_ib_mr_init(void)
{
	rds_ib_mr_wq = create_workqueue("rds_mr_flushd");
	if (!rds_ib_mr_wq)
		return -ENOMEM;
	return 0;
}

/* By the time this is called all the IB devices should have been torn down and
 * had their pools freed. As each pool is freed its work struct is waited on,
 * so the pool flushing work queue should be idle by the time we get here.
 */
void rds_ib_mr_exit(void)
{
	destroy_workqueue(rds_ib_mr_wq);
}

/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>

#include "rds_single_path.h"
#include "ib_mr.h"
#include "rds.h"

struct workqueue_struct *rds_ib_mr_wq;

static void rds_ib_odp_mr_worker(struct work_struct *work);

static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	rcu_read_lock();
	list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				refcount_inc(&rds_ibdev->refcount);
				rcu_read_unlock();
				return rds_ibdev;
			}
		}
	}
	rcu_read_unlock();

	return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;
	struct rds_ib_ipaddr *to_free = NULL;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del_rcu(&i_ipaddr->list);
			to_free = i_ipaddr;
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);

	if (to_free)
		kfree_rcu(to_free, rcu);
}

int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
			 struct in6_addr *ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr->s6_addr32[3]);
	if (!rds_ibdev_old)
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);

	if (rds_ibdev_old != rds_ibdev) {
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr->s6_addr32[3]);
		rds_ib_dev_put(rds_ibdev_old);
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);
	}
	rds_ib_dev_put(rds_ibdev_old);

	return 0;
}

void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
	refcount_inc(&rds_ibdev->refcount);
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_destroy_nodev_conns(void)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(&ib_nodev_conns_lock);
	list_splice(&ib_nodev_conns, &tmp_list);
	spin_unlock_irq(&ib_nodev_conns_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

	iinfo->rdma_mr_max = pool_1m->max_items;
	iinfo->rdma_mr_size = pool_1m->max_pages;
}

#if IS_ENABLED(CONFIG_IPV6)
void rds6_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
			 struct rds6_info_rdma_connection *iinfo6)
{
	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

	iinfo6->rdma_mr_max = pool_1m->max_items;
	iinfo6->rdma_mr_size = pool_1m->max_pages;
}
#endif

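/* Pop one MR off of the pool's clean_list.  Callers of llist_del_first()
 * must be serialized, so clean_lock protects the consumer side here; the
 * flush path takes the same lock when it drains or refills the clean_list.
 */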
struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	struct llist_node *ret;
	unsigned long flags;

	spin_lock_irqsave(&pool->clean_lock, flags);
	ret = llist_del_first(&pool->clean_list);
	spin_unlock_irqrestore(&pool->clean_lock, flags);
	if (ret) {
		ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			rds_ib_stats_inc(s_ib_rdma_mr_8k_reused);
		else
			rds_ib_stats_inc(s_ib_rdma_mr_1m_reused);
	}

	return ibmr;
}

void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->odp)
		return;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			WARN_ON(!page->mapping && irqs_disabled());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_mr_pool *pool = ibmr->pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * given an llist of mrs, put them all into the list_head for more processing
 */
static unsigned int llist_append_to_list(struct llist_head *llist,
					 struct list_head *list)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *node;
	struct llist_node *next;
	unsigned int count = 0;

	node = llist_del_all(llist);
	while (node) {
		next = node->next;
		ibmr = llist_entry(node, struct rds_ib_mr, llnode);
		list_add_tail(&ibmr->unmap_list, list);
		node = next;
		count++;
	}
	return count;
}

/*
 * this takes a list head of mrs and turns it into linked llist nodes
 * of clusters. Each cluster has linked llist nodes of
 * MR_CLUSTER_SIZE mrs that are ready for reuse.
 */
static void list_to_llist_nodes(struct list_head *list,
				struct llist_node **nodes_head,
				struct llist_node **nodes_tail)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *cur = NULL;
	struct llist_node **next = nodes_head;

	list_for_each_entry(ibmr, list, unmap_list) {
		cur = &ibmr->llnode;
		*next = cur;
		next = &cur->next;
	}
	*next = NULL;
	*nodes_tail = cur;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
			 int free_all, struct rds_ib_mr **ibmr_ret)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *clean_nodes;
	struct llist_node *clean_tail;
	LIST_HEAD(unmap_list);
	unsigned long unpinned = 0;
	unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;

	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_flush);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_flush);

	if (ibmr_ret) {
		DEFINE_WAIT(wait);
		while (!mutex_trylock(&pool->flush_lock)) {
			ibmr = rds_ib_reuse_mr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}

			prepare_to_wait(&pool->flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (llist_empty(&pool->clean_list))
				schedule();

			ibmr = rds_ib_reuse_mr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}
		}
		finish_wait(&pool->flush_wait, &wait);
	} else
		mutex_lock(&pool->flush_lock);

	if (ibmr_ret) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr) {
			*ibmr_ret = ibmr;
			goto out;
		}
	}

	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list.
	 */
	dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
	dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
	if (free_all) {
		unsigned long flags;

		spin_lock_irqsave(&pool->clean_lock, flags);
		llist_append_to_list(&pool->clean_list, &unmap_list);
		spin_unlock_irqrestore(&pool->clean_lock, flags);
	}

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	rds_ib_unreg_frmr(&unmap_list, &nfreed, &unpinned, free_goal);

	if (!list_empty(&unmap_list)) {
		unsigned long flags;

		list_to_llist_nodes(&unmap_list, &clean_nodes, &clean_tail);
		if (ibmr_ret) {
			*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
			clean_nodes = clean_nodes->next;
		}
		/* more than one entry in llist nodes */
		if (clean_nodes) {
			spin_lock_irqsave(&pool->clean_lock, flags);
			llist_add_batch(clean_nodes, clean_tail,
					&pool->clean_list);
			spin_unlock_irqrestore(&pool->clean_lock, flags);
		}
	}

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(dirty_to_clean, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	if (waitqueue_active(&pool->flush_wait))
		wake_up(&pool->flush_wait);
out_nolock:
	return 0;
}

struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	int iter = 0;

	while (1) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr)
			return ibmr;

		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			if (pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
			break;
		}

		/* We do have some empty MRs. Flush them out. */
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
		else
			rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);

		rds_ib_flush_mr_pool(pool, 0, &ibmr);
		if (ibmr)
			return ibmr;
	}

	return NULL;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

	rds_ib_flush_mr_pool(pool, 0, NULL);
}

void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_mr_pool *pool = ibmr->pool;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	if (ibmr->odp) {
		/* An MR created and marked as use_once. We use delayed work,
		 * because there is a chance that we are in interrupt and can't
		 * call ib_dereg_mr() directly.
		 */
		INIT_DELAYED_WORK(&ibmr->work, rds_ib_odp_mr_worker);
		queue_delayed_work(rds_ib_mr_wq, &ibmr->work, 0);
		return;
	}

	/* Return it to the pool's free list */
	rds_ib_free_frmr_list(ibmr);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0, NULL);
		} else {
			/* We get here if the user created an MR marked
			 * as use_once and invalidate at the same time.
			 */
			queue_delayed_work(rds_ib_mr_wq,
					   &pool->flush_worker, 10);
		}
	}

	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	down_read(&rds_ib_devices_lock);
	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		if (rds_ibdev->mr_8k_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_8k_pool, 0, NULL);

		if (rds_ibdev->mr_1m_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_1m_pool, 0, NULL);
	}
	up_read(&rds_ib_devices_lock);
}

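/* Return the local key (lkey) of the MR's underlying verbs MR. */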
u32 rds_ib_get_lkey(void *trans_private)
{
	struct rds_ib_mr *ibmr = trans_private;

	return ibmr->u.mr->lkey;
}

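/* Register a memory region for socket @rs.  ODP_ZEROBASED/ODP_VIRTUAL
 * requests take the on-demand-paging path via ib_reg_user_mr(); everything
 * else goes through the FRWR pools.  Returns the MR handle used as
 * trans_private, or an ERR_PTR() on failure.
 */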
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret,
		    struct rds_connection *conn,
		    u64 start, u64 length, int need_odp)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_connection *ic = NULL;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr.s6_addr32[3]);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (need_odp == ODP_ZEROBASED || need_odp == ODP_VIRTUAL) {
		u64 virt_addr = need_odp == ODP_ZEROBASED ? 0 : start;
		int access_flags =
			(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC |
			 IB_ACCESS_ON_DEMAND);
		struct ib_sge sge = {};
		struct ib_mr *ib_mr;

		if (!rds_ibdev->odp_capable) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		ib_mr = ib_reg_user_mr(rds_ibdev->pd, start, length, virt_addr,
				       access_flags);

		if (IS_ERR(ib_mr)) {
			rdsdebug("rds_ib_get_user_mr returned %ld\n",
				 PTR_ERR(ib_mr));
			ret = PTR_ERR(ib_mr);
			goto out;
		}
		if (key_ret)
			*key_ret = ib_mr->rkey;

		ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
		if (!ibmr) {
			ib_dereg_mr(ib_mr);
			ret = -ENOMEM;
			goto out;
		}
		ibmr->u.mr = ib_mr;
		ibmr->odp = 1;

		sge.addr = virt_addr;
		sge.length = length;
		sge.lkey = ib_mr->lkey;

		ib_advise_mr(rds_ibdev->pd,
			     IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE,
			     IB_UVERBS_ADVISE_MR_FLAG_FLUSH, &sge, 1);
		return ibmr;
	}

	if (conn)
		ic = conn->c_transport_data;

	if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
	if (IS_ERR(ibmr)) {
		ret = PTR_ERR(ibmr);
		pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret);
	} else {
		return ibmr;
	}

 out:
	if (rds_ibdev)
		rds_ib_dev_put(rds_ibdev);

	return ERR_PTR(ret);
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	cancel_delayed_work_sync(&pool->flush_worker);
	rds_ib_flush_mr_pool(pool, 1, NULL);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
					     int pool_type)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->pool_type = pool_type;
	init_llist_head(&pool->free_list);
	init_llist_head(&pool->drop_list);
	init_llist_head(&pool->clean_list);
	spin_lock_init(&pool->clean_lock);
	mutex_init(&pool->flush_lock);
	init_waitqueue_head(&pool->flush_wait);
	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	if (pool_type == RDS_IB_MR_1M_POOL) {
		/* +1 allows for unaligned MRs */
		pool->max_pages = RDS_MR_1M_MSG_SIZE + 1;
		pool->max_items = rds_ibdev->max_1m_mrs;
	} else {
		/* pool_type == RDS_IB_MR_8K_POOL */
		pool->max_pages = RDS_MR_8K_MSG_SIZE + 1;
		pool->max_items = rds_ibdev->max_8k_mrs;
	}

	pool->max_free_pinned = pool->max_items * pool->max_pages / 4;
	pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;

	return pool;
}

int rds_ib_mr_init(void)
{
	rds_ib_mr_wq = alloc_workqueue("rds_mr_flushd", WQ_MEM_RECLAIM, 0);
	if (!rds_ib_mr_wq)
		return -ENOMEM;
	return 0;
}

/* By the time this is called all the IB devices should have been torn down and
 * had their pools freed. As each pool is freed its work struct is waited on,
 * so the pool flushing work queue should be idle by the time we get here.
 */
void rds_ib_mr_exit(void)
{
	destroy_workqueue(rds_ib_mr_wq);
}

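/* Deferred teardown of an ODP MR, scheduled from rds_ib_free_mr() so that
 * the ib_dereg_mr() call happens in process context.
 */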
static void rds_ib_odp_mr_worker(struct work_struct *work)
{
	struct rds_ib_mr *ibmr;

	ibmr = container_of(work, struct rds_ib_mr, work.work);
	ib_dereg_mr(ibmr->u.mr);
	kfree(ibmr);
}