1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <rdma/ib_umem_odp.h>
34#include <linux/kernel.h>
35#include <linux/dma-buf.h>
36#include <linux/dma-resv.h>
37
38#include "mlx5_ib.h"
39#include "cmd.h"
40#include "umr.h"
41#include "qp.h"
42
43#include <linux/mlx5/eq.h>
44
45/* Contains the details of a pagefault. */
46struct mlx5_pagefault {
47 u32 bytes_committed;
48 u32 token;
49 u8 event_subtype;
50 u8 type;
51 union {
52 /* Initiator or send message responder pagefault details. */
53 struct {
54 /* Received packet size, only valid for responders. */
55 u32 packet_size;
56 /*
57 * Number of the resource holding the WQE; depends on type.
58 */
59 u32 wq_num;
60 /*
61 * WQE index. Refers to either the send queue or
62 * receive queue, according to event_subtype.
63 */
64 u16 wqe_index;
65 } wqe;
66 /* RDMA responder pagefault details */
67 struct {
68 u32 r_key;
69 /*
70 * Received packet size; this is the minimal amount that must be
71 * resolved for forward progress.
72 */
73 u32 packet_size;
74 u32 rdma_op_len;
75 u64 rdma_va;
76 } rdma;
77 };
78
79 struct mlx5_ib_pf_eq *eq;
80 struct work_struct work;
81};
82
83#define MAX_PREFETCH_LEN (4*1024*1024U)
84
85/* Timeout in ms to wait for an active mmu notifier to complete when handling
86 * a pagefault. */
87#define MMU_NOTIFIER_TIMEOUT 1000
88
89#define MLX5_IMR_MTT_BITS (30 - PAGE_SHIFT)
90#define MLX5_IMR_MTT_SHIFT (MLX5_IMR_MTT_BITS + PAGE_SHIFT)
91#define MLX5_IMR_MTT_ENTRIES BIT_ULL(MLX5_IMR_MTT_BITS)
92#define MLX5_IMR_MTT_SIZE BIT_ULL(MLX5_IMR_MTT_SHIFT)
93#define MLX5_IMR_MTT_MASK (~(MLX5_IMR_MTT_SIZE - 1))
94
95#define MLX5_KSM_PAGE_SHIFT MLX5_IMR_MTT_SHIFT
96
97static u64 mlx5_imr_ksm_entries;
98
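/*
 * Fill one range of an implicit MR's KSM/KLM table. Slots backed by a child
 * MTT MR point at that child's lkey; empty slots, and every slot when
 * zapping, are filled with the device's null mkey.
 */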
99static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
100 struct mlx5_ib_mr *imr, int flags)
101{
102 struct mlx5_klm *end = pklm + nentries;
103
104 if (flags & MLX5_IB_UPD_XLT_ZAP) {
105 for (; pklm != end; pklm++, idx++) {
106 pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
107 pklm->key = mr_to_mdev(imr)->mkeys.null_mkey;
108 pklm->va = 0;
109 }
110 return;
111 }
112
113 /*
114 * The locking here is pretty subtle. Ideally the implicit_children
115 * xarray would be protected by the umem_mutex, however that is not
116 * possible. Instead this uses a weaker update-then-lock pattern:
117 *
118 * xa_store()
119 * mutex_lock(umem_mutex)
120 * mlx5r_umr_update_xlt()
121 * mutex_unlock(umem_mutex)
122 * destroy lkey
123 *
124 * i.e. any change to the xarray must be followed by the locked update_xlt
125 * before destroying.
126 *
127 * The umem_mutex provides the acquire/release semantic needed to make
128 * the xa_store() visible to a racing thread.
129 */
130 lockdep_assert_held(&to_ib_umem_odp(imr->umem)->umem_mutex);
131
132 for (; pklm != end; pklm++, idx++) {
133 struct mlx5_ib_mr *mtt = xa_load(&imr->implicit_children, idx);
134
135 pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
136 if (mtt) {
137 pklm->key = cpu_to_be32(mtt->ibmr.lkey);
138 pklm->va = cpu_to_be64(idx * MLX5_IMR_MTT_SIZE);
139 } else {
140 pklm->key = mr_to_mdev(imr)->mkeys.null_mkey;
141 pklm->va = 0;
142 }
143 }
144}
145
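/*
 * Convert an ODP dma_list entry into an MTT entry, translating the ODP
 * read/write permission bits into the HW MTT access bits.
 */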
146static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
147{
148 u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
149
150 if (umem_dma & ODP_READ_ALLOWED_BIT)
151 mtt_entry |= MLX5_IB_MTT_READ;
152 if (umem_dma & ODP_WRITE_ALLOWED_BIT)
153 mtt_entry |= MLX5_IB_MTT_WRITE;
154
155 return mtt_entry;
156}
157
158static void populate_mtt(__be64 *pas, size_t idx, size_t nentries,
159 struct mlx5_ib_mr *mr, int flags)
160{
161 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
162 dma_addr_t pa;
163 size_t i;
164
165 if (flags & MLX5_IB_UPD_XLT_ZAP)
166 return;
167
168 for (i = 0; i < nentries; i++) {
169 pa = odp->dma_list[idx + i];
170 pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
171 }
172}
173
174void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
175 struct mlx5_ib_mr *mr, int flags)
176{
177 if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
178 populate_klm(xlt, idx, nentries, mr, flags);
179 } else {
180 populate_mtt(xlt, idx, nentries, mr, flags);
181 }
182}
183
184/*
185 * This must be called after the mr has been removed from implicit_children.
186 * NOTE: The MR does not necessarily have to be
187 * empty here, parallel page faults could have raced with the free process and
188 * added pages to it.
189 */
190static void free_implicit_child_mr_work(struct work_struct *work)
191{
192 struct mlx5_ib_mr *mr =
193 container_of(work, struct mlx5_ib_mr, odp_destroy.work);
194 struct mlx5_ib_mr *imr = mr->parent;
195 struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
196 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
197
198 mlx5r_deref_wait_odp_mkey(&mr->mmkey);
199
200 mutex_lock(&odp_imr->umem_mutex);
201 mlx5r_umr_update_xlt(mr->parent,
202 ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT, 1, 0,
203 MLX5_IB_UPD_XLT_INDIRECT | MLX5_IB_UPD_XLT_ATOMIC);
204 mutex_unlock(&odp_imr->umem_mutex);
205 mlx5_ib_dereg_mr(&mr->ibmr, NULL);
206
207 mlx5r_deref_odp_mkey(&imr->mmkey);
208}
209
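/*
 * Called from the invalidation path when a child MR has no pages left
 * mapped. Removes it from implicit_children and defers the destruction
 * (a sleeping operation) to a work queue; the parent reference taken here
 * is dropped by free_implicit_child_mr_work().
 */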
210static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
211{
212 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
213 unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
214 struct mlx5_ib_mr *imr = mr->parent;
215
216 if (!refcount_inc_not_zero(&imr->mmkey.usecount))
217 return;
218
219 xa_erase(&imr->implicit_children, idx);
220
221 /* Freeing a MR is a sleeping operation, so bounce to a work queue */
222 INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
223 queue_work(system_unbound_wq, &mr->odp_destroy.work);
224}
225
226static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
227 const struct mmu_notifier_range *range,
228 unsigned long cur_seq)
229{
230 struct ib_umem_odp *umem_odp =
231 container_of(mni, struct ib_umem_odp, notifier);
232 struct mlx5_ib_mr *mr;
233 const u64 umr_block_mask = MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT - 1;
234 u64 idx = 0, blk_start_idx = 0;
235 u64 invalidations = 0;
236 unsigned long start;
237 unsigned long end;
238 int in_block = 0;
239 u64 addr;
240
241 if (!mmu_notifier_range_blockable(range))
242 return false;
243
244 mutex_lock(&umem_odp->umem_mutex);
245 mmu_interval_set_seq(mni, cur_seq);
246 /*
247 * If npages is zero then umem_odp->private may not be set up yet. This
248 * does not complete until after the first page is mapped for DMA.
249 */
250 if (!umem_odp->npages)
251 goto out;
252 mr = umem_odp->private;
253
254 start = max_t(u64, ib_umem_start(umem_odp), range->start);
255 end = min_t(u64, ib_umem_end(umem_odp), range->end);
256
257 /*
258 * Iteration one - zap the HW's MTTs. The notifiers_count ensures that
259 * while we are doing the invalidation, no page fault will attempt to
260 * overwrite the same MTTs. Concurrent invalidations might race us,
261 * but they will write 0s as well, so no difference in the end result.
262 */
263 for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
264 idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
265 /*
266 * Strive to write the MTTs in chunks, but avoid overwriting
267 * non-existing MTTs. The heuristic here can be improved to
268 * estimate the cost of another UMR vs. the cost of a bigger
269 * UMR.
270 */
271 if (umem_odp->dma_list[idx] &
272 (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
273 if (!in_block) {
274 blk_start_idx = idx;
275 in_block = 1;
276 }
277
278 /* Count page invalidations */
279 invalidations += idx - blk_start_idx + 1;
280 } else {
281 u64 umr_offset = idx & umr_block_mask;
282
283 if (in_block && umr_offset == 0) {
284 mlx5r_umr_update_xlt(mr, blk_start_idx,
285 idx - blk_start_idx, 0,
286 MLX5_IB_UPD_XLT_ZAP |
287 MLX5_IB_UPD_XLT_ATOMIC);
288 in_block = 0;
289 }
290 }
291 }
292 if (in_block)
293 mlx5r_umr_update_xlt(mr, blk_start_idx,
294 idx - blk_start_idx + 1, 0,
295 MLX5_IB_UPD_XLT_ZAP |
296 MLX5_IB_UPD_XLT_ATOMIC);
297
298 mlx5_update_odp_stats(mr, invalidations, invalidations);
299
300 /*
301 * We are now sure that the device will not access the
302 * memory. We can safely unmap it, and mark it as dirty if
303 * needed.
304 */
305
306 ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
307
308 if (unlikely(!umem_odp->npages && mr->parent))
309 destroy_unused_implicit_child_mr(mr);
310out:
311 mutex_unlock(&umem_odp->umem_mutex);
312 return true;
313}
314
315const struct mmu_interval_notifier_ops mlx5_mn_ops = {
316 .invalidate = mlx5_ib_invalidate_range,
317};
318
319static void internal_fill_odp_caps(struct mlx5_ib_dev *dev)
320{
321 struct ib_odp_caps *caps = &dev->odp_caps;
322
323 memset(caps, 0, sizeof(*caps));
324
325 if (!MLX5_CAP_GEN(dev->mdev, pg) || !mlx5r_umr_can_load_pas(dev, 0))
326 return;
327
328 caps->general_caps = IB_ODP_SUPPORT;
329
330 if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
331 dev->odp_max_size = U64_MAX;
332 else
333 dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);
334
335 if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
336 caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
337
338 if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.srq_receive))
339 caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
340
341 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
342 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
343
344 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
345 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
346
347 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
348 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
349
350 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
351 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
352
353 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
354 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
355
356 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.srq_receive))
357 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
358
359 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.send))
360 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SEND;
361
362 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.receive))
363 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_RECV;
364
365 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.write))
366 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_WRITE;
367
368 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.read))
369 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_READ;
370
371 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.atomic))
372 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
373
374 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.srq_receive))
375 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
376
377 if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
378 MLX5_CAP_GEN(dev->mdev, null_mkey) &&
379 MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
380 !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
381 caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
382}
383
384static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
385 struct mlx5_pagefault *pfault,
386 int error)
387{
388 int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
389 pfault->wqe.wq_num : pfault->token;
390 u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {};
391 int err;
392
393 MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);
394 MLX5_SET(page_fault_resume_in, in, page_fault_type, pfault->type);
395 MLX5_SET(page_fault_resume_in, in, token, pfault->token);
396 MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
397 MLX5_SET(page_fault_resume_in, in, error, !!error);
398
399 err = mlx5_cmd_exec_in(dev->mdev, page_fault_resume, in);
400 if (err)
401 mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x err %d\n",
402 wq_num, err);
403}
404
405static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
406 unsigned long idx)
407{
408 struct mlx5_ib_dev *dev = mr_to_mdev(imr);
409 struct ib_umem_odp *odp;
410 struct mlx5_ib_mr *mr;
411 struct mlx5_ib_mr *ret;
412 int err;
413
414 odp = ib_umem_odp_alloc_child(to_ib_umem_odp(imr->umem),
415 idx * MLX5_IMR_MTT_SIZE,
416 MLX5_IMR_MTT_SIZE, &mlx5_mn_ops);
417 if (IS_ERR(odp))
418 return ERR_CAST(odp);
419
420 mr = mlx5_mr_cache_alloc(dev, imr->access_flags,
421 MLX5_MKC_ACCESS_MODE_MTT,
422 MLX5_IMR_MTT_ENTRIES);
423 if (IS_ERR(mr)) {
424 ib_umem_odp_release(odp);
425 return mr;
426 }
427
428 mr->access_flags = imr->access_flags;
429 mr->ibmr.pd = imr->ibmr.pd;
430 mr->ibmr.device = &mr_to_mdev(imr)->ib_dev;
431 mr->umem = &odp->umem;
432 mr->ibmr.lkey = mr->mmkey.key;
433 mr->ibmr.rkey = mr->mmkey.key;
434 mr->ibmr.iova = idx * MLX5_IMR_MTT_SIZE;
435 mr->parent = imr;
436 odp->private = mr;
437
438 /*
439 * The first refcount is owned by the xarray and the second refcount
440 * is returned to the caller.
441 */
442 refcount_set(&mr->mmkey.usecount, 2);
443
444 err = mlx5r_umr_update_xlt(mr, 0,
445 MLX5_IMR_MTT_ENTRIES,
446 PAGE_SHIFT,
447 MLX5_IB_UPD_XLT_ZAP |
448 MLX5_IB_UPD_XLT_ENABLE);
449 if (err) {
450 ret = ERR_PTR(err);
451 goto out_mr;
452 }
453
454 xa_lock(&imr->implicit_children);
455 ret = __xa_cmpxchg(&imr->implicit_children, idx, NULL, mr,
456 GFP_KERNEL);
457 if (unlikely(ret)) {
458 if (xa_is_err(ret)) {
459 ret = ERR_PTR(xa_err(ret));
460 goto out_lock;
461 }
462 /*
463 * Another thread beat us to creating the child mr, use
464 * theirs.
465 */
466 refcount_inc(&ret->mmkey.usecount);
467 goto out_lock;
468 }
469 xa_unlock(&imr->implicit_children);
470
471 mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr);
472 return mr;
473
474out_lock:
475 xa_unlock(&imr->implicit_children);
476out_mr:
477 mlx5_ib_dereg_mr(&mr->ibmr, NULL);
478 return ret;
479}
480
481struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
482 int access_flags)
483{
484 struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
485 struct ib_umem_odp *umem_odp;
486 struct mlx5_ib_mr *imr;
487 int err;
488
489 if (!mlx5r_umr_can_load_pas(dev, MLX5_IMR_MTT_ENTRIES * PAGE_SIZE))
490 return ERR_PTR(-EOPNOTSUPP);
491
492 umem_odp = ib_umem_odp_alloc_implicit(&dev->ib_dev, access_flags);
493 if (IS_ERR(umem_odp))
494 return ERR_CAST(umem_odp);
495
496 imr = mlx5_mr_cache_alloc(dev, access_flags, MLX5_MKC_ACCESS_MODE_KSM,
497 mlx5_imr_ksm_entries);
498 if (IS_ERR(imr)) {
499 ib_umem_odp_release(umem_odp);
500 return imr;
501 }
502
503 imr->access_flags = access_flags;
504 imr->ibmr.pd = &pd->ibpd;
505 imr->ibmr.iova = 0;
506 imr->umem = &umem_odp->umem;
507 imr->ibmr.lkey = imr->mmkey.key;
508 imr->ibmr.rkey = imr->mmkey.key;
509 imr->ibmr.device = &dev->ib_dev;
510 imr->is_odp_implicit = true;
511 xa_init(&imr->implicit_children);
512
513 err = mlx5r_umr_update_xlt(imr, 0,
514 mlx5_imr_ksm_entries,
515 MLX5_KSM_PAGE_SHIFT,
516 MLX5_IB_UPD_XLT_INDIRECT |
517 MLX5_IB_UPD_XLT_ZAP |
518 MLX5_IB_UPD_XLT_ENABLE);
519 if (err)
520 goto out_mr;
521
522 err = mlx5r_store_odp_mkey(dev, &imr->mmkey);
523 if (err)
524 goto out_mr;
525
526 mlx5_ib_dbg(dev, "key %x mr %p\n", imr->mmkey.key, imr);
527 return imr;
528out_mr:
529 mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
530 mlx5_ib_dereg_mr(&imr->ibmr, NULL);
531 return ERR_PTR(err);
532}
533
534void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr)
535{
536 struct mlx5_ib_mr *mtt;
537 unsigned long idx;
538
539 /*
540 * If this is an implicit MR it is already invalidated so we can just
541 * delete the children mkeys.
542 */
543 xa_for_each(&mr->implicit_children, idx, mtt) {
544 xa_erase(&mr->implicit_children, idx);
545 mlx5_ib_dereg_mr(&mtt->ibmr, NULL);
546 }
547}
548
549#define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
550#define MLX5_PF_FLAGS_SNAPSHOT BIT(2)
551#define MLX5_PF_FLAGS_ENABLE BIT(3)
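/*
 * Fault in a range of a regular (non-implicit) ODP MR: map the pages with
 * ib_umem_odp_map_dma_and_lock() and write the resulting MTTs to the device
 * with a UMR. Returns the number of system pages mapped or a negative errno.
 */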
552static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
553 u64 user_va, size_t bcnt, u32 *bytes_mapped,
554 u32 flags)
555{
556 int page_shift, ret, np;
557 bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
558 u64 access_mask;
559 u64 start_idx;
560 bool fault = !(flags & MLX5_PF_FLAGS_SNAPSHOT);
561 u32 xlt_flags = MLX5_IB_UPD_XLT_ATOMIC;
562
563 if (flags & MLX5_PF_FLAGS_ENABLE)
564 xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
565
566 page_shift = odp->page_shift;
567 start_idx = (user_va - ib_umem_start(odp)) >> page_shift;
568 access_mask = ODP_READ_ALLOWED_BIT;
569
570 if (odp->umem.writable && !downgrade)
571 access_mask |= ODP_WRITE_ALLOWED_BIT;
572
573 np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask, fault);
574 if (np < 0)
575 return np;
576
577 /*
578 * No need to check whether the MTTs really belong to this MR, since
579 * ib_umem_odp_map_dma_and_lock already checks this.
580 */
581 ret = mlx5r_umr_update_xlt(mr, start_idx, np, page_shift, xlt_flags);
582 mutex_unlock(&odp->umem_mutex);
583
584 if (ret < 0) {
585 if (ret != -EAGAIN)
586 mlx5_ib_err(mr_to_mdev(mr),
587 "Failed to update mkey page tables\n");
588 goto out;
589 }
590
591 if (bytes_mapped) {
592 u32 new_mappings = (np << page_shift) -
593 (user_va - round_down(user_va, 1 << page_shift));
594
595 *bytes_mapped += min_t(u32, new_mappings, bcnt);
596 }
597
598 return np << (page_shift - PAGE_SHIFT);
599
600out:
601 return ret;
602}
603
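/*
 * Fault on an implicit MR: walk the 1GB (MLX5_IMR_MTT_SIZE) child MRs that
 * intersect the requested range, creating missing children on demand, fault
 * each child, and refresh the parent's KSM table if any child was added.
 */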
604static int pagefault_implicit_mr(struct mlx5_ib_mr *imr,
605 struct ib_umem_odp *odp_imr, u64 user_va,
606 size_t bcnt, u32 *bytes_mapped, u32 flags)
607{
608 unsigned long end_idx = (user_va + bcnt - 1) >> MLX5_IMR_MTT_SHIFT;
609 unsigned long upd_start_idx = end_idx + 1;
610 unsigned long upd_len = 0;
611 unsigned long npages = 0;
612 int err;
613 int ret;
614
615 if (unlikely(user_va >= mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE ||
616 mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE - user_va < bcnt))
617 return -EFAULT;
618
619 /* Fault each child mr that intersects with our interval. */
620 while (bcnt) {
621 unsigned long idx = user_va >> MLX5_IMR_MTT_SHIFT;
622 struct ib_umem_odp *umem_odp;
623 struct mlx5_ib_mr *mtt;
624 u64 len;
625
626 xa_lock(&imr->implicit_children);
627 mtt = xa_load(&imr->implicit_children, idx);
628 if (unlikely(!mtt)) {
629 xa_unlock(&imr->implicit_children);
630 mtt = implicit_get_child_mr(imr, idx);
631 if (IS_ERR(mtt)) {
632 ret = PTR_ERR(mtt);
633 goto out;
634 }
635 upd_start_idx = min(upd_start_idx, idx);
636 upd_len = idx - upd_start_idx + 1;
637 } else {
638 refcount_inc(&mtt->mmkey.usecount);
639 xa_unlock(&imr->implicit_children);
640 }
641
642 umem_odp = to_ib_umem_odp(mtt->umem);
643 len = min_t(u64, user_va + bcnt, ib_umem_end(umem_odp)) -
644 user_va;
645
646 ret = pagefault_real_mr(mtt, umem_odp, user_va, len,
647 bytes_mapped, flags);
648
649 mlx5r_deref_odp_mkey(&mtt->mmkey);
650
651 if (ret < 0)
652 goto out;
653 user_va += len;
654 bcnt -= len;
655 npages += ret;
656 }
657
658 ret = npages;
659
660 /*
661 * Any time the implicit_children are changed we must perform an
662 * update of the xlt before exiting to ensure the HW and the
663 * implicit_children remains synchronized.
664 */
665out:
666 if (likely(!upd_len))
667 return ret;
668
669 /*
670 * Notice this is not strictly ordered right, the KSM is updated after
671 * the implicit_children is updated, so a parallel page fault could
672 * see a MR that is not yet visible in the KSM. This is similar to a
673 * parallel page fault seeing a MR that is being concurrently removed
674 * from the KSM. Both of these improbable situations are resolved
675 * safely by resuming the HW and then taking another page fault. The
676 * next pagefault handler will see the new information.
677 */
678 mutex_lock(&odp_imr->umem_mutex);
679 err = mlx5r_umr_update_xlt(imr, upd_start_idx, upd_len, 0,
680 MLX5_IB_UPD_XLT_INDIRECT |
681 MLX5_IB_UPD_XLT_ATOMIC);
682 mutex_unlock(&odp_imr->umem_mutex);
683 if (err) {
684 mlx5_ib_err(mr_to_mdev(imr), "Failed to update PAS\n");
685 return err;
686 }
687 return ret;
688}
689
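/*
 * dmabuf-backed MRs are not faulted page by page; the whole attachment is
 * (re)mapped under the dma-resv lock and the MR's translation is rewritten
 * in a single UMR.
 */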
690static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
691 u32 *bytes_mapped, u32 flags)
692{
693 struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
694 u32 xlt_flags = 0;
695 int err;
696 unsigned int page_size;
697
698 if (flags & MLX5_PF_FLAGS_ENABLE)
699 xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
700
701 dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
702 err = ib_umem_dmabuf_map_pages(umem_dmabuf);
703 if (err) {
704 dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
705 return err;
706 }
707
708 page_size = mlx5_umem_find_best_pgsz(&umem_dmabuf->umem, mkc,
709 log_page_size, 0,
710 umem_dmabuf->umem.iova);
711 if (unlikely(page_size < PAGE_SIZE)) {
712 ib_umem_dmabuf_unmap_pages(umem_dmabuf);
713 err = -EINVAL;
714 } else {
715 err = mlx5r_umr_update_mr_pas(mr, xlt_flags);
716 }
717 dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
718
719 if (err)
720 return err;
721
722 if (bytes_mapped)
723 *bytes_mapped += bcnt;
724
725 return ib_umem_num_pages(mr->umem);
726}
727
728/*
729 * Returns:
730 * -EFAULT: The io_virt->bcnt is not within the MR, it covers pages that are
731 * not accessible, or the MR is no longer valid.
732 * -EAGAIN/-ENOMEM: The operation should be retried
733 *
734 * -EINVAL/others: General internal malfunction
735 * >0: Number of pages mapped
736 */
737static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
738 u32 *bytes_mapped, u32 flags)
739{
740 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
741
742 if (unlikely(io_virt < mr->ibmr.iova))
743 return -EFAULT;
744
745 if (mr->umem->is_dmabuf)
746 return pagefault_dmabuf_mr(mr, bcnt, bytes_mapped, flags);
747
748 if (!odp->is_implicit_odp) {
749 u64 user_va;
750
751 if (check_add_overflow(io_virt - mr->ibmr.iova,
752 (u64)odp->umem.address, &user_va))
753 return -EFAULT;
754 if (unlikely(user_va >= ib_umem_end(odp) ||
755 ib_umem_end(odp) - user_va < bcnt))
756 return -EFAULT;
757 return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped,
758 flags);
759 }
760 return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped,
761 flags);
762}
763
764int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
765{
766 int ret;
767
768 ret = pagefault_real_mr(mr, to_ib_umem_odp(mr->umem), mr->umem->address,
769 mr->umem->length, NULL,
770 MLX5_PF_FLAGS_SNAPSHOT | MLX5_PF_FLAGS_ENABLE);
771 return ret >= 0 ? 0 : ret;
772}
773
774int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
775{
776 int ret;
777
778 ret = pagefault_dmabuf_mr(mr, mr->umem->length, NULL,
779 MLX5_PF_FLAGS_ENABLE);
780
781 return ret >= 0 ? 0 : ret;
782}
783
784struct pf_frame {
785 struct pf_frame *next;
786 u32 key;
787 u64 io_virt;
788 size_t bcnt;
789 int depth;
790};
791
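/*
 * Memory windows and indirect DEVX mkeys carry a variable portion in the low
 * 8 bits of the key, so only the base mkey index is compared for those types.
 */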
792static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key)
793{
794 if (!mmkey)
795 return false;
796 if (mmkey->type == MLX5_MKEY_MW ||
797 mmkey->type == MLX5_MKEY_INDIRECT_DEVX)
798 return mlx5_base_mkey(mmkey->key) == mlx5_base_mkey(key);
799 return mmkey->key == key;
800}
801
802/*
803 * Handle a single data segment in a page-fault WQE or RDMA region.
804 *
805 * Returns number of OS pages retrieved on success. The caller may continue to
806 * the next data segment.
807 * Can return the following error codes:
808 * -EAGAIN to designate a temporary error. The caller will abort handling the
809 * page fault and resolve it.
810 * -EFAULT when there's an error mapping the requested pages. The caller will
811 * abort the page fault handling.
812 */
813static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
814 struct ib_pd *pd, u32 key,
815 u64 io_virt, size_t bcnt,
816 u32 *bytes_committed,
817 u32 *bytes_mapped)
818{
819 int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0;
820 struct pf_frame *head = NULL, *frame;
821 struct mlx5_ib_mkey *mmkey;
822 struct mlx5_ib_mr *mr;
823 struct mlx5_klm *pklm;
824 u32 *out = NULL;
825 size_t offset;
826
827 io_virt += *bytes_committed;
828 bcnt -= *bytes_committed;
829
830next_mr:
831 xa_lock(&dev->odp_mkeys);
832 mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));
833 if (!mmkey) {
834 xa_unlock(&dev->odp_mkeys);
835 mlx5_ib_dbg(
836 dev,
837 "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
838 key);
839 if (bytes_mapped)
840 *bytes_mapped += bcnt;
841 /*
842 * The user could specify a SGL with multiple lkeys and only
843 * some of them are ODP. Treat the non-ODP ones as fully
844 * faulted.
845 */
846 ret = 0;
847 goto end;
848 }
849 refcount_inc(&mmkey->usecount);
850 xa_unlock(&dev->odp_mkeys);
851
852 if (!mkey_is_eq(mmkey, key)) {
853 mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
854 ret = -EFAULT;
855 goto end;
856 }
857
858 switch (mmkey->type) {
859 case MLX5_MKEY_MR:
860 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
861
862 ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0);
863 if (ret < 0)
864 goto end;
865
866 mlx5_update_odp_stats(mr, faults, ret);
867
868 npages += ret;
869 ret = 0;
870 break;
871
872 case MLX5_MKEY_MW:
873 case MLX5_MKEY_INDIRECT_DEVX:
874 if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
875 mlx5_ib_dbg(dev, "indirection level exceeded\n");
876 ret = -EFAULT;
877 goto end;
878 }
879
880 outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
881 sizeof(*pklm) * (mmkey->ndescs - 2);
882
883 if (outlen > cur_outlen) {
884 kfree(out);
885 out = kzalloc(outlen, GFP_KERNEL);
886 if (!out) {
887 ret = -ENOMEM;
888 goto end;
889 }
890 cur_outlen = outlen;
891 }
892
893 pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out,
894 bsf0_klm0_pas_mtt0_1);
895
896 ret = mlx5_core_query_mkey(dev->mdev, mmkey->key, out, outlen);
897 if (ret)
898 goto end;
899
900 offset = io_virt - MLX5_GET64(query_mkey_out, out,
901 memory_key_mkey_entry.start_addr);
902
903 for (i = 0; bcnt && i < mmkey->ndescs; i++, pklm++) {
904 if (offset >= be32_to_cpu(pklm->bcount)) {
905 offset -= be32_to_cpu(pklm->bcount);
906 continue;
907 }
908
909 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
910 if (!frame) {
911 ret = -ENOMEM;
912 goto end;
913 }
914
915 frame->key = be32_to_cpu(pklm->key);
916 frame->io_virt = be64_to_cpu(pklm->va) + offset;
917 frame->bcnt = min_t(size_t, bcnt,
918 be32_to_cpu(pklm->bcount) - offset);
919 frame->depth = depth + 1;
920 frame->next = head;
921 head = frame;
922
923 bcnt -= frame->bcnt;
924 offset = 0;
925 }
926 break;
927
928 default:
929 mlx5_ib_dbg(dev, "wrong mkey type %d\n", mmkey->type);
930 ret = -EFAULT;
931 goto end;
932 }
933
934 if (head) {
935 frame = head;
936 head = frame->next;
937
938 key = frame->key;
939 io_virt = frame->io_virt;
940 bcnt = frame->bcnt;
941 depth = frame->depth;
942 kfree(frame);
943
944 mlx5r_deref_odp_mkey(mmkey);
945 goto next_mr;
946 }
947
948end:
949 if (mmkey)
950 mlx5r_deref_odp_mkey(mmkey);
951 while (head) {
952 frame = head;
953 head = frame->next;
954 kfree(frame);
955 }
956 kfree(out);
957
958 *bytes_committed = 0;
959 return ret ? ret : npages;
960}
961
962/*
963 * Parse a series of data segments for page fault handling.
964 *
965 * @dev: Pointer to mlx5 IB device
966 * @pfault: contains page fault information.
967 * @wqe: points at the first data segment in the WQE.
968 * @wqe_end: points after the end of the WQE.
969 * @bytes_mapped: receives the number of bytes that the function was able to
970 * map. This allows the caller to decide intelligently whether
971 * enough memory was mapped to resolve the page fault
972 * successfully (e.g. enough for the next MTU, or the entire
973 * WQE).
974 * @total_wqe_bytes: receives the total data size of this WQE in bytes (minus
975 * the committed bytes).
976 * @receive_queue: true if this is a receive queue WQE, whose scatter list
 * may end with a terminating entry
977 *
978 * Returns the number of pages loaded if positive, zero for an empty WQE, or a
979 * negative error code.
980 */
981static int pagefault_data_segments(struct mlx5_ib_dev *dev,
982 struct mlx5_pagefault *pfault,
983 void *wqe,
984 void *wqe_end, u32 *bytes_mapped,
985 u32 *total_wqe_bytes, bool receive_queue)
986{
987 int ret = 0, npages = 0;
988 u64 io_virt;
989 __be32 key;
990 u32 byte_count;
991 size_t bcnt;
992 int inline_segment;
993
994 if (bytes_mapped)
995 *bytes_mapped = 0;
996 if (total_wqe_bytes)
997 *total_wqe_bytes = 0;
998
999 while (wqe < wqe_end) {
1000 struct mlx5_wqe_data_seg *dseg = wqe;
1001
1002 io_virt = be64_to_cpu(dseg->addr);
1003 key = dseg->lkey;
1004 byte_count = be32_to_cpu(dseg->byte_count);
1005 inline_segment = !!(byte_count & MLX5_INLINE_SEG);
1006 bcnt = byte_count & ~MLX5_INLINE_SEG;
1007
1008 if (inline_segment) {
1009 bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
1010 wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
1011 16);
1012 } else {
1013 wqe += sizeof(*dseg);
1014 }
1015
1016 /* receive WQE end of sg list. */
1017 if (receive_queue && bcnt == 0 &&
1018 key == dev->mkeys.terminate_scatter_list_mkey &&
1019 io_virt == 0)
1020 break;
1021
1022 if (!inline_segment && total_wqe_bytes) {
1023 *total_wqe_bytes += bcnt - min_t(size_t, bcnt,
1024 pfault->bytes_committed);
1025 }
1026
1027 /* A zero length data segment designates a length of 2GB. */
1028 if (bcnt == 0)
1029 bcnt = 1U << 31;
1030
1031 if (inline_segment || bcnt <= pfault->bytes_committed) {
1032 pfault->bytes_committed -=
1033 min_t(size_t, bcnt,
1034 pfault->bytes_committed);
1035 continue;
1036 }
1037
1038 ret = pagefault_single_data_segment(dev, NULL, be32_to_cpu(key),
1039 io_virt, bcnt,
1040 &pfault->bytes_committed,
1041 bytes_mapped);
1042 if (ret < 0)
1043 break;
1044 npages += ret;
1045 }
1046
1047 return ret < 0 ? ret : npages;
1048}
1049
1050/*
1051 * Parse initiator WQE. Advances the wqe pointer to point at the
1052 * scatter-gather list, and sets wqe_end to the end of the WQE.
1053 */
1054static int mlx5_ib_mr_initiator_pfault_handler(
1055 struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
1056 struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
1057{
1058 struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
1059 u16 wqe_index = pfault->wqe.wqe_index;
1060 struct mlx5_base_av *av;
1061 unsigned ds, opcode;
1062 u32 qpn = qp->trans_qp.base.mqp.qpn;
1063
1064 ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
1065 if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
1066 mlx5_ib_err(dev, "Unable to read the complete WQE. ds = 0x%x, ret = 0x%x\n",
1067 ds, wqe_length);
1068 return -EFAULT;
1069 }
1070
1071 if (ds == 0) {
1072 mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
1073 wqe_index, qpn);
1074 return -EFAULT;
1075 }
1076
1077 *wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
1078 *wqe += sizeof(*ctrl);
1079
1080 opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
1081 MLX5_WQE_CTRL_OPCODE_MASK;
1082
1083 if (qp->type == IB_QPT_XRC_INI)
1084 *wqe += sizeof(struct mlx5_wqe_xrc_seg);
1085
1086 if (qp->type == IB_QPT_UD || qp->type == MLX5_IB_QPT_DCI) {
1087 av = *wqe;
1088 if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
1089 *wqe += sizeof(struct mlx5_av);
1090 else
1091 *wqe += sizeof(struct mlx5_base_av);
1092 }
1093
1094 switch (opcode) {
1095 case MLX5_OPCODE_RDMA_WRITE:
1096 case MLX5_OPCODE_RDMA_WRITE_IMM:
1097 case MLX5_OPCODE_RDMA_READ:
1098 *wqe += sizeof(struct mlx5_wqe_raddr_seg);
1099 break;
1100 case MLX5_OPCODE_ATOMIC_CS:
1101 case MLX5_OPCODE_ATOMIC_FA:
1102 *wqe += sizeof(struct mlx5_wqe_raddr_seg);
1103 *wqe += sizeof(struct mlx5_wqe_atomic_seg);
1104 break;
1105 }
1106
1107 return 0;
1108}
1109
1110/*
1111 * Parse responder WQE and set wqe_end to the end of the WQE.
1112 */
1113static int mlx5_ib_mr_responder_pfault_handler_srq(struct mlx5_ib_dev *dev,
1114 struct mlx5_ib_srq *srq,
1115 void **wqe, void **wqe_end,
1116 int wqe_length)
1117{
1118 int wqe_size = 1 << srq->msrq.wqe_shift;
1119
1120 if (wqe_size > wqe_length) {
1121 mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
1122 return -EFAULT;
1123 }
1124
1125 *wqe_end = *wqe + wqe_size;
1126 *wqe += sizeof(struct mlx5_wqe_srq_next_seg);
1127
1128 return 0;
1129}
1130
1131static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
1132 struct mlx5_ib_qp *qp,
1133 void *wqe, void **wqe_end,
1134 int wqe_length)
1135{
1136 struct mlx5_ib_wq *wq = &qp->rq;
1137 int wqe_size = 1 << wq->wqe_shift;
1138
1139 if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) {
1140 mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
1141 return -EFAULT;
1142 }
1143
1144 if (wqe_size > wqe_length) {
1145 mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
1146 return -EFAULT;
1147 }
1148
1149 *wqe_end = wqe + wqe_size;
1150
1151 return 0;
1152}
1153
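/*
 * Map a page fault's WQ number to the QP or SRQ that owns it, taking a
 * reference on the resource; the caller releases it with mlx5_core_res_put().
 */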
1154static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
1155 u32 wq_num, int pf_type)
1156{
1157 struct mlx5_core_rsc_common *common = NULL;
1158 struct mlx5_core_srq *srq;
1159
1160 switch (pf_type) {
1161 case MLX5_WQE_PF_TYPE_RMP:
1162 srq = mlx5_cmd_get_srq(dev, wq_num);
1163 if (srq)
1164 common = &srq->common;
1165 break;
1166 case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE:
1167 case MLX5_WQE_PF_TYPE_RESP:
1168 case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC:
1169 common = mlx5_core_res_hold(dev, wq_num, MLX5_RES_QP);
1170 break;
1171 default:
1172 break;
1173 }
1174
1175 return common;
1176}
1177
1178static inline struct mlx5_ib_qp *res_to_qp(struct mlx5_core_rsc_common *res)
1179{
1180 struct mlx5_core_qp *mqp = (struct mlx5_core_qp *)res;
1181
1182 return to_mibqp(mqp);
1183}
1184
1185static inline struct mlx5_ib_srq *res_to_srq(struct mlx5_core_rsc_common *res)
1186{
1187 struct mlx5_core_srq *msrq =
1188 container_of(res, struct mlx5_core_srq, common);
1189
1190 return to_mibsrq(msrq);
1191}
1192
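/*
 * Handle a WQE page fault: copy the faulting WQE out of the user's queue
 * buffer, parse its data segments and fault in the referenced memory, then
 * resume the QP/SRQ (with an error indication if resolution failed).
 */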
1193static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
1194 struct mlx5_pagefault *pfault)
1195{
1196 bool sq = pfault->type & MLX5_PFAULT_REQUESTOR;
1197 u16 wqe_index = pfault->wqe.wqe_index;
1198 void *wqe, *wqe_start = NULL, *wqe_end = NULL;
1199 u32 bytes_mapped, total_wqe_bytes;
1200 struct mlx5_core_rsc_common *res;
1201 int resume_with_error = 1;
1202 struct mlx5_ib_qp *qp;
1203 size_t bytes_copied;
1204 int ret = 0;
1205
1206 res = odp_get_rsc(dev, pfault->wqe.wq_num, pfault->type);
1207 if (!res) {
1208 mlx5_ib_dbg(dev, "wqe page fault for missing resource %d\n", pfault->wqe.wq_num);
1209 return;
1210 }
1211
1212 if (res->res != MLX5_RES_QP && res->res != MLX5_RES_SRQ &&
1213 res->res != MLX5_RES_XSRQ) {
1214 mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n",
1215 pfault->type);
1216 goto resolve_page_fault;
1217 }
1218
1219 wqe_start = (void *)__get_free_page(GFP_KERNEL);
1220 if (!wqe_start) {
1221 mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
1222 goto resolve_page_fault;
1223 }
1224
1225 wqe = wqe_start;
1226 qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL;
1227 if (qp && sq) {
1228 ret = mlx5_ib_read_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
1229 &bytes_copied);
1230 if (ret)
1231 goto read_user;
1232 ret = mlx5_ib_mr_initiator_pfault_handler(
1233 dev, pfault, qp, &wqe, &wqe_end, bytes_copied);
1234 } else if (qp && !sq) {
1235 ret = mlx5_ib_read_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
1236 &bytes_copied);
1237 if (ret)
1238 goto read_user;
1239 ret = mlx5_ib_mr_responder_pfault_handler_rq(
1240 dev, qp, wqe, &wqe_end, bytes_copied);
1241 } else if (!qp) {
1242 struct mlx5_ib_srq *srq = res_to_srq(res);
1243
1244 ret = mlx5_ib_read_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
1245 &bytes_copied);
1246 if (ret)
1247 goto read_user;
1248 ret = mlx5_ib_mr_responder_pfault_handler_srq(
1249 dev, srq, &wqe, &wqe_end, bytes_copied);
1250 }
1251
1252 if (ret < 0 || wqe >= wqe_end)
1253 goto resolve_page_fault;
1254
1255 ret = pagefault_data_segments(dev, pfault, wqe, wqe_end, &bytes_mapped,
1256 &total_wqe_bytes, !sq);
1257 if (ret == -EAGAIN)
1258 goto out;
1259
1260 if (ret < 0 || total_wqe_bytes > bytes_mapped)
1261 goto resolve_page_fault;
1262
1263out:
1264 ret = 0;
1265 resume_with_error = 0;
1266
1267read_user:
1268 if (ret)
1269 mlx5_ib_err(
1270 dev,
1271 "Failed reading a WQE following page fault, error %d, wqe_index %x, qpn %x\n",
1272 ret, wqe_index, pfault->token);
1273
1274resolve_page_fault:
1275 mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
1276 mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
1277 pfault->wqe.wq_num, resume_with_error,
1278 pfault->type);
1279 mlx5_core_res_put(res);
1280 free_page((unsigned long)wqe_start);
1281}
1282
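/* Number of system pages covered by [address, address + length). */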
1283static int pages_in_range(u64 address, u32 length)
1284{
1285 return (ALIGN(address + length, PAGE_SIZE) -
1286 (address & PAGE_MASK)) >> PAGE_SHIFT;
1287}
1288
1289static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
1290 struct mlx5_pagefault *pfault)
1291{
1292 u64 address;
1293 u32 length;
1294 u32 prefetch_len = pfault->bytes_committed;
1295 int prefetch_activated = 0;
1296 u32 rkey = pfault->rdma.r_key;
1297 int ret;
1298
1299 /* The RDMA responder handler handles the page fault in two parts.
1300 * First it brings the necessary pages for the current packet
1301 * (and uses the pfault context), and then (after resuming the QP)
1302 * prefetches more pages. The second operation cannot use the pfault
1303 * context and therefore uses the dummy_pfault context allocated on
1304 * the stack */
1305 pfault->rdma.rdma_va += pfault->bytes_committed;
1306 pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,
1307 pfault->rdma.rdma_op_len);
1308 pfault->bytes_committed = 0;
1309
1310 address = pfault->rdma.rdma_va;
1311 length = pfault->rdma.rdma_op_len;
1312
1313 /* For some operations, the hardware cannot tell the exact message
1314 * length, and in those cases it reports zero. Use prefetch
1315 * logic. */
1316 if (length == 0) {
1317 prefetch_activated = 1;
1318 length = pfault->rdma.packet_size;
1319 prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
1320 }
1321
1322 ret = pagefault_single_data_segment(dev, NULL, rkey, address, length,
1323 &pfault->bytes_committed, NULL);
1324 if (ret == -EAGAIN) {
1325 /* We're racing with an invalidation, don't prefetch */
1326 prefetch_activated = 0;
1327 } else if (ret < 0 || pages_in_range(address, length) > ret) {
1328 mlx5_ib_page_fault_resume(dev, pfault, 1);
1329 if (ret != -ENOENT)
1330 mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%x, type: 0x%x\n",
1331 ret, pfault->token, pfault->type);
1332 return;
1333 }
1334
1335 mlx5_ib_page_fault_resume(dev, pfault, 0);
1336 mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n",
1337 pfault->token, pfault->type,
1338 prefetch_activated);
1339
1340 /* At this point, there might be a new pagefault already arriving in
1341 * the eq, switch to the dummy pagefault for the rest of the
1342 * processing. We're still OK with the objects being alive as the
1343 * work-queue is being fenced. */
1344
1345 if (prefetch_activated) {
1346 u32 bytes_committed = 0;
1347
1348 ret = pagefault_single_data_segment(dev, NULL, rkey, address,
1349 prefetch_len,
1350 &bytes_committed, NULL);
1351 if (ret < 0 && ret != -EAGAIN) {
1352 mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
1353 ret, pfault->token, address, prefetch_len);
1354 }
1355 }
1356}
1357
1358static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault)
1359{
1360 u8 event_subtype = pfault->event_subtype;
1361
1362 switch (event_subtype) {
1363 case MLX5_PFAULT_SUBTYPE_WQE:
1364 mlx5_ib_mr_wqe_pfault_handler(dev, pfault);
1365 break;
1366 case MLX5_PFAULT_SUBTYPE_RDMA:
1367 mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
1368 break;
1369 default:
1370 mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n",
1371 event_subtype);
1372 mlx5_ib_page_fault_resume(dev, pfault, 1);
1373 }
1374}
1375
1376static void mlx5_ib_eqe_pf_action(struct work_struct *work)
1377{
1378 struct mlx5_pagefault *pfault = container_of(work,
1379 struct mlx5_pagefault,
1380 work);
1381 struct mlx5_ib_pf_eq *eq = pfault->eq;
1382
1383 mlx5_ib_pfault(eq->dev, pfault);
1384 mempool_free(pfault, eq->pool);
1385}
1386
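/*
 * Drain all pending EQEs from the page fault EQ, decode each into a
 * struct mlx5_pagefault and queue it for resolution on the EQ's workqueue.
 */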
1387static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
1388{
1389 struct mlx5_eqe_page_fault *pf_eqe;
1390 struct mlx5_pagefault *pfault;
1391 struct mlx5_eqe *eqe;
1392 int cc = 0;
1393
1394 while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) {
1395 pfault = mempool_alloc(eq->pool, GFP_ATOMIC);
1396 if (!pfault) {
1397 schedule_work(&eq->work);
1398 break;
1399 }
1400
1401 pf_eqe = &eqe->data.page_fault;
1402 pfault->event_subtype = eqe->sub_type;
1403 pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);
1404
1405 mlx5_ib_dbg(eq->dev,
1406 "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
1407 eqe->sub_type, pfault->bytes_committed);
1408
1409 switch (eqe->sub_type) {
1410 case MLX5_PFAULT_SUBTYPE_RDMA:
1411 /* RDMA based event */
1412 pfault->type =
1413 be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
1414 pfault->token =
1415 be32_to_cpu(pf_eqe->rdma.pftype_token) &
1416 MLX5_24BIT_MASK;
1417 pfault->rdma.r_key =
1418 be32_to_cpu(pf_eqe->rdma.r_key);
1419 pfault->rdma.packet_size =
1420 be16_to_cpu(pf_eqe->rdma.packet_length);
1421 pfault->rdma.rdma_op_len =
1422 be32_to_cpu(pf_eqe->rdma.rdma_op_len);
1423 pfault->rdma.rdma_va =
1424 be64_to_cpu(pf_eqe->rdma.rdma_va);
1425 mlx5_ib_dbg(eq->dev,
1426 "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
1427 pfault->type, pfault->token,
1428 pfault->rdma.r_key);
1429 mlx5_ib_dbg(eq->dev,
1430 "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
1431 pfault->rdma.rdma_op_len,
1432 pfault->rdma.rdma_va);
1433 break;
1434
1435 case MLX5_PFAULT_SUBTYPE_WQE:
1436 /* WQE based event */
1437 pfault->type =
1438 (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
1439 pfault->token =
1440 be32_to_cpu(pf_eqe->wqe.token);
1441 pfault->wqe.wq_num =
1442 be32_to_cpu(pf_eqe->wqe.pftype_wq) &
1443 MLX5_24BIT_MASK;
1444 pfault->wqe.wqe_index =
1445 be16_to_cpu(pf_eqe->wqe.wqe_index);
1446 pfault->wqe.packet_size =
1447 be16_to_cpu(pf_eqe->wqe.packet_length);
1448 mlx5_ib_dbg(eq->dev,
1449 "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
1450 pfault->type, pfault->token,
1451 pfault->wqe.wq_num,
1452 pfault->wqe.wqe_index);
1453 break;
1454
1455 default:
1456 mlx5_ib_warn(eq->dev,
1457 "Unsupported page fault event sub-type: 0x%02hhx\n",
1458 eqe->sub_type);
1459 /* Unsupported page faults should still be
1460 * resolved by the page fault handler
1461 */
1462 }
1463
1464 pfault->eq = eq;
1465 INIT_WORK(&pfault->work, mlx5_ib_eqe_pf_action);
1466 queue_work(eq->wq, &pfault->work);
1467
1468 cc = mlx5_eq_update_cc(eq->core, ++cc);
1469 }
1470
1471 mlx5_eq_update_ci(eq->core, cc, 1);
1472}
1473
1474static int mlx5_ib_eq_pf_int(struct notifier_block *nb, unsigned long type,
1475 void *data)
1476{
1477 struct mlx5_ib_pf_eq *eq =
1478 container_of(nb, struct mlx5_ib_pf_eq, irq_nb);
1479 unsigned long flags;
1480
1481 if (spin_trylock_irqsave(&eq->lock, flags)) {
1482 mlx5_ib_eq_pf_process(eq);
1483 spin_unlock_irqrestore(&eq->lock, flags);
1484 } else {
1485 schedule_work(&eq->work);
1486 }
1487
1488 return IRQ_HANDLED;
1489}
1490
1491/* mempool_refill() was proposed but unfortunately wasn't accepted
1492 * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
1493 * Cheap workaround.
1494 */
1495static void mempool_refill(mempool_t *pool)
1496{
1497 while (pool->curr_nr < pool->min_nr)
1498 mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
1499}
1500
1501static void mlx5_ib_eq_pf_action(struct work_struct *work)
1502{
1503 struct mlx5_ib_pf_eq *eq =
1504 container_of(work, struct mlx5_ib_pf_eq, work);
1505
1506 mempool_refill(eq->pool);
1507
1508 spin_lock_irq(&eq->lock);
1509 mlx5_ib_eq_pf_process(eq);
1510 spin_unlock_irq(&eq->lock);
1511}
1512
1513enum {
1514 MLX5_IB_NUM_PF_EQE = 0x1000,
1515 MLX5_IB_NUM_PF_DRAIN = 64,
1516};
1517
1518int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
1519{
1520 struct mlx5_eq_param param = {};
1521 int err = 0;
1522
1523 mutex_lock(&dev->odp_eq_mutex);
1524 if (eq->core)
1525 goto unlock;
1526 INIT_WORK(&eq->work, mlx5_ib_eq_pf_action);
1527 spin_lock_init(&eq->lock);
1528 eq->dev = dev;
1529
1530 eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN,
1531 sizeof(struct mlx5_pagefault));
1532 if (!eq->pool) {
1533 err = -ENOMEM;
1534 goto unlock;
1535 }
1536
1537 eq->wq = alloc_workqueue("mlx5_ib_page_fault",
1538 WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM,
1539 MLX5_NUM_CMD_EQE);
1540 if (!eq->wq) {
1541 err = -ENOMEM;
1542 goto err_mempool;
1543 }
1544
1545 eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
1546 param = (struct mlx5_eq_param) {
1547 .nent = MLX5_IB_NUM_PF_EQE,
1548 };
1549 param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
1550 eq->core = mlx5_eq_create_generic(dev->mdev, ¶m);
1551 if (IS_ERR(eq->core)) {
1552 err = PTR_ERR(eq->core);
1553 goto err_wq;
1554 }
1555 err = mlx5_eq_enable(dev->mdev, eq->core, &eq->irq_nb);
1556 if (err) {
1557 mlx5_ib_err(dev, "failed to enable odp EQ %d\n", err);
1558 goto err_eq;
1559 }
1560
1561 mutex_unlock(&dev->odp_eq_mutex);
1562 return 0;
1563err_eq:
1564 mlx5_eq_destroy_generic(dev->mdev, eq->core);
1565err_wq:
1566 eq->core = NULL;
1567 destroy_workqueue(eq->wq);
1568err_mempool:
1569 mempool_destroy(eq->pool);
1570unlock:
1571 mutex_unlock(&dev->odp_eq_mutex);
1572 return err;
1573}
1574
1575static int
1576mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
1577{
1578 int err;
1579
1580 if (!eq->core)
1581 return 0;
1582 mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb);
1583 err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
1584 cancel_work_sync(&eq->work);
1585 destroy_workqueue(eq->wq);
1586 mempool_destroy(eq->pool);
1587
1588 return err;
1589}
1590
1591int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev)
1592{
1593 struct mlx5r_cache_rb_key rb_key = {
1594 .access_mode = MLX5_MKC_ACCESS_MODE_KSM,
1595 .ndescs = mlx5_imr_ksm_entries,
1596 };
1597 struct mlx5_cache_ent *ent;
1598
1599 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
1600 return 0;
1601
1602 ent = mlx5r_cache_create_ent_locked(dev, rb_key, true);
1603 if (IS_ERR(ent))
1604 return PTR_ERR(ent);
1605
1606 return 0;
1607}
1608
1609static const struct ib_device_ops mlx5_ib_dev_odp_ops = {
1610 .advise_mr = mlx5_ib_advise_mr,
1611};
1612
1613int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
1614{
1615 internal_fill_odp_caps(dev);
1616
1617 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
1618 return 0;
1619
1620 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
1621
1622 mutex_init(&dev->odp_eq_mutex);
1623 return 0;
1624}
1625
1626void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
1627{
1628 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
1629 return;
1630
1631 mlx5_ib_odp_destroy_eq(dev, &dev->odp_pf_eq);
1632}
1633
1634int mlx5_ib_odp_init(void)
1635{
1636 mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) -
1637 MLX5_IMR_MTT_BITS);
1638
1639 return 0;
1640}
1641
1642struct prefetch_mr_work {
1643 struct work_struct work;
1644 u32 pf_flags;
1645 u32 num_sge;
1646 struct {
1647 u64 io_virt;
1648 struct mlx5_ib_mr *mr;
1649 size_t length;
1650 } frags[];
1651};
1652
1653static void destroy_prefetch_work(struct prefetch_mr_work *work)
1654{
1655 u32 i;
1656
1657 for (i = 0; i < work->num_sge; ++i)
1658 mlx5r_deref_odp_mkey(&work->frags[i].mr->mmkey);
1659
1660 kvfree(work);
1661}
1662
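/*
 * Look up the MR referenced by a prefetch SGE and verify that it belongs to
 * the given PD and supports the requested access; on success a reference is
 * taken on the mkey which the caller must release.
 */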
1663static struct mlx5_ib_mr *
1664get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
1665 u32 lkey)
1666{
1667 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1668 struct mlx5_ib_mr *mr = NULL;
1669 struct mlx5_ib_mkey *mmkey;
1670
1671 xa_lock(&dev->odp_mkeys);
1672 mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
1673 if (!mmkey || mmkey->key != lkey) {
1674 mr = ERR_PTR(-ENOENT);
1675 goto end;
1676 }
1677 if (mmkey->type != MLX5_MKEY_MR) {
1678 mr = ERR_PTR(-EINVAL);
1679 goto end;
1680 }
1681
1682 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
1683
1684 if (mr->ibmr.pd != pd) {
1685 mr = ERR_PTR(-EPERM);
1686 goto end;
1687 }
1688
1689 /* prefetch with write-access must be supported by the MR */
1690 if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
1691 !mr->umem->writable) {
1692 mr = ERR_PTR(-EPERM);
1693 goto end;
1694 }
1695
1696 refcount_inc(&mmkey->usecount);
1697end:
1698 xa_unlock(&dev->odp_mkeys);
1699 return mr;
1700}
1701
1702static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
1703{
1704 struct prefetch_mr_work *work =
1705 container_of(w, struct prefetch_mr_work, work);
1706 u32 bytes_mapped = 0;
1707 int ret;
1708 u32 i;
1709
1710 /* We rely on IB/core to queue this work only when num_sge != 0. */
1711 WARN_ON(!work->num_sge);
1712 for (i = 0; i < work->num_sge; ++i) {
1713 ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
1714 work->frags[i].length, &bytes_mapped,
1715 work->pf_flags);
1716 if (ret <= 0)
1717 continue;
1718 mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret);
1719 }
1720
1721 destroy_prefetch_work(work);
1722}
1723
1724static int init_prefetch_work(struct ib_pd *pd,
1725 enum ib_uverbs_advise_mr_advice advice,
1726 u32 pf_flags, struct prefetch_mr_work *work,
1727 struct ib_sge *sg_list, u32 num_sge)
1728{
1729 u32 i;
1730
1731 INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);
1732 work->pf_flags = pf_flags;
1733
1734 for (i = 0; i < num_sge; ++i) {
1735 struct mlx5_ib_mr *mr;
1736
1737 mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
1738 if (IS_ERR(mr)) {
1739 work->num_sge = i;
1740 return PTR_ERR(mr);
1741 }
1742 work->frags[i].io_virt = sg_list[i].addr;
1743 work->frags[i].length = sg_list[i].length;
1744 work->frags[i].mr = mr;
1745 }
1746 work->num_sge = num_sge;
1747 return 0;
1748}
1749
1750static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
1751 enum ib_uverbs_advise_mr_advice advice,
1752 u32 pf_flags, struct ib_sge *sg_list,
1753 u32 num_sge)
1754{
1755 u32 bytes_mapped = 0;
1756 int ret = 0;
1757 u32 i;
1758
1759 for (i = 0; i < num_sge; ++i) {
1760 struct mlx5_ib_mr *mr;
1761
1762 mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
1763 if (IS_ERR(mr))
1764 return PTR_ERR(mr);
1765 ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
1766 &bytes_mapped, pf_flags);
1767 if (ret < 0) {
1768 mlx5r_deref_odp_mkey(&mr->mmkey);
1769 return ret;
1770 }
1771 mlx5_update_odp_stats(mr, prefetch, ret);
1772 mlx5r_deref_odp_mkey(&mr->mmkey);
1773 }
1774
1775 return 0;
1776}
1777
1778int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
1779 enum ib_uverbs_advise_mr_advice advice,
1780 u32 flags, struct ib_sge *sg_list, u32 num_sge)
1781{
1782 u32 pf_flags = 0;
1783 struct prefetch_mr_work *work;
1784 int rc;
1785
1786 if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
1787 pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
1788
1789 if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
1790 pf_flags |= MLX5_PF_FLAGS_SNAPSHOT;
1791
1792 if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH)
1793 return mlx5_ib_prefetch_sg_list(pd, advice, pf_flags, sg_list,
1794 num_sge);
1795
1796 work = kvzalloc(struct_size(work, frags, num_sge), GFP_KERNEL);
1797 if (!work)
1798 return -ENOMEM;
1799
1800 rc = init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge);
1801 if (rc) {
1802 destroy_prefetch_work(work);
1803 return rc;
1804 }
1805 queue_work(system_unbound_wq, &work->work);
1806 return 0;
1807}
1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <rdma/ib_umem_odp.h>
34#include <linux/kernel.h>
35#include <linux/dma-buf.h>
36#include <linux/dma-resv.h>
37
38#include "mlx5_ib.h"
39#include "cmd.h"
40#include "umr.h"
41#include "qp.h"
42
43#include <linux/mlx5/eq.h>
44
45/* Contains the details of a pagefault. */
46struct mlx5_pagefault {
47 u32 bytes_committed;
48 u32 token;
49 u8 event_subtype;
50 u8 type;
51 union {
52 /* Initiator or send message responder pagefault details. */
53 struct {
54 /* Received packet size, only valid for responders. */
55 u32 packet_size;
56 /*
57 * Number of resource holding WQE, depends on type.
58 */
59 u32 wq_num;
60 /*
61 * WQE index. Refers to either the send queue or
62 * receive queue, according to event_subtype.
63 */
64 u16 wqe_index;
65 } wqe;
66 /* RDMA responder pagefault details */
67 struct {
68 u32 r_key;
69 /*
70 * Received packet size, minimal size page fault
71 * resolution required for forward progress.
72 */
73 u32 packet_size;
74 u32 rdma_op_len;
75 u64 rdma_va;
76 } rdma;
77 };
78
79 struct mlx5_ib_pf_eq *eq;
80 struct work_struct work;
81};
82
83#define MAX_PREFETCH_LEN (4*1024*1024U)
84
85/* Timeout in ms to wait for an active mmu notifier to complete when handling
86 * a pagefault. */
87#define MMU_NOTIFIER_TIMEOUT 1000
88
89#define MLX5_IMR_MTT_BITS (30 - PAGE_SHIFT)
90#define MLX5_IMR_MTT_SHIFT (MLX5_IMR_MTT_BITS + PAGE_SHIFT)
91#define MLX5_IMR_MTT_ENTRIES BIT_ULL(MLX5_IMR_MTT_BITS)
92#define MLX5_IMR_MTT_SIZE BIT_ULL(MLX5_IMR_MTT_SHIFT)
93#define MLX5_IMR_MTT_MASK (~(MLX5_IMR_MTT_SIZE - 1))
94
95#define MLX5_KSM_PAGE_SHIFT MLX5_IMR_MTT_SHIFT
96
97static u64 mlx5_imr_ksm_entries;
98
99static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
100 struct mlx5_ib_mr *imr, int flags)
101{
102 struct mlx5_klm *end = pklm + nentries;
103
104 if (flags & MLX5_IB_UPD_XLT_ZAP) {
105 for (; pklm != end; pklm++, idx++) {
106 pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
107 pklm->key = cpu_to_be32(mr_to_mdev(imr)->null_mkey);
108 pklm->va = 0;
109 }
110 return;
111 }
112
113 /*
114 * The locking here is pretty subtle. Ideally the implicit_children
115 * xarray would be protected by the umem_mutex, however that is not
116 * possible. Instead this uses a weaker update-then-lock pattern:
117 *
118 * xa_store()
119 * mutex_lock(umem_mutex)
120 * mlx5r_umr_update_xlt()
121 * mutex_unlock(umem_mutex)
122 * destroy lkey
123 *
124 * ie any change the xarray must be followed by the locked update_xlt
125 * before destroying.
126 *
127 * The umem_mutex provides the acquire/release semantic needed to make
128 * the xa_store() visible to a racing thread.
129 */
130 lockdep_assert_held(&to_ib_umem_odp(imr->umem)->umem_mutex);
131
132 for (; pklm != end; pklm++, idx++) {
133 struct mlx5_ib_mr *mtt = xa_load(&imr->implicit_children, idx);
134
135 pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
136 if (mtt) {
137 pklm->key = cpu_to_be32(mtt->ibmr.lkey);
138 pklm->va = cpu_to_be64(idx * MLX5_IMR_MTT_SIZE);
139 } else {
140 pklm->key = cpu_to_be32(mr_to_mdev(imr)->null_mkey);
141 pklm->va = 0;
142 }
143 }
144}
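
/*
 * Illustrative sketch, not driver code: after populate_klm() the indirect
 * mkey's translation table looks roughly like
 *
 *   idx 0: { bcount = 1G, key = child[0]->ibmr.lkey, va = 0      }
 *   idx 1: { bcount = 1G, key = null_mkey,           va = 0      }  <- no child yet
 *   idx 2: { bcount = 1G, key = child[2]->ibmr.lkey, va = 2 * 1G }
 *
 * where "child[i]" is shorthand for the mlx5_ib_mr stored at index i of
 * imr->implicit_children. Accesses that hit a null_mkey entry raise a fresh
 * page fault, which then allocates the missing child MR.
 */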
145
146static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
147{
148 u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
149
150 if (umem_dma & ODP_READ_ALLOWED_BIT)
151 mtt_entry |= MLX5_IB_MTT_READ;
152 if (umem_dma & ODP_WRITE_ALLOWED_BIT)
153 mtt_entry |= MLX5_IB_MTT_WRITE;
154
155 return mtt_entry;
156}
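
/*
 * Illustrative example, not driver code: for a writable page whose entry in
 * odp->dma_list is (0x1234000 | ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT),
 * umem_dma_to_mtt() returns
 *
 *   0x1234000 | MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE
 *
 * i.e. the same page address with the software-only ODP permission bits
 * replaced by the permission bits the HW expects in an MTT entry.
 */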
157
158static void populate_mtt(__be64 *pas, size_t idx, size_t nentries,
159 struct mlx5_ib_mr *mr, int flags)
160{
161 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
162 dma_addr_t pa;
163 size_t i;
164
165 if (flags & MLX5_IB_UPD_XLT_ZAP)
166 return;
167
168 for (i = 0; i < nentries; i++) {
169 pa = odp->dma_list[idx + i];
170 pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
171 }
172}
173
174void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
175 struct mlx5_ib_mr *mr, int flags)
176{
177 if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
178 populate_klm(xlt, idx, nentries, mr, flags);
179 } else {
180 populate_mtt(xlt, idx, nentries, mr, flags);
181 }
182}
183
184/*
185 * This must be called after the mr has been removed from implicit_children.
186 * NOTE: The MR does not necessarily have to be
187 * empty here; parallel page faults could have raced with the free process and
188 * added pages to it.
189 */
190static void free_implicit_child_mr_work(struct work_struct *work)
191{
192 struct mlx5_ib_mr *mr =
193 container_of(work, struct mlx5_ib_mr, odp_destroy.work);
194 struct mlx5_ib_mr *imr = mr->parent;
195 struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
196 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
197
198 mlx5r_deref_wait_odp_mkey(&mr->mmkey);
199
200 mutex_lock(&odp_imr->umem_mutex);
201 mlx5r_umr_update_xlt(mr->parent,
202 ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT, 1, 0,
203 MLX5_IB_UPD_XLT_INDIRECT | MLX5_IB_UPD_XLT_ATOMIC);
204 mutex_unlock(&odp_imr->umem_mutex);
205 mlx5_ib_dereg_mr(&mr->ibmr, NULL);
206
207 mlx5r_deref_odp_mkey(&imr->mmkey);
208}
209
210static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
211{
212 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
213 unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
214 struct mlx5_ib_mr *imr = mr->parent;
215
216 if (!refcount_inc_not_zero(&imr->mmkey.usecount))
217 return;
218
219 xa_erase(&imr->implicit_children, idx);
220
221	/* Freeing an MR is a sleeping operation, so bounce to a work queue */
222 INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
223 queue_work(system_unbound_wq, &mr->odp_destroy.work);
224}
225
226static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
227 const struct mmu_notifier_range *range,
228 unsigned long cur_seq)
229{
230 struct ib_umem_odp *umem_odp =
231 container_of(mni, struct ib_umem_odp, notifier);
232 struct mlx5_ib_mr *mr;
233 const u64 umr_block_mask = MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT - 1;
234 u64 idx = 0, blk_start_idx = 0;
235 u64 invalidations = 0;
236 unsigned long start;
237 unsigned long end;
238 int in_block = 0;
239 u64 addr;
240
241 if (!mmu_notifier_range_blockable(range))
242 return false;
243
244 mutex_lock(&umem_odp->umem_mutex);
245 mmu_interval_set_seq(mni, cur_seq);
246 /*
247	 * If npages is zero then umem_odp->private may not be set up yet. This
248 * does not complete until after the first page is mapped for DMA.
249 */
250 if (!umem_odp->npages)
251 goto out;
252 mr = umem_odp->private;
253
254 start = max_t(u64, ib_umem_start(umem_odp), range->start);
255 end = min_t(u64, ib_umem_end(umem_odp), range->end);
256
257 /*
258 * Iteration one - zap the HW's MTTs. The notifiers_count ensures that
259 * while we are doing the invalidation, no page fault will attempt to
260	 * overwrite the same MTTs. Concurrent invalidations might race us,
261 * but they will write 0s as well, so no difference in the end result.
262 */
263 for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
264 idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
265 /*
266 * Strive to write the MTTs in chunks, but avoid overwriting
267		 * non-existing MTTs. The heuristic here can be improved to
268		 * estimate the cost of another UMR vs. the cost of a bigger
269 * UMR.
270 */
271 if (umem_odp->dma_list[idx] &
272 (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
273 if (!in_block) {
274 blk_start_idx = idx;
275 in_block = 1;
276 }
277
278 /* Count page invalidations */
279 invalidations += idx - blk_start_idx + 1;
280 } else {
281 u64 umr_offset = idx & umr_block_mask;
282
283 if (in_block && umr_offset == 0) {
284 mlx5r_umr_update_xlt(mr, blk_start_idx,
285 idx - blk_start_idx, 0,
286 MLX5_IB_UPD_XLT_ZAP |
287 MLX5_IB_UPD_XLT_ATOMIC);
288 in_block = 0;
289 }
290 }
291 }
292 if (in_block)
293 mlx5r_umr_update_xlt(mr, blk_start_idx,
294 idx - blk_start_idx + 1, 0,
295 MLX5_IB_UPD_XLT_ZAP |
296 MLX5_IB_UPD_XLT_ATOMIC);
297
298 mlx5_update_odp_stats(mr, invalidations, invalidations);
299
300 /*
301 * We are now sure that the device will not access the
302 * memory. We can safely unmap it, and mark it as dirty if
303 * needed.
304 */
305
306 ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
307
308 if (unlikely(!umem_odp->npages && mr->parent))
309 destroy_unused_implicit_child_mr(mr);
310out:
311 mutex_unlock(&umem_odp->umem_mutex);
312 return true;
313}
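
/*
 * Illustrative walk-through of the zap batching above, assuming
 * MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT == 16 purely for the example: a run of
 * present entries at idx 0..9 followed by absent entries opens a block at
 * idx 0 and keeps scanning; the block is only flushed with a single ZAP UMR
 * once an absent entry lands on a 16-aligned index (idx 16), covering
 * entries 0..15 in one mlx5r_umr_update_xlt() call instead of many small
 * ones. A block still open when the loop ends is flushed by the trailing
 * call after the loop.
 */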
314
315const struct mmu_interval_notifier_ops mlx5_mn_ops = {
316 .invalidate = mlx5_ib_invalidate_range,
317};
318
319static void internal_fill_odp_caps(struct mlx5_ib_dev *dev)
320{
321 struct ib_odp_caps *caps = &dev->odp_caps;
322
323 memset(caps, 0, sizeof(*caps));
324
325 if (!MLX5_CAP_GEN(dev->mdev, pg) || !mlx5r_umr_can_load_pas(dev, 0))
326 return;
327
328 caps->general_caps = IB_ODP_SUPPORT;
329
330 if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
331 dev->odp_max_size = U64_MAX;
332 else
333 dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);
334
335 if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
336 caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
337
338 if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.srq_receive))
339 caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
340
341 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
342 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
343
344 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
345 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
346
347 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
348 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
349
350 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
351 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
352
353 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
354 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
355
356 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.srq_receive))
357 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
358
359 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.send))
360 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SEND;
361
362 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.receive))
363 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_RECV;
364
365 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.write))
366 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_WRITE;
367
368 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.read))
369 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_READ;
370
371 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.atomic))
372 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
373
374 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.srq_receive))
375 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
376
377 if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
378 MLX5_CAP_GEN(dev->mdev, null_mkey) &&
379 MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
380 !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
381 caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
382}
383
384static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
385 struct mlx5_pagefault *pfault,
386 int error)
387{
388 int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
389 pfault->wqe.wq_num : pfault->token;
390 u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {};
391 int err;
392
393 MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);
394 MLX5_SET(page_fault_resume_in, in, page_fault_type, pfault->type);
395 MLX5_SET(page_fault_resume_in, in, token, pfault->token);
396 MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
397 MLX5_SET(page_fault_resume_in, in, error, !!error);
398
399 err = mlx5_cmd_exec_in(dev->mdev, page_fault_resume, in);
400 if (err)
401 mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x err %d\n",
402 wq_num, err);
403}
404
405static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
406 unsigned long idx)
407{
408 struct mlx5_ib_dev *dev = mr_to_mdev(imr);
409 struct ib_umem_odp *odp;
410 struct mlx5_ib_mr *mr;
411 struct mlx5_ib_mr *ret;
412 int err;
413
414 odp = ib_umem_odp_alloc_child(to_ib_umem_odp(imr->umem),
415 idx * MLX5_IMR_MTT_SIZE,
416 MLX5_IMR_MTT_SIZE, &mlx5_mn_ops);
417 if (IS_ERR(odp))
418 return ERR_CAST(odp);
419
420 mr = mlx5_mr_cache_alloc(dev, &dev->cache.ent[MLX5_IMR_MTT_CACHE_ENTRY],
421 imr->access_flags);
422 if (IS_ERR(mr)) {
423 ib_umem_odp_release(odp);
424 return mr;
425 }
426
427 mr->access_flags = imr->access_flags;
428 mr->ibmr.pd = imr->ibmr.pd;
429 mr->ibmr.device = &mr_to_mdev(imr)->ib_dev;
430 mr->umem = &odp->umem;
431 mr->ibmr.lkey = mr->mmkey.key;
432 mr->ibmr.rkey = mr->mmkey.key;
433 mr->ibmr.iova = idx * MLX5_IMR_MTT_SIZE;
434 mr->parent = imr;
435 odp->private = mr;
436
437 /*
438	 * First refcount is owned by the xarray and the second refcount
439 * is returned to the caller.
440 */
441 refcount_set(&mr->mmkey.usecount, 2);
442
443 err = mlx5r_umr_update_xlt(mr, 0,
444 MLX5_IMR_MTT_ENTRIES,
445 PAGE_SHIFT,
446 MLX5_IB_UPD_XLT_ZAP |
447 MLX5_IB_UPD_XLT_ENABLE);
448 if (err) {
449 ret = ERR_PTR(err);
450 goto out_mr;
451 }
452
453 xa_lock(&imr->implicit_children);
454 ret = __xa_cmpxchg(&imr->implicit_children, idx, NULL, mr,
455 GFP_KERNEL);
456 if (unlikely(ret)) {
457 if (xa_is_err(ret)) {
458 ret = ERR_PTR(xa_err(ret));
459 goto out_lock;
460 }
461 /*
462 * Another thread beat us to creating the child mr, use
463 * theirs.
464 */
465 refcount_inc(&ret->mmkey.usecount);
466 goto out_lock;
467 }
468 xa_unlock(&imr->implicit_children);
469
470 mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr);
471 return mr;
472
473out_lock:
474 xa_unlock(&imr->implicit_children);
475out_mr:
476 mlx5_ib_dereg_mr(&mr->ibmr, NULL);
477 return ret;
478}
479
480struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
481 int access_flags)
482{
483 struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
484 struct ib_umem_odp *umem_odp;
485 struct mlx5_ib_mr *imr;
486 int err;
487
488 if (!mlx5r_umr_can_load_pas(dev, MLX5_IMR_MTT_ENTRIES * PAGE_SIZE))
489 return ERR_PTR(-EOPNOTSUPP);
490
491 umem_odp = ib_umem_odp_alloc_implicit(&dev->ib_dev, access_flags);
492 if (IS_ERR(umem_odp))
493 return ERR_CAST(umem_odp);
494
495 imr = mlx5_mr_cache_alloc(dev,
496 &dev->cache.ent[MLX5_IMR_KSM_CACHE_ENTRY],
497 access_flags);
498 if (IS_ERR(imr)) {
499 ib_umem_odp_release(umem_odp);
500 return imr;
501 }
502
503 imr->access_flags = access_flags;
504 imr->ibmr.pd = &pd->ibpd;
505 imr->ibmr.iova = 0;
506 imr->umem = &umem_odp->umem;
507 imr->ibmr.lkey = imr->mmkey.key;
508 imr->ibmr.rkey = imr->mmkey.key;
509 imr->ibmr.device = &dev->ib_dev;
510 imr->is_odp_implicit = true;
511 xa_init(&imr->implicit_children);
512
513 err = mlx5r_umr_update_xlt(imr, 0,
514 mlx5_imr_ksm_entries,
515 MLX5_KSM_PAGE_SHIFT,
516 MLX5_IB_UPD_XLT_INDIRECT |
517 MLX5_IB_UPD_XLT_ZAP |
518 MLX5_IB_UPD_XLT_ENABLE);
519 if (err)
520 goto out_mr;
521
522 err = mlx5r_store_odp_mkey(dev, &imr->mmkey);
523 if (err)
524 goto out_mr;
525
526 mlx5_ib_dbg(dev, "key %x mr %p\n", imr->mmkey.key, imr);
527 return imr;
528out_mr:
529 mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
530 mlx5_ib_dereg_mr(&imr->ibmr, NULL);
531 return ERR_PTR(err);
532}
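
/*
 * For reference, userspace typically reaches this function by registering a
 * whole-address-space ODP MR. A minimal, hedged libibverbs sketch:
 *
 *   struct ibv_mr *mr = ibv_reg_mr(pd, NULL, SIZE_MAX,
 *                                  IBV_ACCESS_ON_DEMAND |
 *                                  IBV_ACCESS_LOCAL_WRITE);
 *
 * On a 64-bit system addr == NULL and length == SIZE_MAX make the core layer
 * request an implicit ODP registration, which ends up in
 * mlx5_ib_alloc_implicit_mr() above.
 */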
533
534void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr)
535{
536 struct mlx5_ib_mr *mtt;
537 unsigned long idx;
538
539 /*
540 * If this is an implicit MR it is already invalidated so we can just
541 * delete the children mkeys.
542 */
543 xa_for_each(&mr->implicit_children, idx, mtt) {
544 xa_erase(&mr->implicit_children, idx);
545 mlx5_ib_dereg_mr(&mtt->ibmr, NULL);
546 }
547}
548
549#define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
550#define MLX5_PF_FLAGS_SNAPSHOT BIT(2)
551#define MLX5_PF_FLAGS_ENABLE BIT(3)
552static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
553 u64 user_va, size_t bcnt, u32 *bytes_mapped,
554 u32 flags)
555{
556 int page_shift, ret, np;
557 bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
558 u64 access_mask;
559 u64 start_idx;
560 bool fault = !(flags & MLX5_PF_FLAGS_SNAPSHOT);
561 u32 xlt_flags = MLX5_IB_UPD_XLT_ATOMIC;
562
563 if (flags & MLX5_PF_FLAGS_ENABLE)
564 xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
565
566 page_shift = odp->page_shift;
567 start_idx = (user_va - ib_umem_start(odp)) >> page_shift;
568 access_mask = ODP_READ_ALLOWED_BIT;
569
570 if (odp->umem.writable && !downgrade)
571 access_mask |= ODP_WRITE_ALLOWED_BIT;
572
573 np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask, fault);
574 if (np < 0)
575 return np;
576
577 /*
578 * No need to check whether the MTTs really belong to this MR, since
579 * ib_umem_odp_map_dma_and_lock already checks this.
580 */
581 ret = mlx5r_umr_update_xlt(mr, start_idx, np, page_shift, xlt_flags);
582 mutex_unlock(&odp->umem_mutex);
583
584 if (ret < 0) {
585 if (ret != -EAGAIN)
586 mlx5_ib_err(mr_to_mdev(mr),
587 "Failed to update mkey page tables\n");
588 goto out;
589 }
590
591 if (bytes_mapped) {
592 u32 new_mappings = (np << page_shift) -
593 (user_va - round_down(user_va, 1 << page_shift));
594
595 *bytes_mapped += min_t(u32, new_mappings, bcnt);
596 }
597
598 return np << (page_shift - PAGE_SHIFT);
599
600out:
601 return ret;
602}
603
604static int pagefault_implicit_mr(struct mlx5_ib_mr *imr,
605 struct ib_umem_odp *odp_imr, u64 user_va,
606 size_t bcnt, u32 *bytes_mapped, u32 flags)
607{
608 unsigned long end_idx = (user_va + bcnt - 1) >> MLX5_IMR_MTT_SHIFT;
609 unsigned long upd_start_idx = end_idx + 1;
610 unsigned long upd_len = 0;
611 unsigned long npages = 0;
612 int err;
613 int ret;
614
615 if (unlikely(user_va >= mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE ||
616 mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE - user_va < bcnt))
617 return -EFAULT;
618
619 /* Fault each child mr that intersects with our interval. */
620 while (bcnt) {
621 unsigned long idx = user_va >> MLX5_IMR_MTT_SHIFT;
622 struct ib_umem_odp *umem_odp;
623 struct mlx5_ib_mr *mtt;
624 u64 len;
625
626 xa_lock(&imr->implicit_children);
627 mtt = xa_load(&imr->implicit_children, idx);
628 if (unlikely(!mtt)) {
629 xa_unlock(&imr->implicit_children);
630 mtt = implicit_get_child_mr(imr, idx);
631 if (IS_ERR(mtt)) {
632 ret = PTR_ERR(mtt);
633 goto out;
634 }
635 upd_start_idx = min(upd_start_idx, idx);
636 upd_len = idx - upd_start_idx + 1;
637 } else {
638 refcount_inc(&mtt->mmkey.usecount);
639 xa_unlock(&imr->implicit_children);
640 }
641
642 umem_odp = to_ib_umem_odp(mtt->umem);
643 len = min_t(u64, user_va + bcnt, ib_umem_end(umem_odp)) -
644 user_va;
645
646 ret = pagefault_real_mr(mtt, umem_odp, user_va, len,
647 bytes_mapped, flags);
648
649 mlx5r_deref_odp_mkey(&mtt->mmkey);
650
651 if (ret < 0)
652 goto out;
653 user_va += len;
654 bcnt -= len;
655 npages += ret;
656 }
657
658 ret = npages;
659
660 /*
661 * Any time the implicit_children are changed we must perform an
662 * update of the xlt before exiting to ensure the HW and the
663	 * implicit_children remain synchronized.
664 */
665out:
666 if (likely(!upd_len))
667 return ret;
668
669 /*
670	 * Notice this is not strictly ordered: the KSM is updated after
671	 * the implicit_children is updated, so a parallel page fault could
672	 * see an MR that is not yet visible in the KSM. This is similar to a
673	 * parallel page fault seeing an MR that is being concurrently removed
674 * from the KSM. Both of these improbable situations are resolved
675 * safely by resuming the HW and then taking another page fault. The
676 * next pagefault handler will see the new information.
677 */
678 mutex_lock(&odp_imr->umem_mutex);
679 err = mlx5r_umr_update_xlt(imr, upd_start_idx, upd_len, 0,
680 MLX5_IB_UPD_XLT_INDIRECT |
681 MLX5_IB_UPD_XLT_ATOMIC);
682 mutex_unlock(&odp_imr->umem_mutex);
683 if (err) {
684 mlx5_ib_err(mr_to_mdev(imr), "Failed to update PAS\n");
685 return err;
686 }
687 return ret;
688}
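
/*
 * Worked example for the loop above, assuming the 1 GiB child MRs described
 * at the top of this file: a fault for user_va = 0x3fff0000 with bcnt =
 * 0x20000 straddles the boundary between child index 0 and child index 1,
 * so the loop runs twice:
 *
 *   pass 1: idx = 0, len = 0x40000000 - 0x3fff0000 = 0x10000
 *   pass 2: idx = 1, len = 0x10000
 *
 * and each pass faults the corresponding child MR via pagefault_real_mr().
 */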
689
690static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
691 u32 *bytes_mapped, u32 flags)
692{
693 struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
694 u32 xlt_flags = 0;
695 int err;
696 unsigned int page_size;
697
698 if (flags & MLX5_PF_FLAGS_ENABLE)
699 xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
700
701 dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
702 err = ib_umem_dmabuf_map_pages(umem_dmabuf);
703 if (err) {
704 dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
705 return err;
706 }
707
708 page_size = mlx5_umem_find_best_pgsz(&umem_dmabuf->umem, mkc,
709 log_page_size, 0,
710 umem_dmabuf->umem.iova);
711 if (unlikely(page_size < PAGE_SIZE)) {
712 ib_umem_dmabuf_unmap_pages(umem_dmabuf);
713 err = -EINVAL;
714 } else {
715 err = mlx5r_umr_update_mr_pas(mr, xlt_flags);
716 }
717 dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
718
719 if (err)
720 return err;
721
722 if (bytes_mapped)
723 *bytes_mapped += bcnt;
724
725 return ib_umem_num_pages(mr->umem);
726}
727
728/*
729 * Returns:
730 * -EFAULT: The io_virt/bcnt range is not within the MR, it covers pages that are
731 * not accessible, or the MR is no longer valid.
732 * -EAGAIN/-ENOMEM: The operation should be retried
733 *
734 * -EINVAL/others: General internal malfunction
735 * >0: Number of pages mapped
736 */
737static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
738 u32 *bytes_mapped, u32 flags)
739{
740 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
741
742 if (unlikely(io_virt < mr->ibmr.iova))
743 return -EFAULT;
744
745 if (mr->umem->is_dmabuf)
746 return pagefault_dmabuf_mr(mr, bcnt, bytes_mapped, flags);
747
748 if (!odp->is_implicit_odp) {
749 u64 user_va;
750
751 if (check_add_overflow(io_virt - mr->ibmr.iova,
752 (u64)odp->umem.address, &user_va))
753 return -EFAULT;
754 if (unlikely(user_va >= ib_umem_end(odp) ||
755 ib_umem_end(odp) - user_va < bcnt))
756 return -EFAULT;
757 return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped,
758 flags);
759 }
760 return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped,
761 flags);
762}
763
764int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
765{
766 int ret;
767
768 ret = pagefault_real_mr(mr, to_ib_umem_odp(mr->umem), mr->umem->address,
769 mr->umem->length, NULL,
770 MLX5_PF_FLAGS_SNAPSHOT | MLX5_PF_FLAGS_ENABLE);
771 return ret >= 0 ? 0 : ret;
772}
773
774int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
775{
776 int ret;
777
778 ret = pagefault_dmabuf_mr(mr, mr->umem->length, NULL,
779 MLX5_PF_FLAGS_ENABLE);
780
781 return ret >= 0 ? 0 : ret;
782}
783
784struct pf_frame {
785 struct pf_frame *next;
786 u32 key;
787 u64 io_virt;
788 size_t bcnt;
789 int depth;
790};
791
792static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key)
793{
794 if (!mmkey)
795 return false;
796 if (mmkey->type == MLX5_MKEY_MW ||
797 mmkey->type == MLX5_MKEY_INDIRECT_DEVX)
798 return mlx5_base_mkey(mmkey->key) == mlx5_base_mkey(key);
799 return mmkey->key == key;
800}
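
/*
 * Background for the special case above: the low 8 bits of an mkey are the
 * consumer-owned variant byte and the upper 24 bits index the mkey table;
 * mlx5_base_mkey() masks the variant off. Memory windows and indirect DEVX
 * mkeys can be re-armed with a new variant without the odp_mkeys xarray
 * being updated, so only their base bits are compared, while regular MRs
 * must match the full key.
 */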
801
802/*
803 * Handle a single data segment in a page-fault WQE or RDMA region.
804 *
805 * Returns number of OS pages retrieved on success. The caller may continue to
806 * the next data segment.
807 * Can return the following error codes:
808 * -EAGAIN to designate a temporary error. The caller will abort handling the
809 * page fault and resolve it.
810 * -EFAULT when there's an error mapping the requested pages. The caller will
811 * abort the page fault handling.
812 */
813static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
814 struct ib_pd *pd, u32 key,
815 u64 io_virt, size_t bcnt,
816 u32 *bytes_committed,
817 u32 *bytes_mapped)
818{
819 int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0;
820 struct pf_frame *head = NULL, *frame;
821 struct mlx5_ib_mkey *mmkey;
822 struct mlx5_ib_mr *mr;
823 struct mlx5_klm *pklm;
824 u32 *out = NULL;
825 size_t offset;
826
827 io_virt += *bytes_committed;
828 bcnt -= *bytes_committed;
829
830next_mr:
831 xa_lock(&dev->odp_mkeys);
832 mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));
833 if (!mmkey) {
834 xa_unlock(&dev->odp_mkeys);
835 mlx5_ib_dbg(
836 dev,
837 "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
838 key);
839 if (bytes_mapped)
840 *bytes_mapped += bcnt;
841 /*
842 * The user could specify a SGL with multiple lkeys and only
843 * some of them are ODP. Treat the non-ODP ones as fully
844 * faulted.
845 */
846 ret = 0;
847 goto end;
848 }
849 refcount_inc(&mmkey->usecount);
850 xa_unlock(&dev->odp_mkeys);
851
852 if (!mkey_is_eq(mmkey, key)) {
853 mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
854 ret = -EFAULT;
855 goto end;
856 }
857
858 switch (mmkey->type) {
859 case MLX5_MKEY_MR:
860 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
861
862 ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0);
863 if (ret < 0)
864 goto end;
865
866 mlx5_update_odp_stats(mr, faults, ret);
867
868 npages += ret;
869 ret = 0;
870 break;
871
872 case MLX5_MKEY_MW:
873 case MLX5_MKEY_INDIRECT_DEVX:
874 if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
875 mlx5_ib_dbg(dev, "indirection level exceeded\n");
876 ret = -EFAULT;
877 goto end;
878 }
879
880 outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
881 sizeof(*pklm) * (mmkey->ndescs - 2);
882
883 if (outlen > cur_outlen) {
884 kfree(out);
885 out = kzalloc(outlen, GFP_KERNEL);
886 if (!out) {
887 ret = -ENOMEM;
888 goto end;
889 }
890 cur_outlen = outlen;
891 }
892
893 pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out,
894 bsf0_klm0_pas_mtt0_1);
895
896 ret = mlx5_core_query_mkey(dev->mdev, mmkey->key, out, outlen);
897 if (ret)
898 goto end;
899
900 offset = io_virt - MLX5_GET64(query_mkey_out, out,
901 memory_key_mkey_entry.start_addr);
902
903 for (i = 0; bcnt && i < mmkey->ndescs; i++, pklm++) {
904 if (offset >= be32_to_cpu(pklm->bcount)) {
905 offset -= be32_to_cpu(pklm->bcount);
906 continue;
907 }
908
909 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
910 if (!frame) {
911 ret = -ENOMEM;
912 goto end;
913 }
914
915 frame->key = be32_to_cpu(pklm->key);
916 frame->io_virt = be64_to_cpu(pklm->va) + offset;
917 frame->bcnt = min_t(size_t, bcnt,
918 be32_to_cpu(pklm->bcount) - offset);
919 frame->depth = depth + 1;
920 frame->next = head;
921 head = frame;
922
923 bcnt -= frame->bcnt;
924 offset = 0;
925 }
926 break;
927
928 default:
929 mlx5_ib_dbg(dev, "wrong mkey type %d\n", mmkey->type);
930 ret = -EFAULT;
931 goto end;
932 }
933
934 if (head) {
935 frame = head;
936 head = frame->next;
937
938 key = frame->key;
939 io_virt = frame->io_virt;
940 bcnt = frame->bcnt;
941 depth = frame->depth;
942 kfree(frame);
943
944 mlx5r_deref_odp_mkey(mmkey);
945 goto next_mr;
946 }
947
948end:
949 if (mmkey)
950 mlx5r_deref_odp_mkey(mmkey);
951 while (head) {
952 frame = head;
953 head = frame->next;
954 kfree(frame);
955 }
956 kfree(out);
957
958 *bytes_committed = 0;
959 return ret ? ret : npages;
960}
961
962/*
963 * Parse a series of data segments for page fault handling.
964 *
965 * @dev: Pointer to mlx5 IB device
966 * @pfault: contains page fault information.
967 * @wqe: points at the first data segment in the WQE.
968 * @wqe_end: points after the end of the WQE.
969 * @bytes_mapped: receives the number of bytes that the function was able to
970 * map. This allows the caller to decide intelligently whether
971 * enough memory was mapped to resolve the page fault
972 * successfully (e.g. enough for the next MTU, or the entire
973 * WQE).
974 * @total_wqe_bytes: receives the total data size of this WQE in bytes (minus
975 * the committed bytes).
976 * @receive_queue: true if the WQE is from a receive queue (used to detect the end of the sg list)
977 *
978 * Returns the number of pages loaded if positive, zero for an empty WQE, or a
979 * negative error code.
980 */
981static int pagefault_data_segments(struct mlx5_ib_dev *dev,
982 struct mlx5_pagefault *pfault,
983 void *wqe,
984 void *wqe_end, u32 *bytes_mapped,
985 u32 *total_wqe_bytes, bool receive_queue)
986{
987 int ret = 0, npages = 0;
988 u64 io_virt;
989 u32 key;
990 u32 byte_count;
991 size_t bcnt;
992 int inline_segment;
993
994 if (bytes_mapped)
995 *bytes_mapped = 0;
996 if (total_wqe_bytes)
997 *total_wqe_bytes = 0;
998
999 while (wqe < wqe_end) {
1000 struct mlx5_wqe_data_seg *dseg = wqe;
1001
1002 io_virt = be64_to_cpu(dseg->addr);
1003 key = be32_to_cpu(dseg->lkey);
1004 byte_count = be32_to_cpu(dseg->byte_count);
1005 inline_segment = !!(byte_count & MLX5_INLINE_SEG);
1006 bcnt = byte_count & ~MLX5_INLINE_SEG;
1007
1008 if (inline_segment) {
1009 bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
1010 wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
1011 16);
1012 } else {
1013 wqe += sizeof(*dseg);
1014 }
1015
1016 /* receive WQE end of sg list. */
1017 if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&
1018 io_virt == 0)
1019 break;
1020
1021 if (!inline_segment && total_wqe_bytes) {
1022 *total_wqe_bytes += bcnt - min_t(size_t, bcnt,
1023 pfault->bytes_committed);
1024 }
1025
1026 /* A zero length data segment designates a length of 2GB. */
1027 if (bcnt == 0)
1028 bcnt = 1U << 31;
1029
1030 if (inline_segment || bcnt <= pfault->bytes_committed) {
1031 pfault->bytes_committed -=
1032 min_t(size_t, bcnt,
1033 pfault->bytes_committed);
1034 continue;
1035 }
1036
1037 ret = pagefault_single_data_segment(dev, NULL, key,
1038 io_virt, bcnt,
1039 &pfault->bytes_committed,
1040 bytes_mapped);
1041 if (ret < 0)
1042 break;
1043 npages += ret;
1044 }
1045
1046 return ret < 0 ? ret : npages;
1047}
1048
1049/*
1050 * Parse initiator WQE. Advances the wqe pointer to point at the
1051 * scatter-gather list, and sets wqe_end to the end of the WQE.
1052 */
1053static int mlx5_ib_mr_initiator_pfault_handler(
1054 struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
1055 struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
1056{
1057 struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
1058 u16 wqe_index = pfault->wqe.wqe_index;
1059 struct mlx5_base_av *av;
1060 unsigned ds, opcode;
1061 u32 qpn = qp->trans_qp.base.mqp.qpn;
1062
1063 ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
1064 if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
1065 mlx5_ib_err(dev, "Unable to read the complete WQE. ds = 0x%x, ret = 0x%x\n",
1066 ds, wqe_length);
1067 return -EFAULT;
1068 }
1069
1070 if (ds == 0) {
1071 mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
1072 wqe_index, qpn);
1073 return -EFAULT;
1074 }
1075
1076 *wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
1077 *wqe += sizeof(*ctrl);
1078
1079 opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
1080 MLX5_WQE_CTRL_OPCODE_MASK;
1081
1082 if (qp->type == IB_QPT_XRC_INI)
1083 *wqe += sizeof(struct mlx5_wqe_xrc_seg);
1084
1085 if (qp->type == IB_QPT_UD || qp->type == MLX5_IB_QPT_DCI) {
1086 av = *wqe;
1087 if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
1088 *wqe += sizeof(struct mlx5_av);
1089 else
1090 *wqe += sizeof(struct mlx5_base_av);
1091 }
1092
1093 switch (opcode) {
1094 case MLX5_OPCODE_RDMA_WRITE:
1095 case MLX5_OPCODE_RDMA_WRITE_IMM:
1096 case MLX5_OPCODE_RDMA_READ:
1097 *wqe += sizeof(struct mlx5_wqe_raddr_seg);
1098 break;
1099 case MLX5_OPCODE_ATOMIC_CS:
1100 case MLX5_OPCODE_ATOMIC_FA:
1101 *wqe += sizeof(struct mlx5_wqe_raddr_seg);
1102 *wqe += sizeof(struct mlx5_wqe_atomic_seg);
1103 break;
1104 }
1105
1106 return 0;
1107}
1108
1109/*
1110 * Parse responder WQE and set wqe_end to the end of the WQE.
1111 */
1112static int mlx5_ib_mr_responder_pfault_handler_srq(struct mlx5_ib_dev *dev,
1113 struct mlx5_ib_srq *srq,
1114 void **wqe, void **wqe_end,
1115 int wqe_length)
1116{
1117 int wqe_size = 1 << srq->msrq.wqe_shift;
1118
1119 if (wqe_size > wqe_length) {
1120 mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
1121 return -EFAULT;
1122 }
1123
1124 *wqe_end = *wqe + wqe_size;
1125 *wqe += sizeof(struct mlx5_wqe_srq_next_seg);
1126
1127 return 0;
1128}
1129
1130static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
1131 struct mlx5_ib_qp *qp,
1132 void *wqe, void **wqe_end,
1133 int wqe_length)
1134{
1135 struct mlx5_ib_wq *wq = &qp->rq;
1136 int wqe_size = 1 << wq->wqe_shift;
1137
1138 if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) {
1139 mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
1140 return -EFAULT;
1141 }
1142
1143 if (wqe_size > wqe_length) {
1144 mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
1145 return -EFAULT;
1146 }
1147
1148 *wqe_end = wqe + wqe_size;
1149
1150 return 0;
1151}
1152
1153static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
1154 u32 wq_num, int pf_type)
1155{
1156 struct mlx5_core_rsc_common *common = NULL;
1157 struct mlx5_core_srq *srq;
1158
1159 switch (pf_type) {
1160 case MLX5_WQE_PF_TYPE_RMP:
1161 srq = mlx5_cmd_get_srq(dev, wq_num);
1162 if (srq)
1163 common = &srq->common;
1164 break;
1165 case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE:
1166 case MLX5_WQE_PF_TYPE_RESP:
1167 case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC:
1168 common = mlx5_core_res_hold(dev, wq_num, MLX5_RES_QP);
1169 break;
1170 default:
1171 break;
1172 }
1173
1174 return common;
1175}
1176
1177static inline struct mlx5_ib_qp *res_to_qp(struct mlx5_core_rsc_common *res)
1178{
1179 struct mlx5_core_qp *mqp = (struct mlx5_core_qp *)res;
1180
1181 return to_mibqp(mqp);
1182}
1183
1184static inline struct mlx5_ib_srq *res_to_srq(struct mlx5_core_rsc_common *res)
1185{
1186 struct mlx5_core_srq *msrq =
1187 container_of(res, struct mlx5_core_srq, common);
1188
1189 return to_mibsrq(msrq);
1190}
1191
1192static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
1193 struct mlx5_pagefault *pfault)
1194{
1195 bool sq = pfault->type & MLX5_PFAULT_REQUESTOR;
1196 u16 wqe_index = pfault->wqe.wqe_index;
1197 void *wqe, *wqe_start = NULL, *wqe_end = NULL;
1198 u32 bytes_mapped, total_wqe_bytes;
1199 struct mlx5_core_rsc_common *res;
1200 int resume_with_error = 1;
1201 struct mlx5_ib_qp *qp;
1202 size_t bytes_copied;
1203 int ret = 0;
1204
1205 res = odp_get_rsc(dev, pfault->wqe.wq_num, pfault->type);
1206 if (!res) {
1207 mlx5_ib_dbg(dev, "wqe page fault for missing resource %d\n", pfault->wqe.wq_num);
1208 return;
1209 }
1210
1211 if (res->res != MLX5_RES_QP && res->res != MLX5_RES_SRQ &&
1212 res->res != MLX5_RES_XSRQ) {
1213 mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n",
1214 pfault->type);
1215 goto resolve_page_fault;
1216 }
1217
1218 wqe_start = (void *)__get_free_page(GFP_KERNEL);
1219 if (!wqe_start) {
1220 mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
1221 goto resolve_page_fault;
1222 }
1223
1224 wqe = wqe_start;
1225 qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL;
1226 if (qp && sq) {
1227 ret = mlx5_ib_read_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
1228 &bytes_copied);
1229 if (ret)
1230 goto read_user;
1231 ret = mlx5_ib_mr_initiator_pfault_handler(
1232 dev, pfault, qp, &wqe, &wqe_end, bytes_copied);
1233 } else if (qp && !sq) {
1234 ret = mlx5_ib_read_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
1235 &bytes_copied);
1236 if (ret)
1237 goto read_user;
1238 ret = mlx5_ib_mr_responder_pfault_handler_rq(
1239 dev, qp, wqe, &wqe_end, bytes_copied);
1240 } else if (!qp) {
1241 struct mlx5_ib_srq *srq = res_to_srq(res);
1242
1243 ret = mlx5_ib_read_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
1244 &bytes_copied);
1245 if (ret)
1246 goto read_user;
1247 ret = mlx5_ib_mr_responder_pfault_handler_srq(
1248 dev, srq, &wqe, &wqe_end, bytes_copied);
1249 }
1250
1251 if (ret < 0 || wqe >= wqe_end)
1252 goto resolve_page_fault;
1253
1254 ret = pagefault_data_segments(dev, pfault, wqe, wqe_end, &bytes_mapped,
1255 &total_wqe_bytes, !sq);
1256 if (ret == -EAGAIN)
1257 goto out;
1258
1259 if (ret < 0 || total_wqe_bytes > bytes_mapped)
1260 goto resolve_page_fault;
1261
1262out:
1263 ret = 0;
1264 resume_with_error = 0;
1265
1266read_user:
1267 if (ret)
1268 mlx5_ib_err(
1269 dev,
1270 "Failed reading a WQE following page fault, error %d, wqe_index %x, qpn %x\n",
1271 ret, wqe_index, pfault->token);
1272
1273resolve_page_fault:
1274 mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
1275 mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
1276 pfault->wqe.wq_num, resume_with_error,
1277 pfault->type);
1278 mlx5_core_res_put(res);
1279 free_page((unsigned long)wqe_start);
1280}
1281
1282static int pages_in_range(u64 address, u32 length)
1283{
1284 return (ALIGN(address + length, PAGE_SIZE) -
1285 (address & PAGE_MASK)) >> PAGE_SHIFT;
1286}
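
/*
 * Worked example, assuming 4K pages: pages_in_range(0x1ff0, 0x20) covers the
 * last 16 bytes of one page and the first 16 bytes of the next, so
 *
 *   ALIGN(0x1ff0 + 0x20, PAGE_SIZE) = 0x3000
 *   0x1ff0 & PAGE_MASK              = 0x1000
 *   (0x3000 - 0x1000) >> PAGE_SHIFT = 2 pages
 */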
1287
1288static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
1289 struct mlx5_pagefault *pfault)
1290{
1291 u64 address;
1292 u32 length;
1293 u32 prefetch_len = pfault->bytes_committed;
1294 int prefetch_activated = 0;
1295 u32 rkey = pfault->rdma.r_key;
1296 int ret;
1297
1298	/* The RDMA responder handler resolves the page fault in two parts.
1299 * First it brings the necessary pages for the current packet
1300 * (and uses the pfault context), and then (after resuming the QP)
1301 * prefetches more pages. The second operation cannot use the pfault
1302 * context and therefore uses the dummy_pfault context allocated on
1303 * the stack */
1304 pfault->rdma.rdma_va += pfault->bytes_committed;
1305 pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,
1306 pfault->rdma.rdma_op_len);
1307 pfault->bytes_committed = 0;
1308
1309 address = pfault->rdma.rdma_va;
1310 length = pfault->rdma.rdma_op_len;
1311
1312 /* For some operations, the hardware cannot tell the exact message
1313 * length, and in those cases it reports zero. Use prefetch
1314 * logic. */
1315 if (length == 0) {
1316 prefetch_activated = 1;
1317 length = pfault->rdma.packet_size;
1318 prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
1319 }
1320
1321 ret = pagefault_single_data_segment(dev, NULL, rkey, address, length,
1322 &pfault->bytes_committed, NULL);
1323 if (ret == -EAGAIN) {
1324 /* We're racing with an invalidation, don't prefetch */
1325 prefetch_activated = 0;
1326 } else if (ret < 0 || pages_in_range(address, length) > ret) {
1327 mlx5_ib_page_fault_resume(dev, pfault, 1);
1328 if (ret != -ENOENT)
1329 mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%x, type: 0x%x\n",
1330 ret, pfault->token, pfault->type);
1331 return;
1332 }
1333
1334 mlx5_ib_page_fault_resume(dev, pfault, 0);
1335 mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n",
1336 pfault->token, pfault->type,
1337 prefetch_activated);
1338
1339 /* At this point, there might be a new pagefault already arriving in
1340	 * the eq; switch to the dummy pagefault for the rest of the
1341 * processing. We're still OK with the objects being alive as the
1342 * work-queue is being fenced. */
1343
1344 if (prefetch_activated) {
1345 u32 bytes_committed = 0;
1346
1347 ret = pagefault_single_data_segment(dev, NULL, rkey, address,
1348 prefetch_len,
1349 &bytes_committed, NULL);
1350 if (ret < 0 && ret != -EAGAIN) {
1351 mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
1352 ret, pfault->token, address, prefetch_len);
1353 }
1354 }
1355}
1356
1357static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault)
1358{
1359 u8 event_subtype = pfault->event_subtype;
1360
1361 switch (event_subtype) {
1362 case MLX5_PFAULT_SUBTYPE_WQE:
1363 mlx5_ib_mr_wqe_pfault_handler(dev, pfault);
1364 break;
1365 case MLX5_PFAULT_SUBTYPE_RDMA:
1366 mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
1367 break;
1368 default:
1369 mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n",
1370 event_subtype);
1371 mlx5_ib_page_fault_resume(dev, pfault, 1);
1372 }
1373}
1374
1375static void mlx5_ib_eqe_pf_action(struct work_struct *work)
1376{
1377 struct mlx5_pagefault *pfault = container_of(work,
1378 struct mlx5_pagefault,
1379 work);
1380 struct mlx5_ib_pf_eq *eq = pfault->eq;
1381
1382 mlx5_ib_pfault(eq->dev, pfault);
1383 mempool_free(pfault, eq->pool);
1384}
1385
1386static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
1387{
1388 struct mlx5_eqe_page_fault *pf_eqe;
1389 struct mlx5_pagefault *pfault;
1390 struct mlx5_eqe *eqe;
1391 int cc = 0;
1392
1393 while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) {
1394 pfault = mempool_alloc(eq->pool, GFP_ATOMIC);
1395 if (!pfault) {
1396 schedule_work(&eq->work);
1397 break;
1398 }
1399
1400 pf_eqe = &eqe->data.page_fault;
1401 pfault->event_subtype = eqe->sub_type;
1402 pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);
1403
1404 mlx5_ib_dbg(eq->dev,
1405 "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
1406 eqe->sub_type, pfault->bytes_committed);
1407
1408 switch (eqe->sub_type) {
1409 case MLX5_PFAULT_SUBTYPE_RDMA:
1410 /* RDMA based event */
1411 pfault->type =
1412 be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
1413 pfault->token =
1414 be32_to_cpu(pf_eqe->rdma.pftype_token) &
1415 MLX5_24BIT_MASK;
1416 pfault->rdma.r_key =
1417 be32_to_cpu(pf_eqe->rdma.r_key);
1418 pfault->rdma.packet_size =
1419 be16_to_cpu(pf_eqe->rdma.packet_length);
1420 pfault->rdma.rdma_op_len =
1421 be32_to_cpu(pf_eqe->rdma.rdma_op_len);
1422 pfault->rdma.rdma_va =
1423 be64_to_cpu(pf_eqe->rdma.rdma_va);
1424 mlx5_ib_dbg(eq->dev,
1425 "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
1426 pfault->type, pfault->token,
1427 pfault->rdma.r_key);
1428 mlx5_ib_dbg(eq->dev,
1429 "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
1430 pfault->rdma.rdma_op_len,
1431 pfault->rdma.rdma_va);
1432 break;
1433
1434 case MLX5_PFAULT_SUBTYPE_WQE:
1435 /* WQE based event */
1436 pfault->type =
1437 (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
1438 pfault->token =
1439 be32_to_cpu(pf_eqe->wqe.token);
1440 pfault->wqe.wq_num =
1441 be32_to_cpu(pf_eqe->wqe.pftype_wq) &
1442 MLX5_24BIT_MASK;
1443 pfault->wqe.wqe_index =
1444 be16_to_cpu(pf_eqe->wqe.wqe_index);
1445 pfault->wqe.packet_size =
1446 be16_to_cpu(pf_eqe->wqe.packet_length);
1447 mlx5_ib_dbg(eq->dev,
1448 "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
1449 pfault->type, pfault->token,
1450 pfault->wqe.wq_num,
1451 pfault->wqe.wqe_index);
1452 break;
1453
1454 default:
1455 mlx5_ib_warn(eq->dev,
1456 "Unsupported page fault event sub-type: 0x%02hhx\n",
1457 eqe->sub_type);
1458 /* Unsupported page faults should still be
1459 * resolved by the page fault handler
1460 */
1461 }
1462
1463 pfault->eq = eq;
1464 INIT_WORK(&pfault->work, mlx5_ib_eqe_pf_action);
1465 queue_work(eq->wq, &pfault->work);
1466
1467 cc = mlx5_eq_update_cc(eq->core, ++cc);
1468 }
1469
1470 mlx5_eq_update_ci(eq->core, cc, 1);
1471}
1472
1473static int mlx5_ib_eq_pf_int(struct notifier_block *nb, unsigned long type,
1474 void *data)
1475{
1476 struct mlx5_ib_pf_eq *eq =
1477 container_of(nb, struct mlx5_ib_pf_eq, irq_nb);
1478 unsigned long flags;
1479
1480 if (spin_trylock_irqsave(&eq->lock, flags)) {
1481 mlx5_ib_eq_pf_process(eq);
1482 spin_unlock_irqrestore(&eq->lock, flags);
1483 } else {
1484 schedule_work(&eq->work);
1485 }
1486
1487 return IRQ_HANDLED;
1488}
1489
1490/* mempool_refill() was proposed but unfortunately wasn't accepted
1491 * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
1492 * Cheap workaround.
1493 */
1494static void mempool_refill(mempool_t *pool)
1495{
1496 while (pool->curr_nr < pool->min_nr)
1497 mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
1498}
1499
1500static void mlx5_ib_eq_pf_action(struct work_struct *work)
1501{
1502 struct mlx5_ib_pf_eq *eq =
1503 container_of(work, struct mlx5_ib_pf_eq, work);
1504
1505 mempool_refill(eq->pool);
1506
1507 spin_lock_irq(&eq->lock);
1508 mlx5_ib_eq_pf_process(eq);
1509 spin_unlock_irq(&eq->lock);
1510}
1511
1512enum {
1513 MLX5_IB_NUM_PF_EQE = 0x1000,
1514 MLX5_IB_NUM_PF_DRAIN = 64,
1515};
1516
1517int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
1518{
1519 struct mlx5_eq_param param = {};
1520 int err = 0;
1521
1522 mutex_lock(&dev->odp_eq_mutex);
1523 if (eq->core)
1524 goto unlock;
1525 INIT_WORK(&eq->work, mlx5_ib_eq_pf_action);
1526 spin_lock_init(&eq->lock);
1527 eq->dev = dev;
1528
1529 eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN,
1530 sizeof(struct mlx5_pagefault));
1531 if (!eq->pool) {
1532 err = -ENOMEM;
1533 goto unlock;
1534 }
1535
1536 eq->wq = alloc_workqueue("mlx5_ib_page_fault",
1537 WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM,
1538 MLX5_NUM_CMD_EQE);
1539 if (!eq->wq) {
1540 err = -ENOMEM;
1541 goto err_mempool;
1542 }
1543
1544 eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
1545 param = (struct mlx5_eq_param) {
1546 .nent = MLX5_IB_NUM_PF_EQE,
1547 };
1548 param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
1549	eq->core = mlx5_eq_create_generic(dev->mdev, &param);
1550 if (IS_ERR(eq->core)) {
1551 err = PTR_ERR(eq->core);
1552 goto err_wq;
1553 }
1554 err = mlx5_eq_enable(dev->mdev, eq->core, &eq->irq_nb);
1555 if (err) {
1556 mlx5_ib_err(dev, "failed to enable odp EQ %d\n", err);
1557 goto err_eq;
1558 }
1559
1560 mutex_unlock(&dev->odp_eq_mutex);
1561 return 0;
1562err_eq:
1563 mlx5_eq_destroy_generic(dev->mdev, eq->core);
1564err_wq:
1565 eq->core = NULL;
1566 destroy_workqueue(eq->wq);
1567err_mempool:
1568 mempool_destroy(eq->pool);
1569unlock:
1570 mutex_unlock(&dev->odp_eq_mutex);
1571 return err;
1572}
1573
1574static int
1575mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
1576{
1577 int err;
1578
1579 if (!eq->core)
1580 return 0;
1581 mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb);
1582 err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
1583 cancel_work_sync(&eq->work);
1584 destroy_workqueue(eq->wq);
1585 mempool_destroy(eq->pool);
1586
1587 return err;
1588}
1589
1590void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent)
1591{
1592 if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
1593 return;
1594
1595 switch (ent->order - 2) {
1596 case MLX5_IMR_MTT_CACHE_ENTRY:
1597 ent->page = PAGE_SHIFT;
1598 ent->ndescs = MLX5_IMR_MTT_ENTRIES;
1599 ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
1600 ent->limit = 0;
1601 break;
1602
1603 case MLX5_IMR_KSM_CACHE_ENTRY:
1604 ent->page = MLX5_KSM_PAGE_SHIFT;
1605 ent->ndescs = mlx5_imr_ksm_entries;
1606 ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM;
1607 ent->limit = 0;
1608 break;
1609 }
1610}
1611
1612static const struct ib_device_ops mlx5_ib_dev_odp_ops = {
1613 .advise_mr = mlx5_ib_advise_mr,
1614};
1615
1616int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
1617{
1618 int ret = 0;
1619
1620 internal_fill_odp_caps(dev);
1621
1622 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
1623 return ret;
1624
1625 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
1626
1627 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
1628 ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
1629 if (ret) {
1630 mlx5_ib_err(dev, "Error getting null_mkey %d\n", ret);
1631 return ret;
1632 }
1633 }
1634
1635 mutex_init(&dev->odp_eq_mutex);
1636 return ret;
1637}
1638
1639void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
1640{
1641 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
1642 return;
1643
1644 mlx5_ib_odp_destroy_eq(dev, &dev->odp_pf_eq);
1645}
1646
1647int mlx5_ib_odp_init(void)
1648{
1649 mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) -
1650 MLX5_IMR_MTT_BITS);
1651
1652 return 0;
1653}
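
/*
 * Worked example of the computation above, assuming x86-64 with 4-level
 * paging (TASK_SIZE just below 2^47, PAGE_SHIFT == 12):
 *
 *   get_order(TASK_SIZE)  = 47 - 12 = 35
 *   MLX5_IMR_MTT_BITS     = 18
 *   mlx5_imr_ksm_entries  = 2^(35 - 18) = 131072 KSM entries
 *
 * With each entry backing a 1 GiB child MR, a single implicit MR can span
 * the full 128 TiB user address space.
 */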
1654
1655struct prefetch_mr_work {
1656 struct work_struct work;
1657 u32 pf_flags;
1658 u32 num_sge;
1659 struct {
1660 u64 io_virt;
1661 struct mlx5_ib_mr *mr;
1662 size_t length;
1663 } frags[];
1664};
1665
1666static void destroy_prefetch_work(struct prefetch_mr_work *work)
1667{
1668 u32 i;
1669
1670 for (i = 0; i < work->num_sge; ++i)
1671 mlx5r_deref_odp_mkey(&work->frags[i].mr->mmkey);
1672
1673 kvfree(work);
1674}
1675
1676static struct mlx5_ib_mr *
1677get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
1678 u32 lkey)
1679{
1680 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1681 struct mlx5_ib_mr *mr = NULL;
1682 struct mlx5_ib_mkey *mmkey;
1683
1684 xa_lock(&dev->odp_mkeys);
1685 mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
1686 if (!mmkey || mmkey->key != lkey) {
1687 mr = ERR_PTR(-ENOENT);
1688 goto end;
1689 }
1690 if (mmkey->type != MLX5_MKEY_MR) {
1691 mr = ERR_PTR(-EINVAL);
1692 goto end;
1693 }
1694
1695 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
1696
1697 if (mr->ibmr.pd != pd) {
1698 mr = ERR_PTR(-EPERM);
1699 goto end;
1700 }
1701
1702 /* prefetch with write-access must be supported by the MR */
1703 if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
1704 !mr->umem->writable) {
1705 mr = ERR_PTR(-EPERM);
1706 goto end;
1707 }
1708
1709 refcount_inc(&mmkey->usecount);
1710end:
1711 xa_unlock(&dev->odp_mkeys);
1712 return mr;
1713}
1714
1715static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
1716{
1717 struct prefetch_mr_work *work =
1718 container_of(w, struct prefetch_mr_work, work);
1719 u32 bytes_mapped = 0;
1720 int ret;
1721 u32 i;
1722
1723	/* We rely on IB/core to execute the work only when num_sge != 0. */
1724 WARN_ON(!work->num_sge);
1725 for (i = 0; i < work->num_sge; ++i) {
1726 ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
1727 work->frags[i].length, &bytes_mapped,
1728 work->pf_flags);
1729 if (ret <= 0)
1730 continue;
1731 mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret);
1732 }
1733
1734 destroy_prefetch_work(work);
1735}
1736
1737static int init_prefetch_work(struct ib_pd *pd,
1738 enum ib_uverbs_advise_mr_advice advice,
1739 u32 pf_flags, struct prefetch_mr_work *work,
1740 struct ib_sge *sg_list, u32 num_sge)
1741{
1742 u32 i;
1743
1744 INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);
1745 work->pf_flags = pf_flags;
1746
1747 for (i = 0; i < num_sge; ++i) {
1748 struct mlx5_ib_mr *mr;
1749
1750 mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
1751 if (IS_ERR(mr)) {
1752 work->num_sge = i;
1753 return PTR_ERR(mr);
1754 }
1755 work->frags[i].io_virt = sg_list[i].addr;
1756 work->frags[i].length = sg_list[i].length;
1757 work->frags[i].mr = mr;
1758 }
1759 work->num_sge = num_sge;
1760 return 0;
1761}
1762
1763static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
1764 enum ib_uverbs_advise_mr_advice advice,
1765 u32 pf_flags, struct ib_sge *sg_list,
1766 u32 num_sge)
1767{
1768 u32 bytes_mapped = 0;
1769 int ret = 0;
1770 u32 i;
1771
1772 for (i = 0; i < num_sge; ++i) {
1773 struct mlx5_ib_mr *mr;
1774
1775 mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
1776 if (IS_ERR(mr))
1777 return PTR_ERR(mr);
1778 ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
1779 &bytes_mapped, pf_flags);
1780 if (ret < 0) {
1781 mlx5r_deref_odp_mkey(&mr->mmkey);
1782 return ret;
1783 }
1784 mlx5_update_odp_stats(mr, prefetch, ret);
1785 mlx5r_deref_odp_mkey(&mr->mmkey);
1786 }
1787
1788 return 0;
1789}
1790
1791int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
1792 enum ib_uverbs_advise_mr_advice advice,
1793 u32 flags, struct ib_sge *sg_list, u32 num_sge)
1794{
1795 u32 pf_flags = 0;
1796 struct prefetch_mr_work *work;
1797 int rc;
1798
1799 if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
1800 pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
1801
1802 if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
1803 pf_flags |= MLX5_PF_FLAGS_SNAPSHOT;
1804
1805 if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH)
1806 return mlx5_ib_prefetch_sg_list(pd, advice, pf_flags, sg_list,
1807 num_sge);
1808
1809 work = kvzalloc(struct_size(work, frags, num_sge), GFP_KERNEL);
1810 if (!work)
1811 return -ENOMEM;
1812
1813 rc = init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge);
1814 if (rc) {
1815 destroy_prefetch_work(work);
1816 return rc;
1817 }
1818 queue_work(system_unbound_wq, &work->work);
1819 return 0;
1820}
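
/*
 * For reference, the uverbs path into mlx5_ib_advise_mr_prefetch() is
 * ibv_advise_mr(). A minimal, hedged userspace sketch that synchronously
 * prefetches one ODP range for writing:
 *
 *   struct ibv_sge sge = {
 *           .lkey   = mr->lkey,
 *           .addr   = (uintptr_t)buf,
 *           .length = len,
 *   };
 *   int err = ibv_advise_mr(pd, IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE,
 *                           IB_UVERBS_ADVISE_MR_FLAG_FLUSH, &sge, 1);
 *
 * With the FLUSH flag the prefetch runs synchronously through
 * mlx5_ib_prefetch_sg_list(); without it the request is queued as a
 * prefetch_mr_work item and handled asynchronously.
 */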