/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */

#ifndef _MLX5_IB_UMR_H
#define _MLX5_IB_UMR_H

#include "mlx5_ib.h"

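/*
 * Cap on the translation size a single UMR can cover on devices without
 * the extended translation offset capability; see mlx5r_umr_can_load_pas().
 */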
#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)

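/*
 * XLT data carried by a UMR is counted in 16-byte octowords and must be
 * padded out to a 64-byte boundary; see mlx5r_umr_get_xlt_octo().
 */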
#define MLX5_IB_UMR_OCTOWORD 16
#define MLX5_IB_UMR_XLT_ALIGNMENT 64

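/*
 * Set up and tear down the dedicated resources (PD, CQ and QP) that the
 * driver posts UMR work requests on.
 */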
int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev);
void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev);

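/*
 * True if UMR can be used to load or update the PAS (physical address list)
 * of an MR of the given length on this device.
 */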
static inline bool mlx5r_umr_can_load_pas(struct mlx5_ib_dev *dev,
					  size_t length)
{
	/*
	 * umr_check_mkey_mask() rejects MLX5_MKEY_MASK_PAGE_SIZE which is
	 * always set if MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (aka
	 * MLX5_IB_UPD_XLT_ADDR and MLX5_IB_UPD_XLT_ENABLE) is set. Thus, a
	 * mkey can never be enabled without this capability. Simplify this
	 * weird quirky hardware by just saying it can't use PAS lists with
	 * UMR at all.
	 */
	if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
		return false;

	/*
	 * length is the size of the MR in bytes when mlx5_ib_update_xlt() is
	 * used.
	 */
	if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
	    length >= MLX5_MAX_UMR_PAGES * PAGE_SIZE)
		return false;
	return true;
}

/*
 * True if an existing MR can be reconfigured to new access_flags using UMR.
 * Older HW cannot use UMR to update certain elements of the MKC. See
 * get_umr_update_access_mask() and umr_check_mkey_mask().
 */
static inline bool mlx5r_umr_can_reconfig(struct mlx5_ib_dev *dev,
					  unsigned int current_access_flags,
					  unsigned int target_access_flags)
{
	unsigned int diffs = current_access_flags ^ target_access_flags;

	if ((diffs & IB_ACCESS_REMOTE_ATOMIC) &&
	    MLX5_CAP_GEN(dev->mdev, atomic) &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
		return false;

	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
		return false;

	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
	    (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) ||
	     MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_pci_enabled)) &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
		return false;

	return true;
}

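/*
 * Convert a byte count of XLT data into the number of 16-byte octowords
 * advertised in the UMR control segment, rounded up to the mandatory
 * 64-byte alignment.
 */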
static inline u64 mlx5r_umr_get_xlt_octo(u64 bytes)
{
	return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
	       MLX5_IB_UMR_OCTOWORD;
}

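/*
 * Tracks one synchronously posted UMR: the completion handler records the
 * work completion status and signals done so the poster can wait on it.
 */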
struct mlx5r_umr_context {
	struct ib_cqe cqe;
	enum ib_wc_status status;
	struct completion done;
};

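/* The segments composing an inline UMR WQE, in the order they are posted. */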
struct mlx5r_umr_wqe {
	struct mlx5_wqe_umr_ctrl_seg ctrl_seg;
	struct mlx5_mkey_seg mkey_seg;
	struct mlx5_wqe_data_seg data_seg;
};

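/*
 * UMR operations on an existing mkey: revoke it, re-register it to a new
 * PD or set of access flags, or rewrite its PAS/XLT translation entries.
 */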
int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr);
int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
			      int access_flags);
int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags);
int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
			 int page_shift, int flags);

#endif /* _MLX5_IB_UMR_H */