// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
// Copyright (c) 2019 Hisilicon Limited.

#include <rdma/rdma_cm.h>
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>
#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"

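/*
 * restrack callbacks: the hns_roce_fill_res_*_entry() helpers below nest
 * a driver-private attribute table (RDMA_NLDEV_ATTR_DRIVER) into the
 * rdma-netlink response for a tracked resource, while the *_entry_raw()
 * variants dump the raw hardware context (RDMA_NLDEV_ATTR_RES_RAW).
 * Userspace typically views these through the iproute2 rdma tool, e.g.
 * "rdma res show cq -dd" for the driver table and "rdma res show cq -r"
 * for the raw context.
 */

/* Export CQ bookkeeping: depth, consumer index, CQE size, arm sequence. */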
int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
{
        struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
        struct nlattr *table_attr;

        table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
        if (!table_attr)
                return -EMSGSIZE;

        if (rdma_nl_put_driver_u32(msg, "cq_depth", hr_cq->cq_depth))
                goto err;

        if (rdma_nl_put_driver_u32(msg, "cons_index", hr_cq->cons_index))
                goto err;

        if (rdma_nl_put_driver_u32(msg, "cqe_size", hr_cq->cqe_size))
                goto err;

        if (rdma_nl_put_driver_u32(msg, "arm_sn", hr_cq->arm_sn))
                goto err;

        nla_nest_end(msg, table_attr);

        return 0;

err:
        nla_nest_cancel(msg, table_attr);

        return -EMSGSIZE;
}

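/*
 * Dump the raw CQ context (CQC) read back from hardware. query_cqc is a
 * HW-specific op and may be absent, hence the NULL check; a query failure
 * is reported to the caller as -EINVAL.
 */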
int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
        struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
        struct hns_roce_v2_cq_context context;
        int ret;

        if (!hr_dev->hw->query_cqc)
                return -EINVAL;

        ret = hr_dev->hw->query_cqc(hr_dev, hr_cq->cqn, &context);
        if (ret)
                return -EINVAL;

        ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

        return ret;
}

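/*
 * Export work queue geometry: SQ/RQ depth and max scatter-gather entries,
 * plus the extended-SGE count. Values are emitted in hex via
 * rdma_nl_put_driver_u32_hex(), unlike the decimal CQ attributes above.
 */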
int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp)
{
        struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
        struct nlattr *table_attr;

        table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
        if (!table_attr)
                return -EMSGSIZE;

        if (rdma_nl_put_driver_u32_hex(msg, "sq_wqe_cnt", hr_qp->sq.wqe_cnt))
                goto err;

        if (rdma_nl_put_driver_u32_hex(msg, "sq_max_gs", hr_qp->sq.max_gs))
                goto err;

        if (rdma_nl_put_driver_u32_hex(msg, "rq_wqe_cnt", hr_qp->rq.wqe_cnt))
                goto err;

        if (rdma_nl_put_driver_u32_hex(msg, "rq_max_gs", hr_qp->rq.max_gs))
                goto err;

        if (rdma_nl_put_driver_u32_hex(msg, "ext_sge_sge_cnt", hr_qp->sge.sge_cnt))
                goto err;

        nla_nest_end(msg, table_attr);

        return 0;

err:
        nla_nest_cancel(msg, table_attr);

        return -EMSGSIZE;
}

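/* Dump the raw QP context (QPC) for this QPN, guarded like the CQ path. */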
int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
        struct hns_roce_v2_qp_context context;
        int ret;

        if (!hr_dev->hw->query_qpc)
                return -EINVAL;

        ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context);
        if (ret)
                return -EINVAL;

        ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

        return ret;
}

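/*
 * Export the MR's page-table (PBL) layout: the multi-hop level and the
 * base-address/buffer page-size shifts from the MTR HEM configuration.
 */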
int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr)
{
        struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
        struct nlattr *table_attr;

        table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
        if (!table_attr)
                return -EMSGSIZE;

        if (rdma_nl_put_driver_u32_hex(msg, "pbl_hop_num", hr_mr->pbl_hop_num))
                goto err;

        if (rdma_nl_put_driver_u32_hex(msg, "ba_pg_shift",
                                       hr_mr->pbl_mtr.hem_cfg.ba_pg_shift))
                goto err;

        if (rdma_nl_put_driver_u32_hex(msg, "buf_pg_shift",
                                       hr_mr->pbl_mtr.hem_cfg.buf_pg_shift))
                goto err;

        nla_nest_end(msg, table_attr);

        return 0;

err:
        nla_nest_cancel(msg, table_attr);

        return -EMSGSIZE;
}

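/* Dump the raw memory protection table entry (MPT) looked up by MR key. */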
int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_mr->device);
        struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
        struct hns_roce_v2_mpt_entry context;
        int ret;

        if (!hr_dev->hw->query_mpt)
                return -EINVAL;

        ret = hr_dev->hw->query_mpt(hr_dev, hr_mr->key, &context);
        if (ret)
                return -EINVAL;

        ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

        return ret;
}

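/* Export SRQ number, queue depth, max SGEs per WQE and the bound XRC domain. */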
int hns_roce_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq)
{
        struct hns_roce_srq *hr_srq = to_hr_srq(ib_srq);
        struct nlattr *table_attr;

        table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
        if (!table_attr)
                return -EMSGSIZE;

        if (rdma_nl_put_driver_u32_hex(msg, "srqn", hr_srq->srqn))
                goto err;

        if (rdma_nl_put_driver_u32_hex(msg, "wqe_cnt", hr_srq->wqe_cnt))
                goto err;

        if (rdma_nl_put_driver_u32_hex(msg, "max_gs", hr_srq->max_gs))
                goto err;

        if (rdma_nl_put_driver_u32_hex(msg, "xrcdn", hr_srq->xrcdn))
                goto err;

        nla_nest_end(msg, table_attr);

        return 0;

err:
        nla_nest_cancel(msg, table_attr);
        return -EMSGSIZE;
}

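/*
 * Dump the raw SRQ context (SRQC). Unlike the other raw helpers, a query
 * failure is propagated as-is instead of being mapped to -EINVAL.
 */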
int hns_roce_fill_res_srq_entry_raw(struct sk_buff *msg, struct ib_srq *ib_srq)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
        struct hns_roce_srq *hr_srq = to_hr_srq(ib_srq);
        struct hns_roce_srq_context context;
        int ret;

        if (!hr_dev->hw->query_srqc)
                return -EINVAL;

        ret = hr_dev->hw->query_srqc(hr_dev, hr_srq->srqn, &context);
        if (ret)
                return ret;

        ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

        return ret;
}