// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2017-2020, Mellanox Technologies inc. All rights reserved.
 */

#include "cmd.h"

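/*
 * Query the special mkeys (terminate scatter list, dump fill and null mkey)
 * that the capability bits advertise and cache them in dev->mkeys, so the
 * rest of the driver can use them without issuing the command again.
 */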
int mlx5r_cmd_query_special_mkeys(struct mlx5_ib_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
	bool is_terminate, is_dump, is_null;
	int err;

	is_terminate = MLX5_CAP_GEN(dev->mdev, terminate_scatter_list_mkey);
	is_dump = MLX5_CAP_GEN(dev->mdev, dump_fill_mkey);
	is_null = MLX5_CAP_GEN(dev->mdev, null_mkey);

	dev->mkeys.terminate_scatter_list_mkey = MLX5_TERMINATE_SCATTER_LIST_LKEY;
	if (!is_terminate && !is_dump && !is_null)
		return 0;

	MLX5_SET(query_special_contexts_in, in, opcode,
		 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
	err = mlx5_cmd_exec_inout(dev->mdev, query_special_contexts, in, out);
	if (err)
		return err;

	if (is_dump)
		dev->mkeys.dump_fill_mkey = MLX5_GET(query_special_contexts_out,
						     out, dump_fill_mkey);

	if (is_null)
		dev->mkeys.null_mkey = cpu_to_be32(
			MLX5_GET(query_special_contexts_out, out, null_mkey));

	if (is_terminate)
		dev->mkeys.terminate_scatter_list_mkey =
			cpu_to_be32(MLX5_GET(query_special_contexts_out, out,
					     terminate_scatter_list_mkey));

	return 0;
}

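/*
 * Query the congestion control parameters; cong_point is written into the
 * cong_protocol field and selects which parameter group is returned.
 */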
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
			       void *out)
{
	u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = {};

	MLX5_SET(query_cong_params_in, in, opcode,
		 MLX5_CMD_OP_QUERY_CONG_PARAMS);
	MLX5_SET(query_cong_params_in, in, cong_protocol, cong_point);

	return mlx5_cmd_exec_inout(dev, query_cong_params, in, out);
}

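/*
 * The destroy/dealloc wrappers below carry the uid of the user context
 * that created the object, so teardown runs under the same context.
 */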
void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};

	MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
	MLX5_SET(destroy_tir_in, in, tirn, tirn);
	MLX5_SET(destroy_tir_in, in, uid, uid);
	mlx5_cmd_exec_in(dev, destroy_tir, in);
}

void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};

	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
	MLX5_SET(destroy_tis_in, in, tisn, tisn);
	MLX5_SET(destroy_tis_in, in, uid, uid);
	mlx5_cmd_exec_in(dev, destroy_tis, in);
}

int mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};

	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
	MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
	MLX5_SET(destroy_rqt_in, in, uid, uid);
	return mlx5_cmd_exec_in(dev, destroy_rqt, in);
}

int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
				    u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
	u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
	int err;

	MLX5_SET(alloc_transport_domain_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
	MLX5_SET(alloc_transport_domain_in, in, uid, uid);

	err = mlx5_cmd_exec_inout(dev, alloc_transport_domain, in, out);
	if (!err)
		*tdn = MLX5_GET(alloc_transport_domain_out, out,
				transport_domain);

	return err;
}

void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
				       u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};

	MLX5_SET(dealloc_transport_domain_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
	MLX5_SET(dealloc_transport_domain_in, in, uid, uid);
	MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
	mlx5_cmd_exec_in(dev, dealloc_transport_domain, in);
}

int mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};

	MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
	MLX5_SET(dealloc_pd_in, in, pd, pdn);
	MLX5_SET(dealloc_pd_in, in, uid, uid);
	return mlx5_cmd_exec_in(dev, dealloc_pd, in);
}

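/*
 * Attach/detach a QP to/from a multicast group identified by its GID.
 * The GID is copied verbatim into the command mailbox.
 */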
int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
			u32 qpn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {};
	void *gid;

	MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
	MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
	MLX5_SET(attach_to_mcg_in, in, uid, uid);
	gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
	memcpy(gid, mgid, sizeof(*mgid));
	return mlx5_cmd_exec_in(dev, attach_to_mcg, in);
}

int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
			u32 qpn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {};
	void *gid;

	MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
	MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
	MLX5_SET(detach_from_mcg_in, in, uid, uid);
	gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
	memcpy(gid, mgid, sizeof(*mgid));
	return mlx5_cmd_exec_in(dev, detach_from_mcg, in);
}

int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid)
{
	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
	int err;

	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
	MLX5_SET(alloc_xrcd_in, in, uid, uid);
	err = mlx5_cmd_exec_inout(dev, alloc_xrcd, in, out);
	if (!err)
		*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
	return err;
}

int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};

	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
	MLX5_SET(dealloc_xrcd_in, in, uid, uid);
	return mlx5_cmd_exec_in(dev, dealloc_xrcd, in);
}

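/*
 * Send a MAD through the firmware MAD_IFC command: copy the request MAD
 * into the command mailbox, execute, and copy the response MAD back into
 * the caller's buffer.
 */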
int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
		     u16 opmod, u8 port)
{
	int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out);
	int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in);
	int err = -ENOMEM;
	void *data;
	void *resp;
	u32 *out;
	u32 *in;

	in = kzalloc(inlen, GFP_KERNEL);
	out = kzalloc(outlen, GFP_KERNEL);
	if (!in || !out)
		goto out;

	MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC);
	MLX5_SET(mad_ifc_in, in, op_mod, opmod);
	MLX5_SET(mad_ifc_in, in, port, port);

	data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
	memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));

	err = mlx5_cmd_exec_inout(dev, mad_ifc, in, out);
	if (err)
		goto out;

	resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet);
	memcpy(outb, resp,
	       MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet));

out:
	kfree(out);
	kfree(in);
	return err;
}

int mlx5_cmd_uar_alloc(struct mlx5_core_dev *dev, u32 *uarn, u16 uid)
{
	u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {};
	int err;

	MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
	MLX5_SET(alloc_uar_in, in, uid, uid);
	err = mlx5_cmd_exec_inout(dev, alloc_uar, in, out);
	if (err)
		return err;

	*uarn = MLX5_GET(alloc_uar_out, out, uar);
	return 0;
}

int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {};

	MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
	MLX5_SET(dealloc_uar_in, in, uar, uarn);
	MLX5_SET(dealloc_uar_in, in, uid, uid);
	return mlx5_cmd_exec_in(dev, dealloc_uar, in);
}
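
/*
 * The two helpers below query a single special mkey (the dump fill mkey
 * and the null mkey respectively) via QUERY_SPECIAL_CONTEXTS.
 */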
int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey)
{
	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
	int err;

	MLX5_SET(query_special_contexts_in, in, opcode,
		 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
	err = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
	if (!err)
		*mkey = MLX5_GET(query_special_contexts_out, out,
				 dump_fill_mkey);
	return err;
}

int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
{
	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
	int err;

	MLX5_SET(query_special_contexts_in, in, opcode,
		 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
	err = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
	if (!err)
		*null_mkey = MLX5_GET(query_special_contexts_out, out,
				      null_mkey);
	return err;
}

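/*
 * Allocate a chunk of device memory (MEMIC). The allocator scans the page
 * bitmap for a free range, asks firmware to allocate at that address, and
 * retries at the next page on -EAGAIN until the MEMIC bar is exhausted.
 * On success *addr holds the CPU physical address of the allocation.
 */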
int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
			 u64 length, u32 alignment)
{
	struct mlx5_core_dev *dev = dm->dev;
	u64 num_memic_hw_pages = MLX5_CAP_DEV_MEM(dev, memic_bar_size)
					>> PAGE_SHIFT;
	u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
	u32 max_alignment = MLX5_CAP_DEV_MEM(dev, log_max_memic_addr_alignment);
	u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
	u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
	u32 mlx5_alignment;
	u64 page_idx = 0;
	int ret = 0;

	if (!length || (length & MLX5_MEMIC_ALLOC_SIZE_MASK))
		return -EINVAL;

	/* mlx5 device sets alignment as 64*2^driver_value
	 * so normalizing is needed.
	 */
	mlx5_alignment = (alignment < MLX5_MEMIC_BASE_ALIGN) ? 0 :
			 alignment - MLX5_MEMIC_BASE_ALIGN;
	if (mlx5_alignment > max_alignment)
		return -EINVAL;

	MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
	MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
	MLX5_SET(alloc_memic_in, in, memic_size, length);
	MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment,
		 mlx5_alignment);

	while (page_idx < num_memic_hw_pages) {
		spin_lock(&dm->lock);
		page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
						      num_memic_hw_pages,
						      page_idx,
						      num_pages, 0);

		if (page_idx < num_memic_hw_pages)
			bitmap_set(dm->memic_alloc_pages,
				   page_idx, num_pages);

		spin_unlock(&dm->lock);

		if (page_idx >= num_memic_hw_pages)
			break;

		MLX5_SET64(alloc_memic_in, in, range_start_addr,
			   hw_start_addr + (page_idx * PAGE_SIZE));

		ret = mlx5_cmd_exec_inout(dev, alloc_memic, in, out);
		if (ret) {
			spin_lock(&dm->lock);
			bitmap_clear(dm->memic_alloc_pages,
				     page_idx, num_pages);
			spin_unlock(&dm->lock);

			if (ret == -EAGAIN) {
				page_idx++;
				continue;
			}

			return ret;
		}

		*addr = dev->bar_addr +
			MLX5_GET64(alloc_memic_out, out, memic_start_addr);

		return 0;
	}

	return -ENOMEM;
}

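/*
 * Free a MEMIC allocation: ask firmware to release the range and, on
 * success, clear the corresponding pages in the allocation bitmap.
 */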
void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
{
	struct mlx5_core_dev *dev = dm->dev;
	u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
	u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
	u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {};
	u64 start_page_idx;
	int err;

	addr -= dev->bar_addr;
	start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;

	MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
	MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
	MLX5_SET(dealloc_memic_in, in, memic_size, length);

	err = mlx5_cmd_exec_in(dev, dealloc_memic, in);
	if (err)
		return;

	spin_lock(&dm->lock);
	bitmap_clear(dm->memic_alloc_pages,
		     start_page_idx, num_pages);
	spin_unlock(&dm->lock);
}