v6.8
  1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
  2/*
  3 * Copyright (c) 2013-2018, Mellanox Technologies inc.  All rights reserved.
  4 */
  5
  6#include <linux/kernel.h>
  7#include <linux/mlx5/driver.h>
  8#include "mlx5_ib.h"
  9#include "srq.h"
 10#include "qp.h"
 11
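/*
 * Descriptive note (added): get_pas_size() returns the size, in bytes, of
 * the PAS (physical address) array needed to map the SRQ buffer: the WQE
 * area (2^log_size entries of 2^(wqe_shift + 4) bytes) plus the offset into
 * the first page (page_offset is in units of page_size / 64), rounded up to
 * whole pages, eight bytes per page address.
 */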
 12static int get_pas_size(struct mlx5_srq_attr *in)
 13{
 14	u32 log_page_size = in->log_page_size + 12;
 15	u32 log_srq_size  = in->log_size;
 16	u32 log_rq_stride = in->wqe_shift;
 17	u32 page_offset   = in->page_offset;
 18	u32 po_quanta	  = 1 << (log_page_size - 6);
 19	u32 rq_sz	  = 1 << (log_srq_size + 4 + log_rq_stride);
 20	u32 page_size	  = 1 << log_page_size;
 21	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
 22	u32 rq_num_pas    = DIV_ROUND_UP(rq_sz_po, page_size);
 23
 24	return rq_num_pas * sizeof(u64);
 25}
 26
 27static void set_wq(void *wq, struct mlx5_srq_attr *in)
 28{
 29	MLX5_SET(wq,   wq, wq_signature,  !!(in->flags
 30		 & MLX5_SRQ_FLAG_WQ_SIG));
 31	MLX5_SET(wq,   wq, log_wq_pg_sz,  in->log_page_size);
 32	MLX5_SET(wq,   wq, log_wq_stride, in->wqe_shift + 4);
 33	MLX5_SET(wq,   wq, log_wq_sz,     in->log_size);
 34	MLX5_SET(wq,   wq, page_offset,   in->page_offset);
 35	MLX5_SET(wq,   wq, lwm,		  in->lwm);
 36	MLX5_SET(wq,   wq, pd,		  in->pd);
 37	MLX5_SET64(wq, wq, dbr_addr,	  in->db_record);
 38}
 39
 40static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
 41{
 42	MLX5_SET(srqc,   srqc, wq_signature,  !!(in->flags
 43		 & MLX5_SRQ_FLAG_WQ_SIG));
 44	MLX5_SET(srqc,   srqc, log_page_size, in->log_page_size);
 45	MLX5_SET(srqc,   srqc, log_rq_stride, in->wqe_shift);
 46	MLX5_SET(srqc,   srqc, log_srq_size,  in->log_size);
 47	MLX5_SET(srqc,   srqc, page_offset,   in->page_offset);
 48	MLX5_SET(srqc,	 srqc, lwm,	      in->lwm);
 49	MLX5_SET(srqc,	 srqc, pd,	      in->pd);
 50	MLX5_SET64(srqc, srqc, dbr_addr,      in->db_record);
 51	MLX5_SET(srqc,	 srqc, xrcd,	      in->xrcd);
 52	MLX5_SET(srqc,	 srqc, cqn,	      in->cqn);
 53}
 54
 55static void get_wq(void *wq, struct mlx5_srq_attr *in)
 56{
 57	if (MLX5_GET(wq, wq, wq_signature))
 58		in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
 59	in->log_page_size = MLX5_GET(wq,   wq, log_wq_pg_sz);
 60	in->wqe_shift	  = MLX5_GET(wq,   wq, log_wq_stride) - 4;
 61	in->log_size	  = MLX5_GET(wq,   wq, log_wq_sz);
 62	in->page_offset   = MLX5_GET(wq,   wq, page_offset);
 63	in->lwm		  = MLX5_GET(wq,   wq, lwm);
 64	in->pd		  = MLX5_GET(wq,   wq, pd);
 65	in->db_record	  = MLX5_GET64(wq, wq, dbr_addr);
 66}
 67
 68static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
 69{
 70	if (MLX5_GET(srqc, srqc, wq_signature))
 71		in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
 72	in->log_page_size = MLX5_GET(srqc,   srqc, log_page_size);
 73	in->wqe_shift	  = MLX5_GET(srqc,   srqc, log_rq_stride);
 74	in->log_size	  = MLX5_GET(srqc,   srqc, log_srq_size);
 75	in->page_offset   = MLX5_GET(srqc,   srqc, page_offset);
 76	in->lwm		  = MLX5_GET(srqc,   srqc, lwm);
 77	in->pd		  = MLX5_GET(srqc,   srqc, pd);
 78	in->db_record	  = MLX5_GET64(srqc, srqc, dbr_addr);
 79}
 80
 81struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
 82{
 83	struct mlx5_srq_table *table = &dev->srq_table;
 84	struct mlx5_core_srq *srq;
 85
 86	xa_lock_irq(&table->array);
 87	srq = xa_load(&table->array, srqn);
 88	if (srq)
 89		refcount_inc(&srq->common.refcount);
 90	xa_unlock_irq(&table->array);
 91
 92	return srq;
 93}
 94
 95static int __set_srq_page_size(struct mlx5_srq_attr *in,
 96			       unsigned long page_size)
 97{
 98	if (!page_size)
 99		return -EINVAL;
100	in->log_page_size = order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT;
101
102	if (WARN_ON(get_pas_size(in) !=
103		    ib_umem_num_dma_blocks(in->umem, page_size) * sizeof(u64)))
104		return -EINVAL;
105	return 0;
106}
107
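/*
 * Descriptive note (added): for a user-provided umem, the macro below picks
 * the best page size via mlx5_umem_find_best_quantized_pgoff(), which also
 * fills in page_offset; __set_srq_page_size() then stores log_page_size
 * relative to MLX5_ADAPTER_PAGE_SHIFT and checks that the PAS array derived
 * from those fields matches the number of DMA blocks in the umem.
 */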
108#define set_srq_page_size(in, typ, log_pgsz_fld)                               \
109	__set_srq_page_size(in, mlx5_umem_find_best_quantized_pgoff(           \
110					(in)->umem, typ, log_pgsz_fld,         \
111					MLX5_ADAPTER_PAGE_SHIFT, page_offset,  \
112					64, &(in)->page_offset))
113
114static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
115			  struct mlx5_srq_attr *in)
116{
117	u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
118	void *create_in;
119	void *srqc;
120	void *pas;
121	int pas_size;
122	int inlen;
123	int err;
124
125	if (in->umem) {
126		err = set_srq_page_size(in, srqc, log_page_size);
127		if (err)
128			return err;
129	}
130
131	pas_size  = get_pas_size(in);
132	inlen	  = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
133	create_in = kvzalloc(inlen, GFP_KERNEL);
134	if (!create_in)
135		return -ENOMEM;
136
137	MLX5_SET(create_srq_in, create_in, uid, in->uid);
138	srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
139	pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);
140
141	set_srqc(srqc, in);
142	if (in->umem)
143		mlx5_ib_populate_pas(
144			in->umem,
145			1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
146			pas, 0);
147	else
148		memcpy(pas, in->pas, pas_size);
149
150	MLX5_SET(create_srq_in, create_in, opcode,
151		 MLX5_CMD_OP_CREATE_SRQ);
152
153	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
154			    sizeof(create_out));
155	kvfree(create_in);
156	if (!err) {
157		srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
158		srq->uid = in->uid;
159	}
160
161	return err;
162}
163
164static int destroy_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
165{
166	u32 in[MLX5_ST_SZ_DW(destroy_srq_in)] = {};
167
168	MLX5_SET(destroy_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
169	MLX5_SET(destroy_srq_in, in, srqn, srq->srqn);
170	MLX5_SET(destroy_srq_in, in, uid, srq->uid);
171
172	return mlx5_cmd_exec_in(dev->mdev, destroy_srq, in);
173}
174
175static int arm_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
176		       u16 lwm, int is_srq)
177{
178	u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};
179
180	MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
181	MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
182	MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
183	MLX5_SET(arm_rq_in, in, lwm, lwm);
184	MLX5_SET(arm_rq_in, in, uid, srq->uid);
185
186	return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
187}
188
189static int query_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
190			 struct mlx5_srq_attr *out)
191{
192	u32 in[MLX5_ST_SZ_DW(query_srq_in)] = {};
193	u32 *srq_out;
194	void *srqc;
195	int err;
196
197	srq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_srq_out), GFP_KERNEL);
198	if (!srq_out)
199		return -ENOMEM;
200
201	MLX5_SET(query_srq_in, in, opcode, MLX5_CMD_OP_QUERY_SRQ);
202	MLX5_SET(query_srq_in, in, srqn, srq->srqn);
203	err = mlx5_cmd_exec_inout(dev->mdev, query_srq, in, srq_out);
204	if (err)
205		goto out;
206
207	srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
208	get_srqc(srqc, out);
209	if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
210		out->flags |= MLX5_SRQ_FLAG_ERR;
211out:
212	kvfree(srq_out);
213	return err;
214}
215
216static int create_xrc_srq_cmd(struct mlx5_ib_dev *dev,
217			      struct mlx5_core_srq *srq,
218			      struct mlx5_srq_attr *in)
219{
220	u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
221	void *create_in;
222	void *xrc_srqc;
223	void *pas;
224	int pas_size;
225	int inlen;
226	int err;
227
228	if (in->umem) {
229		err = set_srq_page_size(in, xrc_srqc, log_page_size);
230		if (err)
231			return err;
232	}
233
234	pas_size  = get_pas_size(in);
235	inlen	  = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
236	create_in = kvzalloc(inlen, GFP_KERNEL);
237	if (!create_in)
238		return -ENOMEM;
239
240	MLX5_SET(create_xrc_srq_in, create_in, uid, in->uid);
241	xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
242				xrc_srq_context_entry);
243	pas	 = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
244
245	set_srqc(xrc_srqc, in);
246	MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
247	if (in->umem)
248		mlx5_ib_populate_pas(
249			in->umem,
250			1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
251			pas, 0);
252	else
253		memcpy(pas, in->pas, pas_size);
254	MLX5_SET(create_xrc_srq_in, create_in, opcode,
255		 MLX5_CMD_OP_CREATE_XRC_SRQ);
256
257	memset(create_out, 0, sizeof(create_out));
258	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
259			    sizeof(create_out));
260	if (err)
261		goto out;
262
263	srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
264	srq->uid = in->uid;
265out:
266	kvfree(create_in);
267	return err;
268}
269
270static int destroy_xrc_srq_cmd(struct mlx5_ib_dev *dev,
271			       struct mlx5_core_srq *srq)
272{
273	u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {};
274
275	MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
276	MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, srq->srqn);
277	MLX5_SET(destroy_xrc_srq_in, in, uid, srq->uid);
278
279	return mlx5_cmd_exec_in(dev->mdev, destroy_xrc_srq, in);
280}
281
282static int arm_xrc_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
283			   u16 lwm)
284{
285	u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {};
286
287	MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
288	MLX5_SET(arm_xrc_srq_in, in, op_mod,
289		 MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
290	MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, srq->srqn);
291	MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
292	MLX5_SET(arm_xrc_srq_in, in, uid, srq->uid);
293
294	return  mlx5_cmd_exec_in(dev->mdev, arm_xrc_srq, in);
295}
296
297static int query_xrc_srq_cmd(struct mlx5_ib_dev *dev,
298			     struct mlx5_core_srq *srq,
299			     struct mlx5_srq_attr *out)
300{
301	u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {};
302	u32 *xrcsrq_out;
303	void *xrc_srqc;
304	int err;
305
306	xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL);
307	if (!xrcsrq_out)
308		return -ENOMEM;
309
310	MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
311	MLX5_SET(query_xrc_srq_in, in, xrc_srqn, srq->srqn);
312
313	err = mlx5_cmd_exec_inout(dev->mdev, query_xrc_srq, in, xrcsrq_out);
314	if (err)
315		goto out;
316
317	xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
318				xrc_srq_context_entry);
319	get_srqc(xrc_srqc, out);
320	if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
321		out->flags |= MLX5_SRQ_FLAG_ERR;
322
323out:
324	kvfree(xrcsrq_out);
325	return err;
326}
327
328static int create_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
329			  struct mlx5_srq_attr *in)
330{
331	void *create_out = NULL;
332	void *create_in = NULL;
333	void *rmpc;
334	void *wq;
335	void *pas;
336	int pas_size;
337	int outlen;
338	int inlen;
339	int err;
340
341	if (in->umem) {
342		err = set_srq_page_size(in, wq, log_wq_pg_sz);
343		if (err)
344			return err;
345	}
346
347	pas_size = get_pas_size(in);
348	inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
349	outlen = MLX5_ST_SZ_BYTES(create_rmp_out);
350	create_in = kvzalloc(inlen, GFP_KERNEL);
351	create_out = kvzalloc(outlen, GFP_KERNEL);
352	if (!create_in || !create_out) {
353		err = -ENOMEM;
354		goto out;
355	}
356
357	rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
358	wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
359
360	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
361	MLX5_SET(create_rmp_in, create_in, uid, in->uid);
362	pas = MLX5_ADDR_OF(rmpc, rmpc, wq.pas);
363
364	set_wq(wq, in);
365	if (in->umem)
366		mlx5_ib_populate_pas(
367			in->umem,
368			1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
369			pas, 0);
370	else
371		memcpy(pas, in->pas, pas_size);
372
373	MLX5_SET(create_rmp_in, create_in, opcode, MLX5_CMD_OP_CREATE_RMP);
374	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out, outlen);
375	if (!err) {
376		srq->srqn = MLX5_GET(create_rmp_out, create_out, rmpn);
377		srq->uid = in->uid;
378	}
379
380out:
381	kvfree(create_in);
382	kvfree(create_out);
383	return err;
384}
385
386static int destroy_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
387{
388	u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};
389
390	MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
391	MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn);
392	MLX5_SET(destroy_rmp_in, in, uid, srq->uid);
393	return mlx5_cmd_exec_in(dev->mdev, destroy_rmp, in);
394}
395
396static int arm_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
397		       u16 lwm)
398{
399	void *out = NULL;
400	void *in = NULL;
401	void *rmpc;
402	void *wq;
403	void *bitmask;
404	int outlen;
405	int inlen;
406	int err;
407
408	inlen = MLX5_ST_SZ_BYTES(modify_rmp_in);
409	outlen = MLX5_ST_SZ_BYTES(modify_rmp_out);
410
411	in = kvzalloc(inlen, GFP_KERNEL);
412	out = kvzalloc(outlen, GFP_KERNEL);
413	if (!in || !out) {
414		err = -ENOMEM;
415		goto out;
416	}
417
418	rmpc =	  MLX5_ADDR_OF(modify_rmp_in,   in,   ctx);
419	bitmask = MLX5_ADDR_OF(modify_rmp_in,   in,   bitmask);
420	wq   =	  MLX5_ADDR_OF(rmpc,	        rmpc, wq);
421
422	MLX5_SET(modify_rmp_in, in,	 rmp_state, MLX5_RMPC_STATE_RDY);
423	MLX5_SET(modify_rmp_in, in,	 rmpn,      srq->srqn);
424	MLX5_SET(modify_rmp_in, in, uid, srq->uid);
425	MLX5_SET(wq,		wq,	 lwm,	    lwm);
426	MLX5_SET(rmp_bitmask,	bitmask, lwm,	    1);
427	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
428	MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
429
430	err = mlx5_cmd_exec_inout(dev->mdev, modify_rmp, in, out);
431
432out:
433	kvfree(in);
434	kvfree(out);
435	return err;
436}
437
438static int query_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
439			 struct mlx5_srq_attr *out)
440{
441	u32 *rmp_out = NULL;
442	u32 *rmp_in = NULL;
443	void *rmpc;
444	int outlen;
445	int inlen;
446	int err;
447
448	outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
449	inlen = MLX5_ST_SZ_BYTES(query_rmp_in);
450
451	rmp_out = kvzalloc(outlen, GFP_KERNEL);
452	rmp_in = kvzalloc(inlen, GFP_KERNEL);
453	if (!rmp_out || !rmp_in) {
454		err = -ENOMEM;
455		goto out;
456	}
457
458	MLX5_SET(query_rmp_in, rmp_in, opcode, MLX5_CMD_OP_QUERY_RMP);
459	MLX5_SET(query_rmp_in, rmp_in, rmpn,   srq->srqn);
460	err = mlx5_cmd_exec_inout(dev->mdev, query_rmp, rmp_in, rmp_out);
461	if (err)
462		goto out;
463
464	rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
465	get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
466	if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
467		out->flags |= MLX5_SRQ_FLAG_ERR;
468
469out:
470	kvfree(rmp_out);
471	kvfree(rmp_in);
472	return err;
473}
474
475static int create_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
476			  struct mlx5_srq_attr *in)
477{
478	u32 create_out[MLX5_ST_SZ_DW(create_xrq_out)] = {0};
479	void *create_in;
480	void *xrqc;
481	void *wq;
482	void *pas;
483	int pas_size;
484	int inlen;
485	int err;
486
487	if (in->umem) {
488		err = set_srq_page_size(in, wq, log_wq_pg_sz);
489		if (err)
490			return err;
491	}
492
493	pas_size = get_pas_size(in);
494	inlen = MLX5_ST_SZ_BYTES(create_xrq_in) + pas_size;
495	create_in = kvzalloc(inlen, GFP_KERNEL);
496	if (!create_in)
497		return -ENOMEM;
498
499	xrqc = MLX5_ADDR_OF(create_xrq_in, create_in, xrq_context);
500	wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
501	pas = MLX5_ADDR_OF(xrqc, xrqc, wq.pas);
502
503	set_wq(wq, in);
504	if (in->umem)
505		mlx5_ib_populate_pas(
506			in->umem,
507			1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
508			pas, 0);
509	else
510		memcpy(pas, in->pas, pas_size);
511
512	if (in->type == IB_SRQT_TM) {
513		MLX5_SET(xrqc, xrqc, topology, MLX5_XRQC_TOPOLOGY_TAG_MATCHING);
514		if (in->flags & MLX5_SRQ_FLAG_RNDV)
515			MLX5_SET(xrqc, xrqc, offload, MLX5_XRQC_OFFLOAD_RNDV);
516		MLX5_SET(xrqc, xrqc,
517			 tag_matching_topology_context.log_matching_list_sz,
518			 in->tm_log_list_size);
519	}
520	MLX5_SET(xrqc, xrqc, user_index, in->user_index);
521	MLX5_SET(xrqc, xrqc, cqn, in->cqn);
522	MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ);
523	MLX5_SET(create_xrq_in, create_in, uid, in->uid);
524	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
525			    sizeof(create_out));
526	kvfree(create_in);
527	if (!err) {
528		srq->srqn = MLX5_GET(create_xrq_out, create_out, xrqn);
529		srq->uid = in->uid;
530	}
531
532	return err;
533}
534
535static int destroy_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
536{
537	u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {};
538
539	MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ);
540	MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
541	MLX5_SET(destroy_xrq_in, in, uid, srq->uid);
542
543	return mlx5_cmd_exec_in(dev->mdev, destroy_xrq, in);
544}
545
546static int arm_xrq_cmd(struct mlx5_ib_dev *dev,
547		       struct mlx5_core_srq *srq,
548		       u16 lwm)
549{
550	u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};
551
552	MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
553	MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
554	MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
555	MLX5_SET(arm_rq_in, in, lwm, lwm);
556	MLX5_SET(arm_rq_in, in, uid, srq->uid);
557
558	return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
559}
560
561static int query_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
562			 struct mlx5_srq_attr *out)
563{
564	u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {};
565	u32 *xrq_out;
566	int outlen = MLX5_ST_SZ_BYTES(query_xrq_out);
567	void *xrqc;
568	int err;
569
570	xrq_out = kvzalloc(outlen, GFP_KERNEL);
571	if (!xrq_out)
572		return -ENOMEM;
573
574	MLX5_SET(query_xrq_in, in, opcode, MLX5_CMD_OP_QUERY_XRQ);
575	MLX5_SET(query_xrq_in, in, xrqn, srq->srqn);
576
577	err = mlx5_cmd_exec_inout(dev->mdev, query_xrq, in, xrq_out);
578	if (err)
579		goto out;
580
581	xrqc = MLX5_ADDR_OF(query_xrq_out, xrq_out, xrq_context);
582	get_wq(MLX5_ADDR_OF(xrqc, xrqc, wq), out);
583	if (MLX5_GET(xrqc, xrqc, state) != MLX5_XRQC_STATE_GOOD)
584		out->flags |= MLX5_SRQ_FLAG_ERR;
585	out->tm_next_tag =
586		MLX5_GET(xrqc, xrqc,
587			 tag_matching_topology_context.append_next_index);
588	out->tm_hw_phase_cnt =
589		MLX5_GET(xrqc, xrqc,
590			 tag_matching_topology_context.hw_phase_cnt);
591	out->tm_sw_phase_cnt =
592		MLX5_GET(xrqc, xrqc,
593			 tag_matching_topology_context.sw_phase_cnt);
594
595out:
596	kvfree(xrq_out);
597	return err;
598}
599
600static int create_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
601			    struct mlx5_srq_attr *in)
602{
603	if (!dev->mdev->issi)
604		return create_srq_cmd(dev, srq, in);
605	switch (srq->common.res) {
606	case MLX5_RES_XSRQ:
607		return create_xrc_srq_cmd(dev, srq, in);
608	case MLX5_RES_XRQ:
609		return create_xrq_cmd(dev, srq, in);
610	default:
611		return create_rmp_cmd(dev, srq, in);
612	}
613}
614
615static int destroy_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
616{
617	if (!dev->mdev->issi)
618		return destroy_srq_cmd(dev, srq);
619	switch (srq->common.res) {
620	case MLX5_RES_XSRQ:
621		return destroy_xrc_srq_cmd(dev, srq);
622	case MLX5_RES_XRQ:
623		return destroy_xrq_cmd(dev, srq);
624	default:
625		return destroy_rmp_cmd(dev, srq);
626	}
627}
628
629int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
630			struct mlx5_srq_attr *in)
631{
632	struct mlx5_srq_table *table = &dev->srq_table;
633	int err;
634
635	switch (in->type) {
636	case IB_SRQT_XRC:
637		srq->common.res = MLX5_RES_XSRQ;
638		break;
639	case IB_SRQT_TM:
640		srq->common.res = MLX5_RES_XRQ;
641		break;
642	default:
643		srq->common.res = MLX5_RES_SRQ;
644	}
645
646	err = create_srq_split(dev, srq, in);
647	if (err)
648		return err;
649
650	refcount_set(&srq->common.refcount, 1);
651	init_completion(&srq->common.free);
652
653	err = xa_err(xa_store_irq(&table->array, srq->srqn, srq, GFP_KERNEL));
654	if (err)
655		goto err_destroy_srq_split;
656
657	return 0;
658
659err_destroy_srq_split:
660	destroy_srq_split(dev, srq);
661
662	return err;
663}
664
665int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
666{
667	struct mlx5_srq_table *table = &dev->srq_table;
668	struct mlx5_core_srq *tmp;
669	int err;
670
671	/* Delete entry, but leave index occupied */
672	tmp = xa_cmpxchg_irq(&table->array, srq->srqn, srq, XA_ZERO_ENTRY, 0);
673	if (WARN_ON(tmp != srq))
674		return xa_err(tmp) ?: -EINVAL;
675
676	err = destroy_srq_split(dev, srq);
677	if (err) {
678		/*
679		 * We don't need to check returned result for an error,
680		 * because  we are storing in pre-allocated space xarray
681		 * entry and it can't fail at this stage.
682		 */
683		xa_cmpxchg_irq(&table->array, srq->srqn, XA_ZERO_ENTRY, srq, 0);
684		return err;
685	}
686	xa_erase_irq(&table->array, srq->srqn);
687
688	mlx5_core_res_put(&srq->common);
689	wait_for_completion(&srq->common.free);
690	return 0;
691}
692
693int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
694		       struct mlx5_srq_attr *out)
695{
696	if (!dev->mdev->issi)
697		return query_srq_cmd(dev, srq, out);
698	switch (srq->common.res) {
699	case MLX5_RES_XSRQ:
700		return query_xrc_srq_cmd(dev, srq, out);
701	case MLX5_RES_XRQ:
702		return query_xrq_cmd(dev, srq, out);
703	default:
704		return query_rmp_cmd(dev, srq, out);
705	}
706}
707
708int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
709		     u16 lwm, int is_srq)
710{
711	if (!dev->mdev->issi)
712		return arm_srq_cmd(dev, srq, lwm, is_srq);
713	switch (srq->common.res) {
714	case MLX5_RES_XSRQ:
715		return arm_xrc_srq_cmd(dev, srq, lwm);
716	case MLX5_RES_XRQ:
717		return arm_xrq_cmd(dev, srq, lwm);
718	default:
719		return arm_rmp_cmd(dev, srq, lwm);
720	}
721}
722
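/*
 * Descriptive note (added): notifier for SRQ_CATAS_ERROR and SRQ_RQ_LIMIT
 * events. It looks up the SRQ by the 24-bit number carried in the EQE,
 * takes a reference, forwards the event to the owner's ->event() callback,
 * and then drops the reference.
 */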
723static int srq_event_notifier(struct notifier_block *nb,
724			      unsigned long type, void *data)
725{
726	struct mlx5_srq_table *table;
727	struct mlx5_core_srq *srq;
728	struct mlx5_eqe *eqe;
729	u32 srqn;
730
731	if (type != MLX5_EVENT_TYPE_SRQ_CATAS_ERROR &&
732	    type != MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)
733		return NOTIFY_DONE;
734
735	table = container_of(nb, struct mlx5_srq_table, nb);
736
737	eqe = data;
738	srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
739
740	xa_lock(&table->array);
741	srq = xa_load(&table->array, srqn);
742	if (srq)
743		refcount_inc(&srq->common.refcount);
744	xa_unlock(&table->array);
745
746	if (!srq)
747		return NOTIFY_OK;
748
749	srq->event(srq, eqe->type);
750
751	mlx5_core_res_put(&srq->common);
752
753	return NOTIFY_OK;
754}
755
756int mlx5_init_srq_table(struct mlx5_ib_dev *dev)
757{
758	struct mlx5_srq_table *table = &dev->srq_table;
759
760	memset(table, 0, sizeof(*table));
761	xa_init_flags(&table->array, XA_FLAGS_LOCK_IRQ);
762
763	table->nb.notifier_call = srq_event_notifier;
764	mlx5_notifier_register(dev->mdev, &table->nb);
765
766	return 0;
767}
768
769void mlx5_cleanup_srq_table(struct mlx5_ib_dev *dev)
770{
771	struct mlx5_srq_table *table = &dev->srq_table;
772
773	mlx5_notifier_unregister(dev->mdev, &table->nb);
774}
v5.9
  1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
  2/*
  3 * Copyright (c) 2013-2018, Mellanox Technologies inc.  All rights reserved.
  4 */
  5
  6#include <linux/kernel.h>
  7#include <linux/mlx5/driver.h>
  8#include "mlx5_ib.h"
  9#include "srq.h"
 10#include "qp.h"
 11
 12static int get_pas_size(struct mlx5_srq_attr *in)
 13{
 14	u32 log_page_size = in->log_page_size + 12;
 15	u32 log_srq_size  = in->log_size;
 16	u32 log_rq_stride = in->wqe_shift;
 17	u32 page_offset   = in->page_offset;
 18	u32 po_quanta	  = 1 << (log_page_size - 6);
 19	u32 rq_sz	  = 1 << (log_srq_size + 4 + log_rq_stride);
 20	u32 page_size	  = 1 << log_page_size;
 21	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
 22	u32 rq_num_pas    = DIV_ROUND_UP(rq_sz_po, page_size);
 23
 24	return rq_num_pas * sizeof(u64);
 25}
 26
 27static void set_wq(void *wq, struct mlx5_srq_attr *in)
 28{
 29	MLX5_SET(wq,   wq, wq_signature,  !!(in->flags
 30		 & MLX5_SRQ_FLAG_WQ_SIG));
 31	MLX5_SET(wq,   wq, log_wq_pg_sz,  in->log_page_size);
 32	MLX5_SET(wq,   wq, log_wq_stride, in->wqe_shift + 4);
 33	MLX5_SET(wq,   wq, log_wq_sz,     in->log_size);
 34	MLX5_SET(wq,   wq, page_offset,   in->page_offset);
 35	MLX5_SET(wq,   wq, lwm,		  in->lwm);
 36	MLX5_SET(wq,   wq, pd,		  in->pd);
 37	MLX5_SET64(wq, wq, dbr_addr,	  in->db_record);
 38}
 39
 40static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
 41{
 42	MLX5_SET(srqc,   srqc, wq_signature,  !!(in->flags
 43		 & MLX5_SRQ_FLAG_WQ_SIG));
 44	MLX5_SET(srqc,   srqc, log_page_size, in->log_page_size);
 45	MLX5_SET(srqc,   srqc, log_rq_stride, in->wqe_shift);
 46	MLX5_SET(srqc,   srqc, log_srq_size,  in->log_size);
 47	MLX5_SET(srqc,   srqc, page_offset,   in->page_offset);
 48	MLX5_SET(srqc,	 srqc, lwm,	      in->lwm);
 49	MLX5_SET(srqc,	 srqc, pd,	      in->pd);
 50	MLX5_SET64(srqc, srqc, dbr_addr,      in->db_record);
 51	MLX5_SET(srqc,	 srqc, xrcd,	      in->xrcd);
 52	MLX5_SET(srqc,	 srqc, cqn,	      in->cqn);
 53}
 54
 55static void get_wq(void *wq, struct mlx5_srq_attr *in)
 56{
 57	if (MLX5_GET(wq, wq, wq_signature))
 58		in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
 59	in->log_page_size = MLX5_GET(wq,   wq, log_wq_pg_sz);
 60	in->wqe_shift	  = MLX5_GET(wq,   wq, log_wq_stride) - 4;
 61	in->log_size	  = MLX5_GET(wq,   wq, log_wq_sz);
 62	in->page_offset   = MLX5_GET(wq,   wq, page_offset);
 63	in->lwm		  = MLX5_GET(wq,   wq, lwm);
 64	in->pd		  = MLX5_GET(wq,   wq, pd);
 65	in->db_record	  = MLX5_GET64(wq, wq, dbr_addr);
 66}
 67
 68static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
 69{
 70	if (MLX5_GET(srqc, srqc, wq_signature))
 71		in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
 72	in->log_page_size = MLX5_GET(srqc,   srqc, log_page_size);
 73	in->wqe_shift	  = MLX5_GET(srqc,   srqc, log_rq_stride);
 74	in->log_size	  = MLX5_GET(srqc,   srqc, log_srq_size);
 75	in->page_offset   = MLX5_GET(srqc,   srqc, page_offset);
 76	in->lwm		  = MLX5_GET(srqc,   srqc, lwm);
 77	in->pd		  = MLX5_GET(srqc,   srqc, pd);
 78	in->db_record	  = MLX5_GET64(srqc, srqc, dbr_addr);
 79}
 80
 81struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
 82{
 83	struct mlx5_srq_table *table = &dev->srq_table;
 84	struct mlx5_core_srq *srq;
 85
 86	xa_lock_irq(&table->array);
 87	srq = xa_load(&table->array, srqn);
 88	if (srq)
 89		refcount_inc(&srq->common.refcount);
 90	xa_unlock_irq(&table->array);
 91
 92	return srq;
 93}
 94
 95static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
 96			  struct mlx5_srq_attr *in)
 97{
 98	u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
 99	void *create_in;
100	void *srqc;
101	void *pas;
102	int pas_size;
103	int inlen;
104	int err;
105
106	pas_size  = get_pas_size(in);
107	inlen	  = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
108	create_in = kvzalloc(inlen, GFP_KERNEL);
109	if (!create_in)
110		return -ENOMEM;
111
112	MLX5_SET(create_srq_in, create_in, uid, in->uid);
113	srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
114	pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);
115
116	set_srqc(srqc, in);
117	memcpy(pas, in->pas, pas_size);
118
119	MLX5_SET(create_srq_in, create_in, opcode,
120		 MLX5_CMD_OP_CREATE_SRQ);
121
122	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
123			    sizeof(create_out));
124	kvfree(create_in);
125	if (!err) {
126		srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
127		srq->uid = in->uid;
128	}
129
130	return err;
131}
132
133static int destroy_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
134{
135	u32 in[MLX5_ST_SZ_DW(destroy_srq_in)] = {};
136
137	MLX5_SET(destroy_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
138	MLX5_SET(destroy_srq_in, in, srqn, srq->srqn);
139	MLX5_SET(destroy_srq_in, in, uid, srq->uid);
140
141	return mlx5_cmd_exec_in(dev->mdev, destroy_srq, in);
142}
143
144static int arm_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
145		       u16 lwm, int is_srq)
146{
147	u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};
148
149	MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
150	MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
151	MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
152	MLX5_SET(arm_rq_in, in, lwm, lwm);
153	MLX5_SET(arm_rq_in, in, uid, srq->uid);
154
155	return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
156}
157
158static int query_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
159			 struct mlx5_srq_attr *out)
160{
161	u32 in[MLX5_ST_SZ_DW(query_srq_in)] = {};
162	u32 *srq_out;
163	void *srqc;
164	int err;
165
166	srq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_srq_out), GFP_KERNEL);
167	if (!srq_out)
168		return -ENOMEM;
169
170	MLX5_SET(query_srq_in, in, opcode, MLX5_CMD_OP_QUERY_SRQ);
171	MLX5_SET(query_srq_in, in, srqn, srq->srqn);
172	err = mlx5_cmd_exec_inout(dev->mdev, query_srq, in, srq_out);
173	if (err)
174		goto out;
175
176	srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
177	get_srqc(srqc, out);
178	if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
179		out->flags |= MLX5_SRQ_FLAG_ERR;
180out:
181	kvfree(srq_out);
182	return err;
183}
184
185static int create_xrc_srq_cmd(struct mlx5_ib_dev *dev,
186			      struct mlx5_core_srq *srq,
187			      struct mlx5_srq_attr *in)
188{
189	u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
190	void *create_in;
191	void *xrc_srqc;
192	void *pas;
193	int pas_size;
194	int inlen;
195	int err;
196
197	pas_size  = get_pas_size(in);
198	inlen	  = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
199	create_in = kvzalloc(inlen, GFP_KERNEL);
200	if (!create_in)
201		return -ENOMEM;
202
203	MLX5_SET(create_xrc_srq_in, create_in, uid, in->uid);
204	xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
205				xrc_srq_context_entry);
206	pas	 = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
207
208	set_srqc(xrc_srqc, in);
209	MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
210	memcpy(pas, in->pas, pas_size);
211	MLX5_SET(create_xrc_srq_in, create_in, opcode,
212		 MLX5_CMD_OP_CREATE_XRC_SRQ);
213
214	memset(create_out, 0, sizeof(create_out));
215	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
216			    sizeof(create_out));
217	if (err)
218		goto out;
219
220	srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
221	srq->uid = in->uid;
222out:
223	kvfree(create_in);
224	return err;
225}
226
227static int destroy_xrc_srq_cmd(struct mlx5_ib_dev *dev,
228			       struct mlx5_core_srq *srq)
229{
230	u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {};
231
232	MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
233	MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, srq->srqn);
234	MLX5_SET(destroy_xrc_srq_in, in, uid, srq->uid);
235
236	return mlx5_cmd_exec_in(dev->mdev, destroy_xrc_srq, in);
237}
238
239static int arm_xrc_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
240			   u16 lwm)
241{
242	u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {};
243
244	MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
245	MLX5_SET(arm_xrc_srq_in, in, op_mod,
246		 MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
247	MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, srq->srqn);
248	MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
249	MLX5_SET(arm_xrc_srq_in, in, uid, srq->uid);
250
251	return  mlx5_cmd_exec_in(dev->mdev, arm_xrc_srq, in);
252}
253
254static int query_xrc_srq_cmd(struct mlx5_ib_dev *dev,
255			     struct mlx5_core_srq *srq,
256			     struct mlx5_srq_attr *out)
257{
258	u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {};
259	u32 *xrcsrq_out;
260	void *xrc_srqc;
261	int err;
262
263	xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL);
264	if (!xrcsrq_out)
265		return -ENOMEM;
266
267	MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
268	MLX5_SET(query_xrc_srq_in, in, xrc_srqn, srq->srqn);
269
270	err = mlx5_cmd_exec_inout(dev->mdev, query_xrc_srq, in, xrcsrq_out);
271	if (err)
272		goto out;
273
274	xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
275				xrc_srq_context_entry);
276	get_srqc(xrc_srqc, out);
277	if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
278		out->flags |= MLX5_SRQ_FLAG_ERR;
279
280out:
281	kvfree(xrcsrq_out);
282	return err;
283}
284
285static int create_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
286			  struct mlx5_srq_attr *in)
287{
288	void *create_out = NULL;
289	void *create_in = NULL;
290	void *rmpc;
291	void *wq;
292	int pas_size;
293	int outlen;
294	int inlen;
295	int err;
296
297	pas_size = get_pas_size(in);
298	inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
299	outlen = MLX5_ST_SZ_BYTES(create_rmp_out);
300	create_in = kvzalloc(inlen, GFP_KERNEL);
301	create_out = kvzalloc(outlen, GFP_KERNEL);
302	if (!create_in || !create_out) {
303		err = -ENOMEM;
304		goto out;
305	}
306
307	rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
308	wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
309
310	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
311	MLX5_SET(create_rmp_in, create_in, uid, in->uid);
312	set_wq(wq, in);
313	memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
314
315	MLX5_SET(create_rmp_in, create_in, opcode, MLX5_CMD_OP_CREATE_RMP);
316	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out, outlen);
317	if (!err) {
318		srq->srqn = MLX5_GET(create_rmp_out, create_out, rmpn);
319		srq->uid = in->uid;
320	}
321
322out:
323	kvfree(create_in);
324	kvfree(create_out);
325	return err;
326}
327
328static int destroy_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
329{
330	u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};
331
332	MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
333	MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn);
334	MLX5_SET(destroy_rmp_in, in, uid, srq->uid);
335	return mlx5_cmd_exec_in(dev->mdev, destroy_rmp, in);
336}
337
338static int arm_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
339		       u16 lwm)
340{
341	void *out = NULL;
342	void *in = NULL;
343	void *rmpc;
344	void *wq;
345	void *bitmask;
346	int outlen;
347	int inlen;
348	int err;
349
350	inlen = MLX5_ST_SZ_BYTES(modify_rmp_in);
351	outlen = MLX5_ST_SZ_BYTES(modify_rmp_out);
352
353	in = kvzalloc(inlen, GFP_KERNEL);
354	out = kvzalloc(outlen, GFP_KERNEL);
355	if (!in || !out) {
356		err = -ENOMEM;
357		goto out;
358	}
359
360	rmpc =	  MLX5_ADDR_OF(modify_rmp_in,   in,   ctx);
361	bitmask = MLX5_ADDR_OF(modify_rmp_in,   in,   bitmask);
362	wq   =	  MLX5_ADDR_OF(rmpc,	        rmpc, wq);
363
364	MLX5_SET(modify_rmp_in, in,	 rmp_state, MLX5_RMPC_STATE_RDY);
365	MLX5_SET(modify_rmp_in, in,	 rmpn,      srq->srqn);
366	MLX5_SET(modify_rmp_in, in, uid, srq->uid);
367	MLX5_SET(wq,		wq,	 lwm,	    lwm);
368	MLX5_SET(rmp_bitmask,	bitmask, lwm,	    1);
369	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
370	MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
371
372	err = mlx5_cmd_exec_inout(dev->mdev, modify_rmp, in, out);
373
374out:
375	kvfree(in);
376	kvfree(out);
377	return err;
378}
379
380static int query_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
381			 struct mlx5_srq_attr *out)
382{
383	u32 *rmp_out = NULL;
384	u32 *rmp_in = NULL;
385	void *rmpc;
386	int outlen;
387	int inlen;
388	int err;
389
390	outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
391	inlen = MLX5_ST_SZ_BYTES(query_rmp_in);
392
393	rmp_out = kvzalloc(outlen, GFP_KERNEL);
394	rmp_in = kvzalloc(inlen, GFP_KERNEL);
395	if (!rmp_out || !rmp_in) {
396		err = -ENOMEM;
397		goto out;
398	}
399
400	MLX5_SET(query_rmp_in, rmp_in, opcode, MLX5_CMD_OP_QUERY_RMP);
401	MLX5_SET(query_rmp_in, rmp_in, rmpn,   srq->srqn);
402	err = mlx5_cmd_exec_inout(dev->mdev, query_rmp, rmp_in, rmp_out);
403	if (err)
404		goto out;
405
406	rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
407	get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
408	if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
409		out->flags |= MLX5_SRQ_FLAG_ERR;
410
411out:
412	kvfree(rmp_out);
413	kvfree(rmp_in);
414	return err;
415}
416
417static int create_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
418			  struct mlx5_srq_attr *in)
419{
420	u32 create_out[MLX5_ST_SZ_DW(create_xrq_out)] = {0};
421	void *create_in;
422	void *xrqc;
423	void *wq;
424	int pas_size;
425	int inlen;
426	int err;
427
428	pas_size = get_pas_size(in);
429	inlen = MLX5_ST_SZ_BYTES(create_xrq_in) + pas_size;
430	create_in = kvzalloc(inlen, GFP_KERNEL);
431	if (!create_in)
432		return -ENOMEM;
433
434	xrqc = MLX5_ADDR_OF(create_xrq_in, create_in, xrq_context);
435	wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
436
437	set_wq(wq, in);
438	memcpy(MLX5_ADDR_OF(xrqc, xrqc, wq.pas), in->pas, pas_size);
439
440	if (in->type == IB_SRQT_TM) {
441		MLX5_SET(xrqc, xrqc, topology, MLX5_XRQC_TOPOLOGY_TAG_MATCHING);
442		if (in->flags & MLX5_SRQ_FLAG_RNDV)
443			MLX5_SET(xrqc, xrqc, offload, MLX5_XRQC_OFFLOAD_RNDV);
444		MLX5_SET(xrqc, xrqc,
445			 tag_matching_topology_context.log_matching_list_sz,
446			 in->tm_log_list_size);
447	}
448	MLX5_SET(xrqc, xrqc, user_index, in->user_index);
449	MLX5_SET(xrqc, xrqc, cqn, in->cqn);
450	MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ);
451	MLX5_SET(create_xrq_in, create_in, uid, in->uid);
452	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
453			    sizeof(create_out));
454	kvfree(create_in);
455	if (!err) {
456		srq->srqn = MLX5_GET(create_xrq_out, create_out, xrqn);
457		srq->uid = in->uid;
458	}
459
460	return err;
461}
462
463static int destroy_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
464{
465	u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {};
466
467	MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ);
468	MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
469	MLX5_SET(destroy_xrq_in, in, uid, srq->uid);
470
471	return mlx5_cmd_exec_in(dev->mdev, destroy_xrq, in);
472}
473
474static int arm_xrq_cmd(struct mlx5_ib_dev *dev,
475		       struct mlx5_core_srq *srq,
476		       u16 lwm)
477{
478	u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};
479
480	MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
481	MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
482	MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
483	MLX5_SET(arm_rq_in, in, lwm, lwm);
484	MLX5_SET(arm_rq_in, in, uid, srq->uid);
485
486	return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
487}
488
489static int query_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
490			 struct mlx5_srq_attr *out)
491{
492	u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {};
493	u32 *xrq_out;
494	int outlen = MLX5_ST_SZ_BYTES(query_xrq_out);
495	void *xrqc;
496	int err;
497
498	xrq_out = kvzalloc(outlen, GFP_KERNEL);
499	if (!xrq_out)
500		return -ENOMEM;
501
502	MLX5_SET(query_xrq_in, in, opcode, MLX5_CMD_OP_QUERY_XRQ);
503	MLX5_SET(query_xrq_in, in, xrqn, srq->srqn);
504
505	err = mlx5_cmd_exec_inout(dev->mdev, query_xrq, in, xrq_out);
506	if (err)
507		goto out;
508
509	xrqc = MLX5_ADDR_OF(query_xrq_out, xrq_out, xrq_context);
510	get_wq(MLX5_ADDR_OF(xrqc, xrqc, wq), out);
511	if (MLX5_GET(xrqc, xrqc, state) != MLX5_XRQC_STATE_GOOD)
512		out->flags |= MLX5_SRQ_FLAG_ERR;
513	out->tm_next_tag =
514		MLX5_GET(xrqc, xrqc,
515			 tag_matching_topology_context.append_next_index);
516	out->tm_hw_phase_cnt =
517		MLX5_GET(xrqc, xrqc,
518			 tag_matching_topology_context.hw_phase_cnt);
519	out->tm_sw_phase_cnt =
520		MLX5_GET(xrqc, xrqc,
521			 tag_matching_topology_context.sw_phase_cnt);
522
523out:
524	kvfree(xrq_out);
525	return err;
526}
527
528static int create_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
529			    struct mlx5_srq_attr *in)
530{
531	if (!dev->mdev->issi)
532		return create_srq_cmd(dev, srq, in);
533	switch (srq->common.res) {
534	case MLX5_RES_XSRQ:
535		return create_xrc_srq_cmd(dev, srq, in);
536	case MLX5_RES_XRQ:
537		return create_xrq_cmd(dev, srq, in);
538	default:
539		return create_rmp_cmd(dev, srq, in);
540	}
541}
542
543static int destroy_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
544{
545	if (!dev->mdev->issi)
546		return destroy_srq_cmd(dev, srq);
547	switch (srq->common.res) {
548	case MLX5_RES_XSRQ:
549		return destroy_xrc_srq_cmd(dev, srq);
550	case MLX5_RES_XRQ:
551		return destroy_xrq_cmd(dev, srq);
552	default:
553		return destroy_rmp_cmd(dev, srq);
554	}
555}
556
557int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
558			struct mlx5_srq_attr *in)
559{
560	struct mlx5_srq_table *table = &dev->srq_table;
561	int err;
562
563	switch (in->type) {
564	case IB_SRQT_XRC:
565		srq->common.res = MLX5_RES_XSRQ;
566		break;
567	case IB_SRQT_TM:
568		srq->common.res = MLX5_RES_XRQ;
569		break;
570	default:
571		srq->common.res = MLX5_RES_SRQ;
572	}
573
574	err = create_srq_split(dev, srq, in);
575	if (err)
576		return err;
577
578	refcount_set(&srq->common.refcount, 1);
579	init_completion(&srq->common.free);
580
581	err = xa_err(xa_store_irq(&table->array, srq->srqn, srq, GFP_KERNEL));
582	if (err)
583		goto err_destroy_srq_split;
584
585	return 0;
586
587err_destroy_srq_split:
588	destroy_srq_split(dev, srq);
589
590	return err;
591}
592
593void mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
594{
595	struct mlx5_srq_table *table = &dev->srq_table;
596	struct mlx5_core_srq *tmp;
597	int err;
598
599	tmp = xa_erase_irq(&table->array, srq->srqn);
600	if (!tmp || tmp != srq)
601		return;
602
603	err = destroy_srq_split(dev, srq);
604	if (err)
605		return;
606
607	mlx5_core_res_put(&srq->common);
608	wait_for_completion(&srq->common.free);
609}
610
611int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
612		       struct mlx5_srq_attr *out)
613{
614	if (!dev->mdev->issi)
615		return query_srq_cmd(dev, srq, out);
616	switch (srq->common.res) {
617	case MLX5_RES_XSRQ:
618		return query_xrc_srq_cmd(dev, srq, out);
619	case MLX5_RES_XRQ:
620		return query_xrq_cmd(dev, srq, out);
621	default:
622		return query_rmp_cmd(dev, srq, out);
623	}
624}
625
626int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
627		     u16 lwm, int is_srq)
628{
629	if (!dev->mdev->issi)
630		return arm_srq_cmd(dev, srq, lwm, is_srq);
631	switch (srq->common.res) {
632	case MLX5_RES_XSRQ:
633		return arm_xrc_srq_cmd(dev, srq, lwm);
634	case MLX5_RES_XRQ:
635		return arm_xrq_cmd(dev, srq, lwm);
636	default:
637		return arm_rmp_cmd(dev, srq, lwm);
638	}
639}
640
641static int srq_event_notifier(struct notifier_block *nb,
642			      unsigned long type, void *data)
643{
644	struct mlx5_srq_table *table;
645	struct mlx5_core_srq *srq;
646	struct mlx5_eqe *eqe;
647	u32 srqn;
648
649	if (type != MLX5_EVENT_TYPE_SRQ_CATAS_ERROR &&
650	    type != MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)
651		return NOTIFY_DONE;
652
653	table = container_of(nb, struct mlx5_srq_table, nb);
654
655	eqe = data;
656	srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
657
658	xa_lock(&table->array);
659	srq = xa_load(&table->array, srqn);
660	if (srq)
661		refcount_inc(&srq->common.refcount);
662	xa_unlock(&table->array);
663
664	if (!srq)
665		return NOTIFY_OK;
666
667	srq->event(srq, eqe->type);
668
669	mlx5_core_res_put(&srq->common);
670
671	return NOTIFY_OK;
672}
673
674int mlx5_init_srq_table(struct mlx5_ib_dev *dev)
675{
676	struct mlx5_srq_table *table = &dev->srq_table;
677
678	memset(table, 0, sizeof(*table));
679	xa_init_flags(&table->array, XA_FLAGS_LOCK_IRQ);
680
681	table->nb.notifier_call = srq_event_notifier;
682	mlx5_notifier_register(dev->mdev, &table->nb);
683
684	return 0;
685}
686
687void mlx5_cleanup_srq_table(struct mlx5_ib_dev *dev)
688{
689	struct mlx5_srq_table *table = &dev->srq_table;
690
691	mlx5_notifier_unregister(dev->mdev, &table->nb);
692}