drivers/infiniband/hw/hns/hns_roce_srq.c (v6.8)
  1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
  2/*
  3 * Copyright (c) 2018 Hisilicon Limited.
  4 */
  5
  6#include <linux/pci.h>
  7#include <rdma/ib_umem.h>
  8#include <rdma/uverbs_ioctl.h>
  9#include "hns_roce_device.h"
 10#include "hns_roce_cmd.h"
 11#include "hns_roce_hem.h"
 12
 13void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)
 14{
 15	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
 16	struct hns_roce_srq *srq;
 17
 18	xa_lock(&srq_table->xa);
 19	srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1));
 20	if (srq)
 21		refcount_inc(&srq->refcount);
 22	xa_unlock(&srq_table->xa);
 23
 24	if (!srq) {
 25		dev_warn(hr_dev->dev, "Async event for bogus SRQ %08x\n", srqn);
 26		return;
 27	}
 28
 29	srq->event(srq, event_type);
 30
 31	if (refcount_dec_and_test(&srq->refcount))
 32		complete(&srq->free);
 33}
 34
 35static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
 36				  enum hns_roce_event event_type)
 37{
 38	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
 39	struct ib_srq *ibsrq = &srq->ibsrq;
 40	struct ib_event event;
 41
 42	if (ibsrq->event_handler) {
 43		event.device      = ibsrq->device;
 44		event.element.srq = ibsrq;
 45		switch (event_type) {
 46		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
 47			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
 48			break;
 49		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
 50			event.event = IB_EVENT_SRQ_ERR;
 51			break;
 52		default:
 53			dev_err(hr_dev->dev,
 54			   "hns_roce:Unexpected event type 0x%x on SRQ %06lx\n",
 55			   event_type, srq->srqn);
 56			return;
 57		}
 58
 59		ibsrq->event_handler(&event, ibsrq->srq_context);
 60	}
 61}
 62
 63static int alloc_srqn(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
 64{
 65	struct hns_roce_ida *srq_ida = &hr_dev->srq_table.srq_ida;
 66	int id;
 67
 68	id = ida_alloc_range(&srq_ida->ida, srq_ida->min, srq_ida->max,
 69			     GFP_KERNEL);
 70	if (id < 0) {
 71		ibdev_err(&hr_dev->ib_dev, "failed to alloc srq(%d).\n", id);
 72		return -ENOMEM;
 73	}
 74
 75	srq->srqn = id;
 76
 77	return 0;
 78}
 79
 80static void free_srqn(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
 81{
 82	ida_free(&hr_dev->srq_table.srq_ida.ida, (int)srq->srqn);
 83}
 84
 85static int hns_roce_create_srqc(struct hns_roce_dev *hr_dev,
 86				struct hns_roce_srq *srq)
 87{
 88	struct ib_device *ibdev = &hr_dev->ib_dev;
 89	struct hns_roce_cmd_mailbox *mailbox;
 90	int ret;
 91
 92	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
 93	if (IS_ERR(mailbox)) {
 94		ibdev_err(ibdev, "failed to alloc mailbox for SRQC.\n");
 95		return PTR_ERR(mailbox);
 96	}
 97
 98	ret = hr_dev->hw->write_srqc(srq, mailbox->buf);
 99	if (ret) {
100		ibdev_err(ibdev, "failed to write SRQC.\n");
101		goto err_mbox;
102	}
103
104	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_SRQ,
105				     srq->srqn);
106	if (ret)
107		ibdev_err(ibdev, "failed to config SRQC, ret = %d.\n", ret);
108
109err_mbox:
110	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
111	return ret;
112}
113
114static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
115{
116	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
117	struct ib_device *ibdev = &hr_dev->ib_dev;
118	int ret;
119
120	ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
121	if (ret) {
122		ibdev_err(ibdev, "failed to get SRQC table, ret = %d.\n", ret);
123		return ret;
124	}
125
126	ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
127	if (ret) {
128		ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret);
129		goto err_put;
130	}
131
132	ret = hns_roce_create_srqc(hr_dev, srq);
133	if (ret)
134		goto err_xa;
135
136	return 0;
137
138err_xa:
139	xa_erase(&srq_table->xa, srq->srqn);
140err_put:
141	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
142
143	return ret;
144}
145
146static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
147{
148	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
149	int ret;
150
151	ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_SRQ,
152				      srq->srqn);
153	if (ret)
154		dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
155			ret, srq->srqn);
156
157	xa_erase(&srq_table->xa, srq->srqn);
158
159	if (refcount_dec_and_test(&srq->refcount))
160		complete(&srq->free);
161	wait_for_completion(&srq->free);
162
163	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
164}
165
166static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
167			 struct ib_udata *udata, unsigned long addr)
168{
169	struct hns_roce_idx_que *idx_que = &srq->idx_que;
170	struct ib_device *ibdev = &hr_dev->ib_dev;
171	struct hns_roce_buf_attr buf_attr = {};
172	int ret;
173
174	srq->idx_que.entry_shift = ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ);
175
176	buf_attr.page_shift = hr_dev->caps.idx_buf_pg_sz + PAGE_SHIFT;
177	buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
178					srq->idx_que.entry_shift);
179	buf_attr.region[0].hopnum = hr_dev->caps.idx_hop_num;
180	buf_attr.region_count = 1;
181
182	ret = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr,
183				  hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT,
184				  udata, addr);
185	if (ret) {
186		ibdev_err(ibdev,
187			  "failed to alloc SRQ idx mtr, ret = %d.\n", ret);
188		return ret;
189	}
190
191	if (!udata) {
192		idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
193		if (!idx_que->bitmap) {
194			ibdev_err(ibdev, "failed to alloc SRQ idx bitmap.\n");
195			ret = -ENOMEM;
196			goto err_idx_mtr;
197		}
198	}
199
200	idx_que->head = 0;
201	idx_que->tail = 0;
202
203	return 0;
204err_idx_mtr:
205	hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
206
207	return ret;
208}
209
210static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
211{
212	struct hns_roce_idx_que *idx_que = &srq->idx_que;
213
214	bitmap_free(idx_que->bitmap);
215	idx_que->bitmap = NULL;
216	hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
217}
218
219static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev,
220			     struct hns_roce_srq *srq,
221			     struct ib_udata *udata, unsigned long addr)
222{
223	struct ib_device *ibdev = &hr_dev->ib_dev;
224	struct hns_roce_buf_attr buf_attr = {};
225	int ret;
226
227	srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
228						      HNS_ROCE_SGE_SIZE *
229						      srq->max_gs)));
230
231	buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + PAGE_SHIFT;
232	buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
233							 srq->wqe_shift);
234	buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
235	buf_attr.region_count = 1;
236
237	ret = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr,
238				  hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT,
239				  udata, addr);
240	if (ret)
241		ibdev_err(ibdev,
242			  "failed to alloc SRQ buf mtr, ret = %d.\n", ret);
243
244	return ret;
245}
246
247static void free_srq_wqe_buf(struct hns_roce_dev *hr_dev,
248			     struct hns_roce_srq *srq)
249{
250	hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
251}
252
253static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
254{
255	srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
256	if (!srq->wrid)
257		return -ENOMEM;
258
259	return 0;
260}
261
262static void free_srq_wrid(struct hns_roce_srq *srq)
263{
264	kvfree(srq->wrid);
265	srq->wrid = NULL;
266}
267
268static u32 proc_srq_sge(struct hns_roce_dev *dev, struct hns_roce_srq *hr_srq,
269			bool user)
270{
271	u32 max_sge = dev->caps.max_srq_sges;
272
273	if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
274		return max_sge;
275
276	/* Reserve SGEs only for HIP08 in kernel; The userspace driver will
277	 * calculate number of max_sge with reserved SGEs when allocating wqe
278	 * buf, so there is no need to do this again in kernel. But the number
279	 * may exceed the capacity of SGEs recorded in the firmware, so the
280	 * kernel driver should just adapt the value accordingly.
281	 */
282	if (user)
283		max_sge = roundup_pow_of_two(max_sge + 1);
284	else
285		hr_srq->rsv_sge = 1;
286
287	return max_sge;
288}
289
290static int set_srq_basic_param(struct hns_roce_srq *srq,
291			       struct ib_srq_init_attr *init_attr,
292			       struct ib_udata *udata)
293{
294	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
295	struct ib_srq_attr *attr = &init_attr->attr;
296	u32 max_sge;
297
298	max_sge = proc_srq_sge(hr_dev, srq, !!udata);
299	if (attr->max_wr > hr_dev->caps.max_srq_wrs ||
300	    attr->max_sge > max_sge) {
301		ibdev_err(&hr_dev->ib_dev,
302			  "invalid SRQ attr, depth = %u, sge = %u.\n",
303			  attr->max_wr, attr->max_sge);
304		return -EINVAL;
305	}
306
307	attr->max_wr = max_t(u32, attr->max_wr, HNS_ROCE_MIN_SRQ_WQE_NUM);
308	srq->wqe_cnt = roundup_pow_of_two(attr->max_wr);
309	srq->max_gs = roundup_pow_of_two(attr->max_sge + srq->rsv_sge);
310
311	attr->max_wr = srq->wqe_cnt;
312	attr->max_sge = srq->max_gs - srq->rsv_sge;
313	attr->srq_limit = 0;
314
315	return 0;
316}
317
318static void set_srq_ext_param(struct hns_roce_srq *srq,
319			      struct ib_srq_init_attr *init_attr)
320{
321	srq->cqn = ib_srq_has_cq(init_attr->srq_type) ?
322		   to_hr_cq(init_attr->ext.cq)->cqn : 0;
323
324	srq->xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
325		     to_hr_xrcd(init_attr->ext.xrc.xrcd)->xrcdn : 0;
326}
327
328static int set_srq_param(struct hns_roce_srq *srq,
329			 struct ib_srq_init_attr *init_attr,
330			 struct ib_udata *udata)
331{
332	int ret;
333
334	ret = set_srq_basic_param(srq, init_attr, udata);
335	if (ret)
336		return ret;
337
338	set_srq_ext_param(srq, init_attr);
339
340	return 0;
341}
342
343static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
344			 struct ib_udata *udata)
345{
346	struct hns_roce_ib_create_srq ucmd = {};
347	int ret;
348
349	if (udata) {
350		ret = ib_copy_from_udata(&ucmd, udata,
351					 min(udata->inlen, sizeof(ucmd)));
352		if (ret) {
353			ibdev_err(&hr_dev->ib_dev,
354				  "failed to copy SRQ udata, ret = %d.\n",
355				  ret);
356			return ret;
357		}
358	}
359
360	ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr);
361	if (ret)
362		return ret;
363
364	ret = alloc_srq_wqe_buf(hr_dev, srq, udata, ucmd.buf_addr);
365	if (ret)
366		goto err_idx;
367
368	if (!udata) {
369		ret = alloc_srq_wrid(hr_dev, srq);
370		if (ret)
371			goto err_wqe_buf;
372	}
373
374	return 0;
375
376err_wqe_buf:
377	free_srq_wqe_buf(hr_dev, srq);
378err_idx:
379	free_srq_idx(hr_dev, srq);
380
381	return ret;
382}
383
384static void free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
385{
386	free_srq_wrid(srq);
387	free_srq_wqe_buf(hr_dev, srq);
388	free_srq_idx(hr_dev, srq);
389}
390
391static int get_srq_ucmd(struct hns_roce_srq *srq, struct ib_udata *udata,
392			struct hns_roce_ib_create_srq *ucmd)
393{
394	struct ib_device *ibdev = srq->ibsrq.device;
395	int ret;
396
397	ret = ib_copy_from_udata(ucmd, udata, min(udata->inlen, sizeof(*ucmd)));
398	if (ret) {
399		ibdev_err(ibdev, "failed to copy SRQ udata, ret = %d.\n", ret);
400		return ret;
401	}
402
403	return 0;
404}
405
406static void free_srq_db(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
407			struct ib_udata *udata)
408{
409	struct hns_roce_ucontext *uctx;
410
411	if (!(srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB))
412		return;
413
414	srq->cap_flags &= ~HNS_ROCE_SRQ_CAP_RECORD_DB;
415	if (udata) {
416		uctx = rdma_udata_to_drv_context(udata,
417						 struct hns_roce_ucontext,
418						 ibucontext);
419		hns_roce_db_unmap_user(uctx, &srq->rdb);
420	} else {
421		hns_roce_free_db(hr_dev, &srq->rdb);
422	}
423}
424
425static int alloc_srq_db(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
426			struct ib_udata *udata,
427			struct hns_roce_ib_create_srq_resp *resp)
428{
429	struct hns_roce_ib_create_srq ucmd = {};
430	struct hns_roce_ucontext *uctx;
431	int ret;
432
433	if (udata) {
434		ret = get_srq_ucmd(srq, udata, &ucmd);
435		if (ret)
436			return ret;
437
438		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ_RECORD_DB) &&
439		    (ucmd.req_cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB)) {
440			uctx = rdma_udata_to_drv_context(udata,
441					struct hns_roce_ucontext, ibucontext);
442			ret = hns_roce_db_map_user(uctx, ucmd.db_addr,
443						   &srq->rdb);
444			if (ret)
445				return ret;
446
447			srq->cap_flags |= HNS_ROCE_RSP_SRQ_CAP_RECORD_DB;
448		}
449	} else {
450		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ_RECORD_DB) {
451			ret = hns_roce_alloc_db(hr_dev, &srq->rdb, 1);
452			if (ret)
453				return ret;
454
455			*srq->rdb.db_record = 0;
456			srq->cap_flags |= HNS_ROCE_RSP_SRQ_CAP_RECORD_DB;
457		}
458		srq->db_reg = hr_dev->reg_base + SRQ_DB_REG;
459	}
460
461	return 0;
462}
463
464int hns_roce_create_srq(struct ib_srq *ib_srq,
465			struct ib_srq_init_attr *init_attr,
466			struct ib_udata *udata)
467{
468	struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
469	struct hns_roce_ib_create_srq_resp resp = {};
470	struct hns_roce_srq *srq = to_hr_srq(ib_srq);
471	int ret;
472
473	mutex_init(&srq->mutex);
474	spin_lock_init(&srq->lock);
475
476	ret = set_srq_param(srq, init_attr, udata);
477	if (ret)
478		goto err_out;
479
480	ret = alloc_srq_buf(hr_dev, srq, udata);
481	if (ret)
482		goto err_out;
483
484	ret = alloc_srq_db(hr_dev, srq, udata, &resp);
485	if (ret)
486		goto err_srq_buf;
487
488	ret = alloc_srqn(hr_dev, srq);
489	if (ret)
490		goto err_srq_db;
491
492	ret = alloc_srqc(hr_dev, srq);
493	if (ret)
494		goto err_srqn;
495
496	if (udata) {
497		resp.cap_flags = srq->cap_flags;
498		resp.srqn = srq->srqn;
499		if (ib_copy_to_udata(udata, &resp,
500				     min(udata->outlen, sizeof(resp)))) {
501			ret = -EFAULT;
502			goto err_srqc;
503		}
504	}
505
506	srq->event = hns_roce_ib_srq_event;
507	refcount_set(&srq->refcount, 1);
508	init_completion(&srq->free);
509
510	return 0;
511
512err_srqc:
513	free_srqc(hr_dev, srq);
514err_srqn:
515	free_srqn(hr_dev, srq);
516err_srq_db:
517	free_srq_db(hr_dev, srq, udata);
518err_srq_buf:
519	free_srq_buf(hr_dev, srq);
520err_out:
521	atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_SRQ_CREATE_ERR_CNT]);
522
523	return ret;
524}
525
526int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
527{
528	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
529	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
530
531	free_srqc(hr_dev, srq);
532	free_srqn(hr_dev, srq);
533	free_srq_db(hr_dev, srq, udata);
534	free_srq_buf(hr_dev, srq);
535	return 0;
536}
537
538void hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
539{
540	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
541	struct hns_roce_ida *srq_ida = &srq_table->srq_ida;
542
543	xa_init(&srq_table->xa);
544
545	ida_init(&srq_ida->ida);
546	srq_ida->max = hr_dev->caps.num_srqs - 1;
547	srq_ida->min = hr_dev->caps.reserved_srqs;
548}
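
A note on how the v6.8 code above is reached: consumers do not call hns_roce_create_srq() directly; the RDMA core verb ib_create_srq() validates the request and dispatches to the provider's create_srq hook, which for this driver is hns_roce_create_srq(). The sketch below is an editor's illustration of that consumer side, not part of hns_roce_srq.c; example_create_srq() and the queue sizes are made up for illustration, while ib_create_srq(), struct ib_srq_init_attr and IB_SRQT_BASIC are the standard kernel verbs API.

/* Editor's sketch (kernel verbs consumer side), assuming an existing ib_pd. */
#include <rdma/ib_verbs.h>

static struct ib_srq *example_create_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr init_attr = {
		.srq_type = IB_SRQT_BASIC,
		.attr = {
			.max_wr	 = 64,	/* illustrative SRQ depth */
			.max_sge = 2,	/* illustrative SGEs per receive WQE */
		},
	};

	/*
	 * Depth/SGE limits are enforced by the provider; for hns this is
	 * set_srq_basic_param() above, which returns -EINVAL when the
	 * request exceeds the device caps.
	 */
	return ib_create_srq(pd, &init_attr);	/* ERR_PTR() on failure */
}

On success the returned ib_srq is embedded in the hns_roce_srq allocated above (to_hr_srq() is a container_of()), and receives are posted to it through ib_post_srq_recv(), which calls the provider's post_srq_recv verb.
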
drivers/infiniband/hw/hns/hns_roce_srq.c (v5.4)
  1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
  2/*
  3 * Copyright (c) 2018 Hisilicon Limited.
  4 */
  5
  6#include <rdma/ib_umem.h>
  7#include <rdma/hns-abi.h>
  8#include "hns_roce_device.h"
  9#include "hns_roce_cmd.h"
 10#include "hns_roce_hem.h"
 11
 12void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)
 13{
 14	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
 15	struct hns_roce_srq *srq;
 16
 17	xa_lock(&srq_table->xa);
 18	srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1));
 19	if (srq)
 20		atomic_inc(&srq->refcount);
 21	xa_unlock(&srq_table->xa);
 22
 23	if (!srq) {
 24		dev_warn(hr_dev->dev, "Async event for bogus SRQ %08x\n", srqn);
 25		return;
 26	}
 27
 28	srq->event(srq, event_type);
 29
 30	if (atomic_dec_and_test(&srq->refcount))
 31		complete(&srq->free);
 32}
 33
 34static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
 35				  enum hns_roce_event event_type)
 36{
 37	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
 38	struct ib_srq *ibsrq = &srq->ibsrq;
 39	struct ib_event event;
 40
 41	if (ibsrq->event_handler) {
 42		event.device      = ibsrq->device;
 43		event.element.srq = ibsrq;
 44		switch (event_type) {
 45		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
 46			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
 47			break;
 48		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
 49			event.event = IB_EVENT_SRQ_ERR;
 50			break;
 51		default:
 52			dev_err(hr_dev->dev,
 53			   "hns_roce:Unexpected event type 0x%x on SRQ %06lx\n",
 54			   event_type, srq->srqn);
 55			return;
 56		}
 57
 58		ibsrq->event_handler(&event, ibsrq->srq_context);
 59	}
 60}
 61
 62static int hns_roce_sw2hw_srq(struct hns_roce_dev *dev,
 63			      struct hns_roce_cmd_mailbox *mailbox,
 64			      unsigned long srq_num)
 65{
 66	return hns_roce_cmd_mbox(dev, mailbox->dma, 0, srq_num, 0,
 67				 HNS_ROCE_CMD_SW2HW_SRQ,
 68				 HNS_ROCE_CMD_TIMEOUT_MSECS);
 69}
 70
 71static int hns_roce_hw2sw_srq(struct hns_roce_dev *dev,
 72			     struct hns_roce_cmd_mailbox *mailbox,
 73			     unsigned long srq_num)
 74{
 75	return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
 76				 mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_SRQ,
 77				 HNS_ROCE_CMD_TIMEOUT_MSECS);
 78}
 79
 80static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
 81			      u16 xrcd, struct hns_roce_mtt *hr_mtt,
 82			      u64 db_rec_addr, struct hns_roce_srq *srq)
 83{
 84	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
 85	struct hns_roce_cmd_mailbox *mailbox;
 86	dma_addr_t dma_handle_wqe;
 87	dma_addr_t dma_handle_idx;
 88	u64 *mtts_wqe;
 89	u64 *mtts_idx;
 90	int ret;
 91
 92	/* Get the physical address of srq buf */
 93	mtts_wqe = hns_roce_table_find(hr_dev,
 94				       &hr_dev->mr_table.mtt_srqwqe_table,
 95				       srq->mtt.first_seg,
 96				       &dma_handle_wqe);
 97	if (!mtts_wqe) {
 98		dev_err(hr_dev->dev,
 99			"SRQ alloc.Failed to find srq buf addr.\n");
100		return -EINVAL;
101	}
102
103	/* Get physical address of idx que buf */
104	mtts_idx = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_idx_table,
105				       srq->idx_que.mtt.first_seg,
106				       &dma_handle_idx);
107	if (!mtts_idx) {
108		dev_err(hr_dev->dev,
109			"SRQ alloc.Failed to find idx que buf addr.\n");
110		return -EINVAL;
111	}
112
113	ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
114	if (ret == -1) {
115		dev_err(hr_dev->dev, "SRQ alloc.Failed to alloc index.\n");
116		return -ENOMEM;
117	}
118
119	ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
120	if (ret)
121		goto err_out;
122
123	ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
124	if (ret)
125		goto err_put;
126
127	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
128	if (IS_ERR(mailbox)) {
129		ret = PTR_ERR(mailbox);
130		goto err_xa;
131	}
132
133	hr_dev->hw->write_srqc(hr_dev, srq, pdn, xrcd, cqn, mailbox->buf,
134			       mtts_wqe, mtts_idx, dma_handle_wqe,
135			       dma_handle_idx);
136
137	ret = hns_roce_sw2hw_srq(hr_dev, mailbox, srq->srqn);
138	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
139	if (ret)
140		goto err_xa;
141
142	atomic_set(&srq->refcount, 1);
143	init_completion(&srq->free);
144	return ret;
145
146err_xa:
147	xa_erase(&srq_table->xa, srq->srqn);
148
149err_put:
150	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
151
152err_out:
153	hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
154	return ret;
155}
156
157static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
158			      struct hns_roce_srq *srq)
159{
160	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
161	int ret;
162
163	ret = hns_roce_hw2sw_srq(hr_dev, NULL, srq->srqn);
164	if (ret)
165		dev_err(hr_dev->dev, "HW2SW_SRQ failed (%d) for CQN %06lx\n",
166			ret, srq->srqn);
167
168	xa_erase(&srq_table->xa, srq->srqn);
169
170	if (atomic_dec_and_test(&srq->refcount))
171		complete(&srq->free);
172	wait_for_completion(&srq->free);
173
174	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
175	hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
176}
177
178static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
179			   int srq_buf_size)
180{
181	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
182	struct hns_roce_ib_create_srq  ucmd;
183	u32 page_shift;
184	u32 npages;
185	int ret;
186
187	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
188		return -EFAULT;
189
190	srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);
191	if (IS_ERR(srq->umem))
192		return PTR_ERR(srq->umem);
193
194	npages = (ib_umem_page_count(srq->umem) +
195		(1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
196		(1 << hr_dev->caps.srqwqe_buf_pg_sz);
197	page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
198	ret = hns_roce_mtt_init(hr_dev, npages, page_shift, &srq->mtt);
199	if (ret)
200		goto err_user_buf;
201
202	ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
203	if (ret)
204		goto err_user_srq_mtt;
205
206	/* config index queue BA */
207	srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,
208					srq->idx_que.buf_size, 0, 0);
209	if (IS_ERR(srq->idx_que.umem)) {
210		dev_err(hr_dev->dev, "ib_umem_get error for index queue\n");
211		ret = PTR_ERR(srq->idx_que.umem);
212		goto err_user_srq_mtt;
213	}
214
215	ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(srq->idx_que.umem),
216				PAGE_SHIFT, &srq->idx_que.mtt);
217
218	if (ret) {
219		dev_err(hr_dev->dev, "hns_roce_mtt_init error for idx que\n");
220		goto err_user_idx_mtt;
221	}
222
223	ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt,
224					 srq->idx_que.umem);
225	if (ret) {
226		dev_err(hr_dev->dev,
227			"hns_roce_ib_umem_write_mtt error for idx que\n");
228		goto err_user_idx_buf;
229	}
230
231	return 0;
232
233err_user_idx_buf:
234	hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
235
236err_user_idx_mtt:
237	ib_umem_release(srq->idx_que.umem);
238
239err_user_srq_mtt:
240	hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
241
242err_user_buf:
243	ib_umem_release(srq->umem);
244
245	return ret;
246}
247
248static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
249				   u32 page_shift)
250{
251	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
252	struct hns_roce_idx_que *idx_que = &srq->idx_que;
253
254	idx_que->bitmap = bitmap_zalloc(srq->max, GFP_KERNEL);
255	if (!idx_que->bitmap)
256		return -ENOMEM;
257
258	idx_que->buf_size = srq->idx_que.buf_size;
259
260	if (hns_roce_buf_alloc(hr_dev, idx_que->buf_size, (1 << page_shift) * 2,
261			       &idx_que->idx_buf, page_shift)) {
262		bitmap_free(idx_que->bitmap);
263		return -ENOMEM;
264	}
265
266	return 0;
267}
268
269static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size)
270{
271	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
272	u32 page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
273	int ret;
274
275	if (hns_roce_buf_alloc(hr_dev, srq_buf_size, (1 << page_shift) * 2,
276			       &srq->buf, page_shift))
277		return -ENOMEM;
278
279	srq->head = 0;
280	srq->tail = srq->max - 1;
281
282	ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift,
283				&srq->mtt);
284	if (ret)
285		goto err_kernel_buf;
286
287	ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
288	if (ret)
289		goto err_kernel_srq_mtt;
290
291	page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
292	ret = hns_roce_create_idx_que(srq->ibsrq.pd, srq, page_shift);
293	if (ret) {
294		dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n", ret);
295		goto err_kernel_srq_mtt;
296	}
297
298	/* Init mtt table for idx_que */
299	ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
300				srq->idx_que.idx_buf.page_shift,
301				&srq->idx_que.mtt);
302	if (ret)
303		goto err_kernel_create_idx;
304
305	/* Write buffer address into the mtt table */
306	ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
307				     &srq->idx_que.idx_buf);
308	if (ret)
309		goto err_kernel_idx_buf;
310
311	srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
312	if (!srq->wrid) {
313		ret = -ENOMEM;
314		goto err_kernel_idx_buf;
315	}
316
317	return 0;
318
319err_kernel_idx_buf:
320	hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
321
322err_kernel_create_idx:
323	hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
324			  &srq->idx_que.idx_buf);
325	kfree(srq->idx_que.bitmap);
326
327err_kernel_srq_mtt:
328	hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
329
330err_kernel_buf:
331	hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
332
333	return ret;
334}
335
336static void destroy_user_srq(struct hns_roce_dev *hr_dev,
337			     struct hns_roce_srq *srq)
338{
339	hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
340	ib_umem_release(srq->idx_que.umem);
341	hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
342	ib_umem_release(srq->umem);
343}
344
345static void destroy_kernel_srq(struct hns_roce_dev *hr_dev,
346			       struct hns_roce_srq *srq, int srq_buf_size)
347{
348	kvfree(srq->wrid);
349	hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
350	hns_roce_buf_free(hr_dev, srq->idx_que.buf_size, &srq->idx_que.idx_buf);
351	kfree(srq->idx_que.bitmap);
352	hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
353	hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
354}
355
356int hns_roce_create_srq(struct ib_srq *ib_srq,
357			struct ib_srq_init_attr *srq_init_attr,
358			struct ib_udata *udata)
359{
360	struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
361	struct hns_roce_ib_create_srq_resp resp = {};
362	struct hns_roce_srq *srq = to_hr_srq(ib_srq);
363	int srq_desc_size;
364	int srq_buf_size;
365	int ret = 0;
366	u32 cqn;
367
368	/* Check the actual SRQ wqe and SRQ sge num */
369	if (srq_init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
370	    srq_init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
371		return -EINVAL;
372
373	mutex_init(&srq->mutex);
374	spin_lock_init(&srq->lock);
375
376	srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
377	srq->max_gs = srq_init_attr->attr.max_sge;
378
379	srq_desc_size = roundup_pow_of_two(max(16, 16 * srq->max_gs));
380
381	srq->wqe_shift = ilog2(srq_desc_size);
382
383	srq_buf_size = srq->max * srq_desc_size;
384
385	srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
386	srq->idx_que.buf_size = srq->max * srq->idx_que.entry_sz;
387	srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
388	srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;
389
390	if (udata) {
391		ret = create_user_srq(srq, udata, srq_buf_size);
392		if (ret) {
393			dev_err(hr_dev->dev, "Create user srq failed\n");
394			goto err_srq;
395		}
396	} else {
397		ret = create_kernel_srq(srq, srq_buf_size);
398		if (ret) {
399			dev_err(hr_dev->dev, "Create kernel srq failed\n");
400			goto err_srq;
401		}
402	}
403
404	cqn = ib_srq_has_cq(srq_init_attr->srq_type) ?
405	      to_hr_cq(srq_init_attr->ext.cq)->cqn : 0;
406
407	srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
408
409	ret = hns_roce_srq_alloc(hr_dev, to_hr_pd(ib_srq->pd)->pdn, cqn, 0,
410				 &srq->mtt, 0, srq);
411	if (ret)
412		goto err_wrid;
413
414	srq->event = hns_roce_ib_srq_event;
415	resp.srqn = srq->srqn;
416
417	if (udata) {
418		if (ib_copy_to_udata(udata, &resp,
419				     min(udata->outlen, sizeof(resp)))) {
420			ret = -EFAULT;
421			goto err_srqc_alloc;
422		}
423	}
424
425	return 0;
426
427err_srqc_alloc:
428	hns_roce_srq_free(hr_dev, srq);
429
430err_wrid:
431	if (udata)
432		destroy_user_srq(hr_dev, srq);
433	else
434		destroy_kernel_srq(hr_dev, srq, srq_buf_size);
435
436err_srq:
437	return ret;
438}
439
440void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
441{
442	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
443	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
444
445	hns_roce_srq_free(hr_dev, srq);
446	hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
447
448	if (udata) {
449		hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
450	} else {
451		kvfree(srq->wrid);
452		hns_roce_buf_free(hr_dev, srq->max << srq->wqe_shift,
453				  &srq->buf);
454	}
455	ib_umem_release(srq->idx_que.umem);
456	ib_umem_release(srq->umem);
457}
458
459int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
460{
461	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
462
463	xa_init(&srq_table->xa);
464
465	return hns_roce_bitmap_init(&srq_table->bitmap, hr_dev->caps.num_srqs,
466				    hr_dev->caps.num_srqs - 1,
467				    hr_dev->caps.reserved_srqs, 0);
468}
469
470void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev)
471{
472	hns_roce_bitmap_cleanup(&hr_dev->srq_table.bitmap);
473}