/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_common.h"

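/*
 * Pick the bank with the fewest CQs currently allocated from it, so
 * that new CQs are spread evenly across all banks.
 */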
static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
{
	u32 least_load = bank[0].inuse;
	u8 bankid = 0;
	u32 bankcnt;
	u8 i;

	for (i = 1; i < HNS_ROCE_CQ_BANK_NUM; i++) {
		bankcnt = bank[i].inuse;
		if (bankcnt < least_load) {
			least_load = bankcnt;
			bankid = i;
		}
	}

	return bankid;
}

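/*
 * Allocate a CQN: take an index from the least-loaded bank's IDA and
 * encode the bank id in the low CQ_BANKID_SHIFT bits of the CQN.
 */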
static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct hns_roce_bank *bank;
	u8 bankid;
	int id;

	mutex_lock(&cq_table->bank_mutex);
	bankid = get_least_load_bankid_for_cq(cq_table->bank);
	bank = &cq_table->bank[bankid];

	id = ida_alloc_range(&bank->ida, bank->min, bank->max, GFP_KERNEL);
	if (id < 0) {
		mutex_unlock(&cq_table->bank_mutex);
		return id;
	}

	/* the lower 2 bits are the bankid */
	hr_cq->cqn = (id << CQ_BANKID_SHIFT) | bankid;
	bank->inuse++;
	mutex_unlock(&cq_table->bank_mutex);

	return 0;
}

static inline u8 get_cq_bankid(unsigned long cqn)
{
	/* The lower 2 bits of CQN are used to hash to different banks */
	return (u8)(cqn & GENMASK(1, 0));
}

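/* Return a CQN's index to its bank's IDA and decrease the bank's load. */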
static void free_cqn(struct hns_roce_dev *hr_dev, unsigned long cqn)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct hns_roce_bank *bank;

	bank = &cq_table->bank[get_cq_bankid(cqn)];

	ida_free(&bank->ida, cqn >> CQ_BANKID_SHIFT);

	mutex_lock(&cq_table->bank_mutex);
	bank->inuse--;
	mutex_unlock(&cq_table->bank_mutex);
}

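/*
 * Fill a command mailbox with the CQ context and post the CREATE_CQC
 * command so the hardware instantiates the CQ.
 */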
static int hns_roce_create_cqc(struct hns_roce_dev *hr_dev,
			       struct hns_roce_cq *hr_cq,
			       u64 *mtts, dma_addr_t dma_handle)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ibdev_err(ibdev, "failed to alloc mailbox for CQC.\n");
		return PTR_ERR(mailbox);
	}

	hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);

	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_CQC,
				     hr_cq->cqn);
	if (ret)
		ibdev_err(ibdev,
			  "failed to send create cmd for CQ(0x%lx), ret = %d.\n",
			  hr_cq->cqn, ret);

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

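/*
 * Build the software and hardware context of a CQ: look up the buffer
 * addresses from the MTR, get a CQC entry in the HEM table, publish
 * the CQ in the cqn-indexed xarray, and create the CQC in hardware.
 */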
static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u64 mtts[MTT_MIN_COUNT] = {};
	int ret;

	ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts));
	if (ret) {
		ibdev_err(ibdev, "failed to find CQ mtr, ret = %d.\n", ret);
		return ret;
	}

	/* Get CQC memory HEM (Hardware Entry Memory) table */
	ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
	if (ret) {
		ibdev_err(ibdev, "failed to get CQ(0x%lx) context, ret = %d.\n",
			  hr_cq->cqn, ret);
		return ret;
	}

	ret = xa_err(xa_store_irq(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
	if (ret) {
		ibdev_err(ibdev, "failed to xa_store CQ, ret = %d.\n", ret);
		goto err_put;
	}

	ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts,
				  hns_roce_get_mtr_ba(&hr_cq->mtr));
	if (ret)
		goto err_xa;

	return 0;

err_xa:
	xa_erase_irq(&cq_table->array, hr_cq->cqn);
err_put:
	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);

	return ret;
}

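/*
 * Destroy the CQ context in hardware, unpublish the CQ from the
 * xarray, wait for outstanding interrupt handling and references to
 * drain, then release the CQC HEM entry.
 */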
static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct device *dev = hr_dev->dev;
	int ret;

	ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_CQC,
				      hr_cq->cqn);
	if (ret)
		dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
			hr_cq->cqn);

	xa_erase_irq(&cq_table->array, hr_cq->cqn);

	/* Wait for any running interrupt handler on this CQ's EQ to finish */
	synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);

	/* wait until all references taken by event handlers are dropped */
	if (refcount_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);
	wait_for_completion(&hr_cq->free);

	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
}

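/*
 * Allocate the CQE buffer through the driver's MTR mapping helper; for
 * user CQs the buffer at @addr is pinned instead of being allocated in
 * the kernel.
 */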
static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
			struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int ret;

	buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;
	buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
	buf_attr.region_count = 1;

	ret = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
				  hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT,
				  udata, addr);
	if (ret)
		ibdev_err(ibdev, "failed to alloc CQ mtr, ret = %d.\n", ret);

	return ret;
}

static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	hns_roce_mtr_destroy(hr_dev, &hr_cq->mtr);
}

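/*
 * Set up the CQ doorbell. User CQs map a record doorbell page provided
 * by the application (when the device and ABI support it); kernel CQs
 * allocate a record doorbell if supported and always program the
 * hardware doorbell register.
 */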
static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
		       struct ib_udata *udata, unsigned long addr,
		       struct hns_roce_ib_create_cq_resp *resp)
{
	bool has_db = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB;
	struct hns_roce_ucontext *uctx;
	int err;

	if (udata) {
		if (has_db &&
		    udata->outlen >= offsetofend(typeof(*resp), cap_flags)) {
			uctx = rdma_udata_to_drv_context(udata,
					struct hns_roce_ucontext, ibucontext);
			err = hns_roce_db_map_user(uctx, addr, &hr_cq->db);
			if (err)
				return err;
			hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
			resp->cap_flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
		}
	} else {
		if (has_db) {
			err = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
			if (err)
				return err;
			hr_cq->set_ci_db = hr_cq->db.db_record;
			*hr_cq->set_ci_db = 0;
			hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
		}
		hr_cq->db_reg = hr_dev->reg_base + hr_dev->odb_offset +
				DB_REG_OFFSET * hr_dev->priv_uar.index;
	}

	return 0;
}

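/* Release the record doorbell set up by alloc_cq_db(), if any. */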
static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
		       struct ib_udata *udata)
{
	struct hns_roce_ucontext *uctx;

	if (!(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB))
		return;

	hr_cq->flags &= ~HNS_ROCE_CQ_FLAG_RECORD_DB;
	if (udata) {
		uctx = rdma_udata_to_drv_context(udata,
						 struct hns_roce_ucontext,
						 ibucontext);
		hns_roce_db_unmap_user(uctx, &hr_cq->db);
	} else {
		hns_roce_free_db(hr_dev, &hr_cq->db);
	}
}

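/* Reject CQE counts and completion vectors outside the device limits. */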
static int verify_cq_create_attr(struct hns_roce_dev *hr_dev,
				 const struct ib_cq_init_attr *attr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;

	if (!attr->cqe || attr->cqe > hr_dev->caps.max_cqes) {
		ibdev_err(ibdev, "failed to check CQ count %u, max = %u.\n",
			  attr->cqe, hr_dev->caps.max_cqes);
		return -EINVAL;
	}

	if (attr->comp_vector >= hr_dev->caps.num_comp_vectors) {
		ibdev_err(ibdev, "failed to check CQ vector = %u, max = %d.\n",
			  attr->comp_vector, hr_dev->caps.num_comp_vectors);
		return -EINVAL;
	}

	return 0;
}

static int get_cq_ucmd(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
		       struct hns_roce_ib_create_cq *ucmd)
{
	struct ib_device *ibdev = hr_cq->ib_cq.device;
	int ret;

	ret = ib_copy_from_udata(ucmd, udata, min(udata->inlen, sizeof(*ucmd)));
	if (ret) {
		ibdev_err(ibdev, "failed to copy CQ udata, ret = %d.\n", ret);
		return ret;
	}

	return 0;
}

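/*
 * Clamp the requested depth to the device minimum and round it up to
 * a power of two; ib_cq.cqe is stored as depth - 1, the largest valid
 * CQE index.
 */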
static void set_cq_param(struct hns_roce_cq *hr_cq, u32 cq_entries, int vector,
			 struct hns_roce_ib_create_cq *ucmd)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);

	cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
	cq_entries = roundup_pow_of_two(cq_entries);
	hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
	hr_cq->cq_depth = cq_entries;
	hr_cq->vector = vector;

	spin_lock_init(&hr_cq->lock);
	INIT_LIST_HEAD(&hr_cq->sq_list);
	INIT_LIST_HEAD(&hr_cq->rq_list);
}

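/*
 * Select the CQE size: kernel CQs use the device default, while user
 * CQs may request HNS_ROCE_V2_CQE_SIZE or HNS_ROCE_V3_CQE_SIZE; older
 * user ABIs that do not pass cqe_size get the V2 size.
 */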
static int set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
			struct hns_roce_ib_create_cq *ucmd)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);

	if (!udata) {
		hr_cq->cqe_size = hr_dev->caps.cqe_sz;
		return 0;
	}

	if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size)) {
		if (ucmd->cqe_size != HNS_ROCE_V2_CQE_SIZE &&
		    ucmd->cqe_size != HNS_ROCE_V3_CQE_SIZE) {
			ibdev_err(&hr_dev->ib_dev,
				  "invalid cqe size %u.\n", ucmd->cqe_size);
			return -EINVAL;
		}

		hr_cq->cqe_size = ucmd->cqe_size;
	} else {
		hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
	}

	return 0;
}

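/*
 * Verb entry point for CQ creation: validate attributes, size the CQ,
 * then allocate the CQE buffer, doorbell, CQN and hardware context in
 * order, unwinding in reverse on any failure.
 */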
int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
		       struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_ib_create_cq_resp resp = {};
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ib_create_cq ucmd = {};
	int ret;

	if (attr->flags) {
		ret = -EOPNOTSUPP;
		goto err_out;
	}

	ret = verify_cq_create_attr(hr_dev, attr);
	if (ret)
		goto err_out;

	if (udata) {
		ret = get_cq_ucmd(hr_cq, udata, &ucmd);
		if (ret)
			goto err_out;
	}

	set_cq_param(hr_cq, attr->cqe, attr->comp_vector, &ucmd);

	ret = set_cqe_size(hr_cq, udata, &ucmd);
	if (ret)
		goto err_out;

	ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc CQ buf, ret = %d.\n", ret);
		goto err_out;
	}

	ret = alloc_cq_db(hr_dev, hr_cq, udata, ucmd.db_addr, &resp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc CQ db, ret = %d.\n", ret);
		goto err_cq_buf;
	}

	ret = alloc_cqn(hr_dev, hr_cq);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc CQN, ret = %d.\n", ret);
		goto err_cq_db;
	}

	ret = alloc_cqc(hr_dev, hr_cq);
	if (ret) {
		ibdev_err(ibdev,
			  "failed to alloc CQ context, ret = %d.\n", ret);
		goto err_cqn;
	}

	if (udata) {
		resp.cqn = hr_cq->cqn;
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret)
			goto err_cqc;
	}

	hr_cq->cons_index = 0;
	hr_cq->arm_sn = 1;
	refcount_set(&hr_cq->refcount, 1);
	init_completion(&hr_cq->free);

	return 0;

err_cqc:
	free_cqc(hr_dev, hr_cq);
err_cqn:
	free_cqn(hr_dev, hr_cq->cqn);
err_cq_db:
	free_cq_db(hr_dev, hr_cq, udata);
err_cq_buf:
	free_cq_buf(hr_dev, hr_cq);
err_out:
	atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CQ_CREATE_ERR_CNT]);

	return ret;
}

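/*
 * Verb entry point for CQ destruction; releases resources in the
 * reverse order of their allocation in hns_roce_create_cq().
 */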
int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);

	free_cqc(hr_dev, hr_cq);
	free_cqn(hr_dev, hr_cq->cqn);
	free_cq_db(hr_dev, hr_cq, udata);
	free_cq_buf(hr_dev, hr_cq);

	return 0;
}

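/*
 * Completion event dispatch: bump the arm sequence number and invoke
 * the consumer's completion handler, if one is registered.
 */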
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
	struct hns_roce_cq *hr_cq;
	struct ib_cq *ibcq;

	hr_cq = xa_load(&hr_dev->cq_table.array,
			cqn & (hr_dev->caps.num_cqs - 1));
	if (!hr_cq) {
		dev_warn(hr_dev->dev, "completion event for bogus CQ 0x%06x\n",
			 cqn);
		return;
	}

	++hr_cq->arm_sn;
	ibcq = &hr_cq->ib_cq;
	if (ibcq->comp_handler)
		ibcq->comp_handler(ibcq, ibcq->cq_context);
}

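/*
 * Asynchronous event dispatch: only CQ error events are expected here.
 * A reference is taken under the xarray lock so the CQ cannot be freed
 * while its consumer's event handler runs.
 */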
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_cq *hr_cq;
	struct ib_event event;
	struct ib_cq *ibcq;

	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
		dev_err(dev, "unexpected event type 0x%x on CQ 0x%06x\n",
			event_type, cqn);
		return;
	}

	xa_lock(&hr_dev->cq_table.array);
	hr_cq = xa_load(&hr_dev->cq_table.array,
			cqn & (hr_dev->caps.num_cqs - 1));
	if (hr_cq)
		refcount_inc(&hr_cq->refcount);
	xa_unlock(&hr_dev->cq_table.array);
	if (!hr_cq) {
		dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
		return;
	}

	ibcq = &hr_cq->ib_cq;
	if (ibcq->event_handler) {
		event.device = ibcq->device;
		event.element.cq = ibcq;
		event.event = IB_EVENT_CQ_ERR;
		ibcq->event_handler(&event, ibcq->cq_context);
	}

	if (refcount_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);
}

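/*
 * Initialize the CQ table: reserve the bottom CQNs by marking them as
 * in use in their banks, then initialize each bank's IDA and range.
 */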
void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	unsigned int reserved_from_bot;
	unsigned int i;

	mutex_init(&cq_table->bank_mutex);
	xa_init(&cq_table->array);

	reserved_from_bot = hr_dev->caps.reserved_cqs;

	for (i = 0; i < reserved_from_bot; i++) {
		cq_table->bank[get_cq_bankid(i)].inuse++;
		cq_table->bank[get_cq_bankid(i)].min++;
	}

	for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++) {
		ida_init(&cq_table->bank[i].ida);
		cq_table->bank[i].max = hr_dev->caps.num_cqs /
					HNS_ROCE_CQ_BANK_NUM - 1;
	}
}

void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
{
	int i;

	for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++)
		ida_destroy(&hr_dev->cq_table.bank[i].ida);
}