// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

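/*
 * Program RX steering for a vPort: set the default RX object, upload the
 * RSS indirection table and hash key, and enable RSS when the table has
 * more than one entry.
 */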
static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
				      struct net_device *ndev,
				      mana_handle_t default_rxobj,
				      mana_handle_t ind_table[],
				      u32 log_ind_tbl_size, u32 rx_hash_key_len,
				      u8 *rx_hash_key)
{
	struct mana_port_context *mpc = netdev_priv(ndev);
	struct mana_cfg_rx_steer_req *req = NULL;
	struct mana_cfg_rx_steer_resp resp = {};
	mana_handle_t *req_indir_tab;
	struct gdma_context *gc;
	struct gdma_dev *mdev;
	u32 req_buf_size;
	int i, err;

	mdev = dev->gdma_dev;
	gc = mdev->gdma_context;

	req_buf_size =
		sizeof(*req) + sizeof(mana_handle_t) * MANA_INDIRECT_TABLE_SIZE;
	req = kzalloc(req_buf_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
			     sizeof(resp));

	req->vport = mpc->port_handle;
	req->rx_enable = 1;
	req->update_default_rxobj = 1;
	req->default_rxobj = default_rxobj;
	req->hdr.dev_id = mdev->dev_id;

	/* Enable RSS if the indirection table has more than one entry */
	if (log_ind_tbl_size)
		req->rss_enable = true;

	req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE;
	req->indir_tab_offset = sizeof(*req);
	req->update_indir_tab = true;

	req_indir_tab = (mana_handle_t *)(req + 1);
	/* The indirection table passed to the hardware must have
	 * MANA_INDIRECT_TABLE_SIZE entries. Wrap the verbs-supplied
	 * ind_table around to fill MANA_INDIRECT_TABLE_SIZE entries
	 * if required.
	 */
	ibdev_dbg(&dev->ib_dev, "ind table size %u\n", 1 << log_ind_tbl_size);
	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
		req_indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)];
		ibdev_dbg(&dev->ib_dev, "index %u handle 0x%llx\n", i,
			  req_indir_tab[i]);
	}

	req->update_hashkey = true;
	if (rx_hash_key_len)
		memcpy(req->hashkey, rx_hash_key, rx_hash_key_len);
	else
		netdev_rss_key_fill(req->hashkey, MANA_HASH_KEY_SIZE);

	ibdev_dbg(&dev->ib_dev, "vport handle %llu default_rxobj 0x%llx\n",
		  req->vport, default_rxobj);

	err = mana_gd_send_request(gc, req_buf_size, req, sizeof(resp), &resp);
	if (err) {
		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
		goto out;
	}

	if (resp.hdr.status) {
		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
			   resp.hdr.status);
		err = -EPROTO;
		goto out;
	}

	netdev_info(ndev, "Configured steering vPort %llu log_entries %u\n",
		    mpc->port_handle, log_ind_tbl_size);

out:
	kfree(req);
	return err;
}

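/*
 * Create an RSS QP: one hardware RQ object per WQ in the receive WQ
 * indirection table, then steer the vPort's RX traffic across them.
 */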
static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
				 struct ib_qp_init_attr *attr,
				 struct ib_udata *udata)
{
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_dev *mdev =
		container_of(pd->device, struct mana_ib_dev, ib_dev);
	struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
	struct mana_ib_create_qp_rss_resp resp = {};
	struct mana_ib_create_qp_rss ucmd = {};
	struct gdma_dev *gd = mdev->gdma_dev;
	mana_handle_t *mana_ind_table;
	struct mana_port_context *mpc;
	struct mana_context *mc;
	struct net_device *ndev;
	struct mana_ib_cq *cq;
	struct mana_ib_wq *wq;
	unsigned int ind_tbl_size;
	struct ib_cq *ibcq;
	struct ib_wq *ibwq;
	int i = 0;
	u32 port;
	int ret;

	mc = gd->driver_data;

	if (!udata || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (ret) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed copy from udata for create rss-qp, err %d\n",
			  ret);
		return ret;
	}

	if (attr->cap.max_recv_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_recv_wr %d exceeding limit\n",
			  attr->cap.max_recv_wr);
		return -EINVAL;
	}

	if (attr->cap.max_recv_sge > MAX_RX_WQE_SGL_ENTRIES) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_recv_sge %d exceeding limit\n",
			  attr->cap.max_recv_sge);
		return -EINVAL;
	}

	ind_tbl_size = 1 << ind_tbl->log_ind_tbl_size;
	if (ind_tbl_size > MANA_INDIRECT_TABLE_SIZE) {
		ibdev_dbg(&mdev->ib_dev,
			  "Indirect table size %d exceeding limit\n",
			  ind_tbl_size);
		return -EINVAL;
	}

	if (ucmd.rx_hash_function != MANA_IB_RX_HASH_FUNC_TOEPLITZ) {
		ibdev_dbg(&mdev->ib_dev,
			  "RX Hash function is not supported, %d\n",
			  ucmd.rx_hash_function);
		return -EINVAL;
	}

	/* IB ports start with 1, MANA ports start with 0 */
	port = ucmd.port;
	if (port < 1 || port > mc->num_ports) {
		ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
			  port);
		return -EINVAL;
	}
	ndev = mc->ports[port - 1];
	mpc = netdev_priv(ndev);

	ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n",
		  ucmd.rx_hash_function, port);

	mana_ind_table = kcalloc(ind_tbl_size, sizeof(mana_handle_t),
				 GFP_KERNEL);
	if (!mana_ind_table) {
		ret = -ENOMEM;
		goto fail;
	}

	qp->port = port;

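	/* Create one hardware RQ object, bound to its CQ, for each WQ in
	 * the indirection table.
	 */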
	for (i = 0; i < ind_tbl_size; i++) {
		struct mana_obj_spec wq_spec = {};
		struct mana_obj_spec cq_spec = {};

		ibwq = ind_tbl->ind_tbl[i];
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);

		ibcq = ibwq->cq;
		cq = container_of(ibcq, struct mana_ib_cq, ibcq);

		wq_spec.gdma_region = wq->gdma_region;
		wq_spec.queue_size = wq->wq_buf_size;

		cq_spec.gdma_region = cq->gdma_region;
		cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
		cq_spec.modr_ctx_id = 0;
		cq_spec.attached_eq = GDMA_CQ_NO_EQ;

		ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
					 &wq_spec, &cq_spec, &wq->rx_object);
		if (ret)
			goto fail;

		/* The GDMA regions are now owned by the WQ object */
		wq->gdma_region = GDMA_INVALID_DMA_REGION;
		cq->gdma_region = GDMA_INVALID_DMA_REGION;

		wq->id = wq_spec.queue_index;
		cq->id = cq_spec.queue_index;

		ibdev_dbg(&mdev->ib_dev,
			  "ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
			  ret, wq->rx_object, wq->id, cq->id);

		resp.entries[i].cqid = cq->id;
		resp.entries[i].wqid = wq->id;

		mana_ind_table[i] = wq->rx_object;
	}
	resp.num_entries = i;

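	/* Steer the vPort's RX traffic across the newly created RQ objects */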
	ret = mana_ib_cfg_vport_steering(mdev, ndev, wq->rx_object,
					 mana_ind_table,
					 ind_tbl->log_ind_tbl_size,
					 ucmd.rx_hash_key_len,
					 ucmd.rx_hash_key);
	if (ret)
		goto fail;

	ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (ret) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy to udata create rss-qp, %d\n",
			  ret);
		goto fail;
	}

	kfree(mana_ind_table);

	return 0;

fail:
	while (i-- > 0) {
		ibwq = ind_tbl->ind_tbl[i];
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
	}

	kfree(mana_ind_table);

	return ret;
}

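/*
 * Create a raw-packet QP: pin the user-space send queue buffer, register
 * it as a GDMA DMA region, and create a hardware SQ object on the vPort
 * shared with the Ethernet driver.
 */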
static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
				 struct ib_qp_init_attr *attr,
				 struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_dev *mdev =
		container_of(ibpd->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_cq *send_cq =
		container_of(attr->send_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_ucontext *mana_ucontext =
		rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
					  ibucontext);
	struct mana_ib_create_qp_resp resp = {};
	struct gdma_dev *gd = mdev->gdma_dev;
	struct mana_ib_create_qp ucmd = {};
	struct mana_obj_spec wq_spec = {};
	struct mana_obj_spec cq_spec = {};
	struct mana_port_context *mpc;
	struct mana_context *mc;
	struct net_device *ndev;
	struct ib_umem *umem;
	int err;
	u32 port;

	mc = gd->driver_data;

	if (!mana_ucontext || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy from udata create qp-raw, %d\n", err);
		return err;
	}

	/* IB ports start with 1, MANA Ethernet ports start with 0 */
	port = ucmd.port;
	if (port < 1 || port > mc->num_ports)
		return -EINVAL;

	if (attr->cap.max_send_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_send_wr %d exceeding limit\n",
			  attr->cap.max_send_wr);
		return -EINVAL;
	}

	if (attr->cap.max_send_sge > MAX_TX_WQE_SGL_ENTRIES) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_send_sge %d exceeding limit\n",
			  attr->cap.max_send_sge);
		return -EINVAL;
	}

	ndev = mc->ports[port - 1];
	mpc = netdev_priv(ndev);
	ibdev_dbg(&mdev->ib_dev, "port %u ndev %p mpc %p\n", port, ndev, mpc);

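	/* Bind the PD and the user context's doorbell page to the vPort */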
	err = mana_ib_cfg_vport(mdev, port - 1, pd, mana_ucontext->doorbell);
	if (err)
		return -ENODEV;

	qp->port = port;

	ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
		  ucmd.sq_buf_addr, ucmd.port);

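	/* Pin the user-space send queue buffer and map it for DMA */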
	umem = ib_umem_get(ibpd->device, ucmd.sq_buf_addr, ucmd.sq_buf_size,
			   IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to get umem for create qp-raw, err %d\n",
			  err);
		goto err_free_vport;
	}
	qp->sq_umem = umem;

	err = mana_ib_gd_create_dma_region(mdev, qp->sq_umem,
					   &qp->sq_gdma_region);
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to create dma region for create qp-raw, %d\n",
			  err);
		goto err_release_umem;
	}

	ibdev_dbg(&mdev->ib_dev,
		  "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
		  err, qp->sq_gdma_region);

	/* Create a WQ on the same port handle used by the Ethernet driver */
	wq_spec.gdma_region = qp->sq_gdma_region;
	wq_spec.queue_size = ucmd.sq_buf_size;

	cq_spec.gdma_region = send_cq->gdma_region;
	cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
	cq_spec.modr_ctx_id = 0;
	cq_spec.attached_eq = GDMA_CQ_NO_EQ;

	err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
				 &cq_spec, &qp->tx_object);
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to create wq for create raw-qp, err %d\n",
			  err);
		goto err_destroy_dma_region;
	}

	/* The GDMA regions are now owned by the WQ object */
	qp->sq_gdma_region = GDMA_INVALID_DMA_REGION;
	send_cq->gdma_region = GDMA_INVALID_DMA_REGION;

	qp->sq_id = wq_spec.queue_index;
	send_cq->id = cq_spec.queue_index;

	ibdev_dbg(&mdev->ib_dev,
		  "ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
		  qp->tx_object, qp->sq_id, send_cq->id);

	resp.sqid = qp->sq_id;
	resp.cqid = send_cq->id;
	resp.tx_vp_offset = pd->tx_vp_offset;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed copy udata for create qp-raw, %d\n",
			  err);
		goto err_destroy_wq_obj;
	}

	return 0;

err_destroy_wq_obj:
	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);

err_destroy_dma_region:
	mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);

err_release_umem:
	ib_umem_release(umem);

err_free_vport:
	mana_ib_uncfg_vport(mdev, pd, port - 1);

	return err;
}

int mana_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
		      struct ib_udata *udata)
{
	switch (attr->qp_type) {
	case IB_QPT_RAW_PACKET:
		/* When rwq_ind_tbl is used, it's for creating WQs for RSS */
		if (attr->rwq_ind_tbl)
			return mana_ib_create_qp_rss(ibqp, ibqp->pd, attr,
						     udata);

		return mana_ib_create_qp_raw(ibqp, ibqp->pd, attr, udata);
	default:
		/* QP types other than IB_QPT_RAW_PACKET are not supported */
		ibdev_dbg(ibqp->device, "Creating QP type %u not supported\n",
			  attr->qp_type);
	}

	return -EINVAL;
}

int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	/* modify_qp is not supported by this version of the driver */
	return -EOPNOTSUPP;
}

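/* Tear down the hardware RQ objects that were created for an RSS QP */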
static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp,
				  struct ib_rwq_ind_table *ind_tbl,
				  struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	struct gdma_dev *gd = mdev->gdma_dev;
	struct mana_port_context *mpc;
	struct mana_context *mc;
	struct net_device *ndev;
	struct mana_ib_wq *wq;
	struct ib_wq *ibwq;
	int i;

	mc = gd->driver_data;
	ndev = mc->ports[qp->port - 1];
	mpc = netdev_priv(ndev);

	for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
		ibwq = ind_tbl->ind_tbl[i];
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
		ibdev_dbg(&mdev->ib_dev, "destroying wq->rx_object %llu\n",
			  wq->rx_object);
		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
	}

	return 0;
}

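/*
 * Tear down a raw-packet QP: destroy the hardware SQ object, unmap and
 * release the pinned send queue buffer, and unbind the PD from the vPort.
 */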
static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	struct gdma_dev *gd = mdev->gdma_dev;
	struct ib_pd *ibpd = qp->ibqp.pd;
	struct mana_port_context *mpc;
	struct mana_context *mc;
	struct net_device *ndev;
	struct mana_ib_pd *pd;

	mc = gd->driver_data;
	ndev = mc->ports[qp->port - 1];
	mpc = netdev_priv(ndev);
	pd = container_of(ibpd, struct mana_ib_pd, ibpd);

	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);

	if (qp->sq_umem) {
		mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);
		ib_umem_release(qp->sq_umem);
	}

	mana_ib_uncfg_vport(mdev, pd, qp->port - 1);

	return 0;
}

int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);

	switch (ibqp->qp_type) {
	case IB_QPT_RAW_PACKET:
		if (ibqp->rwq_ind_tbl)
			return mana_ib_destroy_qp_rss(qp, ibqp->rwq_ind_tbl,
						      udata);

		return mana_ib_destroy_qp_raw(qp, udata);

	default:
		ibdev_dbg(ibqp->device, "Unexpected QP type %u\n",
			  ibqp->qp_type);
	}

	return -ENOENT;
}