v6.8
  1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
  2/* Copyright (c) 2021, Microsoft Corporation. */
  3
  4#include <net/mana/gdma.h>
  5#include <net/mana/hw_channel.h>
  6
  7static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
  8{
  9	struct gdma_resource *r = &hwc->inflight_msg_res;
 10	unsigned long flags;
 11	u32 index;
 12
 13	down(&hwc->sema);
 14
 15	spin_lock_irqsave(&r->lock, flags);
 16
 17	index = find_first_zero_bit(hwc->inflight_msg_res.map,
 18				    hwc->inflight_msg_res.size);
 19
 20	bitmap_set(hwc->inflight_msg_res.map, index, 1);
 21
 22	spin_unlock_irqrestore(&r->lock, flags);
 23
 24	*msg_id = index;
 25
 26	return 0;
 27}
 28
 29static void mana_hwc_put_msg_index(struct hw_channel_context *hwc, u16 msg_id)
 30{
 31	struct gdma_resource *r = &hwc->inflight_msg_res;
 32	unsigned long flags;
 33
 34	spin_lock_irqsave(&r->lock, flags);
 35	bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
 36	spin_unlock_irqrestore(&r->lock, flags);
 37
 38	up(&hwc->sema);
 39}
 40
 41static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
 42				    const struct gdma_resp_hdr *resp_msg,
 43				    u32 resp_len)
 44{
 45	if (resp_len < sizeof(*resp_msg))
 46		return -EPROTO;
 47
 48	if (resp_len > caller_ctx->output_buflen)
 49		return -EPROTO;
 50
 51	return 0;
 52}
 53
 54static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
 55				 const struct gdma_resp_hdr *resp_msg)
 56{
 57	struct hwc_caller_ctx *ctx;
 58	int err;
 59
 60	if (!test_bit(resp_msg->response.hwc_msg_id,
 61		      hwc->inflight_msg_res.map)) {
 62		dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
 63			resp_msg->response.hwc_msg_id);
 64		return;
 65	}
 66
 67	ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
 68	err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
 69	if (err)
 70		goto out;
 71
 72	ctx->status_code = resp_msg->status;
 73
 74	memcpy(ctx->output_buf, resp_msg, resp_len);
 75out:
 76	ctx->error = err;
 77	complete(&ctx->comp_event);
 78}
 79
 80static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
 81				struct hwc_work_request *req)
 82{
 83	struct device *dev = hwc_rxq->hwc->dev;
 84	struct gdma_sge *sge;
 85	int err;
 86
 87	sge = &req->sge;
 88	sge->address = (u64)req->buf_sge_addr;
 89	sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
 90	sge->size = req->buf_len;
 91
 92	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
 93	req->wqe_req.sgl = sge;
 94	req->wqe_req.num_sge = 1;
 95	req->wqe_req.client_data_unit = 0;
 96
 97	err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
 98	if (err)
 99		dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
100	return err;
101}
102
103static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
104					struct gdma_event *event)
105{
106	struct hw_channel_context *hwc = ctx;
107	struct gdma_dev *gd = hwc->gdma_dev;
108	union hwc_init_type_data type_data;
109	union hwc_init_eq_id_db eq_db;
110	u32 type, val;
111
112	switch (event->type) {
113	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
114		eq_db.as_uint32 = event->details[0];
115		hwc->cq->gdma_eq->id = eq_db.eq_id;
116		gd->doorbell = eq_db.doorbell;
117		break;
118
119	case GDMA_EQE_HWC_INIT_DATA:
120		type_data.as_uint32 = event->details[0];
121		type = type_data.type;
122		val = type_data.value;
123
124		switch (type) {
125		case HWC_INIT_DATA_CQID:
126			hwc->cq->gdma_cq->id = val;
127			break;
128
129		case HWC_INIT_DATA_RQID:
130			hwc->rxq->gdma_wq->id = val;
131			break;
132
133		case HWC_INIT_DATA_SQID:
134			hwc->txq->gdma_wq->id = val;
135			break;
136
137		case HWC_INIT_DATA_QUEUE_DEPTH:
138			hwc->hwc_init_q_depth_max = (u16)val;
139			break;
140
141		case HWC_INIT_DATA_MAX_REQUEST:
142			hwc->hwc_init_max_req_msg_size = val;
143			break;
144
145		case HWC_INIT_DATA_MAX_RESPONSE:
146			hwc->hwc_init_max_resp_msg_size = val;
147			break;
148
149		case HWC_INIT_DATA_MAX_NUM_CQS:
150			gd->gdma_context->max_num_cqs = val;
151			break;
152
153		case HWC_INIT_DATA_PDID:
154			hwc->gdma_dev->pdid = val;
155			break;
156
157		case HWC_INIT_DATA_GPA_MKEY:
158			hwc->rxq->msg_buf->gpa_mkey = val;
159			hwc->txq->msg_buf->gpa_mkey = val;
160			break;
161
162		case HWC_INIT_DATA_PF_DEST_RQ_ID:
163			hwc->pf_dest_vrq_id = val;
164			break;
165
166		case HWC_INIT_DATA_PF_DEST_CQ_ID:
167			hwc->pf_dest_vrcq_id = val;
168			break;
169		}
170
171		break;
172
173	case GDMA_EQE_HWC_INIT_DONE:
174		complete(&hwc->hwc_init_eqe_comp);
175		break;
176
177	case GDMA_EQE_HWC_SOC_RECONFIG_DATA:
178		type_data.as_uint32 = event->details[0];
179		type = type_data.type;
180		val = type_data.value;
181
182		switch (type) {
183		case HWC_DATA_CFG_HWC_TIMEOUT:
184			hwc->hwc_timeout = val;
185			break;
186
187		default:
188			dev_warn(hwc->dev, "Received unknown reconfig type %u\n", type);
189			break;
190		}
191
192		break;
193
194	default:
195		dev_warn(hwc->dev, "Received unknown gdma event %u\n", event->type);
196		/* Ignore unknown events, which should never happen. */
197		break;
198	}
199}
200
201static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
202				      const struct hwc_rx_oob *rx_oob)
203{
204	struct hw_channel_context *hwc = ctx;
205	struct hwc_wq *hwc_rxq = hwc->rxq;
206	struct hwc_work_request *rx_req;
207	struct gdma_resp_hdr *resp;
208	struct gdma_wqe *dma_oob;
209	struct gdma_queue *rq;
210	struct gdma_sge *sge;
211	u64 rq_base_addr;
212	u64 rx_req_idx;
213	u8 *wqe;
214
215	if (WARN_ON_ONCE(hwc_rxq->gdma_wq->id != gdma_rxq_id))
216		return;
217
218	rq = hwc_rxq->gdma_wq;
219	wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);
220	dma_oob = (struct gdma_wqe *)wqe;
221
222	sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4);
223
224	/* Select the RX work request for virtual address and for reposting. */
225	rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle;
226	rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size;
227
228	rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
229	resp = (struct gdma_resp_hdr *)rx_req->buf_va;
230
231	if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
232		dev_err(hwc->dev, "HWC RX: wrong msg_id=%u\n",
233			resp->response.hwc_msg_id);
234		return;
235	}
236
237	mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);
238
 239	/* Do not use 'resp' after this point, because the buffer is posted
 240	 * to the HW in mana_hwc_post_rx_wqe() below.
 241	 */
242	resp = NULL;
243
244	mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
245}
246
247static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
248				      const struct hwc_rx_oob *rx_oob)
249{
250	struct hw_channel_context *hwc = ctx;
251	struct hwc_wq *hwc_txq = hwc->txq;
252
253	WARN_ON_ONCE(!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id);
254}
255
256static int mana_hwc_create_gdma_wq(struct hw_channel_context *hwc,
257				   enum gdma_queue_type type, u64 queue_size,
258				   struct gdma_queue **queue)
259{
260	struct gdma_queue_spec spec = {};
261
262	if (type != GDMA_SQ && type != GDMA_RQ)
263		return -EINVAL;
264
265	spec.type = type;
266	spec.monitor_avl_buf = false;
267	spec.queue_size = queue_size;
268
269	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
270}
271
272static int mana_hwc_create_gdma_cq(struct hw_channel_context *hwc,
273				   u64 queue_size,
274				   void *ctx, gdma_cq_callback *cb,
275				   struct gdma_queue *parent_eq,
276				   struct gdma_queue **queue)
277{
278	struct gdma_queue_spec spec = {};
279
280	spec.type = GDMA_CQ;
281	spec.monitor_avl_buf = false;
282	spec.queue_size = queue_size;
283	spec.cq.context = ctx;
284	spec.cq.callback = cb;
285	spec.cq.parent_eq = parent_eq;
286
287	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
288}
289
290static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
291				   u64 queue_size,
292				   void *ctx, gdma_eq_callback *cb,
293				   struct gdma_queue **queue)
294{
295	struct gdma_queue_spec spec = {};
296
297	spec.type = GDMA_EQ;
298	spec.monitor_avl_buf = false;
299	spec.queue_size = queue_size;
300	spec.eq.context = ctx;
301	spec.eq.callback = cb;
302	spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;
303	spec.eq.msix_index = 0;
304
305	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
306}
307
308static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
309{
310	struct hwc_rx_oob comp_data = {};
311	struct gdma_comp *completions;
312	struct hwc_cq *hwc_cq = ctx;
313	int comp_read, i;
314
315	WARN_ON_ONCE(hwc_cq->gdma_cq != q_self);
316
317	completions = hwc_cq->comp_buf;
318	comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);
319	WARN_ON_ONCE(comp_read <= 0 || comp_read > hwc_cq->queue_depth);
320
321	for (i = 0; i < comp_read; ++i) {
322		comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data;
323
324		if (completions[i].is_sq)
325			hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx,
326						completions[i].wq_num,
327						&comp_data);
328		else
329			hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx,
330						completions[i].wq_num,
331						&comp_data);
332	}
333
334	mana_gd_ring_cq(q_self, SET_ARM_BIT);
335}
336
337static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
338{
339	kfree(hwc_cq->comp_buf);
340
341	if (hwc_cq->gdma_cq)
342		mana_gd_destroy_queue(gc, hwc_cq->gdma_cq);
343
344	if (hwc_cq->gdma_eq)
345		mana_gd_destroy_queue(gc, hwc_cq->gdma_eq);
346
347	kfree(hwc_cq);
348}
349
350static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
351			      gdma_eq_callback *callback, void *ctx,
352			      hwc_rx_event_handler_t *rx_ev_hdlr,
353			      void *rx_ev_ctx,
354			      hwc_tx_event_handler_t *tx_ev_hdlr,
355			      void *tx_ev_ctx, struct hwc_cq **hwc_cq_ptr)
356{
357	struct gdma_queue *eq, *cq;
358	struct gdma_comp *comp_buf;
359	struct hwc_cq *hwc_cq;
360	u32 eq_size, cq_size;
361	int err;
362
363	eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
364	if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
365		eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
366
367	cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
368	if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
369		cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
370
371	hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
372	if (!hwc_cq)
373		return -ENOMEM;
374
375	err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
376	if (err) {
377		dev_err(hwc->dev, "Failed to create HWC EQ for RQ: %d\n", err);
378		goto out;
379	}
380	hwc_cq->gdma_eq = eq;
381
382	err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event,
383				      eq, &cq);
384	if (err) {
385		dev_err(hwc->dev, "Failed to create HWC CQ for RQ: %d\n", err);
386		goto out;
387	}
388	hwc_cq->gdma_cq = cq;
389
390	comp_buf = kcalloc(q_depth, sizeof(*comp_buf), GFP_KERNEL);
391	if (!comp_buf) {
392		err = -ENOMEM;
393		goto out;
394	}
395
396	hwc_cq->hwc = hwc;
397	hwc_cq->comp_buf = comp_buf;
398	hwc_cq->queue_depth = q_depth;
399	hwc_cq->rx_event_handler = rx_ev_hdlr;
400	hwc_cq->rx_event_ctx = rx_ev_ctx;
401	hwc_cq->tx_event_handler = tx_ev_hdlr;
402	hwc_cq->tx_event_ctx = tx_ev_ctx;
403
404	*hwc_cq_ptr = hwc_cq;
405	return 0;
406out:
407	mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
408	return err;
409}
410
411static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
412				  u32 max_msg_size,
413				  struct hwc_dma_buf **dma_buf_ptr)
414{
415	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
416	struct hwc_work_request *hwc_wr;
417	struct hwc_dma_buf *dma_buf;
418	struct gdma_mem_info *gmi;
419	void *virt_addr;
420	u32 buf_size;
421	u8 *base_pa;
422	int err;
423	u16 i;
424
425	dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
426	if (!dma_buf)
427		return -ENOMEM;
428
429	dma_buf->num_reqs = q_depth;
430
431	buf_size = PAGE_ALIGN(q_depth * max_msg_size);
432
433	gmi = &dma_buf->mem_info;
434	err = mana_gd_alloc_memory(gc, buf_size, gmi);
435	if (err) {
436		dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
437		goto out;
438	}
439
440	virt_addr = dma_buf->mem_info.virt_addr;
441	base_pa = (u8 *)dma_buf->mem_info.dma_handle;
442
443	for (i = 0; i < q_depth; i++) {
444		hwc_wr = &dma_buf->reqs[i];
445
446		hwc_wr->buf_va = virt_addr + i * max_msg_size;
447		hwc_wr->buf_sge_addr = base_pa + i * max_msg_size;
448
449		hwc_wr->buf_len = max_msg_size;
450	}
451
452	*dma_buf_ptr = dma_buf;
453	return 0;
454out:
455	kfree(dma_buf);
456	return err;
457}
458
459static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
460				     struct hwc_dma_buf *dma_buf)
461{
462	if (!dma_buf)
463		return;
464
465	mana_gd_free_memory(&dma_buf->mem_info);
466
467	kfree(dma_buf);
468}
469
470static void mana_hwc_destroy_wq(struct hw_channel_context *hwc,
471				struct hwc_wq *hwc_wq)
472{
473	mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);
474
475	if (hwc_wq->gdma_wq)
476		mana_gd_destroy_queue(hwc->gdma_dev->gdma_context,
477				      hwc_wq->gdma_wq);
478
479	kfree(hwc_wq);
480}
481
482static int mana_hwc_create_wq(struct hw_channel_context *hwc,
483			      enum gdma_queue_type q_type, u16 q_depth,
484			      u32 max_msg_size, struct hwc_cq *hwc_cq,
485			      struct hwc_wq **hwc_wq_ptr)
486{
487	struct gdma_queue *queue;
488	struct hwc_wq *hwc_wq;
489	u32 queue_size;
490	int err;
491
492	WARN_ON(q_type != GDMA_SQ && q_type != GDMA_RQ);
493
494	if (q_type == GDMA_RQ)
495		queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth);
496	else
497		queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);
498
499	if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
500		queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;
501
502	hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
503	if (!hwc_wq)
504		return -ENOMEM;
505
506	err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
507	if (err)
508		goto out;
509
510	hwc_wq->hwc = hwc;
511	hwc_wq->gdma_wq = queue;
512	hwc_wq->queue_depth = q_depth;
513	hwc_wq->hwc_cq = hwc_cq;
514
515	err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
516				     &hwc_wq->msg_buf);
517	if (err)
518		goto out;
519
520	*hwc_wq_ptr = hwc_wq;
521	return 0;
522out:
523	if (err)
524		mana_hwc_destroy_wq(hwc, hwc_wq);
525	return err;
526}
527
528static int mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq,
529				struct hwc_work_request *req,
530				u32 dest_virt_rq_id, u32 dest_virt_rcq_id,
531				bool dest_pf)
532{
533	struct device *dev = hwc_txq->hwc->dev;
534	struct hwc_tx_oob *tx_oob;
535	struct gdma_sge *sge;
536	int err;
537
538	if (req->msg_size == 0 || req->msg_size > req->buf_len) {
539		dev_err(dev, "wrong msg_size: %u, buf_len: %u\n",
540			req->msg_size, req->buf_len);
541		return -EINVAL;
542	}
543
544	tx_oob = &req->tx_oob;
545
546	tx_oob->vrq_id = dest_virt_rq_id;
547	tx_oob->dest_vfid = 0;
548	tx_oob->vrcq_id = dest_virt_rcq_id;
549	tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id;
550	tx_oob->loopback = false;
551	tx_oob->lso_override = false;
552	tx_oob->dest_pf = dest_pf;
553	tx_oob->vsq_id = hwc_txq->gdma_wq->id;
554
555	sge = &req->sge;
556	sge->address = (u64)req->buf_sge_addr;
557	sge->mem_key = hwc_txq->msg_buf->gpa_mkey;
558	sge->size = req->msg_size;
559
560	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
561	req->wqe_req.sgl = sge;
562	req->wqe_req.num_sge = 1;
563	req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob);
564	req->wqe_req.inline_oob_data = tx_oob;
565	req->wqe_req.client_data_unit = 0;
566
567	err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL);
568	if (err)
569		dev_err(dev, "Failed to post WQE on HWC SQ: %d\n", err);
570	return err;
571}
572
573static int mana_hwc_init_inflight_msg(struct hw_channel_context *hwc,
574				      u16 num_msg)
575{
576	int err;
577
578	sema_init(&hwc->sema, num_msg);
579
580	err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res);
581	if (err)
582		dev_err(hwc->dev, "Failed to init inflight_msg_res: %d\n", err);
583	return err;
584}
585
586static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth,
587				 u32 max_req_msg_size, u32 max_resp_msg_size)
588{
589	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
590	struct hwc_wq *hwc_rxq = hwc->rxq;
591	struct hwc_work_request *req;
592	struct hwc_caller_ctx *ctx;
593	int err;
594	int i;
595
596	/* Post all WQEs on the RQ */
597	for (i = 0; i < q_depth; i++) {
598		req = &hwc_rxq->msg_buf->reqs[i];
599		err = mana_hwc_post_rx_wqe(hwc_rxq, req);
600		if (err)
601			return err;
602	}
603
604	ctx = kcalloc(q_depth, sizeof(*ctx), GFP_KERNEL);
605	if (!ctx)
606		return -ENOMEM;
607
608	for (i = 0; i < q_depth; ++i)
609		init_completion(&ctx[i].comp_event);
610
611	hwc->caller_ctx = ctx;
612
613	return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
614}
615
616static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
617				      u32 *max_req_msg_size,
618				      u32 *max_resp_msg_size)
619{
620	struct hw_channel_context *hwc = gc->hwc.driver_data;
621	struct gdma_queue *rq = hwc->rxq->gdma_wq;
622	struct gdma_queue *sq = hwc->txq->gdma_wq;
623	struct gdma_queue *eq = hwc->cq->gdma_eq;
624	struct gdma_queue *cq = hwc->cq->gdma_cq;
625	int err;
626
627	init_completion(&hwc->hwc_init_eqe_comp);
628
629	err = mana_smc_setup_hwc(&gc->shm_channel, false,
630				 eq->mem_info.dma_handle,
631				 cq->mem_info.dma_handle,
632				 rq->mem_info.dma_handle,
633				 sq->mem_info.dma_handle,
634				 eq->eq.msix_index);
635	if (err)
636		return err;
637
638	if (!wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * HZ))
639		return -ETIMEDOUT;
640
641	*q_depth = hwc->hwc_init_q_depth_max;
642	*max_req_msg_size = hwc->hwc_init_max_req_msg_size;
643	*max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;
644
645	/* Both were set in mana_hwc_init_event_handler(). */
646	if (WARN_ON(cq->id >= gc->max_num_cqs))
647		return -EPROTO;
648
649	gc->cq_table = vcalloc(gc->max_num_cqs, sizeof(struct gdma_queue *));
650	if (!gc->cq_table)
651		return -ENOMEM;
652
653	gc->cq_table[cq->id] = cq;
654
655	return 0;
656}
657
658static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
659				u32 max_req_msg_size, u32 max_resp_msg_size)
660{
661	int err;
662
663	err = mana_hwc_init_inflight_msg(hwc, q_depth);
664	if (err)
665		return err;
666
667	/* CQ is shared by SQ and RQ, so CQ's queue depth is the sum of SQ
668	 * queue depth and RQ queue depth.
669	 */
670	err = mana_hwc_create_cq(hwc, q_depth * 2,
671				 mana_hwc_init_event_handler, hwc,
672				 mana_hwc_rx_event_handler, hwc,
673				 mana_hwc_tx_event_handler, hwc, &hwc->cq);
674	if (err) {
675		dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err);
676		goto out;
677	}
678
679	err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
680				 hwc->cq, &hwc->rxq);
681	if (err) {
682		dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err);
683		goto out;
684	}
685
686	err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
687				 hwc->cq, &hwc->txq);
688	if (err) {
689		dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err);
690		goto out;
691	}
692
693	hwc->num_inflight_msg = q_depth;
694	hwc->max_req_msg_size = max_req_msg_size;
695
696	return 0;
697out:
 698	/* mana_hwc_create_channel() will do the cleanup. */
699	return err;
700}
701
702int mana_hwc_create_channel(struct gdma_context *gc)
703{
704	u32 max_req_msg_size, max_resp_msg_size;
705	struct gdma_dev *gd = &gc->hwc;
706	struct hw_channel_context *hwc;
707	u16 q_depth_max;
708	int err;
709
710	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
711	if (!hwc)
712		return -ENOMEM;
713
714	gd->gdma_context = gc;
715	gd->driver_data = hwc;
716	hwc->gdma_dev = gd;
717	hwc->dev = gc->dev;
718	hwc->hwc_timeout = HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS;
719
720	/* HWC's instance number is always 0. */
721	gd->dev_id.as_uint32 = 0;
722	gd->dev_id.type = GDMA_DEVICE_HWC;
723
724	gd->pdid = INVALID_PDID;
725	gd->doorbell = INVALID_DOORBELL;
726
727	/* mana_hwc_init_queues() only creates the required data structures,
728	 * and doesn't touch the HWC device.
729	 */
730	err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
731				   HW_CHANNEL_MAX_REQUEST_SIZE,
732				   HW_CHANNEL_MAX_RESPONSE_SIZE);
733	if (err) {
734		dev_err(hwc->dev, "Failed to initialize HWC: %d\n", err);
735		goto out;
736	}
737
738	err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size,
739					 &max_resp_msg_size);
740	if (err) {
741		dev_err(hwc->dev, "Failed to establish HWC: %d\n", err);
742		goto out;
743	}
744
745	err = mana_hwc_test_channel(gc->hwc.driver_data,
746				    HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
747				    max_req_msg_size, max_resp_msg_size);
748	if (err) {
749		dev_err(hwc->dev, "Failed to test HWC: %d\n", err);
750		goto out;
751	}
752
753	return 0;
754out:
755	mana_hwc_destroy_channel(gc);
756	return err;
757}
758
759void mana_hwc_destroy_channel(struct gdma_context *gc)
760{
761	struct hw_channel_context *hwc = gc->hwc.driver_data;
762
763	if (!hwc)
764		return;
765
766	/* gc->max_num_cqs is set in mana_hwc_init_event_handler(). If it's
767	 * non-zero, the HWC worked and we should tear down the HWC here.
768	 */
769	if (gc->max_num_cqs > 0) {
770		mana_smc_teardown_hwc(&gc->shm_channel, false);
771		gc->max_num_cqs = 0;
772	}
773
774	kfree(hwc->caller_ctx);
775	hwc->caller_ctx = NULL;
776
777	if (hwc->txq)
778		mana_hwc_destroy_wq(hwc, hwc->txq);
779
780	if (hwc->rxq)
781		mana_hwc_destroy_wq(hwc, hwc->rxq);
782
783	if (hwc->cq)
784		mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);
785
786	mana_gd_free_res_map(&hwc->inflight_msg_res);
787
788	hwc->num_inflight_msg = 0;
789
790	hwc->gdma_dev->doorbell = INVALID_DOORBELL;
791	hwc->gdma_dev->pdid = INVALID_PDID;
792
793	hwc->hwc_timeout = 0;
794
795	kfree(hwc);
796	gc->hwc.driver_data = NULL;
797	gc->hwc.gdma_context = NULL;
798
799	vfree(gc->cq_table);
800	gc->cq_table = NULL;
801}
802
803int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
804			  const void *req, u32 resp_len, void *resp)
805{
806	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
807	struct hwc_work_request *tx_wr;
808	struct hwc_wq *txq = hwc->txq;
809	struct gdma_req_hdr *req_msg;
810	struct hwc_caller_ctx *ctx;
811	u32 dest_vrcq = 0;
812	u32 dest_vrq = 0;
813	u16 msg_id;
814	int err;
815
816	mana_hwc_get_msg_index(hwc, &msg_id);
817
818	tx_wr = &txq->msg_buf->reqs[msg_id];
819
820	if (req_len > tx_wr->buf_len) {
821		dev_err(hwc->dev, "HWC: req msg size: %d > %d\n", req_len,
822			tx_wr->buf_len);
823		err = -EINVAL;
824		goto out;
825	}
826
827	ctx = hwc->caller_ctx + msg_id;
828	ctx->output_buf = resp;
829	ctx->output_buflen = resp_len;
830
831	req_msg = (struct gdma_req_hdr *)tx_wr->buf_va;
832	if (req)
833		memcpy(req_msg, req, req_len);
834
835	req_msg->req.hwc_msg_id = msg_id;
836
837	tx_wr->msg_size = req_len;
838
839	if (gc->is_pf) {
840		dest_vrq = hwc->pf_dest_vrq_id;
841		dest_vrcq = hwc->pf_dest_vrcq_id;
842	}
843
844	err = mana_hwc_post_tx_wqe(txq, tx_wr, dest_vrq, dest_vrcq, false);
845	if (err) {
846		dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
847		goto out;
848	}
849
850	if (!wait_for_completion_timeout(&ctx->comp_event,
 851					 msecs_to_jiffies(hwc->hwc_timeout))) {
852		dev_err(hwc->dev, "HWC: Request timed out!\n");
853		err = -ETIMEDOUT;
854		goto out;
855	}
856
857	if (ctx->error) {
858		err = ctx->error;
859		goto out;
860	}
861
862	if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
863		dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
864			ctx->status_code);
865		err = -EPROTO;
866		goto out;
867	}
868out:
869	mana_hwc_put_msg_index(hwc, msg_id);
870	return err;
871}
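
The file above implements the request/response path that the rest of the MANA driver uses to talk to the hardware management channel: a caller grabs a message slot, posts a TX WQE, and blocks on the completion that mana_hwc_handle_resp() signals. As a rough illustration of how this is consumed, the sketch below follows the pattern used by the GDMA layer, where mana_gd_send_request() in gdma_main.c wraps mana_hwc_send_request(). The request/response structures and the GDMA_QUERY_MAX_RESOURCES message code come from gdma.h, but the helper name example_query_max_resources() and its exact error handling are illustrative assumptions, not a copy of the driver.

/* Illustrative sketch, not part of hw_channel.c: issuing one request over
 * the HW channel the way the GDMA layer does.  Assumes <net/mana/gdma.h>
 * is included, as in the file above.
 */
static int example_query_max_resources(struct gdma_context *gc)
{
	struct gdma_query_max_resources_resp resp = {};
	struct gdma_general_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
			     sizeof(req), sizeof(resp));

	/* Serialized over the HWC: mana_hwc_send_request() allocates a
	 * msg_id, posts the TX WQE and sleeps until mana_hwc_handle_resp()
	 * fires the completion (or the request times out).
	 */
	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to query resources: %d, 0x%x\n",
			err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	return 0;
}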