Linux Audio

Check our new training course

Loading...
Note: File does not exist in v3.1.
  1/*
  2 * Copyright (c) 2015 HGST, a Western Digital Company.
  3 *
  4 * This program is free software; you can redistribute it and/or modify it
  5 * under the terms and conditions of the GNU General Public License,
  6 * version 2, as published by the Free Software Foundation.
  7 *
  8 * This program is distributed in the hope it will be useful, but WITHOUT
  9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 10 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 11 * more details.
 12 */
 13#include <linux/module.h>
 14#include <linux/err.h>
 15#include <linux/slab.h>
 16#include <rdma/ib_verbs.h>
 17
 18/* # of WCs to poll for with a single call to ib_poll_cq */
 19#define IB_POLL_BATCH			16
 20#define IB_POLL_BATCH_DIRECT		8
 21
 22/* # of WCs to iterate over before yielding */
 23#define IB_POLL_BUDGET_IRQ		256
 24#define IB_POLL_BUDGET_WORKQUEUE	65536
 25
 26#define IB_POLL_FLAGS \
 27	(IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)
 28
 29static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
 30			   int batch)
 31{
 32	int i, n, completed = 0;
 33
 34	/*
 35	 * budget might be (-1) if the caller does not
 36	 * want to bound this call, thus we need unsigned
 37	 * minimum here.
 38	 */
 39	while ((n = ib_poll_cq(cq, min_t(u32, batch,
 40					 budget - completed), wcs)) > 0) {
 41		for (i = 0; i < n; i++) {
 42			struct ib_wc *wc = &wcs[i];
 43
 44			if (wc->wr_cqe)
 45				wc->wr_cqe->done(cq, wc);
 46			else
 47				WARN_ON_ONCE(wc->status == IB_WC_SUCCESS);
 48		}
 49
 50		completed += n;
 51
 52		if (n != batch || (budget != -1 && completed >= budget))
 53			break;
 54	}
 55
 56	return completed;
 57}
 58
 59/**
 60 * ib_process_direct_cq - process a CQ in caller context
 61 * @cq:		CQ to process
 62 * @budget:	number of CQEs to poll for
 63 *
 64 * This function is used to process all outstanding CQ entries.
 65 * It does not offload CQ processing to a different context and does
 66 * not ask for completion interrupts from the HCA.
 67 * Using direct processing on CQ with non IB_POLL_DIRECT type may trigger
 68 * concurrent processing.
 69 *
 70 * Note: do not pass -1 as %budget unless it is guaranteed that the number
 71 * of completions that will be processed is small.
 72 */
 73int ib_process_cq_direct(struct ib_cq *cq, int budget)
 74{
 75	struct ib_wc wcs[IB_POLL_BATCH_DIRECT];
 76
 77	return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
 78}
 79EXPORT_SYMBOL(ib_process_cq_direct);
 80
/*
 * Completion handler for IB_POLL_DIRECT CQs.  Direct CQs are polled by
 * the ULP and never have notifications armed here, so any completion
 * interrupt that lands in this handler is unexpected.
 */
static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
{
	WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
}
 85
 86static int ib_poll_handler(struct irq_poll *iop, int budget)
 87{
 88	struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
 89	int completed;
 90
 91	completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
 92	if (completed < budget) {
 93		irq_poll_complete(&cq->iop);
 94		if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
 95			irq_poll_sched(&cq->iop);
 96	}
 97
 98	return completed;
 99}
100
/*
 * Completion handler for IB_POLL_SOFTIRQ CQs: hand the CQ to irq_poll,
 * which will invoke ib_poll_handler() from softirq context.
 */
static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
{
	irq_poll_sched(&cq->iop);
}
105
106static void ib_cq_poll_work(struct work_struct *work)
107{
108	struct ib_cq *cq = container_of(work, struct ib_cq, work);
109	int completed;
110
111	completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
112				    IB_POLL_BATCH);
113	if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
114	    ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
115		queue_work(ib_comp_wq, &cq->work);
116}
117
/*
 * Completion handler for IB_POLL_WORKQUEUE CQs: defer the actual polling
 * to ib_cq_poll_work() on the shared ib_comp_wq workqueue.
 */
static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
{
	queue_work(ib_comp_wq, &cq->work);
}
122
/**
 * __ib_alloc_cq - allocate a completion queue
 * @dev:		device to allocate the CQ for
 * @private:		driver private data, accessible from cq->cq_context
 * @nr_cqe:		number of CQEs to allocate
 * @comp_vector:	HCA completion vectors for this CQ
 * @poll_ctx:		context to poll the CQ from.
 * @caller:		module owner name.
 *
 * This is the proper interface to allocate a CQ for in-kernel users. A
 * CQ allocated with this interface will automatically be polled from the
 * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
 * to use this CQ abstraction.
 *
 * Returns the new CQ, or an ERR_PTR() on failure (-ENOMEM if the WC
 * buffer cannot be allocated, -EINVAL for an unknown @poll_ctx).
 */
struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
			    int nr_cqe, int comp_vector,
			    enum ib_poll_context poll_ctx, const char *caller)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe		= nr_cqe,
		.comp_vector	= comp_vector,
	};
	struct ib_cq *cq;
	int ret = -ENOMEM;

	cq = dev->create_cq(dev, &cq_attr, NULL, NULL);
	if (IS_ERR(cq))
		return cq;

	cq->device = dev;
	cq->uobject = NULL;
	cq->event_handler = NULL;
	cq->cq_context = private;
	cq->poll_ctx = poll_ctx;
	atomic_set(&cq->usecnt, 0);

	/* Scratch buffer for ib_poll_cq(), sized for the largest batch. */
	cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL);
	if (!cq->wc)
		goto out_destroy_cq;

	/* Make the CQ visible to the restrack resource-tracking core. */
	cq->res.type = RDMA_RESTRACK_CQ;
	cq->res.kern_name = caller;
	rdma_restrack_add(&cq->res);

	switch (cq->poll_ctx) {
	case IB_POLL_DIRECT:
		/* ULP polls itself; notifications are never armed. */
		cq->comp_handler = ib_cq_completion_direct;
		break;
	case IB_POLL_SOFTIRQ:
		cq->comp_handler = ib_cq_completion_softirq;

		/* Arm notification only after the poll machinery is ready. */
		irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
		break;
	case IB_POLL_WORKQUEUE:
		cq->comp_handler = ib_cq_completion_workqueue;
		INIT_WORK(&cq->work, ib_cq_poll_work);
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
		break;
	default:
		ret = -EINVAL;
		goto out_free_wc;
	}

	return cq;

	/* Unwind in reverse order of setup. */
out_free_wc:
	kfree(cq->wc);
	rdma_restrack_del(&cq->res);
out_destroy_cq:
	cq->device->destroy_cq(cq);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(__ib_alloc_cq);
197
/**
 * ib_free_cq - free a completion queue
 * @cq:		completion queue to free.
 *
 * Counterpart to __ib_alloc_cq(): quiesces the CQ's polling context,
 * then releases the WC buffer, the restrack entry, and the CQ itself.
 * Refuses (with a warning) to free a CQ that still has users.
 */
void ib_free_cq(struct ib_cq *cq)
{
	int ret;

	if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
		return;

	/*
	 * Stop the polling context first so nothing can touch cq->wc or
	 * run a completion handler while we tear the CQ down.
	 */
	switch (cq->poll_ctx) {
	case IB_POLL_DIRECT:
		break;
	case IB_POLL_SOFTIRQ:
		irq_poll_disable(&cq->iop);
		break;
	case IB_POLL_WORKQUEUE:
		cancel_work_sync(&cq->work);
		break;
	default:
		WARN_ON_ONCE(1);
	}

	kfree(cq->wc);
	rdma_restrack_del(&cq->res);
	ret = cq->device->destroy_cq(cq);
	WARN_ON_ONCE(ret);
}
EXPORT_SYMBOL(ib_free_cq);