v6.8 (drivers/infiniband/core/cq.c)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (c) 2015 HGST, a Western Digital Company.
  4 */
  5#include <linux/err.h>
  6#include <linux/slab.h>
  7#include <rdma/ib_verbs.h>
  8
  9#include "core_priv.h"
 10
 11#include <trace/events/rdma_core.h>
 12/* Max size for shared CQ, may require tuning */
 13#define IB_MAX_SHARED_CQ_SZ		4096U
 14
 15/* # of WCs to poll for with a single call to ib_poll_cq */
 16#define IB_POLL_BATCH			16
 17#define IB_POLL_BATCH_DIRECT		8
 18
 19/* # of WCs to iterate over before yielding */
 20#define IB_POLL_BUDGET_IRQ		256
 21#define IB_POLL_BUDGET_WORKQUEUE	65536
 22
 23#define IB_POLL_FLAGS \
 24	(IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)
 25
 26static const struct dim_cq_moder
 27rdma_dim_prof[RDMA_DIM_PARAMS_NUM_PROFILES] = {
 28	{1,   0, 1,  0},
 29	{1,   0, 4,  0},
 30	{2,   0, 4,  0},
 31	{2,   0, 8,  0},
 32	{4,   0, 8,  0},
 33	{16,  0, 8,  0},
 34	{16,  0, 16, 0},
 35	{32,  0, 16, 0},
 36	{32,  0, 32, 0},
 37};
 38
 39static void ib_cq_rdma_dim_work(struct work_struct *w)
 40{
 41	struct dim *dim = container_of(w, struct dim, work);
 42	struct ib_cq *cq = dim->priv;
 43
 44	u16 usec = rdma_dim_prof[dim->profile_ix].usec;
 45	u16 comps = rdma_dim_prof[dim->profile_ix].comps;
 46
 47	dim->state = DIM_START_MEASURE;
 48
 49	trace_cq_modify(cq, comps, usec);
 50	cq->device->ops.modify_cq(cq, comps, usec);
 51}
 52
 53static void rdma_dim_init(struct ib_cq *cq)
 54{
 55	struct dim *dim;
 56
 57	if (!cq->device->ops.modify_cq || !cq->device->use_cq_dim ||
 58	    cq->poll_ctx == IB_POLL_DIRECT)
 59		return;
 60
 61	dim = kzalloc(sizeof(struct dim), GFP_KERNEL);
 62	if (!dim)
 63		return;
 64
 65	dim->state = DIM_START_MEASURE;
 66	dim->tune_state = DIM_GOING_RIGHT;
 67	dim->profile_ix = RDMA_DIM_START_PROFILE;
 68	dim->priv = cq;
 69	cq->dim = dim;
 70
 71	INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
 72}
 73
 74static void rdma_dim_destroy(struct ib_cq *cq)
 75{
 76	if (!cq->dim)
 77		return;
 78
 79	cancel_work_sync(&cq->dim->work);
 80	kfree(cq->dim);
 81}
 82
 83static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
 84{
 85	int rc;
 86
 87	rc = ib_poll_cq(cq, num_entries, wc);
 88	trace_cq_poll(cq, num_entries, rc);
 89	return rc;
 90}
 91
 92static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
 93			   int batch)
 94{
 95	int i, n, completed = 0;
 96
 97	trace_cq_process(cq);
 98
 99	/*
100	 * budget might be (-1) if the caller does not
101	 * want to bound this call, thus we need unsigned
102	 * minimum here.
103	 */
104	while ((n = __poll_cq(cq, min_t(u32, batch,
105					budget - completed), wcs)) > 0) {
106		for (i = 0; i < n; i++) {
107			struct ib_wc *wc = &wcs[i];
108
109			if (wc->wr_cqe)
110				wc->wr_cqe->done(cq, wc);
111			else
112				WARN_ON_ONCE(wc->status == IB_WC_SUCCESS);
113		}
114
115		completed += n;
116
117		if (n != batch || (budget != -1 && completed >= budget))
118			break;
119	}
120
121	return completed;
122}
123
124/**
125 * ib_process_cq_direct - process a CQ in caller context
126 * @cq:		CQ to process
127 * @budget:	number of CQEs to poll for
128 *
129 * This function is used to process all outstanding CQ entries.
130 * It does not offload CQ processing to a different context and does
131 * not ask for completion interrupts from the HCA.
 132 * Using direct processing on a CQ whose poll context is not IB_POLL_DIRECT
 133 * may trigger concurrent processing.
134 *
135 * Note: do not pass -1 as %budget unless it is guaranteed that the number
136 * of completions that will be processed is small.
137 */
138int ib_process_cq_direct(struct ib_cq *cq, int budget)
139{
140	struct ib_wc wcs[IB_POLL_BATCH_DIRECT];
141
142	return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
143}
144EXPORT_SYMBOL(ib_process_cq_direct);
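A hedged usage sketch (not part of this file): a ULP that created its CQ with IB_POLL_DIRECT is responsible for calling ib_process_cq_direct() itself, typically from its own completion-reaping path. The helper name and budget below are illustrative assumptions.

/* Illustrative only: reap send completions on an IB_POLL_DIRECT CQ. */
static void my_reap_send_completions(struct ib_cq *send_cq)
{
	/*
	 * Each returned completion is dispatched through
	 * wc->wr_cqe->done(); keep the per-call budget small rather
	 * than passing -1.
	 */
	while (ib_process_cq_direct(send_cq, 16) > 0)
		;
}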
145
146static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
147{
148	WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
149}
150
151static int ib_poll_handler(struct irq_poll *iop, int budget)
152{
153	struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
154	struct dim *dim = cq->dim;
155	int completed;
156
157	completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
158	if (completed < budget) {
159		irq_poll_complete(&cq->iop);
160		if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) {
161			trace_cq_reschedule(cq);
162			irq_poll_sched(&cq->iop);
163		}
164	}
165
166	if (dim)
167		rdma_dim(dim, completed);
168
169	return completed;
170}
171
172static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
173{
174	trace_cq_schedule(cq);
175	irq_poll_sched(&cq->iop);
176}
177
178static void ib_cq_poll_work(struct work_struct *work)
179{
180	struct ib_cq *cq = container_of(work, struct ib_cq, work);
181	int completed;
182
183	completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
184				    IB_POLL_BATCH);
185	if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
186	    ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
187		queue_work(cq->comp_wq, &cq->work);
188	else if (cq->dim)
189		rdma_dim(cq->dim, completed);
190}
191
192static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
193{
194	trace_cq_schedule(cq);
195	queue_work(cq->comp_wq, &cq->work);
196}
197
198/**
199 * __ib_alloc_cq - allocate a completion queue
200 * @dev:		device to allocate the CQ for
201 * @private:		driver private data, accessible from cq->cq_context
202 * @nr_cqe:		number of CQEs to allocate
203 * @comp_vector:	HCA completion vectors for this CQ
204 * @poll_ctx:		context to poll the CQ from.
205 * @caller:		module owner name.
206 *
207 * This is the proper interface to allocate a CQ for in-kernel users. A
208 * CQ allocated with this interface will automatically be polled from the
209 * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
210 * to use this CQ abstraction.
211 */
212struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
213			    int comp_vector, enum ib_poll_context poll_ctx,
214			    const char *caller)
215{
216	struct ib_cq_init_attr cq_attr = {
217		.cqe		= nr_cqe,
218		.comp_vector	= comp_vector,
219	};
220	struct ib_cq *cq;
221	int ret = -ENOMEM;
222
223	cq = rdma_zalloc_drv_obj(dev, ib_cq);
224	if (!cq)
225		return ERR_PTR(ret);
226
227	cq->device = dev;
228	cq->cq_context = private;
229	cq->poll_ctx = poll_ctx;
230	atomic_set(&cq->usecnt, 0);
231	cq->comp_vector = comp_vector;
232
233	cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL);
234	if (!cq->wc)
235		goto out_free_cq;
236
237	rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
238	rdma_restrack_set_name(&cq->res, caller);
239
240	ret = dev->ops.create_cq(cq, &cq_attr, NULL);
241	if (ret)
242		goto out_free_wc;
243
244	rdma_dim_init(cq);
245
246	switch (cq->poll_ctx) {
247	case IB_POLL_DIRECT:
248		cq->comp_handler = ib_cq_completion_direct;
249		break;
250	case IB_POLL_SOFTIRQ:
251		cq->comp_handler = ib_cq_completion_softirq;
252
253		irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
254		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
255		break;
256	case IB_POLL_WORKQUEUE:
257	case IB_POLL_UNBOUND_WORKQUEUE:
258		cq->comp_handler = ib_cq_completion_workqueue;
259		INIT_WORK(&cq->work, ib_cq_poll_work);
260		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
261		cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ?
262				ib_comp_wq : ib_comp_unbound_wq;
263		break;
264	default:
265		ret = -EINVAL;
266		goto out_destroy_cq;
267	}
268
269	rdma_restrack_add(&cq->res);
270	trace_cq_alloc(cq, nr_cqe, comp_vector, poll_ctx);
271	return cq;
272
273out_destroy_cq:
274	rdma_dim_destroy(cq);
275	cq->device->ops.destroy_cq(cq, NULL);
276out_free_wc:
277	rdma_restrack_put(&cq->res);
278	kfree(cq->wc);
279out_free_cq:
280	kfree(cq);
281	trace_cq_alloc_error(nr_cqe, comp_vector, poll_ctx, ret);
282	return ERR_PTR(ret);
283}
284EXPORT_SYMBOL(__ib_alloc_cq);
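For illustration, in-kernel users normally reach __ib_alloc_cq() through the ib_alloc_cq() wrapper (which supplies KBUILD_MODNAME as @caller) and embed a struct ib_cqe in their request so the done callback can recover it with container_of(). The names my_request and my_send_done and the sizes below are assumptions, not part of this file.

/* Illustrative sketch, assuming the usual <rdma/ib_verbs.h> declarations. */
struct my_request {
	struct ib_cqe	cqe;		/* wr->wr_cqe points here */
	/* ... ULP-specific request state ... */
};

static void my_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_request *req =
		container_of(wc->wr_cqe, struct my_request, cqe);

	if (unlikely(wc->status != IB_WC_SUCCESS))
		pr_err("send failed: %s\n", ib_wc_status_msg(wc->status));
	/* ... complete req ... */
}

static struct ib_cq *my_alloc_send_cq(struct ib_device *dev)
{
	/* 256 CQEs on completion vector 0, polled from a workqueue. */
	return ib_alloc_cq(dev, NULL, 256, 0, IB_POLL_WORKQUEUE);
}

Before posting, such a ULP would set req->cqe.done = my_send_done and wr.wr_cqe = &req->cqe; wr.wr_id is never used with this abstraction.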
285
286/**
287 * __ib_alloc_cq_any - allocate a completion queue
288 * @dev:		device to allocate the CQ for
289 * @private:		driver private data, accessible from cq->cq_context
290 * @nr_cqe:		number of CQEs to allocate
291 * @poll_ctx:		context to poll the CQ from
292 * @caller:		module owner name
293 *
294 * Attempt to spread ULP Completion Queues over each device's interrupt
295 * vectors. A simple best-effort mechanism is used.
296 */
297struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
298				int nr_cqe, enum ib_poll_context poll_ctx,
299				const char *caller)
300{
301	static atomic_t counter;
302	int comp_vector = 0;
303
304	if (dev->num_comp_vectors > 1)
305		comp_vector =
306			atomic_inc_return(&counter) %
307			min_t(int, dev->num_comp_vectors, num_online_cpus());
308
309	return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
310			     caller);
311}
312EXPORT_SYMBOL(__ib_alloc_cq_any);
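A hedged sketch of the no-affinity case: callers that do not care which vector is used typically go through the ib_alloc_cq_any() wrapper and let this round-robin pick one. The helper name and CQ size are assumptions.

static struct ib_cq *my_alloc_any_cq(struct ib_device *dev)
{
	/* No comp_vector argument: the core spreads CQs across vectors. */
	return ib_alloc_cq_any(dev, NULL, 128, IB_POLL_SOFTIRQ);
}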
313
314/**
315 * ib_free_cq - free a completion queue
316 * @cq:		completion queue to free.
317 */
318void ib_free_cq(struct ib_cq *cq)
319{
320	int ret;
321
322	if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
323		return;
324	if (WARN_ON_ONCE(cq->cqe_used))
325		return;
326
327	switch (cq->poll_ctx) {
328	case IB_POLL_DIRECT:
329		break;
330	case IB_POLL_SOFTIRQ:
331		irq_poll_disable(&cq->iop);
332		break;
333	case IB_POLL_WORKQUEUE:
334	case IB_POLL_UNBOUND_WORKQUEUE:
335		cancel_work_sync(&cq->work);
336		break;
337	default:
338		WARN_ON_ONCE(1);
339	}
340
341	rdma_dim_destroy(cq);
342	trace_cq_free(cq);
343	ret = cq->device->ops.destroy_cq(cq, NULL);
344	WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
345	rdma_restrack_del(&cq->res);
346	kfree(cq->wc);
347	kfree(cq);
348}
349EXPORT_SYMBOL(ib_free_cq);
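For symmetry, a minimal teardown sketch with an assumed ULP structure: outstanding work requests are flushed (for example with ib_drain_qp()) before the CQ obtained from ib_alloc_cq()/ib_alloc_cq_any() is freed, so every wr_cqe completion has run by the time ib_free_cq() is called.

struct my_queue {
	struct ib_qp	*qp;
	struct ib_cq	*cq;
};

static void my_destroy_queue(struct my_queue *q)
{
	ib_drain_qp(q->qp);	/* flush remaining completions */
	ib_destroy_qp(q->qp);
	ib_free_cq(q->cq);	/* no further wr_cqe callbacks after this */
}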
350
351void ib_cq_pool_cleanup(struct ib_device *dev)
352{
353	struct ib_cq *cq, *n;
354	unsigned int i;
355
356	for (i = 0; i < ARRAY_SIZE(dev->cq_pools); i++) {
357		list_for_each_entry_safe(cq, n, &dev->cq_pools[i],
358					 pool_entry) {
359			WARN_ON(cq->cqe_used);
360			list_del(&cq->pool_entry);
361			cq->shared = false;
362			ib_free_cq(cq);
363		}
364	}
365}
366
367static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes,
368			enum ib_poll_context poll_ctx)
369{
370	LIST_HEAD(tmp_list);
371	unsigned int nr_cqs, i;
372	struct ib_cq *cq, *n;
373	int ret;
374
375	if (poll_ctx > IB_POLL_LAST_POOL_TYPE) {
376		WARN_ON_ONCE(poll_ctx > IB_POLL_LAST_POOL_TYPE);
377		return -EINVAL;
378	}
379
380	/*
381	 * Allocate at least as many CQEs as requested, and otherwise
382	 * a reasonable batch size so that we can share CQs between
383	 * multiple users instead of allocating a larger number of CQs.
384	 */
385	nr_cqes = min_t(unsigned int, dev->attrs.max_cqe,
386			max(nr_cqes, IB_MAX_SHARED_CQ_SZ));
387	nr_cqs = min_t(unsigned int, dev->num_comp_vectors, num_online_cpus());
388	for (i = 0; i < nr_cqs; i++) {
389		cq = ib_alloc_cq(dev, NULL, nr_cqes, i, poll_ctx);
390		if (IS_ERR(cq)) {
391			ret = PTR_ERR(cq);
392			goto out_free_cqs;
393		}
394		cq->shared = true;
395		list_add_tail(&cq->pool_entry, &tmp_list);
396	}
397
398	spin_lock_irq(&dev->cq_pools_lock);
399	list_splice(&tmp_list, &dev->cq_pools[poll_ctx]);
400	spin_unlock_irq(&dev->cq_pools_lock);
401
402	return 0;
403
404out_free_cqs:
405	list_for_each_entry_safe(cq, n, &tmp_list, pool_entry) {
406		cq->shared = false;
407		ib_free_cq(cq);
408	}
409	return ret;
410}
411
412/**
413 * ib_cq_pool_get() - Find the least used completion queue that matches
414 *   a given cpu hint (or least used for wild card affinity) and fits
415 *   nr_cqe.
416 * @dev: rdma device
417 * @nr_cqe: number of needed cqe entries
418 * @comp_vector_hint: completion vector hint (-1) for the driver to assign
419 *   a comp vector based on internal counter
420 * @poll_ctx: cq polling context
421 *
422 * Finds a cq that satisfies @comp_vector_hint and @nr_cqe requirements and
 423 * claims entries in it for us.  In case there is no available cq, allocate
424 * a new cq with the requirements and add it to the device pool.
425 * IB_POLL_DIRECT cannot be used for shared cqs so it is not a valid value
426 * for @poll_ctx.
427 */
428struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
429			     int comp_vector_hint,
430			     enum ib_poll_context poll_ctx)
431{
432	static unsigned int default_comp_vector;
433	unsigned int vector, num_comp_vectors;
434	struct ib_cq *cq, *found = NULL;
435	int ret;
436
437	if (poll_ctx > IB_POLL_LAST_POOL_TYPE) {
438		WARN_ON_ONCE(poll_ctx > IB_POLL_LAST_POOL_TYPE);
439		return ERR_PTR(-EINVAL);
440	}
441
442	num_comp_vectors =
443		min_t(unsigned int, dev->num_comp_vectors, num_online_cpus());
 444	/* Project the affinity to the device completion vector range */
445	if (comp_vector_hint < 0) {
446		comp_vector_hint =
447			(READ_ONCE(default_comp_vector) + 1) % num_comp_vectors;
448		WRITE_ONCE(default_comp_vector, comp_vector_hint);
449	}
450	vector = comp_vector_hint % num_comp_vectors;
451
452	/*
453	 * Find the least used CQ with correct affinity and
454	 * enough free CQ entries
455	 */
456	while (!found) {
457		spin_lock_irq(&dev->cq_pools_lock);
458		list_for_each_entry(cq, &dev->cq_pools[poll_ctx],
459				    pool_entry) {
460			/*
461			 * Check to see if we have found a CQ with the
462			 * correct completion vector
463			 */
464			if (vector != cq->comp_vector)
465				continue;
466			if (cq->cqe_used + nr_cqe > cq->cqe)
467				continue;
468			found = cq;
469			break;
470		}
471
472		if (found) {
473			found->cqe_used += nr_cqe;
474			spin_unlock_irq(&dev->cq_pools_lock);
475
476			return found;
477		}
478		spin_unlock_irq(&dev->cq_pools_lock);
479
480		/*
481		 * Didn't find a match or ran out of CQs in the device
482		 * pool, allocate a new array of CQs.
483		 */
484		ret = ib_alloc_cqs(dev, nr_cqe, poll_ctx);
485		if (ret)
486			return ERR_PTR(ret);
487	}
488
489	return found;
490}
491EXPORT_SYMBOL(ib_cq_pool_get);
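A hedged usage sketch: a caller that only needs CQ capacity rather than a private CQ can claim nr_cqe entries from the device pool; the same count must later be returned with ib_cq_pool_put(). The helper name is an assumption.

static struct ib_cq *my_get_shared_cq(struct ib_device *dev,
				      unsigned int nr_cqe)
{
	/* -1: no affinity preference, let the core rotate the vector. */
	return ib_cq_pool_get(dev, nr_cqe, -1, IB_POLL_WORKQUEUE);
}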
492
493/**
494 * ib_cq_pool_put - Return a CQ taken from a shared pool.
495 * @cq: The CQ to return.
496 * @nr_cqe: The max number of cqes that the user had requested.
497 */
498void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe)
499{
500	if (WARN_ON_ONCE(nr_cqe > cq->cqe_used))
501		return;
502
503	spin_lock_irq(&cq->device->cq_pools_lock);
504	cq->cqe_used -= nr_cqe;
505	spin_unlock_irq(&cq->device->cq_pools_lock);
506}
507EXPORT_SYMBOL(ib_cq_pool_put);
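And the matching release, again only a sketch with assumed names: the claimed entries go back to the shared CQ, which itself stays in the pool until ib_cq_pool_cleanup() runs at device teardown.

static void my_put_shared_cq(struct ib_qp *qp, struct ib_cq *shared_cq,
			     unsigned int nr_cqe)
{
	ib_drain_qp(qp);
	ib_destroy_qp(qp);
	/* Must mirror the nr_cqe passed to ib_cq_pool_get(). */
	ib_cq_pool_put(shared_cq, nr_cqe);
}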
v5.4 (drivers/infiniband/core/cq.c)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (c) 2015 HGST, a Western Digital Company.
  4 */
  5#include <linux/module.h>
  6#include <linux/err.h>
  7#include <linux/slab.h>
  8#include <rdma/ib_verbs.h>
  9
 10/* # of WCs to poll for with a single call to ib_poll_cq */
 11#define IB_POLL_BATCH			16
 12#define IB_POLL_BATCH_DIRECT		8
 13
 14/* # of WCs to iterate over before yielding */
 15#define IB_POLL_BUDGET_IRQ		256
 16#define IB_POLL_BUDGET_WORKQUEUE	65536
 17
 18#define IB_POLL_FLAGS \
 19	(IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)
 20
 21static const struct dim_cq_moder
 22rdma_dim_prof[RDMA_DIM_PARAMS_NUM_PROFILES] = {
 23	{1,   0, 1,  0},
 24	{1,   0, 4,  0},
 25	{2,   0, 4,  0},
 26	{2,   0, 8,  0},
 27	{4,   0, 8,  0},
 28	{16,  0, 8,  0},
 29	{16,  0, 16, 0},
 30	{32,  0, 16, 0},
 31	{32,  0, 32, 0},
 32};
 33
 34static void ib_cq_rdma_dim_work(struct work_struct *w)
 35{
 36	struct dim *dim = container_of(w, struct dim, work);
 37	struct ib_cq *cq = dim->priv;
 38
 39	u16 usec = rdma_dim_prof[dim->profile_ix].usec;
 40	u16 comps = rdma_dim_prof[dim->profile_ix].comps;
 41
 42	dim->state = DIM_START_MEASURE;
 43
 44	cq->device->ops.modify_cq(cq, comps, usec);
 45}
 46
 47static void rdma_dim_init(struct ib_cq *cq)
 48{
 49	struct dim *dim;
 50
 51	if (!cq->device->ops.modify_cq || !cq->device->use_cq_dim ||
 52	    cq->poll_ctx == IB_POLL_DIRECT)
 53		return;
 54
 55	dim = kzalloc(sizeof(struct dim), GFP_KERNEL);
 56	if (!dim)
 57		return;
 58
 59	dim->state = DIM_START_MEASURE;
 60	dim->tune_state = DIM_GOING_RIGHT;
 61	dim->profile_ix = RDMA_DIM_START_PROFILE;
 62	dim->priv = cq;
 63	cq->dim = dim;
 64
 65	INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
 66}
 67
 68static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
 69			   int batch)
 70{
 71	int i, n, completed = 0;
 72
 73	/*
 74	 * budget might be (-1) if the caller does not
 75	 * want to bound this call, thus we need unsigned
 76	 * minimum here.
 77	 */
 78	while ((n = ib_poll_cq(cq, min_t(u32, batch,
 79					 budget - completed), wcs)) > 0) {
 80		for (i = 0; i < n; i++) {
 81			struct ib_wc *wc = &wcs[i];
 82
 83			if (wc->wr_cqe)
 84				wc->wr_cqe->done(cq, wc);
 85			else
 86				WARN_ON_ONCE(wc->status == IB_WC_SUCCESS);
 87		}
 88
 89		completed += n;
 90
 91		if (n != batch || (budget != -1 && completed >= budget))
 92			break;
 93	}
 94
 95	return completed;
 96}
 97
 98/**
 99 * ib_process_cq_direct - process a CQ in caller context
100 * @cq:		CQ to process
101 * @budget:	number of CQEs to poll for
102 *
103 * This function is used to process all outstanding CQ entries.
104 * It does not offload CQ processing to a different context and does
105 * not ask for completion interrupts from the HCA.
106 * Using direct processing on a CQ whose poll context is not IB_POLL_DIRECT
107 * may trigger concurrent processing.
108 *
109 * Note: do not pass -1 as %budget unless it is guaranteed that the number
110 * of completions that will be processed is small.
111 */
112int ib_process_cq_direct(struct ib_cq *cq, int budget)
113{
114	struct ib_wc wcs[IB_POLL_BATCH_DIRECT];
115
116	return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
117}
118EXPORT_SYMBOL(ib_process_cq_direct);
119
120static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
121{
122	WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
123}
124
125static int ib_poll_handler(struct irq_poll *iop, int budget)
126{
127	struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
128	struct dim *dim = cq->dim;
129	int completed;
130
131	completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
132	if (completed < budget) {
133		irq_poll_complete(&cq->iop);
134		if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
135			irq_poll_sched(&cq->iop);
136	}
137
138	if (dim)
139		rdma_dim(dim, completed);
140
141	return completed;
142}
143
144static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
145{
146	irq_poll_sched(&cq->iop);
147}
148
149static void ib_cq_poll_work(struct work_struct *work)
150{
151	struct ib_cq *cq = container_of(work, struct ib_cq, work);
152	int completed;
153
154	completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
155				    IB_POLL_BATCH);
156	if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
157	    ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
158		queue_work(cq->comp_wq, &cq->work);
159	else if (cq->dim)
160		rdma_dim(cq->dim, completed);
161}
162
163static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
164{
165	queue_work(cq->comp_wq, &cq->work);
166}
167
168/**
169 * __ib_alloc_cq_user - allocate a completion queue
170 * @dev:		device to allocate the CQ for
171 * @private:		driver private data, accessible from cq->cq_context
172 * @nr_cqe:		number of CQEs to allocate
173 * @comp_vector:	HCA completion vectors for this CQ
174 * @poll_ctx:		context to poll the CQ from.
175 * @caller:		module owner name.
176 * @udata:		Valid user data or NULL for kernel object
177 *
178 * This is the proper interface to allocate a CQ for in-kernel users. A
179 * CQ allocated with this interface will automatically be polled from the
180 * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
181 * to use this CQ abstraction.
182 */
183struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
184				 int nr_cqe, int comp_vector,
185				 enum ib_poll_context poll_ctx,
186				 const char *caller, struct ib_udata *udata)
187{
188	struct ib_cq_init_attr cq_attr = {
189		.cqe		= nr_cqe,
190		.comp_vector	= comp_vector,
191	};
192	struct ib_cq *cq;
193	int ret = -ENOMEM;
194
195	cq = rdma_zalloc_drv_obj(dev, ib_cq);
196	if (!cq)
197		return ERR_PTR(ret);
198
199	cq->device = dev;
200	cq->cq_context = private;
201	cq->poll_ctx = poll_ctx;
202	atomic_set(&cq->usecnt, 0);
203
204	cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL);
205	if (!cq->wc)
206		goto out_free_cq;
207
208	cq->res.type = RDMA_RESTRACK_CQ;
209	rdma_restrack_set_task(&cq->res, caller);
210
211	ret = dev->ops.create_cq(cq, &cq_attr, NULL);
212	if (ret)
213		goto out_free_wc;
214
215	rdma_restrack_kadd(&cq->res);
216
217	rdma_dim_init(cq);
218
219	switch (cq->poll_ctx) {
220	case IB_POLL_DIRECT:
221		cq->comp_handler = ib_cq_completion_direct;
222		break;
223	case IB_POLL_SOFTIRQ:
224		cq->comp_handler = ib_cq_completion_softirq;
225
226		irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
227		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
228		break;
229	case IB_POLL_WORKQUEUE:
230	case IB_POLL_UNBOUND_WORKQUEUE:
231		cq->comp_handler = ib_cq_completion_workqueue;
232		INIT_WORK(&cq->work, ib_cq_poll_work);
233		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
234		cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ?
235				ib_comp_wq : ib_comp_unbound_wq;
236		break;
237	default:
238		ret = -EINVAL;
239		goto out_destroy_cq;
240	}
241
242	return cq;
243
244out_destroy_cq:
245	rdma_restrack_del(&cq->res);
246	cq->device->ops.destroy_cq(cq, udata);
247out_free_wc:
248	kfree(cq->wc);
249out_free_cq:
250	kfree(cq);
251	return ERR_PTR(ret);
252}
253EXPORT_SYMBOL(__ib_alloc_cq_user);
254
255/**
256 * __ib_alloc_cq_any - allocate a completion queue
257 * @dev:		device to allocate the CQ for
258 * @private:		driver private data, accessible from cq->cq_context
259 * @nr_cqe:		number of CQEs to allocate
260 * @poll_ctx:		context to poll the CQ from
261 * @caller:		module owner name
262 *
263 * Attempt to spread ULP Completion Queues over each device's interrupt
264 * vectors. A simple best-effort mechanism is used.
265 */
266struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
267				int nr_cqe, enum ib_poll_context poll_ctx,
268				const char *caller)
269{
270	static atomic_t counter;
271	int comp_vector = 0;
272
273	if (dev->num_comp_vectors > 1)
274		comp_vector =
275			atomic_inc_return(&counter) %
276			min_t(int, dev->num_comp_vectors, num_online_cpus());
277
278	return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
279				  caller, NULL);
280}
281EXPORT_SYMBOL(__ib_alloc_cq_any);
282
283/**
284 * ib_free_cq_user - free a completion queue
285 * @cq:		completion queue to free.
286 * @udata:	User data or NULL for kernel object
287 */
288void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
289{
290	if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
291		return;
292
293	switch (cq->poll_ctx) {
294	case IB_POLL_DIRECT:
295		break;
296	case IB_POLL_SOFTIRQ:
297		irq_poll_disable(&cq->iop);
298		break;
299	case IB_POLL_WORKQUEUE:
300	case IB_POLL_UNBOUND_WORKQUEUE:
301		cancel_work_sync(&cq->work);
302		break;
303	default:
304		WARN_ON_ONCE(1);
305	}
306
307	rdma_restrack_del(&cq->res);
308	cq->device->ops.destroy_cq(cq, udata);
309	if (cq->dim)
310		cancel_work_sync(&cq->dim->work);
311	kfree(cq->dim);
312	kfree(cq->wc);
313	kfree(cq);
314}
315EXPORT_SYMBOL(ib_free_cq_user);