// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>

/**
 * struct hl_eqe_work - This structure is used to schedule work for an EQ
 *                      entry and a cpucp_reset event
 *
 * @eq_work:          workqueue object to run when EQ entry is received
 * @hdev:             pointer to device structure
 * @eq_entry:         copy of the EQ entry
 */
struct hl_eqe_work {
	struct work_struct	eq_work;
	struct hl_device	*hdev;
	struct hl_eq_entry	eq_entry;
};

/**
 * hl_cq_inc_ptr - increment ci or pi of cq
 *
 * @ptr: the current ci or pi value of the completion queue
 *
 * Increment ptr by 1. If it reaches the number of completion queue
 * entries, set it to 0
 */
inline u32 hl_cq_inc_ptr(u32 ptr)
{
	ptr++;
	if (unlikely(ptr == HL_CQ_LENGTH))
		ptr = 0;
	return ptr;
}
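
/*
 * Worked example of the wraparound, assuming (hypothetically) that
 * HL_CQ_LENGTH is 256: hl_cq_inc_ptr(17) == 18, hl_cq_inc_ptr(255) == 0.
 * The explicit compare-and-reset works even when the queue length is not
 * a power of two, where a simple "& (len - 1)" mask would not.
 */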

/**
 * hl_eq_inc_ptr - increment ci of eq
 *
 * @ptr: the current ci value of the event queue
 *
 * Increment ptr by 1. If it reaches the number of event queue
 * entries, set it to 0
 */
static inline u32 hl_eq_inc_ptr(u32 ptr)
{
	ptr++;
	if (unlikely(ptr == HL_EQ_LENGTH))
		ptr = 0;
	return ptr;
}

/* Work function that dispatches a copied EQ entry to the ASIC-specific
 * handler in process context, then frees the work object.
 */
static void irq_handle_eqe(struct work_struct *work)
{
	struct hl_eqe_work *eqe_work = container_of(work, struct hl_eqe_work,
							eq_work);
	struct hl_device *hdev = eqe_work->hdev;

	hdev->asic_funcs->handle_eqe(hdev, &eqe_work->eq_entry);

	kfree(eqe_work);
}

/**
 * job_finish - queue job finish work
 *
 * @hdev: pointer to device structure
 * @cs_seq: command submission sequence
 * @cq: completion queue
 *
 */
static void job_finish(struct hl_device *hdev, u32 cs_seq, struct hl_cq *cq)
{
	struct hl_hw_queue *queue;
	struct hl_cs_job *job;

	queue = &hdev->kernel_queues[cq->hw_queue_id];
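	/* The job was stored in this queue's shadow array at submission time,
	 * indexed by the PI it was submitted with; cs_seq carries that index.
	 */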
	job = queue->shadow_queue[hl_pi_2_offset(cs_seq)];
	queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);

	atomic_inc(&queue->ci);
}

/**
 * cs_finish - queue all cs jobs finish work
 *
 * @hdev: pointer to device structure
 * @cs_seq: command submission sequence
 *
 */
static void cs_finish(struct hl_device *hdev, u16 cs_seq)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_hw_queue *queue;
	struct hl_cs *cs;
	struct hl_cs_job *job;

	cs = hdev->shadow_cs_queue[cs_seq & (prop->max_pending_cs - 1)];
	if (!cs) {
		dev_warn(hdev->dev,
			"No pointer to CS in shadow array at index %d\n",
			cs_seq);
		return;
	}

	list_for_each_entry(job, &cs->job_list, cs_node) {
		queue = &hdev->kernel_queues[job->hw_queue_id];
		atomic_inc(&queue->ci);
	}

	queue_work(hdev->cs_cmplt_wq, &cs->finish_work);
}
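
/*
 * Note: cs_finish() serves HL_COMPLETION_MODE_CS, where one CQ entry
 * represents a whole command submission; job_finish() above serves the
 * per-queue mode, where an entry represents a single job. See the mode
 * comment in hl_irq_handler_cq() below.
 */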

/**
 * hl_irq_handler_cq - irq handler for completion queue
 *
 * @irq: irq number
 * @arg: pointer to completion queue structure
 *
 */
irqreturn_t hl_irq_handler_cq(int irq, void *arg)
{
	struct hl_cq *cq = arg;
	struct hl_device *hdev = cq->hdev;
	bool shadow_index_valid, entry_ready;
	u16 shadow_index;
	struct hl_cq_entry *cq_entry, *cq_base;

	if (hdev->disabled) {
		dev_dbg(hdev->dev,
			"Device disabled but received IRQ %d for CQ %d\n",
			irq, cq->hw_queue_id);
		return IRQ_HANDLED;
	}

	cq_base = cq->kernel_address;

	while (1) {
		cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];

		entry_ready = !!FIELD_GET(CQ_ENTRY_READY_MASK,
				le32_to_cpu(cq_entry->data));
		if (!entry_ready)
			break;

		/* Make sure we read CQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		shadow_index_valid =
			!!FIELD_GET(CQ_ENTRY_SHADOW_INDEX_VALID_MASK,
					le32_to_cpu(cq_entry->data));

		shadow_index = FIELD_GET(CQ_ENTRY_SHADOW_INDEX_MASK,
				le32_to_cpu(cq_entry->data));

		/*
		 * CQ interrupt handler has 2 modes of operation:
		 * 1. Interrupt per CS completion: (Single CQ for all queues)
		 *    CQ entry represents a completed CS
		 *
		 * 2. Interrupt per CS job completion in queue: (CQ per queue)
		 *    CQ entry represents a completed job in a certain queue
		 */
		if (shadow_index_valid && !hdev->disabled) {
			if (hdev->asic_prop.completion_mode ==
					HL_COMPLETION_MODE_CS)
				cs_finish(hdev, shadow_index);
			else
				job_finish(hdev, shadow_index, cq);
		}

		/* Clear CQ entry ready bit */
		cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
						~CQ_ENTRY_READY_MASK);

		cq->ci = hl_cq_inc_ptr(cq->ci);

		/* Increment free slots */
		atomic_inc(&cq->free_slots_cnt);
	}

	return IRQ_HANDLED;
}
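
/*
 * For context, a minimal sketch of the producer side of the ready-bit
 * protocol consumed above (normally done by the device/firmware; the
 * helper name is hypothetical). It shows the write ordering that the
 * dma_rmb() in hl_irq_handler_cq() pairs with:
 *
 *	static void produce_cq_entry(struct hl_cq_entry *entry, u32 data)
 *	{
 *		entry->data = cpu_to_le32(data & ~CQ_ENTRY_READY_MASK);
 *		dma_wmb();	// publish contents before the ready bit
 *		entry->data = cpu_to_le32(le32_to_cpu(entry->data) |
 *						CQ_ENTRY_READY_MASK);
 *	}
 */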

/*
 * hl_ts_free_objects - handler of the free objects workqueue.
 * This function puts the refcount of the objects that the registration
 * node took a refcount on.
 * @work: workqueue object pointer
 */
static void hl_ts_free_objects(struct work_struct *work)
{
	struct timestamp_reg_work_obj *job =
			container_of(work, struct timestamp_reg_work_obj, free_obj);
	struct timestamp_reg_free_node *free_obj, *temp_free_obj;
	struct list_head *free_list_head = job->free_obj_head;
	struct hl_device *hdev = job->hdev;

	list_for_each_entry_safe(free_obj, temp_free_obj, free_list_head, free_objects_node) {
		dev_dbg(hdev->dev, "About to put refcount to buf (%p) cq_cb(%p)\n",
					free_obj->buf,
					free_obj->cq_cb);

		hl_mmap_mem_buf_put(free_obj->buf);
		hl_cb_put(free_obj->cq_cb);
		kfree(free_obj);
	}

	kfree(free_list_head);
	kfree(job);
}

/*
 * This function is called with the wait_list_lock spinlock held.
 * It sets the timestamp and deletes the registration node from the wait list.
 * Because we are under a spinlock, we cannot simply put the refcount of the
 * objects here: the release function may be invoked, and it contains long
 * logic (which might also sleep) that cannot run in IRQ context.
 * Instead, we fill a list with "put" job nodes and send that list to a
 * dedicated workqueue to do the actual put.
 */
static int handle_registration_node(struct hl_device *hdev, struct hl_user_pending_interrupt *pend,
						struct list_head **free_list)
{
	struct timestamp_reg_free_node *free_node;
	u64 timestamp;

	if (!(*free_list)) {
		/* Alloc/Init the timestamp registration free objects list */
		*free_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
		if (!(*free_list))
			return -ENOMEM;

		INIT_LIST_HEAD(*free_list);
	}

	free_node = kmalloc(sizeof(*free_node), GFP_ATOMIC);
	if (!free_node)
		return -ENOMEM;

	timestamp = ktime_get_ns();

	*pend->ts_reg_info.timestamp_kernel_addr = timestamp;

	dev_dbg(hdev->dev, "Timestamp is set to ts cb address (%p), ts: 0x%llx\n",
			pend->ts_reg_info.timestamp_kernel_addr,
			*(u64 *)pend->ts_reg_info.timestamp_kernel_addr);

	list_del(&pend->wait_list_node);

	/* Mark kernel CB node as free */
	pend->ts_reg_info.in_use = 0;

	/* Putting the refcount for ts_buff and cq_cb objects will be handled
	 * in workqueue context, just add job to free_list.
	 */
	free_node->buf = pend->ts_reg_info.buf;
	free_node->cq_cb = pend->ts_reg_info.cq_cb;
	list_add(&free_node->free_objects_node, *free_list);

	return 0;
}
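
/*
 * Note: the free list built above is handed to hl_ts_free_objects() through
 * hdev->ts_free_obj_wq (see handle_user_interrupt() below), so the refcounts
 * are finally put in a context that is allowed to sleep.
 */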

static void handle_user_interrupt(struct hl_device *hdev, struct hl_user_interrupt *intr)
{
	struct hl_user_pending_interrupt *pend, *temp_pend;
	struct list_head *ts_reg_free_list_head = NULL;
	struct timestamp_reg_work_obj *job;
	bool reg_node_handle_fail = false;
	ktime_t now = ktime_get();
	int rc;

	/* For registration nodes:
	 * As part of handling the registration nodes, we need to put the
	 * refcount of some objects. The problem is that we cannot do that
	 * under a spinlock, or in IRQ handler context at all (since the
	 * release functions are long and might sleep), so that part must be
	 * handled in workqueue context.
	 * To avoid having to handle a kmalloc failure, which would compel us
	 * to roll back actions and move nodes hanging on the free list back
	 * to the interrupt wait list, we always allocate the WQ job at the
	 * beginning.
	 */
	job = kmalloc(sizeof(*job), GFP_ATOMIC);
	if (!job)
		return;

	spin_lock(&intr->wait_list_lock);
	list_for_each_entry_safe(pend, temp_pend, &intr->wait_list_head, wait_list_node) {
		if ((pend->cq_kernel_addr && *(pend->cq_kernel_addr) >= pend->cq_target_value) ||
				!pend->cq_kernel_addr) {
			if (pend->ts_reg_info.buf) {
				if (!reg_node_handle_fail) {
					rc = handle_registration_node(hdev, pend,
									&ts_reg_free_list_head);
					if (rc)
						reg_node_handle_fail = true;
				}
			} else {
				/* Handle wait target value node */
				pend->fence.timestamp = now;
				complete_all(&pend->fence.completion);
			}
		}
	}
	spin_unlock(&intr->wait_list_lock);

	if (ts_reg_free_list_head) {
		INIT_WORK(&job->free_obj, hl_ts_free_objects);
		job->free_obj_head = ts_reg_free_list_head;
		job->hdev = hdev;
		queue_work(hdev->ts_free_obj_wq, &job->free_obj);
	} else {
		kfree(job);
	}
}

/**
 * hl_irq_handler_user_interrupt - irq handler for user interrupts
 *
 * @irq: irq number
 * @arg: pointer to user interrupt structure
 *
 */
irqreturn_t hl_irq_handler_user_interrupt(int irq, void *arg)
{
	struct hl_user_interrupt *user_int = arg;
	struct hl_device *hdev = user_int->hdev;

	/* First handle waiters registered on the common ("any") interrupt of
	 * this type (decoder or user CQ)
	 */
	if (user_int->is_decoder)
		handle_user_interrupt(hdev, &hdev->common_decoder_interrupt);
	else
		handle_user_interrupt(hdev, &hdev->common_user_cq_interrupt);

	/* Handle user cq or decoder interrupts registered on this specific irq */
	handle_user_interrupt(hdev, user_int);

	return IRQ_HANDLED;
}
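
/*
 * For reference, these handlers are wired up by the ASIC-specific code using
 * the standard request_irq() pattern. A minimal sketch, where the MSI-X
 * vector index "vec" and the name string are hypothetical:
 *
 *	rc = request_irq(pci_irq_vector(hdev->pdev, vec), hl_irq_handler_cq,
 *				0, "hl-cq", cq);
 *	if (rc)
 *		dev_err(hdev->dev, "Failed to request IRQ %d\n", vec);
 */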

/**
 * hl_irq_handler_default - default irq handler
 *
 * @irq: irq number
 * @arg: pointer to user interrupt structure
 *
 */
irqreturn_t hl_irq_handler_default(int irq, void *arg)
{
	struct hl_user_interrupt *user_interrupt = arg;
	struct hl_device *hdev = user_interrupt->hdev;
	u32 interrupt_id = user_interrupt->interrupt_id;

	dev_err(hdev->dev, "got invalid user interrupt %u\n", interrupt_id);

	return IRQ_HANDLED;
}

/**
 * hl_irq_handler_eq - irq handler for event queue
 *
 * @irq: irq number
 * @arg: pointer to event queue structure
 *
 */
irqreturn_t hl_irq_handler_eq(int irq, void *arg)
{
	struct hl_eq *eq = arg;
	struct hl_device *hdev = eq->hdev;
	struct hl_eq_entry *eq_entry;
	struct hl_eq_entry *eq_base;
	struct hl_eqe_work *handle_eqe_work;
	bool entry_ready;
	u32 cur_eqe;
	u16 cur_eqe_index;

	eq_base = eq->kernel_address;

	while (1) {
		cur_eqe = le32_to_cpu(eq_base[eq->ci].hdr.ctl);
		entry_ready = !!FIELD_GET(EQ_CTL_READY_MASK, cur_eqe);

		if (!entry_ready)
			break;

		cur_eqe_index = FIELD_GET(EQ_CTL_INDEX_MASK, cur_eqe);
		if ((hdev->event_queue.check_eqe_index) &&
				(((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK)
							!= cur_eqe_index)) {
			dev_dbg(hdev->dev,
				"EQE 0x%x in queue is ready but index does not match %d!=%d",
				cur_eqe,
				((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK),
				cur_eqe_index);
			break;
		}

		eq->prev_eqe_index++;

		eq_entry = &eq_base[eq->ci];

		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		if (hdev->disabled && !hdev->reset_info.in_compute_reset) {
			dev_warn(hdev->dev, "Device disabled but received an EQ event\n");
			goto skip_irq;
		}

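		/* Hard IRQ context: the allocation must not sleep, so use
		 * GFP_ATOMIC; handling of the entry itself is deferred to
		 * process context through eq_wq.
		 */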
		handle_eqe_work = kmalloc(sizeof(*handle_eqe_work), GFP_ATOMIC);
		if (handle_eqe_work) {
			INIT_WORK(&handle_eqe_work->eq_work, irq_handle_eqe);
			handle_eqe_work->hdev = hdev;

			memcpy(&handle_eqe_work->eq_entry, eq_entry,
					sizeof(*eq_entry));

			queue_work(hdev->eq_wq, &handle_eqe_work->eq_work);
		}
skip_irq:
		/* Clear EQ entry ready bit */
		eq_entry->hdr.ctl =
			cpu_to_le32(le32_to_cpu(eq_entry->hdr.ctl) &
							~EQ_CTL_READY_MASK);

		eq->ci = hl_eq_inc_ptr(eq->ci);

		hdev->asic_funcs->update_eq_ci(hdev, eq->ci);
	}

	return IRQ_HANDLED;
}

/**
 * hl_irq_handler_dec_abnrm - Decoder error interrupt handler
 * @irq: IRQ number
 * @arg: pointer to decoder structure.
 */
irqreturn_t hl_irq_handler_dec_abnrm(int irq, void *arg)
{
	struct hl_dec *dec = arg;

	schedule_work(&dec->completion_abnrm_work);

	return IRQ_HANDLED;
}

/**
 * hl_cq_init - main initialization function for a CQ object
 *
 * @hdev: pointer to device structure
 * @q: pointer to cq structure
 * @hw_queue_id: The H/W queue ID this completion queue belongs to;
 *               HL_INVALID_QUEUE if the cq is not attached to any specific queue
 *
 * Allocate dma-able memory for the completion queue and initialize fields.
 * Returns 0 on success.
 */
int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
{
	void *p;

	p = hl_asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES, &q->bus_address,
					GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	q->hdev = hdev;
	q->kernel_address = p;
	q->hw_queue_id = hw_queue_id;
	q->ci = 0;
	q->pi = 0;

	atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);

	return 0;
}
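
/*
 * Typical lifecycle, as a sketch: hl_cq_init() during device init,
 * hl_cq_reset() across a device reset, and hl_cq_fini() on teardown,
 * mirroring the hl_eq_* helpers below.
 */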

/**
 * hl_cq_fini - destroy completion queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to cq structure
 *
 * Free the completion queue memory
 */
void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
{
	hl_asic_dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES, q->kernel_address, q->bus_address);
}

void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
{
	q->ci = 0;
	q->pi = 0;

	atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);

	/*
	 * It's not enough to just reset the PI/CI because the H/W may have
	 * written valid completion entries before it was halted and therefore
	 * we need to clean the actual queues so we won't process old entries
	 * when the device is operational again
	 */

	memset(q->kernel_address, 0, HL_CQ_SIZE_IN_BYTES);
}

/**
 * hl_eq_init - main initialization function for an event queue object
 *
 * @hdev: pointer to device structure
 * @q: pointer to eq structure
 *
 * Allocate dma-able memory for the event queue and initialize fields
 * Returns 0 on success
 */
int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
{
	void *p;

	p = hl_cpu_accessible_dma_pool_alloc(hdev, HL_EQ_SIZE_IN_BYTES, &q->bus_address);
	if (!p)
		return -ENOMEM;

	q->hdev = hdev;
	q->kernel_address = p;
	q->ci = 0;
	q->prev_eqe_index = 0;

	return 0;
}

/**
 * hl_eq_fini - destroy event queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to eq structure
 *
 * Free the event queue memory
 */
void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
{
	flush_workqueue(hdev->eq_wq);

	hl_cpu_accessible_dma_pool_free(hdev, HL_EQ_SIZE_IN_BYTES, q->kernel_address);
}

void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
{
	q->ci = 0;
	q->prev_eqe_index = 0;

	/*
	 * It's not enough to just reset the CI because the H/W may have
	 * written valid event entries before it was halted and therefore
	 * we need to clean the actual queue so we won't process old entries
	 * when the device is operational again
	 */

	memset(q->kernel_address, 0, HL_EQ_SIZE_IN_BYTES);
}