// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>

/*
 * hl_hw_queue_add_ptr - add to pi or ci and check if it wraps around
 *
 * @ptr: the current pi/ci value
 * @val: the amount to add
 *
 * Add val to ptr. It can go until twice the queue length.
 */
inline u32 hl_hw_queue_add_ptr(u32 ptr, u16 val)
{
	ptr += val;
	ptr &= ((HL_QUEUE_LENGTH << 1) - 1);
	return ptr;
}
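
/*
 * Illustrative note (values assumed for the example only): the mask above
 * keeps the pointer in the range [0, 2 * HL_QUEUE_LENGTH), so a full queue
 * (pi - ci == HL_QUEUE_LENGTH) can be distinguished from an empty one
 * (pi == ci). For a queue length of 256 the wrap point is 512, e.g.
 * hl_hw_queue_add_ptr(250, 10) == 260 rather than wrapping to 4 at 256.
 */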

static inline int queue_ci_get(atomic_t *ci, u32 queue_len)
{
	return atomic_read(ci) & ((queue_len << 1) - 1);
}

static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
{
	int delta = (q->pi - queue_ci_get(&q->ci, queue_len));

	if (delta >= 0)
		return (queue_len - delta);
	else
		return (abs(delta) - queue_len);
}
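
/*
 * Worked example (illustrative values, assuming a queue length of 256 so
 * that pi/ci wrap at 512): pi = 300, ci = 60 gives delta = 240 and 16 free
 * slots; pi = 10, ci = 400 (pi has already wrapped) gives delta = -390 and
 * abs(-390) - 256 = 134 free slots, i.e. 122 entries in use.
 */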

void hl_int_hw_queue_update_ci(struct hl_cs *cs)
{
	struct hl_device *hdev = cs->ctx->hdev;
	struct hl_hw_queue *q;
	int i;

	if (hdev->disabled)
		return;

	q = &hdev->kernel_queues[0];
	for (i = 0 ; i < hdev->asic_prop.max_queues ; i++, q++) {
		if (q->queue_type == QUEUE_TYPE_INT)
			atomic_add(cs->jobs_in_queue_cnt[i], &q->ci);
	}
}

/*
 * ext_and_hw_queue_submit_bd() - Submit a buffer descriptor to an external or a
 *                                H/W queue.
 * @hdev: pointer to habanalabs device structure
 * @q: pointer to habanalabs queue structure
 * @ctl: BD's control word
 * @len: BD's length
 * @ptr: BD's pointer
 *
 * This function assumes there is enough space on the queue to submit a new
 * BD to it. It initializes the next BD and calls the device specific
 * function to set the pi (and doorbell)
 *
 * This function must be called when the scheduler mutex is taken
 *
 */
static void ext_and_hw_queue_submit_bd(struct hl_device *hdev,
			struct hl_hw_queue *q, u32 ctl, u32 len, u64 ptr)
{
	struct hl_bd *bd;

	bd = (struct hl_bd *) (uintptr_t) q->kernel_address;
	bd += hl_pi_2_offset(q->pi);
	bd->ctl = cpu_to_le32(ctl);
	bd->len = cpu_to_le32(len);
	bd->ptr = cpu_to_le64(ptr);

	q->pi = hl_queue_inc_ptr(q->pi);
	hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}
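
/*
 * The BD fields are written with cpu_to_le32/64 because the descriptor is
 * consumed directly by the device from the DMA'd queue memory, which expects
 * little-endian values regardless of host endianness.
 */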

/*
 * ext_queue_sanity_checks - perform some sanity checks on external queue
 *
 * @hdev              : pointer to hl_device structure
 * @q                 : pointer to hl_hw_queue structure
 * @num_of_entries    : how many entries to check for space
 * @reserve_cq_entry  : whether to reserve an entry in the cq
 *
 * H/W queues spinlock should be taken before calling this function
 *
 * Perform the following:
 * - Make sure we have enough space in the h/w queue
 * - Make sure we have enough space in the completion queue
 * - Reserve space in the completion queue (needs to be reversed if there
 *   is a failure down the road before the actual submission of work). Only
 *   do this action if reserve_cq_entry is true
 *
 */
static int ext_queue_sanity_checks(struct hl_device *hdev,
				struct hl_hw_queue *q, int num_of_entries,
				bool reserve_cq_entry)
{
	atomic_t *free_slots =
			&hdev->completion_queue[q->cq_id].free_slots_cnt;
	int free_slots_cnt;

	/* Check we have enough space in the queue */
	free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH);

	if (free_slots_cnt < num_of_entries) {
		dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
			q->hw_queue_id, num_of_entries);
		return -EAGAIN;
	}

	if (reserve_cq_entry) {
		/*
		 * Check we have enough space in the completion queue.
		 * Subtract num_of_entries from the free slots counter.
		 * If the result is negative, the CQ doesn't have room for all
		 * the entries, so we can't submit a new CB because we won't
		 * get an ack on its completion. atomic_add_negative() returns
		 * true in that case, and we restore the counter.
		 */
		if (atomic_add_negative(num_of_entries * -1, free_slots)) {
			dev_dbg(hdev->dev, "No space for %d on CQ %d\n",
				num_of_entries, q->hw_queue_id);
			atomic_add(num_of_entries, free_slots);
			return -EAGAIN;
		}
	}

	return 0;
}
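
/*
 * Note: when reserve_cq_entry is true and a later step fails before the
 * work is actually submitted, the reservation taken above is released in
 * the unroll_cq_resv path of hl_hw_queue_schedule_cs() below.
 */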

/*
 * int_queue_sanity_checks - perform some sanity checks on internal queue
 *
 * @hdev              : pointer to hl_device structure
 * @q                 : pointer to hl_hw_queue structure
 * @num_of_entries    : how many entries to check for space
 *
 * H/W queues spinlock should be taken before calling this function
 *
 * Perform the following:
 * - Make sure we have enough space in the h/w queue
 *
 */
static int int_queue_sanity_checks(struct hl_device *hdev,
					struct hl_hw_queue *q,
					int num_of_entries)
{
	int free_slots_cnt;

	if (num_of_entries > q->int_queue_len) {
		dev_err(hdev->dev,
			"Cannot populate queue %u with %u jobs\n",
			q->hw_queue_id, num_of_entries);
		return -ENOMEM;
	}

	/* Check we have enough space in the queue */
	free_slots_cnt = queue_free_slots(q, q->int_queue_len);

	if (free_slots_cnt < num_of_entries) {
		dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
			q->hw_queue_id, num_of_entries);
		return -EAGAIN;
	}

	return 0;
}

/*
 * hw_queue_sanity_checks() - Make sure we have enough space in the h/w queue
 * @hdev: Pointer to hl_device structure.
 * @q: Pointer to hl_hw_queue structure.
 * @num_of_entries: How many entries to check for space.
 *
 * Notice: We do not reserve queue entries so this function mustn't be called
 *         more than once per CS for the same queue
 *
 */
static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
					int num_of_entries)
{
	int free_slots_cnt;

	/* Check we have enough space in the queue */
	free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH);

	if (free_slots_cnt < num_of_entries) {
		dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
			q->hw_queue_id, num_of_entries);
		return -EAGAIN;
	}

	return 0;
}

/*
 * hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
 *
 * @hdev: pointer to hl_device structure
 * @hw_queue_id: ID of the queue to send the CB to
 * @cb_size: size of CB
 * @cb_ptr: pointer to CB location
 *
 * This function sends a single CB, that must NOT generate a completion entry
 *
 */
int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
				u32 cb_size, u64 cb_ptr)
{
	struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
	int rc = 0;

	/*
	 * The CPU queue is a synchronous queue with an effective depth of
	 * a single entry (although it is allocated with room for multiple
	 * entries). Therefore, there is a different lock, called
	 * send_cpu_message_lock, that serializes accesses to the CPU queue.
	 * As a result, we don't need to lock the access to the entire H/W
	 * queues module when submitting a JOB to the CPU queue
	 */
	if (q->queue_type != QUEUE_TYPE_CPU)
		hdev->asic_funcs->hw_queues_lock(hdev);

	if (hdev->disabled) {
		rc = -EPERM;
		goto out;
	}

	/*
	 * hl_hw_queue_send_cb_no_cmpl() is called for queues of a H/W queue
	 * type only on init phase, when the queues are empty and being tested,
	 * so there is no need for sanity checks.
	 */
	if (q->queue_type != QUEUE_TYPE_HW) {
		rc = ext_queue_sanity_checks(hdev, q, 1, false);
		if (rc)
			goto out;
	}

	ext_and_hw_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);

out:
	if (q->queue_type != QUEUE_TYPE_CPU)
		hdev->asic_funcs->hw_queues_unlock(hdev);

	return rc;
}

/*
 * ext_queue_schedule_job - submit a JOB to an external queue
 *
 * @job: pointer to the job that needs to be submitted to the queue
 *
 * This function must be called when the scheduler mutex is taken
 *
 */
static void ext_queue_schedule_job(struct hl_cs_job *job)
{
	struct hl_device *hdev = job->cs->ctx->hdev;
	struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
	struct hl_cq_entry cq_pkt;
	struct hl_cq *cq;
	u64 cq_addr;
	struct hl_cb *cb;
	u32 ctl;
	u32 len;
	u64 ptr;

	/*
	 * Update the JOB ID inside the BD CTL so the device would know what
	 * to write in the completion queue
	 */
	ctl = ((q->pi << BD_CTL_SHADOW_INDEX_SHIFT) & BD_CTL_SHADOW_INDEX_MASK);

	cb = job->patched_cb;
	len = job->job_cb_size;
	ptr = cb->bus_address;

	cq_pkt.data = cpu_to_le32(
				((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT)
					& CQ_ENTRY_SHADOW_INDEX_MASK) |
				(1 << CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT) |
				(1 << CQ_ENTRY_READY_SHIFT));

	/*
	 * No need to protect pi_offset because scheduling to the
	 * H/W queues is done under the scheduler mutex
	 *
	 * No need to check if CQ is full because it was already
	 * checked in ext_queue_sanity_checks
	 */
	cq = &hdev->completion_queue[q->cq_id];
	cq_addr = cq->bus_address + cq->pi * sizeof(struct hl_cq_entry);

	hdev->asic_funcs->add_end_of_cb_packets(hdev, cb->kernel_address, len,
						cq_addr,
						le32_to_cpu(cq_pkt.data),
						q->msi_vec,
						job->contains_dma_pkt);

	q->shadow_queue[hl_pi_2_offset(q->pi)] = job;

	cq->pi = hl_cq_inc_ptr(cq->pi);

	ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
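
/*
 * The shadow queue entry written above allows the completion path to map the
 * shadow index that the device reports in the CQ entry back to the submitted
 * job.
 */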

/*
 * int_queue_schedule_job - submit a JOB to an internal queue
 *
 * @job: pointer to the job that needs to be submitted to the queue
 *
 * This function must be called when the scheduler mutex is taken
 *
 */
static void int_queue_schedule_job(struct hl_cs_job *job)
{
	struct hl_device *hdev = job->cs->ctx->hdev;
	struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
	struct hl_bd bd;
	__le64 *pi;

	bd.ctl = 0;
	bd.len = cpu_to_le32(job->job_cb_size);
	bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb);

	pi = (__le64 *) (uintptr_t) (q->kernel_address +
		((q->pi & (q->int_queue_len - 1)) * sizeof(bd)));

	q->pi++;
	q->pi &= ((q->int_queue_len << 1) - 1);

	hdev->asic_funcs->pqe_write(hdev, pi, &bd);

	hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}
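
/*
 * Internal queues do not generate a completion-queue entry per BD; their ci
 * is instead advanced in bulk by hl_int_hw_queue_update_ci() above, once per
 * command submission.
 */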

/*
 * hw_queue_schedule_job - submit a JOB to a H/W queue
 *
 * @job: pointer to the job that needs to be submitted to the queue
 *
 * This function must be called when the scheduler mutex is taken
 *
 */
static void hw_queue_schedule_job(struct hl_cs_job *job)
{
	struct hl_device *hdev = job->cs->ctx->hdev;
	struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
	u64 ptr;
	u32 offset, ctl, len;

	/*
	 * Upon PQE completion, COMP_DATA is used as the write data to the
	 * completion queue (QMAN HBW message), and COMP_OFFSET is used as the
	 * write address offset in the SM block (QMAN LBW message).
	 * The write address offset is calculated as "COMP_OFFSET << 2".
	 */
	offset = job->cs->sequence & (hdev->asic_prop.max_pending_cs - 1);
	ctl = ((offset << BD_CTL_COMP_OFFSET_SHIFT) & BD_CTL_COMP_OFFSET_MASK) |
		((q->pi << BD_CTL_COMP_DATA_SHIFT) & BD_CTL_COMP_DATA_MASK);

	len = job->job_cb_size;

	/*
	 * A patched CB is created only if a user CB was allocated by driver and
	 * MMU is disabled. If MMU is enabled, the user CB should be used
	 * instead. If the user CB wasn't allocated by driver, assume that it
	 * holds an address.
	 */
	if (job->patched_cb)
		ptr = job->patched_cb->bus_address;
	else if (job->is_kernel_allocated_cb)
		ptr = job->user_cb->bus_address;
	else
		ptr = (u64) (uintptr_t) job->user_cb;

	ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}

/*
 * init_signal_wait_cs - initialize a signal/wait CS
 * @cs: pointer to the signal/wait CS
 *
 * H/W queues spinlock should be taken before calling this function
 */
static void init_signal_wait_cs(struct hl_cs *cs)
{
	struct hl_ctx *ctx = cs->ctx;
	struct hl_device *hdev = ctx->hdev;
	struct hl_hw_queue *hw_queue;
	struct hl_cs_compl *cs_cmpl =
			container_of(cs->fence, struct hl_cs_compl, base_fence);

	struct hl_hw_sob *hw_sob;
	struct hl_cs_job *job;
	u32 q_idx;

	/* There is only one job in a signal/wait CS */
	job = list_first_entry(&cs->job_list, struct hl_cs_job,
				cs_node);
	q_idx = job->hw_queue_id;
	hw_queue = &hdev->kernel_queues[q_idx];

	if (cs->type & CS_TYPE_SIGNAL) {
		hw_sob = &hw_queue->hw_sob[hw_queue->curr_sob_offset];

		cs_cmpl->hw_sob = hw_sob;
		cs_cmpl->sob_val = hw_queue->next_sob_val++;

		dev_dbg(hdev->dev,
			"generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n",
			cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx);

		hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
					cs_cmpl->hw_sob->sob_id);

		kref_get(&hw_sob->kref);

		/* check for wraparound */
		if (hw_queue->next_sob_val == HL_MAX_SOB_VAL) {
			/*
			 * Decrement as we reached the max value.
			 * The release function won't be called here as we've
			 * just incremented the refcount.
			 */
			kref_put(&hw_sob->kref, hl_sob_reset_error);
			hw_queue->next_sob_val = 1;
			/* only two SOBs are currently in use */
			hw_queue->curr_sob_offset =
					(hw_queue->curr_sob_offset + 1) %
						HL_RSVD_SOBS_IN_USE;

			dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
					hw_queue->curr_sob_offset, q_idx);
		}
	} else if (cs->type & CS_TYPE_WAIT) {
		struct hl_cs_compl *signal_cs_cmpl;

		signal_cs_cmpl = container_of(cs->signal_fence,
						struct hl_cs_compl,
						base_fence);

		/* copy the SOB id and value of the signal CS */
		cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
		cs_cmpl->sob_val = signal_cs_cmpl->sob_val;

		dev_dbg(hdev->dev,
			"generate wait CB, sob_id: %d, sob_val: 0x%x, mon_id: %d, q_idx: %d\n",
			cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val,
			hw_queue->base_mon_id, q_idx);

		hdev->asic_funcs->gen_wait_cb(hdev, job->patched_cb,
						cs_cmpl->hw_sob->sob_id,
						cs_cmpl->sob_val,
						hw_queue->base_mon_id,
						q_idx);

		kref_get(&cs_cmpl->hw_sob->kref);
		/*
		 * Must put the signal fence after the SOB refcnt increment so
		 * the SOB refcnt won't turn 0 and reset the SOB before the
		 * wait CS was submitted.
		 */
		mb();
		dma_fence_put(cs->signal_fence);
		cs->signal_fence = NULL;
	}
}

/*
 * hl_hw_queue_schedule_cs - schedule a command submission
 * @cs: pointer to the CS
 */
int hl_hw_queue_schedule_cs(struct hl_cs *cs)
{
	struct hl_ctx *ctx = cs->ctx;
	struct hl_device *hdev = ctx->hdev;
	struct hl_cs_job *job, *tmp;
	struct hl_hw_queue *q;
	u32 max_queues;
	int rc = 0, i, cq_cnt;

	hdev->asic_funcs->hw_queues_lock(hdev);

	if (hl_device_disabled_or_in_reset(hdev)) {
		ctx->cs_counters.device_in_reset_drop_cnt++;
		dev_err(hdev->dev,
			"device is disabled or in reset, CS rejected!\n");
		rc = -EPERM;
		goto out;
	}

	max_queues = hdev->asic_prop.max_queues;

	q = &hdev->kernel_queues[0];
	for (i = 0, cq_cnt = 0 ; i < max_queues ; i++, q++) {
		if (cs->jobs_in_queue_cnt[i]) {
			switch (q->queue_type) {
			case QUEUE_TYPE_EXT:
				rc = ext_queue_sanity_checks(hdev, q,
						cs->jobs_in_queue_cnt[i], true);
				break;
			case QUEUE_TYPE_INT:
				rc = int_queue_sanity_checks(hdev, q,
						cs->jobs_in_queue_cnt[i]);
				break;
			case QUEUE_TYPE_HW:
				rc = hw_queue_sanity_checks(hdev, q,
						cs->jobs_in_queue_cnt[i]);
				break;
			default:
				dev_err(hdev->dev, "Queue type %d is invalid\n",
					q->queue_type);
				rc = -EINVAL;
				break;
			}

			if (rc) {
				ctx->cs_counters.queue_full_drop_cnt++;
				goto unroll_cq_resv;
			}

			if (q->queue_type == QUEUE_TYPE_EXT)
				cq_cnt++;
		}
	}

	if ((cs->type == CS_TYPE_SIGNAL) || (cs->type == CS_TYPE_WAIT))
		init_signal_wait_cs(cs);

	spin_lock(&hdev->hw_queues_mirror_lock);
	list_add_tail(&cs->mirror_node, &hdev->hw_queues_mirror_list);

	/* Queue TDR if the CS is the first entry and if timeout is wanted */
	if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) &&
			(list_first_entry(&hdev->hw_queues_mirror_list,
					struct hl_cs, mirror_node) == cs)) {
		cs->tdr_active = true;
		schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies);
		spin_unlock(&hdev->hw_queues_mirror_lock);
	} else {
		spin_unlock(&hdev->hw_queues_mirror_lock);
	}

	if (!hdev->cs_active_cnt++) {
		struct hl_device_idle_busy_ts *ts;

		ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx];
		ts->busy_to_idle_ts = ktime_set(0, 0);
		ts->idle_to_busy_ts = ktime_get();
	}

	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		switch (job->queue_type) {
		case QUEUE_TYPE_EXT:
			ext_queue_schedule_job(job);
			break;
		case QUEUE_TYPE_INT:
			int_queue_schedule_job(job);
			break;
		case QUEUE_TYPE_HW:
			hw_queue_schedule_job(job);
			break;
		default:
			break;
		}

	cs->submitted = true;

	goto out;

unroll_cq_resv:
	q = &hdev->kernel_queues[0];
	for (i = 0 ; (i < max_queues) && (cq_cnt > 0) ; i++, q++) {
		if ((q->queue_type == QUEUE_TYPE_EXT) &&
						(cs->jobs_in_queue_cnt[i])) {
			atomic_t *free_slots =
				&hdev->completion_queue[i].free_slots_cnt;
			atomic_add(cs->jobs_in_queue_cnt[i], free_slots);
			cq_cnt--;
		}
	}

out:
	hdev->asic_funcs->hw_queues_unlock(hdev);

	return rc;
}

/*
 * hl_hw_queue_inc_ci_kernel - increment ci for kernel's queue
 *
 * @hdev: pointer to hl_device structure
 * @hw_queue_id: which queue to increment its ci
 */
void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
{
	struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];

	atomic_inc(&q->ci);
}

static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
					bool is_cpu_queue)
{
	void *p;
	int rc;

	if (is_cpu_queue)
		p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
							HL_QUEUE_SIZE_IN_BYTES,
							&q->bus_address);
	else
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
						HL_QUEUE_SIZE_IN_BYTES,
						&q->bus_address,
						GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	q->kernel_address = (u64) (uintptr_t) p;

	q->shadow_queue = kmalloc_array(HL_QUEUE_LENGTH,
					sizeof(*q->shadow_queue),
					GFP_KERNEL);
	if (!q->shadow_queue) {
		dev_err(hdev->dev,
			"Failed to allocate shadow queue for H/W queue %d\n",
			q->hw_queue_id);
		rc = -ENOMEM;
		goto free_queue;
	}

	/* Make sure read/write pointers are initialized to start of queue */
	atomic_set(&q->ci, 0);
	q->pi = 0;

	return 0;

free_queue:
	if (is_cpu_queue)
		hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
					HL_QUEUE_SIZE_IN_BYTES,
					(void *) (uintptr_t) q->kernel_address);
	else
		hdev->asic_funcs->asic_dma_free_coherent(hdev,
					HL_QUEUE_SIZE_IN_BYTES,
					(void *) (uintptr_t) q->kernel_address,
					q->bus_address);

	return rc;
}

static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
	void *p;

	p = hdev->asic_funcs->get_int_queue_base(hdev, q->hw_queue_id,
					&q->bus_address, &q->int_queue_len);
	if (!p) {
		dev_err(hdev->dev,
			"Failed to get base address for internal queue %d\n",
			q->hw_queue_id);
		return -EFAULT;
	}

	q->kernel_address = (u64) (uintptr_t) p;
	q->pi = 0;
	atomic_set(&q->ci, 0);

	return 0;
}

static int cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
	return ext_and_cpu_queue_init(hdev, q, true);
}

static int ext_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
	return ext_and_cpu_queue_init(hdev, q, false);
}

static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
	void *p;

	p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
						HL_QUEUE_SIZE_IN_BYTES,
						&q->bus_address,
						GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	q->kernel_address = (u64) (uintptr_t) p;

	/* Make sure read/write pointers are initialized to start of queue */
	atomic_set(&q->ci, 0);
	q->pi = 0;

	return 0;
}

static void sync_stream_queue_init(struct hl_device *hdev, u32 q_idx)
{
	struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx];
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_hw_sob *hw_sob;
	int sob, queue_idx = hdev->sync_stream_queue_idx++;

	hw_queue->base_sob_id =
		prop->sync_stream_first_sob + queue_idx * HL_RSVD_SOBS;
	hw_queue->base_mon_id =
		prop->sync_stream_first_mon + queue_idx * HL_RSVD_MONS;
	hw_queue->next_sob_val = 1;
	hw_queue->curr_sob_offset = 0;

	for (sob = 0 ; sob < HL_RSVD_SOBS ; sob++) {
		hw_sob = &hw_queue->hw_sob[sob];
		hw_sob->hdev = hdev;
		hw_sob->sob_id = hw_queue->base_sob_id + sob;
		hw_sob->q_idx = q_idx;
		kref_init(&hw_sob->kref);
	}
}

static void sync_stream_queue_reset(struct hl_device *hdev, u32 q_idx)
{
	struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx];

	/*
	 * In case we got here due to a stuck CS, the refcnt might be bigger
	 * than 1 and therefore we reset it.
	 */
	kref_init(&hw_queue->hw_sob[hw_queue->curr_sob_offset].kref);
	hw_queue->curr_sob_offset = 0;
	hw_queue->next_sob_val = 1;
}

/*
 * queue_init - main initialization function for H/W queue object
 *
 * @hdev: pointer to hl_device device structure
 * @q: pointer to hl_hw_queue queue structure
 * @hw_queue_id: The id of the H/W queue
 *
 * Allocate dma-able memory for the queue and initialize fields
 * Returns 0 on success
 */
static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
			u32 hw_queue_id)
{
	int rc;

	q->hw_queue_id = hw_queue_id;

	switch (q->queue_type) {
	case QUEUE_TYPE_EXT:
		rc = ext_queue_init(hdev, q);
		break;
	case QUEUE_TYPE_INT:
		rc = int_queue_init(hdev, q);
		break;
	case QUEUE_TYPE_CPU:
		rc = cpu_queue_init(hdev, q);
		break;
	case QUEUE_TYPE_HW:
		rc = hw_queue_init(hdev, q);
		break;
	case QUEUE_TYPE_NA:
		q->valid = 0;
		return 0;
	default:
		dev_crit(hdev->dev, "wrong queue type %d during init\n",
			q->queue_type);
		rc = -EINVAL;
		break;
	}

	if (q->supports_sync_stream)
		sync_stream_queue_init(hdev, q->hw_queue_id);

	if (rc)
		return rc;

	q->valid = 1;

	return 0;
}

/*
 * queue_fini - destroy queue
 *
 * @hdev: pointer to hl_device device structure
 * @q: pointer to hl_hw_queue queue structure
 *
 * Free the queue memory
 */
static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
{
	if (!q->valid)
		return;

	/*
	 * If we arrived here, there are no jobs waiting on this queue
	 * so we can safely remove it.
	 * This is because this function can only be called when:
	 * 1. Either a context is deleted, which can only occur if all its
	 *    jobs were finished
	 * 2. A context wasn't able to be created due to failure or timeout,
	 *    which means there are no jobs on the queue yet
	 *
	 * The only exception is the queues of the kernel context, but
	 * if they are being destroyed, it means that the entire module is
	 * being removed. If the module is removed, it means there is no open
	 * user context. It also means that if a job was submitted by
	 * the kernel driver (e.g. context creation), the job itself was
	 * released by the kernel driver when a timeout occurred on its
	 * completion. Thus, we don't need to release it again.
	 */

	if (q->queue_type == QUEUE_TYPE_INT)
		return;

	kfree(q->shadow_queue);

	if (q->queue_type == QUEUE_TYPE_CPU)
		hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
					HL_QUEUE_SIZE_IN_BYTES,
					(void *) (uintptr_t) q->kernel_address);
	else
		hdev->asic_funcs->asic_dma_free_coherent(hdev,
					HL_QUEUE_SIZE_IN_BYTES,
					(void *) (uintptr_t) q->kernel_address,
					q->bus_address);
}

int hl_hw_queues_create(struct hl_device *hdev)
{
	struct asic_fixed_properties *asic = &hdev->asic_prop;
	struct hl_hw_queue *q;
	int i, rc, q_ready_cnt;

	hdev->kernel_queues = kcalloc(asic->max_queues,
				sizeof(*hdev->kernel_queues), GFP_KERNEL);

	if (!hdev->kernel_queues) {
		dev_err(hdev->dev, "Not enough memory for H/W queues\n");
		return -ENOMEM;
	}

	/* Initialize the H/W queues */
	for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues;
			i < asic->max_queues ; i++, q_ready_cnt++, q++) {

		q->queue_type = asic->hw_queues_props[i].type;
		q->supports_sync_stream =
				asic->hw_queues_props[i].supports_sync_stream;
		rc = queue_init(hdev, q, i);
		if (rc) {
			dev_err(hdev->dev,
				"failed to initialize queue %d\n", i);
			goto release_queues;
		}
	}

	return 0;

release_queues:
	for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++)
		queue_fini(hdev, q);

	kfree(hdev->kernel_queues);

	return rc;
}

void hl_hw_queues_destroy(struct hl_device *hdev)
{
	struct hl_hw_queue *q;
	u32 max_queues = hdev->asic_prop.max_queues;
	int i;

	for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++)
		queue_fini(hdev, q);

	kfree(hdev->kernel_queues);
}

void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset)
{
	struct hl_hw_queue *q;
	u32 max_queues = hdev->asic_prop.max_queues;
	int i;

	for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++) {
		if ((!q->valid) ||
			((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU)))
			continue;
		q->pi = 0;
		atomic_set(&q->ci, 0);

		if (q->supports_sync_stream)
			sync_stream_queue_reset(hdev, q->hw_queue_id);
	}
}