v5.9
  1/*
  2 * Copyright 2009 Jerome Glisse.
  3 * All Rights Reserved.
  4 *
  5 * Permission is hereby granted, free of charge, to any person obtaining a
  6 * copy of this software and associated documentation files (the
  7 * "Software"), to deal in the Software without restriction, including
  8 * without limitation the rights to use, copy, modify, merge, publish,
  9 * distribute, sub license, and/or sell copies of the Software, and to
 10 * permit persons to whom the Software is furnished to do so, subject to
 11 * the following conditions:
 12 *
 13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 20 *
 21 * The above copyright notice and this permission notice (including the
 22 * next paragraph) shall be included in all copies or substantial portions
 23 * of the Software.
 24 *
 25 */
 26/*
 27 * Authors:
 28 *    Jerome Glisse <glisse@freedesktop.org>
 29 *    Dave Airlie
 30 */
 31#include <linux/seq_file.h>
 32#include <linux/atomic.h>
 33#include <linux/wait.h>
 34#include <linux/kref.h>
 35#include <linux/slab.h>
 36#include <linux/firmware.h>
 37#include <linux/pm_runtime.h>
 38
 39#include <drm/drm_debugfs.h>
 40
 41#include "amdgpu.h"
 42#include "amdgpu_trace.h"
 43
 44/*
 45 * Fences
 46 * Fences mark an event in the GPUs pipeline and are used
 47 * for GPU/CPU synchronization.  When the fence is written,
 48 * it is expected that all buffers associated with that fence
 49 * are no longer in use by the associated ring on the GPU and
 50 * that the relevant GPU caches have been flushed.
 51 */
 52
 53struct amdgpu_fence {
 54	struct dma_fence base;
 55
 56	/* RB, DMA, etc. */
 57	struct amdgpu_ring		*ring;
 58};
 59
 60static struct kmem_cache *amdgpu_fence_slab;
 61
 62int amdgpu_fence_slab_init(void)
 63{
 64	amdgpu_fence_slab = kmem_cache_create(
 65		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
 66		SLAB_HWCACHE_ALIGN, NULL);
 67	if (!amdgpu_fence_slab)
 68		return -ENOMEM;
 69	return 0;
 70}
 71
 72void amdgpu_fence_slab_fini(void)
 73{
 74	rcu_barrier();
 75	kmem_cache_destroy(amdgpu_fence_slab);
 76}
 77/*
 78 * Cast helper
 79 */
 80static const struct dma_fence_ops amdgpu_fence_ops;
 81static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
 82{
 83	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
 84
 85	if (__f->base.ops == &amdgpu_fence_ops)
 86		return __f;
 87
 88	return NULL;
 89}
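/*
 * Editor's note: a minimal userspace sketch (not part of the kernel source)
 * of the cast-helper pattern above. The ops pointer doubles as a type tag:
 * container_of() is only valid if the dma_fence really is embedded in an
 * amdgpu_fence, which is proven by comparing base.ops against the one ops
 * table this file installs. All names below are illustrative.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ops { const char *name; };
struct base_fence { const struct ops *ops; };
struct wrapped_fence { struct base_fence base; int payload; };

static const struct ops wrapped_ops = { "wrapped" };

static struct wrapped_fence *to_wrapped(struct base_fence *f)
{
	struct wrapped_fence *w = container_of(f, struct wrapped_fence, base);

	return (f->ops == &wrapped_ops) ? w : NULL;
}

int main(void)
{
	struct wrapped_fence wf = { .base.ops = &wrapped_ops, .payload = 42 };
	struct base_fence other = { .ops = NULL };

	printf("%d\n", to_wrapped(&wf.base)->payload); /* 42 */
	printf("%p\n", (void *)to_wrapped(&other));    /* NULL: wrong type */
	return 0;
}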
 90
 91/**
 92 * amdgpu_fence_write - write a fence value
 93 *
 94 * @ring: ring the fence is associated with
 95 * @seq: sequence number to write
 96 *
 97 * Writes a fence value to memory (all asics).
 98 */
 99static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
100{
101	struct amdgpu_fence_driver *drv = &ring->fence_drv;
102
103	if (drv->cpu_addr)
104		*drv->cpu_addr = cpu_to_le32(seq);
105}
106
107/**
108 * amdgpu_fence_read - read a fence value
109 *
110 * @ring: ring the fence is associated with
111 *
112 * Reads a fence value from memory (all asics).
113 * Returns the value of the fence read from memory.
114 */
115static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
116{
117	struct amdgpu_fence_driver *drv = &ring->fence_drv;
118	u32 seq = 0;
119
120	if (drv->cpu_addr)
121		seq = le32_to_cpu(*drv->cpu_addr);
122	else
123		seq = atomic_read(&drv->last_seq);
124
125	return seq;
126}
127
128/**
129 * amdgpu_fence_emit - emit a fence on the requested ring
130 *
131 * @ring: ring the fence is associated with
132 * @f: resulting fence object
133 *
134 * Emits a fence command on the requested ring (all asics).
135 * Returns 0 on success, -ENOMEM on failure.
136 */
137int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
138		      unsigned flags)
139{
140	struct amdgpu_device *adev = ring->adev;
141	struct amdgpu_fence *fence;
142	struct dma_fence __rcu **ptr;
143	uint32_t seq;
144	int r;
145
146	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
147	if (fence == NULL)
148		return -ENOMEM;
149
150	seq = ++ring->fence_drv.sync_seq;
151	fence->ring = ring;
152	dma_fence_init(&fence->base, &amdgpu_fence_ops,
153		       &ring->fence_drv.lock,
154		       adev->fence_context + ring->idx,
155		       seq);
156	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
157			       seq, flags | AMDGPU_FENCE_FLAG_INT);
158	pm_runtime_get_noresume(adev->ddev->dev);
159	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
160	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
161		struct dma_fence *old;
162
163		rcu_read_lock();
164		old = dma_fence_get_rcu_safe(ptr);
165		rcu_read_unlock();
166
167		if (old) {
168			r = dma_fence_wait(old, false);
169			dma_fence_put(old);
170			if (r)
171				return r;
172		}
173	}
174
175	/* This function can't be called concurrently anyway, otherwise
176	 * emitting the fence would mess up the hardware ring buffer.
177	 */
178	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
179
180	*f = &fence->base;
181
182	return 0;
183}
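/*
 * Editor's sketch (not kernel code): why amdgpu_fence_emit() may have to
 * wait before installing a new fence. With num_hw_submission a power of
 * two, fences[] holds 2 * num_hw_submission slots and a sequence number is
 * mapped to a slot by "seq & num_fences_mask". A slot is therefore reused
 * by the fence emitted exactly 2 * num_hw_submission submissions later,
 * and the previous occupant must be waited on first.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t num_hw_submission = 4;            /* example value */
	const uint32_t mask = num_hw_submission * 2 - 1; /* 7 */

	for (uint32_t seq = 1; seq <= 12; seq++)
		printf("seq %2u -> slot %u\n", seq, seq & mask);
	/* seq 1 and seq 9 share slot 1: emitting seq 9 must first make
	 * sure the fence for seq 1 has signaled and been released. */
	return 0;
}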
184
185/**
186 * amdgpu_fence_emit_polling - emit a fence on the requested ring
187 *
188 * @ring: ring the fence is associated with
189 * @s: resulting sequence number
190 *
191 * Emits a fence command on the requested ring (all asics).
192 * Used for polling fences.
193 * Returns 0 on success, -EINVAL or -ETIMEDOUT on failure.
194 */
195int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
196			      uint32_t timeout)
197{
198	uint32_t seq;
199	signed long r;
200
201	if (!s)
202		return -EINVAL;
203
204	seq = ++ring->fence_drv.sync_seq;
205	r = amdgpu_fence_wait_polling(ring,
206				      seq - ring->fence_drv.num_fences_mask,
207				      timeout);
208	if (r < 1)
209		return -ETIMEDOUT;
210
211	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
212			       seq, 0);
213
214	*s = seq;
215
216	return 0;
217}
218
219/**
220 * amdgpu_fence_schedule_fallback - schedule fallback check
221 *
222 * @ring: pointer to struct amdgpu_ring
223 *
224 * Start a timer as fallback to our interrupts.
225 */
226static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
227{
228	mod_timer(&ring->fence_drv.fallback_timer,
229		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
230}
231
232/**
233 * amdgpu_fence_process - check for fence activity
234 *
235 * @ring: pointer to struct amdgpu_ring
236 *
237 * Checks the current fence value and calculates the last
238 * signalled fence value. Wakes the fence queue if the
239 * sequence number has increased.
240 *
241 * Returns true if fence was processed
242 */
243bool amdgpu_fence_process(struct amdgpu_ring *ring)
244{
245	struct amdgpu_fence_driver *drv = &ring->fence_drv;
246	struct amdgpu_device *adev = ring->adev;
247	uint32_t seq, last_seq;
248	int r;
249
250	do {
251		last_seq = atomic_read(&ring->fence_drv.last_seq);
252		seq = amdgpu_fence_read(ring);
253
254	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);
255
256	if (del_timer(&ring->fence_drv.fallback_timer) &&
257	    seq != ring->fence_drv.sync_seq)
258		amdgpu_fence_schedule_fallback(ring);
259
260	if (unlikely(seq == last_seq))
261		return false;
262
263	last_seq &= drv->num_fences_mask;
264	seq &= drv->num_fences_mask;
265
266	do {
267		struct dma_fence *fence, **ptr;
268
269		++last_seq;
270		last_seq &= drv->num_fences_mask;
271		ptr = &drv->fences[last_seq];
272
273		/* There is always exactly one thread signaling this fence slot */
274		fence = rcu_dereference_protected(*ptr, 1);
275		RCU_INIT_POINTER(*ptr, NULL);
276
277		if (!fence)
278			continue;
279
280		r = dma_fence_signal(fence);
281		if (!r)
282			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
283		else
284			BUG();
285
286		dma_fence_put(fence);
287		pm_runtime_mark_last_busy(adev->ddev->dev);
288		pm_runtime_put_autosuspend(adev->ddev->dev);
289	} while (last_seq != seq);
290
291	return true;
292}
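/*
 * Editor's sketch (not kernel code): the lock-free update of last_seq
 * above, modeled with C11 atomics. Several threads may race inside
 * amdgpu_fence_process(); the compare-exchange loop guarantees last_seq
 * only advances to a value actually read back from the fence location and
 * that exactly one racer wins each step.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint last_seq;

static void publish(uint32_t hw_seq)
{
	uint32_t seen;

	do {
		seen = atomic_load(&last_seq);
		/* in the driver, hw_seq would be re-read from memory here */
	} while (!atomic_compare_exchange_weak(&last_seq, &seen, hw_seq));
}

int main(void)
{
	publish(5);
	publish(9);
	printf("last_seq = %u\n", atomic_load(&last_seq)); /* 9 */
	return 0;
}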
293
294/**
295 * amdgpu_fence_fallback - fallback for hardware interrupts
296 *
297 * @t: timer context used to obtain the pointer to ring structure
298 *
299 * Checks for fence activity.
300 */
301static void amdgpu_fence_fallback(struct timer_list *t)
302{
303	struct amdgpu_ring *ring = from_timer(ring, t,
304					      fence_drv.fallback_timer);
305
306	if (amdgpu_fence_process(ring))
307		DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
308}
309
310/**
311 * amdgpu_fence_wait_empty - wait for all fences to signal
312 *
313 * @ring: ring the fence is associated with
315 *
316 * Wait for all fences on the requested ring to signal (all asics).
317 * Returns 0 if the fences have passed, error for all other cases.
318 */
319int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
320{
321	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
322	struct dma_fence *fence, **ptr;
323	int r;
324
325	if (!seq)
326		return 0;
327
328	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
329	rcu_read_lock();
330	fence = rcu_dereference(*ptr);
331	if (!fence || !dma_fence_get_rcu(fence)) {
332		rcu_read_unlock();
333		return 0;
334	}
335	rcu_read_unlock();
336
337	r = dma_fence_wait(fence, false);
338	dma_fence_put(fence);
339	return r;
340}
341
342/**
343 * amdgpu_fence_wait_polling - busy wait for given sequence number
344 *
345 * @ring: ring the fence is associated with
346 * @wait_seq: sequence number to wait for
347 * @timeout: the timeout for waiting in usecs
348 *
349 * Wait for the requested sequence number to be written (all asics).
350 * Returns the remaining timeout if the sequence number was reached, 0 or negative on timeout.
351 */
352signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
353				      uint32_t wait_seq,
354				      signed long timeout)
355{
356	uint32_t seq;
357
358	do {
359		seq = amdgpu_fence_read(ring);
360		udelay(5);
361		timeout -= 5;
362	} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);
363
364	return timeout > 0 ? timeout : 0;
365}
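/*
 * Editor's sketch (not kernel code): the "(int32_t)(wait_seq - seq) > 0"
 * test above is a wraparound-safe ordering check. Sequence numbers live in
 * 32 bits and eventually wrap; subtracting in unsigned arithmetic and
 * reinterpreting the difference as signed still orders correctly as long
 * as the two values are less than 2^31 apart.
 */
#include <stdint.h>
#include <stdio.h>

static int seq_after(uint32_t a, uint32_t b) /* is a after b? */
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	printf("%d\n", seq_after(10, 5));          /* 1 */
	printf("%d\n", seq_after(5, 0xFFFFFFF0u)); /* 1: wrapped past zero */
	printf("%d\n", seq_after(0xFFFFFFF0u, 5)); /* 0 */
	return 0;
}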
366/**
367 * amdgpu_fence_count_emitted - get the count of emitted fences
368 *
369 * @ring: ring the fence is associated with
370 *
371 * Get the number of fences emitted on the requested ring (all asics).
372 * Returns the number of emitted fences on the ring.  Used by the
373 * dynpm code to track ring activity.
374 */
375unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
376{
377	uint64_t emitted;
378
379	/* We are not protected by ring lock when reading the last sequence
380	 * but it's ok to report slightly wrong fence count here.
381	 */
382	amdgpu_fence_process(ring);
383	emitted = 0x100000000ull;
384	emitted -= atomic_read(&ring->fence_drv.last_seq);
385	emitted += READ_ONCE(ring->fence_drv.sync_seq);
386	return lower_32_bits(emitted);
387}
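/*
 * Editor's sketch (not kernel code): the emitted-count arithmetic above.
 * Starting from 2^32 keeps the intermediate value positive in 64 bits even
 * when sync_seq has wrapped past last_seq; the low 32 bits of the result
 * are the number of in-flight fences.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t count_emitted(uint32_t last_seq, uint32_t sync_seq)
{
	uint64_t emitted = 0x100000000ull;

	emitted -= last_seq;
	emitted += sync_seq;
	return (uint32_t)emitted; /* lower_32_bits() */
}

int main(void)
{
	printf("%u\n", count_emitted(10, 14));         /* 4 */
	printf("%u\n", count_emitted(0xFFFFFFFEu, 2)); /* 4: wrapped */
	return 0;
}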
388
389/**
390 * amdgpu_fence_driver_start_ring - make the fence driver
391 * ready for use on the requested ring.
392 *
393 * @ring: ring to start the fence driver on
394 * @irq_src: interrupt source to use for this ring
395 * @irq_type: interrupt type to use for this ring
396 *
397 * Make the fence driver ready for processing (all asics).
398 * Not all asics have all rings, so each asic will only
399 * start the fence driver on the rings it has.
400 * Returns 0 for success, errors for failure.
401 */
402int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
403				   struct amdgpu_irq_src *irq_src,
404				   unsigned irq_type)
405{
406	struct amdgpu_device *adev = ring->adev;
407	uint64_t index;
408
409	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
410		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
411		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
412	} else {
413		/* put fence directly behind firmware */
414		index = ALIGN(adev->uvd.fw->size, 8);
415		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
416		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
417	}
418	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
419
420	if (irq_src)
421		amdgpu_irq_get(adev, irq_src, irq_type);
422
423	ring->fence_drv.irq_src = irq_src;
424	ring->fence_drv.irq_type = irq_type;
425	ring->fence_drv.initialized = true;
426
427	DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
428		      ring->name, ring->fence_drv.gpu_addr);
429	return 0;
430}
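/*
 * Editor's sketch (not kernel code): the ALIGN() rounding used above when
 * the fence is placed directly behind the UVD firmware image. Rounding the
 * firmware size up to an 8-byte boundary gives a naturally aligned offset
 * for the 32-bit fence value that follows it.
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	printf("%llu\n", (unsigned long long)ALIGN(1000u, 8)); /* 1000 */
	printf("%llu\n", (unsigned long long)ALIGN(1001u, 8)); /* 1008 */
	return 0;
}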
431
432/**
433 * amdgpu_fence_driver_init_ring - init the fence driver
434 * for the requested ring.
435 *
436 * @ring: ring to init the fence driver on
437 * @num_hw_submission: number of entries on the hardware queue
438 *
439 * Init the fence driver for the requested ring (all asics).
440 * Helper function for amdgpu_fence_driver_init().
441 */
442int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
443				  unsigned num_hw_submission)
444{
445	struct amdgpu_device *adev = ring->adev;
446	long timeout;
447	int r;
448
449	if (!adev)
450		return -EINVAL;
451
452	if (!is_power_of_2(num_hw_submission))
453		return -EINVAL;
454
455	ring->fence_drv.cpu_addr = NULL;
456	ring->fence_drv.gpu_addr = 0;
457	ring->fence_drv.sync_seq = 0;
458	atomic_set(&ring->fence_drv.last_seq, 0);
459	ring->fence_drv.initialized = false;
460
461	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
462
463	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
464	spin_lock_init(&ring->fence_drv.lock);
465	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
466					 GFP_KERNEL);
467	if (!ring->fence_drv.fences)
468		return -ENOMEM;
469
470	/* No need to setup the GPU scheduler for rings that don't need it */
471	if (!ring->no_scheduler) {
472		switch (ring->funcs->type) {
473		case AMDGPU_RING_TYPE_GFX:
474			timeout = adev->gfx_timeout;
475			break;
476		case AMDGPU_RING_TYPE_COMPUTE:
477			timeout = adev->compute_timeout;
478			break;
479		case AMDGPU_RING_TYPE_SDMA:
480			timeout = adev->sdma_timeout;
481			break;
482		default:
483			timeout = adev->video_timeout;
484			break;
485		}
486
487		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
488				   num_hw_submission, amdgpu_job_hang_limit,
489				   timeout, ring->name);
490		if (r) {
491			DRM_ERROR("Failed to create scheduler on ring %s.\n",
492				  ring->name);
493			return r;
494		}
495	}
496
497	return 0;
498}
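/*
 * Editor's sketch (not kernel code): why init above insists on a
 * power-of-two num_hw_submission. "seq & num_fences_mask" is only
 * equivalent to "seq % num_fences" when num_fences is a power of two, and
 * the classic bit trick behind is_power_of_2() checks exactly that.
 */
#include <stdint.h>
#include <stdio.h>

static int is_power_of_2(uint32_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	const uint32_t num_hw_submission = 256; /* example value */
	uint32_t mask = num_hw_submission * 2 - 1;

	printf("%d %d\n", is_power_of_2(256), is_power_of_2(384)); /* 1 0 */
	printf("slot of seq 513: %u\n", 513u & mask);              /* 1 */
	return 0;
}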
499
500/**
501 * amdgpu_fence_driver_init - init the fence driver
502 * for all possible rings.
503 *
504 * @adev: amdgpu device pointer
505 *
506 * Init the fence driver for all possible rings (all asics).
507 * Not all asics have all rings, so each asic will only
508 * start the fence driver on the rings it has using
509 * amdgpu_fence_driver_start_ring().
510 * Returns 0 for success.
511 */
512int amdgpu_fence_driver_init(struct amdgpu_device *adev)
513{
514	return 0;
515}
516
517/**
518 * amdgpu_fence_driver_fini - tear down the fence driver
519 * for all possible rings.
520 *
521 * @adev: amdgpu device pointer
522 *
523 * Tear down the fence driver for all possible rings (all asics).
524 */
525void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
526{
527	unsigned i, j;
528	int r;
529
530	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
531		struct amdgpu_ring *ring = adev->rings[i];
532
533		if (!ring || !ring->fence_drv.initialized)
534			continue;
535		r = amdgpu_fence_wait_empty(ring);
536		if (r) {
537			/* no need to trigger GPU reset as we are unloading */
538			amdgpu_fence_driver_force_completion(ring);
539		}
540		if (ring->fence_drv.irq_src)
541			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
542				       ring->fence_drv.irq_type);
543		if (!ring->no_scheduler)
544			drm_sched_fini(&ring->sched);
545		del_timer_sync(&ring->fence_drv.fallback_timer);
546		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
547			dma_fence_put(ring->fence_drv.fences[j]);
548		kfree(ring->fence_drv.fences);
549		ring->fence_drv.fences = NULL;
550		ring->fence_drv.initialized = false;
551	}
552}
553
554/**
555 * amdgpu_fence_driver_suspend - suspend the fence driver
556 * for all possible rings.
557 *
558 * @adev: amdgpu device pointer
559 *
560 * Suspend the fence driver for all possible rings (all asics).
561 */
562void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
563{
564	int i, r;
565
566	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
567		struct amdgpu_ring *ring = adev->rings[i];
568		if (!ring || !ring->fence_drv.initialized)
569			continue;
570
571		/* wait for gpu to finish processing current batch */
572		r = amdgpu_fence_wait_empty(ring);
573		if (r) {
574			/* delay GPU reset to resume */
575			amdgpu_fence_driver_force_completion(ring);
576		}
577
578		/* disable the interrupt */
579		if (ring->fence_drv.irq_src)
580			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
581				       ring->fence_drv.irq_type);
582	}
583}
584
585/**
586 * amdgpu_fence_driver_resume - resume the fence driver
587 * for all possible rings.
588 *
589 * @adev: amdgpu device pointer
590 *
591 * Resume the fence driver for all possible rings (all asics).
592 * Not all asics have all rings, so each asic will only
593 * start the fence driver on the rings it has using
594 * amdgpu_fence_driver_start_ring().
596 */
597void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
598{
599	int i;
600
601	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
602		struct amdgpu_ring *ring = adev->rings[i];
603		if (!ring || !ring->fence_drv.initialized)
604			continue;
605
606		/* enable the interrupt */
607		if (ring->fence_drv.irq_src)
608			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
609				       ring->fence_drv.irq_type);
610	}
611}
612
613/**
614 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
615 *
616 * @ring: ring whose latest fence should be signaled
617 *
618 */
619void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
620{
621	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
622	amdgpu_fence_process(ring);
623}
624
625/*
626 * Common fence implementation
627 */
628
629static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
630{
631	return "amdgpu";
632}
633
634static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
635{
636	struct amdgpu_fence *fence = to_amdgpu_fence(f);
637	return (const char *)fence->ring->name;
638}
639
640/**
641 * amdgpu_fence_enable_signaling - enable signalling on fence
642 * @f: fence
643 *
644 * This function is called with fence_queue lock held, and adds a callback
645 * to fence_queue that checks if this fence is signaled, and if so it
646 * signals the fence and removes itself.
647 */
648static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
649{
650	struct amdgpu_fence *fence = to_amdgpu_fence(f);
651	struct amdgpu_ring *ring = fence->ring;
652
653	if (!timer_pending(&ring->fence_drv.fallback_timer))
654		amdgpu_fence_schedule_fallback(ring);
655
656	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
657
658	return true;
659}
660
661/**
662 * amdgpu_fence_free - free up the fence memory
663 *
664 * @rcu: RCU callback head
665 *
666 * Free up the fence memory after the RCU grace period.
667 */
668static void amdgpu_fence_free(struct rcu_head *rcu)
669{
670	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
671	struct amdgpu_fence *fence = to_amdgpu_fence(f);
672	kmem_cache_free(amdgpu_fence_slab, fence);
673}
674
675/**
676 * amdgpu_fence_release - callback that fence can be freed
677 *
678 * @f: fence
679 *
680 * This function is called when the reference count becomes zero.
681 * It just RCU schedules freeing up the fence.
682 */
683static void amdgpu_fence_release(struct dma_fence *f)
684{
685	call_rcu(&f->rcu, amdgpu_fence_free);
686}
687
688static const struct dma_fence_ops amdgpu_fence_ops = {
689	.get_driver_name = amdgpu_fence_get_driver_name,
690	.get_timeline_name = amdgpu_fence_get_timeline_name,
691	.enable_signaling = amdgpu_fence_enable_signaling,
692	.release = amdgpu_fence_release,
693};
694
695/*
696 * Fence debugfs
697 */
698#if defined(CONFIG_DEBUG_FS)
699static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
700{
701	struct drm_info_node *node = (struct drm_info_node *)m->private;
702	struct drm_device *dev = node->minor->dev;
703	struct amdgpu_device *adev = dev->dev_private;
704	int i;
705
706	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
707		struct amdgpu_ring *ring = adev->rings[i];
708		if (!ring || !ring->fence_drv.initialized)
709			continue;
710
711		amdgpu_fence_process(ring);
712
713		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
714		seq_printf(m, "Last signaled fence          0x%08x\n",
715			   atomic_read(&ring->fence_drv.last_seq));
716		seq_printf(m, "Last emitted                 0x%08x\n",
717			   ring->fence_drv.sync_seq);
718
719		if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
720		    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
721			seq_printf(m, "Last signaled trailing fence 0x%08x\n",
722				   le32_to_cpu(*ring->trail_fence_cpu_addr));
723			seq_printf(m, "Last emitted                 0x%08x\n",
724				   ring->trail_seq);
725		}
726
727		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
728			continue;
729
730		/* set in CP_VMID_PREEMPT and preemption occurred */
731		seq_printf(m, "Last preempted               0x%08x\n",
732			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
733		/* set in CP_VMID_RESET and reset occurred */
734		seq_printf(m, "Last reset                   0x%08x\n",
735			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
736		/* Both preemption and reset occurred */
737		seq_printf(m, "Last both                    0x%08x\n",
738			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
739	}
740	return 0;
741}
742
743/**
744 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
745 *
746 * Manually trigger a gpu reset at the next fence wait.
747 */
748static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
749{
750	struct drm_info_node *node = (struct drm_info_node *) m->private;
751	struct drm_device *dev = node->minor->dev;
752	struct amdgpu_device *adev = dev->dev_private;
753	int r;
754
755	r = pm_runtime_get_sync(dev->dev);
756	if (r < 0) {
757		pm_runtime_put_autosuspend(dev->dev);
758		return 0;
759	}
760
761	seq_printf(m, "gpu recover\n");
762	amdgpu_device_gpu_recover(adev, NULL);
763
764	pm_runtime_mark_last_busy(dev->dev);
765	pm_runtime_put_autosuspend(dev->dev);
766
767	return 0;
768}
769
770static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
771	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
772	{"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL}
773};
774
775static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
776	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
777};
778#endif
779
780int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
781{
782#if defined(CONFIG_DEBUG_FS)
783	if (amdgpu_sriov_vf(adev))
784		return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list_sriov,
785						ARRAY_SIZE(amdgpu_debugfs_fence_list_sriov));
786	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list,
787					ARRAY_SIZE(amdgpu_debugfs_fence_list));
788#else
789	return 0;
790#endif
791}
792
v6.2
  1/*
  2 * Copyright 2009 Jerome Glisse.
  3 * All Rights Reserved.
  4 *
  5 * Permission is hereby granted, free of charge, to any person obtaining a
  6 * copy of this software and associated documentation files (the
  7 * "Software"), to deal in the Software without restriction, including
  8 * without limitation the rights to use, copy, modify, merge, publish,
  9 * distribute, sub license, and/or sell copies of the Software, and to
 10 * permit persons to whom the Software is furnished to do so, subject to
 11 * the following conditions:
 12 *
 13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 20 *
 21 * The above copyright notice and this permission notice (including the
 22 * next paragraph) shall be included in all copies or substantial portions
 23 * of the Software.
 24 *
 25 */
 26/*
 27 * Authors:
 28 *    Jerome Glisse <glisse@freedesktop.org>
 29 *    Dave Airlie
 30 */
 31#include <linux/seq_file.h>
 32#include <linux/atomic.h>
 33#include <linux/wait.h>
 34#include <linux/kref.h>
 35#include <linux/slab.h>
 36#include <linux/firmware.h>
 37#include <linux/pm_runtime.h>
 38
 39#include <drm/drm_drv.h>
 40#include "amdgpu.h"
 41#include "amdgpu_trace.h"
 42#include "amdgpu_reset.h"
 43
 44/*
 45 * Fences
 46 * Fences mark an event in the GPUs pipeline and are used
 47 * for GPU/CPU synchronization.  When the fence is written,
 48 * it is expected that all buffers associated with that fence
 49 * are no longer in use by the associated ring on the GPU and
 50 * that the relevant GPU caches have been flushed.
 51 */
 52
 53struct amdgpu_fence {
 54	struct dma_fence base;
 55
 56	/* RB, DMA, etc. */
 57	struct amdgpu_ring		*ring;
 58	ktime_t				start_timestamp;
 59};
 60
 61static struct kmem_cache *amdgpu_fence_slab;
 62
 63int amdgpu_fence_slab_init(void)
 64{
 65	amdgpu_fence_slab = kmem_cache_create(
 66		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
 67		SLAB_HWCACHE_ALIGN, NULL);
 68	if (!amdgpu_fence_slab)
 69		return -ENOMEM;
 70	return 0;
 71}
 72
 73void amdgpu_fence_slab_fini(void)
 74{
 75	rcu_barrier();
 76	kmem_cache_destroy(amdgpu_fence_slab);
 77}
 78/*
 79 * Cast helper
 80 */
 81static const struct dma_fence_ops amdgpu_fence_ops;
 82static const struct dma_fence_ops amdgpu_job_fence_ops;
 83static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
 84{
 85	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
 86
 87	if (__f->base.ops == &amdgpu_fence_ops ||
 88	    __f->base.ops == &amdgpu_job_fence_ops)
 89		return __f;
 90
 91	return NULL;
 92}
 93
 94/**
 95 * amdgpu_fence_write - write a fence value
 96 *
 97 * @ring: ring the fence is associated with
 98 * @seq: sequence number to write
 99 *
100 * Writes a fence value to memory (all asics).
101 */
102static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
103{
104	struct amdgpu_fence_driver *drv = &ring->fence_drv;
105
106	if (drv->cpu_addr)
107		*drv->cpu_addr = cpu_to_le32(seq);
108}
109
110/**
111 * amdgpu_fence_read - read a fence value
112 *
113 * @ring: ring the fence is associated with
114 *
115 * Reads a fence value from memory (all asics).
116 * Returns the value of the fence read from memory.
117 */
118static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
119{
120	struct amdgpu_fence_driver *drv = &ring->fence_drv;
121	u32 seq = 0;
122
123	if (drv->cpu_addr)
124		seq = le32_to_cpu(*drv->cpu_addr);
125	else
126		seq = atomic_read(&drv->last_seq);
127
128	return seq;
129}
130
131/**
132 * amdgpu_fence_emit - emit a fence on the requested ring
133 *
134 * @ring: ring the fence is associated with
135 * @f: resulting fence object
136 * @job: job the fence is embedded in
137 * @flags: flags to pass into the subordinate .emit_fence() call
138 *
139 * Emits a fence command on the requested ring (all asics).
140 * Returns 0 on success, -ENOMEM on failure.
141 */
142int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
143		      unsigned flags)
144{
145	struct amdgpu_device *adev = ring->adev;
146	struct dma_fence *fence;
147	struct amdgpu_fence *am_fence;
148	struct dma_fence __rcu **ptr;
149	uint32_t seq;
150	int r;
151
152	if (job == NULL) {
153		/* create a separate hw fence */
154		am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
155		if (am_fence == NULL)
156			return -ENOMEM;
157		fence = &am_fence->base;
158		am_fence->ring = ring;
159	} else {
160		/* make use of the job-embedded fence */
161		fence = &job->hw_fence;
162	}
163
164	seq = ++ring->fence_drv.sync_seq;
165	if (job && job->job_run_counter) {
166		/* reinit seq for resubmitted jobs */
167		fence->seqno = seq;
168		/* To be in line with external fence creation and other drivers */
169		dma_fence_get(fence);
170	} else {
171		if (job) {
172			dma_fence_init(fence, &amdgpu_job_fence_ops,
173				       &ring->fence_drv.lock,
174				       adev->fence_context + ring->idx, seq);
175			/* Guard against removal in amdgpu_job_{free, free_cb} */
176			dma_fence_get(fence);
177		}
178		else
179			dma_fence_init(fence, &amdgpu_fence_ops,
180				       &ring->fence_drv.lock,
181				       adev->fence_context + ring->idx, seq);
182	}
183
184	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
185			       seq, flags | AMDGPU_FENCE_FLAG_INT);
186	pm_runtime_get_noresume(adev_to_drm(adev)->dev);
187	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
188	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
189		struct dma_fence *old;
190
191		rcu_read_lock();
192		old = dma_fence_get_rcu_safe(ptr);
193		rcu_read_unlock();
194
195		if (old) {
196			r = dma_fence_wait(old, false);
197			dma_fence_put(old);
198			if (r)
199				return r;
200		}
201	}
202
203	to_amdgpu_fence(fence)->start_timestamp = ktime_get();
204
205	/* This function can't be called concurrently anyway, otherwise
206	 * emitting the fence would mess up the hardware ring buffer.
207	 */
208	rcu_assign_pointer(*ptr, dma_fence_get(fence));
209
210	*f = fence;
211
212	return 0;
213}
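/*
 * Editor's sketch (not kernel code): the v6.2 job-embedded fence. Instead
 * of a separate slab allocation, the dma_fence lives inside the job, and
 * the job-specific ops handlers recover the job with container_of(), as
 * amdgpu_job_fence_get_timeline_name() does later in this file. All names
 * below are illustrative.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fence { unsigned seqno; };
struct job {
	const char *ring_name;
	struct fence hw_fence; /* embedded, no separate allocation */
};

static const char *job_timeline_name(struct fence *f)
{
	struct job *job = container_of(f, struct job, hw_fence);

	return job->ring_name;
}

int main(void)
{
	struct job j = { .ring_name = "gfx", .hw_fence = { .seqno = 7 } };

	printf("%s\n", job_timeline_name(&j.hw_fence)); /* gfx */
	return 0;
}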
214
215/**
216 * amdgpu_fence_emit_polling - emit a fence on the requested ring
217 *
218 * @ring: ring the fence is associated with
219 * @s: resulting sequence number
220 * @timeout: the timeout for waiting in usecs
221 *
222 * Emits a fence command on the requested ring (all asics).
223 * Used for polling fences.
224 * Returns 0 on success, -EINVAL or -ETIMEDOUT on failure.
225 */
226int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
227			      uint32_t timeout)
228{
229	uint32_t seq;
230	signed long r;
231
232	if (!s)
233		return -EINVAL;
234
235	seq = ++ring->fence_drv.sync_seq;
236	r = amdgpu_fence_wait_polling(ring,
237				      seq - ring->fence_drv.num_fences_mask,
238				      timeout);
239	if (r < 1)
240		return -ETIMEDOUT;
241
242	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
243			       seq, 0);
244
245	*s = seq;
246
247	return 0;
248}
249
250/**
251 * amdgpu_fence_schedule_fallback - schedule fallback check
252 *
253 * @ring: pointer to struct amdgpu_ring
254 *
255 * Start a timer as fallback to our interrupts.
256 */
257static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
258{
259	mod_timer(&ring->fence_drv.fallback_timer,
260		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
261}
262
263/**
264 * amdgpu_fence_process - check for fence activity
265 *
266 * @ring: pointer to struct amdgpu_ring
267 *
268 * Checks the current fence value and calculates the last
269 * signalled fence value. Wakes the fence queue if the
270 * sequence number has increased.
271 *
272 * Returns true if fence was processed
273 */
274bool amdgpu_fence_process(struct amdgpu_ring *ring)
275{
276	struct amdgpu_fence_driver *drv = &ring->fence_drv;
277	struct amdgpu_device *adev = ring->adev;
278	uint32_t seq, last_seq;
279
280	do {
281		last_seq = atomic_read(&ring->fence_drv.last_seq);
282		seq = amdgpu_fence_read(ring);
283
284	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);
285
286	if (del_timer(&ring->fence_drv.fallback_timer) &&
287	    seq != ring->fence_drv.sync_seq)
288		amdgpu_fence_schedule_fallback(ring);
289
290	if (unlikely(seq == last_seq))
291		return false;
292
293	last_seq &= drv->num_fences_mask;
294	seq &= drv->num_fences_mask;
295
296	do {
297		struct dma_fence *fence, **ptr;
298
299		++last_seq;
300		last_seq &= drv->num_fences_mask;
301		ptr = &drv->fences[last_seq];
302
303		/* There is always exactly one thread signaling this fence slot */
304		fence = rcu_dereference_protected(*ptr, 1);
305		RCU_INIT_POINTER(*ptr, NULL);
306
307		if (!fence)
308			continue;
309
310		dma_fence_signal(fence);
311		dma_fence_put(fence);
312		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
313		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
314	} while (last_seq != seq);
315
316	return true;
317}
318
319/**
320 * amdgpu_fence_fallback - fallback for hardware interrupts
321 *
322 * @t: timer context used to obtain the pointer to ring structure
323 *
324 * Checks for fence activity.
325 */
326static void amdgpu_fence_fallback(struct timer_list *t)
327{
328	struct amdgpu_ring *ring = from_timer(ring, t,
329					      fence_drv.fallback_timer);
330
331	if (amdgpu_fence_process(ring))
332		DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
333}
334
335/**
336 * amdgpu_fence_wait_empty - wait for all fences to signal
337 *
338 * @ring: ring the fence is associated with
339 *
340 * Wait for all fences on the requested ring to signal (all asics).
341 * Returns 0 if the fences have passed, error for all other cases.
342 */
343int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
344{
345	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
346	struct dma_fence *fence, **ptr;
347	int r;
348
349	if (!seq)
350		return 0;
351
352	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
353	rcu_read_lock();
354	fence = rcu_dereference(*ptr);
355	if (!fence || !dma_fence_get_rcu(fence)) {
356		rcu_read_unlock();
357		return 0;
358	}
359	rcu_read_unlock();
360
361	r = dma_fence_wait(fence, false);
362	dma_fence_put(fence);
363	return r;
364}
365
366/**
367 * amdgpu_fence_wait_polling - busy wait for given sequence number
368 *
369 * @ring: ring the fence is associated with
370 * @wait_seq: sequence number to wait for
371 * @timeout: the timeout for waiting in usecs
372 *
373 * Wait for the requested sequence number to be written (all asics).
374 * Returns the remaining timeout if the sequence number was reached, 0 or negative on timeout.
375 */
376signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
377				      uint32_t wait_seq,
378				      signed long timeout)
379{
380	uint32_t seq;
381
382	do {
383		seq = amdgpu_fence_read(ring);
384		udelay(5);
385		timeout -= 5;
386	} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);
387
388	return timeout > 0 ? timeout : 0;
389}
390/**
391 * amdgpu_fence_count_emitted - get the count of emitted fences
392 *
393 * @ring: ring the fence is associated with
394 *
395 * Get the number of fences emitted on the requested ring (all asics).
396 * Returns the number of emitted fences on the ring.  Used by the
398 * dynpm code to track ring activity.
398 */
399unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
400{
401	uint64_t emitted;
402
403	/* We are not protected by ring lock when reading the last sequence
404	 * but it's ok to report slightly wrong fence count here.
405	 */
406	emitted = 0x100000000ull;
407	emitted -= atomic_read(&ring->fence_drv.last_seq);
408	emitted += READ_ONCE(ring->fence_drv.sync_seq);
409	return lower_32_bits(emitted);
410}
411
412/**
413 * amdgpu_fence_last_unsignaled_time_us - time since the earliest unsignaled fence was emitted
414 * @ring: ring the fence is associated with
415 *
416 * Find the earliest fence that is still unsignaled and calculate the time
417 * delta between the time it was emitted and now.
418 */
419u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring)
420{
421	struct amdgpu_fence_driver *drv = &ring->fence_drv;
422	struct dma_fence *fence;
423	uint32_t last_seq, sync_seq;
424
425	last_seq = atomic_read(&ring->fence_drv.last_seq);
426	sync_seq = READ_ONCE(ring->fence_drv.sync_seq);
427	if (last_seq == sync_seq)
428		return 0;
429
430	++last_seq;
431	last_seq &= drv->num_fences_mask;
432	fence = drv->fences[last_seq];
433	if (!fence)
434		return 0;
435
436	return ktime_us_delta(ktime_get(),
437		to_amdgpu_fence(fence)->start_timestamp);
438}
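/*
 * Editor's sketch (not kernel code): a userspace analogue of the
 * ktime_us_delta() computation above, measuring how long ago a start
 * timestamp was taken. CLOCK_MONOTONIC plays the role of ktime_get().
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t us_delta(struct timespec later, struct timespec earlier)
{
	return (int64_t)(later.tv_sec - earlier.tv_sec) * 1000000 +
	       (later.tv_nsec - earlier.tv_nsec) / 1000;
}

int main(void)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start); /* fence "start_timestamp" */
	/* ... work happens here ... */
	clock_gettime(CLOCK_MONOTONIC, &now);
	printf("unsignaled for %lld us\n", (long long)us_delta(now, start));
	return 0;
}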
439
440/**
441 * amdgpu_fence_update_start_timestamp - update the timestamp of the fence
442 * @ring: ring the fence is associated with
443 * @seq: the fence seq number to update.
444 * @timestamp: the start timestamp to update.
445 *
446 * This function is called when the fence and its related ib are about to be
447 * resubmitted to the gpu in an MCBP scenario, so we need not consider a race
448 * with amdgpu_fence_process modifying the same fence.
449 */
450void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq, ktime_t timestamp)
451{
452	struct amdgpu_fence_driver *drv = &ring->fence_drv;
453	struct dma_fence *fence;
454
455	seq &= drv->num_fences_mask;
456	fence = drv->fences[seq];
457	if (!fence)
458		return;
459
460	to_amdgpu_fence(fence)->start_timestamp = timestamp;
461}
462
463/**
464 * amdgpu_fence_driver_start_ring - make the fence driver
465 * ready for use on the requested ring.
466 *
467 * @ring: ring to start the fence driver on
468 * @irq_src: interrupt source to use for this ring
469 * @irq_type: interrupt type to use for this ring
470 *
471 * Make the fence driver ready for processing (all asics).
472 * Not all asics have all rings, so each asic will only
473 * start the fence driver on the rings it has.
474 * Returns 0 for success, errors for failure.
475 */
476int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
477				   struct amdgpu_irq_src *irq_src,
478				   unsigned irq_type)
479{
480	struct amdgpu_device *adev = ring->adev;
481	uint64_t index;
482
483	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
484		ring->fence_drv.cpu_addr = ring->fence_cpu_addr;
485		ring->fence_drv.gpu_addr = ring->fence_gpu_addr;
486	} else {
487		/* put fence directly behind firmware */
488		index = ALIGN(adev->uvd.fw->size, 8);
489		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
490		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
491	}
492	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
493
494	ring->fence_drv.irq_src = irq_src;
495	ring->fence_drv.irq_type = irq_type;
496	ring->fence_drv.initialized = true;
497
498	DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
499		      ring->name, ring->fence_drv.gpu_addr);
500	return 0;
501}
502
503/**
504 * amdgpu_fence_driver_init_ring - init the fence driver
505 * for the requested ring.
506 *
507 * @ring: ring to init the fence driver on
508 *
509 * Init the fence driver for the requested ring (all asics).
510 * Helper function for amdgpu_fence_driver_init().
511 */
512int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
513{
514	struct amdgpu_device *adev = ring->adev;
515
516	if (!adev)
517		return -EINVAL;
518
519	if (!is_power_of_2(ring->num_hw_submission))
520		return -EINVAL;
521
522	ring->fence_drv.cpu_addr = NULL;
523	ring->fence_drv.gpu_addr = 0;
524	ring->fence_drv.sync_seq = 0;
525	atomic_set(&ring->fence_drv.last_seq, 0);
526	ring->fence_drv.initialized = false;
527
528	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
529
530	ring->fence_drv.num_fences_mask = ring->num_hw_submission * 2 - 1;
531	spin_lock_init(&ring->fence_drv.lock);
532	ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *),
533					 GFP_KERNEL);
534
535	if (!ring->fence_drv.fences)
536		return -ENOMEM;
537
538	return 0;
539}
540
541/**
542 * amdgpu_fence_driver_sw_init - init the fence driver
543 * for all possible rings.
544 *
545 * @adev: amdgpu device pointer
546 *
547 * Init the fence driver for all possible rings (all asics).
548 * Not all asics have all rings, so each asic will only
549 * start the fence driver on the rings it has using
550 * amdgpu_fence_driver_start_ring().
551 * Returns 0 for success.
552 */
553int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
554{
555	return 0;
556}
557
558/**
559 * amdgpu_fence_driver_hw_fini - tear down the fence driver
560 * for all possible rings.
561 *
562 * @adev: amdgpu device pointer
563 *
564 * Tear down the fence driver for all possible rings (all asics).
565 */
566void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
567{
568	int i, r;
569
570	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
571		struct amdgpu_ring *ring = adev->rings[i];
572
573		if (!ring || !ring->fence_drv.initialized)
574			continue;
575
576		/* You can't wait for HW to signal if it's gone */
577		if (!drm_dev_is_unplugged(adev_to_drm(adev)))
578			r = amdgpu_fence_wait_empty(ring);
579		else
580			r = -ENODEV;
581		/* no need to trigger GPU reset as we are unloading */
582		if (r)
583			amdgpu_fence_driver_force_completion(ring);
584
585		if (ring->fence_drv.irq_src)
586			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
587				       ring->fence_drv.irq_type);
588
589		del_timer_sync(&ring->fence_drv.fallback_timer);
590	}
591}
592
593/* Will either stop and flush handlers for amdgpu interrupt or re-enable it */
594void amdgpu_fence_driver_isr_toggle(struct amdgpu_device *adev, bool stop)
595{
596	int i;
597
598	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
599		struct amdgpu_ring *ring = adev->rings[i];
600
601		if (!ring || !ring->fence_drv.initialized || !ring->fence_drv.irq_src)
602			continue;
603
604		if (stop)
605			disable_irq(adev->irq.irq);
606		else
607			enable_irq(adev->irq.irq);
608	}
609}
610
611void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
612{
613	unsigned int i, j;
614
615	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
616		struct amdgpu_ring *ring = adev->rings[i];
617
618		if (!ring || !ring->fence_drv.initialized)
619			continue;
620
621		/*
622		 * Notice we check for sched.ops since there's some
623		 * override on the meaning of sched.ready by amdgpu.
624		 * The natural check would be sched.ready, which is
625		 * set as drm_sched_init() finishes...
626		 */
627		if (ring->sched.ops)
628			drm_sched_fini(&ring->sched);
629
630		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
631			dma_fence_put(ring->fence_drv.fences[j]);
632		kfree(ring->fence_drv.fences);
633		ring->fence_drv.fences = NULL;
634		ring->fence_drv.initialized = false;
635	}
636}
637
638/**
639 * amdgpu_fence_driver_hw_init - enable the fence driver
640 * for all possible rings.
641 *
642 * @adev: amdgpu device pointer
643 *
644 * Enable the fence driver for all possible rings (all asics).
645 * Not all asics have all rings, so each asic will only
646 * start the fence driver on the rings it has using
647 * amdgpu_fence_driver_start_ring().
649 */
650void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
651{
652	int i;
653
654	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
655		struct amdgpu_ring *ring = adev->rings[i];
656		if (!ring || !ring->fence_drv.initialized)
657			continue;
658
659		/* enable the interrupt */
660		if (ring->fence_drv.irq_src)
661			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
662				       ring->fence_drv.irq_type);
663	}
664}
665
666/**
667 * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
668 *
669 * @ring: ring whose job-embedded fences will be cleared
670 *
671 */
672void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
673{
674	int i;
675	struct dma_fence *old, **ptr;
676
677	for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
678		ptr = &ring->fence_drv.fences[i];
679		old = rcu_dereference_protected(*ptr, 1);
680		if (old && old->ops == &amdgpu_job_fence_ops) {
681			RCU_INIT_POINTER(*ptr, NULL);
682			dma_fence_put(old);
683		}
684	}
685}
686
687/**
688 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
689 *
690 * @ring: ring whose latest fence should be signaled
691 *
692 */
693void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
694{
695	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
696	amdgpu_fence_process(ring);
697}
698
699/*
700 * Common fence implementation
701 */
702
703static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
704{
705	return "amdgpu";
706}
707
708static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
709{
710	return (const char *)to_amdgpu_fence(f)->ring->name;
711}
712
713static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
714{
715	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
716
717	return (const char *)to_amdgpu_ring(job->base.sched)->name;
718}
719
720/**
721 * amdgpu_fence_enable_signaling - enable signalling on fence
722 * @f: fence
723 *
724 * This function is called with fence_queue lock held, and adds a callback
725 * to fence_queue that checks if this fence is signaled, and if so it
726 * signals the fence and removes itself.
727 */
728static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
729{
730	if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
731		amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);
732
733	return true;
734}
735
736/**
737 * amdgpu_job_fence_enable_signaling - enable signalling on job fence
738 * @f: fence
739 *
740 * This is similar to amdgpu_fence_enable_signaling above; it
741 * only handles the job-embedded fence.
742 */
743static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
744{
745	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
746
747	if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
748		amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
749
750	return true;
751}
752
753/**
754 * amdgpu_fence_free - free up the fence memory
755 *
756 * @rcu: RCU callback head
757 *
758 * Free up the fence memory after the RCU grace period.
759 */
760static void amdgpu_fence_free(struct rcu_head *rcu)
761{
762	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
763
764	/* free the slab entry if it's a separate fence */
765	kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
766}
767
768/**
769 * amdgpu_job_fence_free - free up the job with embedded fence
770 *
771 * @rcu: RCU callback head
772 *
773 * Free up the job with embedded fence after the RCU grace period.
774 */
775static void amdgpu_job_fence_free(struct rcu_head *rcu)
776{
777	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
778
779	/* free job if fence has a parent job */
780	kfree(container_of(f, struct amdgpu_job, hw_fence));
781}
782
783/**
784 * amdgpu_fence_release - callback that fence can be freed
785 *
786 * @f: fence
787 *
788 * This function is called when the reference count becomes zero.
789 * It just RCU schedules freeing up the fence.
790 */
791static void amdgpu_fence_release(struct dma_fence *f)
792{
793	call_rcu(&f->rcu, amdgpu_fence_free);
794}
795
796/**
797 * amdgpu_job_fence_release - callback that job embedded fence can be freed
798 *
799 * @f: fence
800 *
801 * This is similar to amdgpu_fence_release above; it
802 * only handles the job-embedded fence.
803 */
804static void amdgpu_job_fence_release(struct dma_fence *f)
805{
806	call_rcu(&f->rcu, amdgpu_job_fence_free);
807}
808
809static const struct dma_fence_ops amdgpu_fence_ops = {
810	.get_driver_name = amdgpu_fence_get_driver_name,
811	.get_timeline_name = amdgpu_fence_get_timeline_name,
812	.enable_signaling = amdgpu_fence_enable_signaling,
813	.release = amdgpu_fence_release,
814};
815
816static const struct dma_fence_ops amdgpu_job_fence_ops = {
817	.get_driver_name = amdgpu_fence_get_driver_name,
818	.get_timeline_name = amdgpu_job_fence_get_timeline_name,
819	.enable_signaling = amdgpu_job_fence_enable_signaling,
820	.release = amdgpu_job_fence_release,
821};
822
823/*
824 * Fence debugfs
825 */
826#if defined(CONFIG_DEBUG_FS)
827static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
828{
829	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
830	int i;
831
832	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
833		struct amdgpu_ring *ring = adev->rings[i];
834		if (!ring || !ring->fence_drv.initialized)
835			continue;
836
837		amdgpu_fence_process(ring);
838
839		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
840		seq_printf(m, "Last signaled fence          0x%08x\n",
841			   atomic_read(&ring->fence_drv.last_seq));
842		seq_printf(m, "Last emitted                 0x%08x\n",
843			   ring->fence_drv.sync_seq);
844
845		if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
846		    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
847			seq_printf(m, "Last signaled trailing fence 0x%08x\n",
848				   le32_to_cpu(*ring->trail_fence_cpu_addr));
849			seq_printf(m, "Last emitted                 0x%08x\n",
850				   ring->trail_seq);
851		}
852
853		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
854			continue;
855
856		/* set in CP_VMID_PREEMPT and preemption occurred */
857		seq_printf(m, "Last preempted               0x%08x\n",
858			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
859		/* set in CP_VMID_RESET and reset occurred */
860		seq_printf(m, "Last reset                   0x%08x\n",
861			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
862		/* Both preemption and reset occurred */
863		seq_printf(m, "Last both                    0x%08x\n",
864			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
865	}
866	return 0;
867}
868
869/*
870 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
871 *
872 * Manually trigger a gpu reset at the next fence wait.
873 */
874static int gpu_recover_get(void *data, u64 *val)
875{
876	struct amdgpu_device *adev = (struct amdgpu_device *)data;
877	struct drm_device *dev = adev_to_drm(adev);
878	int r;
879
880	r = pm_runtime_get_sync(dev->dev);
881	if (r < 0) {
882		pm_runtime_put_autosuspend(dev->dev);
883		return 0;
884	}
885
886	if (amdgpu_reset_domain_schedule(adev->reset_domain, &adev->reset_work))
887		flush_work(&adev->reset_work);
888
889	*val = atomic_read(&adev->reset_domain->reset_res);
890
891	pm_runtime_mark_last_busy(dev->dev);
892	pm_runtime_put_autosuspend(dev->dev);
893
894	return 0;
895}
896
897DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_fence_info);
898DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gpu_recover_fops, gpu_recover_get, NULL,
899			 "%lld\n");
900
901static void amdgpu_debugfs_reset_work(struct work_struct *work)
902{
903	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
904						  reset_work);
905
906	struct amdgpu_reset_context reset_context;
907	memset(&reset_context, 0, sizeof(reset_context));
908
909	reset_context.method = AMD_RESET_METHOD_NONE;
910	reset_context.reset_req_dev = adev;
911	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
912
913	amdgpu_device_gpu_recover(adev, NULL, &reset_context);
914}
915
916#endif
917
918void amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
919{
920#if defined(CONFIG_DEBUG_FS)
921	struct drm_minor *minor = adev_to_drm(adev)->primary;
922	struct dentry *root = minor->debugfs_root;
923
924	debugfs_create_file("amdgpu_fence_info", 0444, root, adev,
925			    &amdgpu_debugfs_fence_info_fops);
926
927	if (!amdgpu_sriov_vf(adev)) {
928
929		INIT_WORK(&adev->reset_work, amdgpu_debugfs_reset_work);
930		debugfs_create_file("amdgpu_gpu_recover", 0444, root, adev,
931				    &amdgpu_debugfs_gpu_recover_fops);
932	}
933#endif
934}
935