v5.9
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>

#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
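
/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * typical life cycle of a ring fence as implemented in this file.  The
 * function name is hypothetical; amdgpu_fence_emit(), dma_fence_wait()
 * and dma_fence_put() are the real entry points used below.
 */
#if 0	/* example only, never compiled */
static int example_submit_and_wait(struct amdgpu_ring *ring)
{
	struct dma_fence *f;
	int r;

	/* ... commands for this batch have been written to the ring ... */

	r = amdgpu_fence_emit(ring, &f, 0);	/* fence marks end of batch */
	if (r)
		return r;

	/* CPU side blocks until the GPU writes the fence value back */
	r = dma_fence_wait(f, false);
	dma_fence_put(f);
	return r;
}
#endif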

struct amdgpu_fence {
	struct dma_fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring		*ring;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}

void amdgpu_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}
/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @flags: AMDGPU_FENCE_FLAG_* flags to set on the fence
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM if allocation fails, or the error
 * returned while waiting for the previous fence occupying the slot.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
		      unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct dma_fence __rcu **ptr;
	uint32_t seq;
	int r;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	dma_fence_init(&fence->base, &amdgpu_fence_ops,
		       &ring->fence_drv.lock,
		       adev->fence_context + ring->idx,
		       seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, flags | AMDGPU_FENCE_FLAG_INT);
	pm_runtime_get_noresume(adev->ddev->dev);
	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
		struct dma_fence *old;

		rcu_read_lock();
		old = dma_fence_get_rcu_safe(ptr);
		rcu_read_unlock();

		if (old) {
			r = dma_fence_wait(old, false);
			dma_fence_put(old);
			if (r)
				return r;
		}
	}

	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}
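
/*
 * Illustrative sketch (editor's addition): how a sequence number maps
 * onto a slot in the fence_drv.fences ring buffer.  The mask is
 * num_hw_submission * 2 - 1 (see amdgpu_fence_driver_init_ring() below),
 * so with num_hw_submission = 4 there are 8 slots and sequence number 9
 * reuses slot 1, which is why the emit path above may have to wait for
 * the slot's previous occupant first.
 */
#if 0	/* example only, never compiled */
static unsigned int example_fence_slot(struct amdgpu_ring *ring, u32 seq)
{
	return seq & ring->fence_drv.num_fences_mask;
}
#endif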

/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 * @timeout: maximum time to busy-wait, in usecs, for a free fence slot
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for polling fences, i.e. when no fence interrupt is available.
 * Returns 0 on success, -EINVAL or -ETIMEDOUT on failure.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
			      uint32_t timeout)
{
	uint32_t seq;
	signed long r;

	if (!s)
		return -EINVAL;

	seq = ++ring->fence_drv.sync_seq;
	r = amdgpu_fence_wait_polling(ring,
				      seq - ring->fence_drv.num_fences_mask,
				      timeout);
	if (r < 1)
		return -ETIMEDOUT;

	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, 0);

	*s = seq;

	return 0;
}
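
/*
 * Illustrative sketch (editor's addition) of the polling pair above and
 * amdgpu_fence_wait_polling() below, modeled on rings that may run
 * before interrupts are available.  The function name and the use of
 * MAX_KIQ_REG_WAIT as a timeout are assumptions for illustration.
 */
#if 0	/* example only, never compiled */
static int example_polling_submit(struct amdgpu_ring *ring)
{
	uint32_t seq;
	signed long r;

	/* ... commands written to the ring ... */
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		return r;
	amdgpu_ring_commit(ring);

	/* no interrupt available, so busy-wait for the fence value */
	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	return r < 1 ? -ETIMEDOUT : 0;
}
#endif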

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Wakes the fence queue if the
 * sequence number has increased.
 *
 * Returns true if fence was processed
 */
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct amdgpu_device *adev = ring->adev;
	uint32_t seq, last_seq;
	int r;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (del_timer(&ring->fence_drv.fallback_timer) &&
	    seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return false;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		r = dma_fence_signal(fence);
		if (!r)
			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		dma_fence_put(fence);
		pm_runtime_mark_last_busy(adev->ddev->dev);
		pm_runtime_put_autosuspend(adev->ddev->dev);
	} while (last_seq != seq);

	return true;
}
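
/*
 * Illustrative walk (editor's addition) of the signaling loop above:
 * if last_seq was 5 and the freshly read seq is 8, the loop visits
 * slots (6 & mask), (7 & mask) and (8 & mask) in order, signaling and
 * releasing each installed fence exactly once before it exits.
 */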

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the ring
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(struct timer_list *t)
{
	struct amdgpu_ring *ring = from_timer(ring, t,
					      fence_drv.fallback_timer);

	if (amdgpu_fence_process(ring))
		DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Busy-wait until the given sequence number has been written (all asics).
 * Returns the remaining time if the value arrived in time, otherwise 0.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout)
{
	uint32_t seq;

	do {
		seq = amdgpu_fence_read(ring);
		udelay(5);
		timeout -= 5;
	} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

	return timeout > 0 ? timeout : 0;
}
/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += READ_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}
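
/*
 * Illustrative arithmetic (editor's addition) for the wrap handling
 * above: with last_seq = 0xfffffffe and sync_seq = 0x00000001 (the
 * 32-bit counter has wrapped), 0x100000000 - 0xfffffffe + 0x1 = 0x3,
 * i.e. three fences are outstanding across the wrap.
 */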

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));

	if (irq_src)
		amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
		      ring->name, ring->fence_drv.gpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	struct amdgpu_device *adev = ring->adev;
	long timeout;
	int r;

	if (!adev)
		return -EINVAL;

	if (!is_power_of_2(num_hw_submission))
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	/* No need to setup the GPU scheduler for rings that don't need it */
	if (!ring->no_scheduler) {
		switch (ring->funcs->type) {
		case AMDGPU_RING_TYPE_GFX:
			timeout = adev->gfx_timeout;
			break;
		case AMDGPU_RING_TYPE_COMPUTE:
			timeout = adev->compute_timeout;
			break;
		case AMDGPU_RING_TYPE_SDMA:
			timeout = adev->sdma_timeout;
			break;
		default:
			timeout = adev->video_timeout;
			break;
		}

		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
				   num_hw_submission, amdgpu_job_hang_limit,
				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(ring);
		}
		if (ring->fence_drv.irq_src)
			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
		if (!ring->no_scheduler)
			drm_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(ring);
		}

		/* disable the interrupt */
		if (ring->fence_drv.irq_src)
			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		if (ring->fence_drv.irq_src)
			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
 *
 * @ring: ring whose latest fence should be signaled
 *
 * Writes the current sync_seq as the fence value and processes it, so
 * that no waiter is left behind on a fence that the hardware will never
 * write.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	amdgpu_fence_process(ring);
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with the fence lock held; it arms the
 * fallback timer so the fence still gets signalled even if the
 * fence interrupt is never delivered.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	kmem_cache_free(amdgpu_fence_slab, fence);
}

/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.release = amdgpu_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence          0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted                 0x%08x\n",
			   ring->fence_drv.sync_seq);

		if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
		    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
			seq_printf(m, "Last signaled trailing fence 0x%08x\n",
				   le32_to_cpu(*ring->trail_fence_cpu_addr));
			seq_printf(m, "Last emitted                 0x%08x\n",
				   ring->trail_seq);
		}

		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
			continue;

		/* set in CP_VMID_PREEMPT and preemption occurred */
		seq_printf(m, "Last preempted               0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
		/* set in CP_VMID_RESET and reset occurred */
		seq_printf(m, "Last reset                   0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
		/* Both preemption and reset occurred */
		seq_printf(m, "Last both                    0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
	}
	return 0;
}

/**
 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return 0;
	}

	seq_printf(m, "gpu recover\n");
	amdgpu_device_gpu_recover(adev, NULL);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL}
};

static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	if (amdgpu_sriov_vf(adev))
		return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list_sriov,
						ARRAY_SIZE(amdgpu_debugfs_fence_list_sriov));
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list,
					ARRAY_SIZE(amdgpu_debugfs_fence_list));
#else
	return 0;
#endif
}

v4.10.11
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */

struct amdgpu_fence {
	struct dma_fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring		*ring;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}

void amdgpu_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}
/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct dma_fence *old, **ptr;
	uint32_t seq;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	dma_fence_init(&fence->base, &amdgpu_fence_ops,
		       &ring->fence_drv.lock,
		       adev->fence_context + ring->idx,
		       seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, AMDGPU_FENCE_FLAG_INT);

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	old = rcu_dereference_protected(*ptr, 1);
	if (old && !dma_fence_is_signaled(old)) {
		DRM_INFO("rcu slot is busy\n");
		dma_fence_wait(old, false);
	}

	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Wakes the fence queue if the
 * sequence number has increased.
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t seq, last_seq;
	int r;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		r = dma_fence_signal(fence);
		if (!r)
			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		dma_fence_put(fence);
	} while (last_seq != seq);
}

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @arg: pointer to the ring, cast to an unsigned long
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(unsigned long arg)
{
	struct amdgpu_ring *ring = (void *)arg;

	amdgpu_fence_process(ring);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	long timeout;
	int r;

	/* Check that num_hw_submission is a power of two */
	if ((num_hw_submission & (num_hw_submission - 1)) != 0)
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
		    (unsigned long)ring);

	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	/* No need to setup the GPU scheduler for KIQ ring */
	if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
		timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
		if (timeout == 0) {
			/*
			 * FIXME:
			 * Delayed workqueue cannot use it directly,
			 * so the scheduler will not use delayed workqueue if
			 * MAX_SCHEDULE_TIMEOUT is set.
			 * Currently keep it simple and silly.
			 */
			timeout = MAX_SCHEDULE_TIMEOUT;
		}
		r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
				   num_hw_submission,
				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		amd_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(adev);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a
 * fence that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	}
}
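
/*
 * Note (editor's addition, comparing with the v5.9 listing above):
 * this variant forces completion on every initialized ring at once and
 * does not call amdgpu_fence_process() itself, so waiters are released
 * the next time fence processing runs (interrupt or fallback timer).
 */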

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with the fence lock held; it arms the
 * fallback timer so the fence still gets signalled even if the
 * fence interrupt is never delivered.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	kmem_cache_free(amdgpu_fence_slab, fence);
}

/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.wait = dma_fence_default_wait,
	.release = amdgpu_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted        0x%08x\n",
			   ring->fence_drv.sync_seq);
	}
	return 0;
}

/**
 * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu reset\n");
	amdgpu_gpu_reset(adev);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
#else
	return 0;
#endif
}