/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
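
/*
 * An illustrative sketch (not part of the driver) of the fence life
 * cycle implemented below; the ring pointer is assumed to come from an
 * already initialized device:
 *
 *	struct dma_fence *f;
 *	int r;
 *
 *	r = amdgpu_fence_emit(ring, &f);
 *	if (!r) {
 *		r = dma_fence_wait(f, false);	(the fence IRQ, or the
 *						fallback timer, runs
 *						amdgpu_fence_process())
 *		dma_fence_put(f);		(drop our reference)
 *	}
 */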

struct amdgpu_fence {
	struct dma_fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring		*ring;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}

void amdgpu_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}

/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct dma_fence *old, **ptr;
	uint32_t seq;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	dma_fence_init(&fence->base, &amdgpu_fence_ops,
		       &ring->fence_drv.lock,
		       adev->fence_context + ring->idx,
		       seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, AMDGPU_FENCE_FLAG_INT);

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	old = rcu_dereference_protected(*ptr, 1);
	if (old && !dma_fence_is_signaled(old)) {
		DRM_INFO("rcu slot is busy\n");
		dma_fence_wait(old, false);
	}

	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}
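
/*
 * Worked example of the slot indexing above (values assumed for
 * illustration): with num_hw_submission = 256 the driver allocates
 * 512 fence slots, so num_fences_mask = 511.  Fence seq 513 then
 * lands in slot 513 & 511 = 1, i.e. a slot is only reused once 512
 * newer fences have been emitted, which is why a still-busy slot is
 * rare enough to simply wait on.
 */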

/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for polling fences.
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
{
	uint32_t seq;

	if (!s)
		return -EINVAL;

	seq = ++ring->fence_drv.sync_seq;
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, 0);

	*s = seq;

	return 0;
}
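
/*
 * Illustrative pairing with amdgpu_fence_wait_polling() below; a
 * sketch only, and the 10000us timeout is an assumed value:
 *
 *	uint32_t seq;
 *
 *	if (!amdgpu_fence_emit_polling(ring, &seq) &&
 *	    !amdgpu_fence_wait_polling(ring, seq, 10000))
 *		DRM_ERROR("polling fence %u timed out\n", seq);
 */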

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signaled fence value.  If the sequence number has increased,
 * signals all fences up to the last signaled one.
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t seq, last_seq;
	int r;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		r = dma_fence_signal(fence);
		if (!r)
			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		dma_fence_put(fence);
	} while (last_seq != seq);
}
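
/*
 * Worked example of the masked walk above (assumed values): with
 * num_fences_mask = 7, last_seq = 5 and seq = 9, the masked values
 * become 5 and 1, and the loop visits slots 6, 7, 0 and 1, signaling
 * each fence it finds there.  The wrap through slot 0 is why the
 * indices are masked rather than compared directly.
 */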

/**
 * amdgpu_fence_fallback - fallback for a missed fence interrupt
 *
 * @t: timer context used to look up the ring
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(struct timer_list *t)
{
	struct amdgpu_ring *ring = from_timer(ring, t,
					      fence_drv.fallback_timer);

	amdgpu_fence_process(ring);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring to wait on
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring to wait on
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Busy-waits until the given sequence number has signaled on the
 * requested ring (all asics).
 * Returns the remaining timeout if the sequence number arrived in
 * time, 0 on timeout.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout)
{
	uint32_t seq;

	do {
		seq = amdgpu_fence_read(ring);
		udelay(5);
		timeout -= 5;
	} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

	return timeout > 0 ? timeout : 0;
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += READ_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}
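
/*
 * Worked example of the wraparound arithmetic above (assumed values):
 * if last_seq has reached 0xfffffffe while sync_seq has wrapped to
 * 0x00000001, then emitted = 0x100000000 - 0xfffffffe + 0x1 = 3, and
 * lower_32_bits() reports 3 outstanding fences; computing in 64 bits
 * first is what makes the 32-bit sequence wrap harmless.
 */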

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_dbg(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		"cpu addr 0x%p\n", ring->idx,
		ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 * Returns 0 on success, error on failure.
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	long timeout;
	int r;

	/* Check that num_hw_submission is a power of two */
	if ((num_hw_submission & (num_hw_submission - 1)) != 0)
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	/* No need to setup the GPU scheduler for KIQ ring */
	if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
		/* for the non-sriov case, no timeout is enforced on compute rings */
		if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
				&& !amdgpu_sriov_vf(ring->adev))
			timeout = MAX_SCHEDULE_TIMEOUT;
		else
			timeout = msecs_to_jiffies(amdgpu_lockup_timeout);

		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
				   num_hw_submission, amdgpu_job_hang_limit,
				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}
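
/*
 * Worked example of the power-of-two check above (assumed values):
 * 256 & 255 == 0, so 256 is accepted, while 384 & 383 == 0x100, so
 * 384 is rejected.  The check matters because num_fences_mask only
 * acts as a cheap modulo when the fence array size is a power of two.
 */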

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(ring);
		}
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		drm_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(ring);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
 *
 * @ring: ring to signal the latest fence on
 *
 * Writes the latest emitted sequence number as the current fence value
 * and signals all pending fences on the ring.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	amdgpu_fence_process(ring);
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);

	return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with the fence lock held and arms the
 * fallback timer so the fence is signaled even if the hardware
 * interrupt is never delivered.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);

	kmem_cache_free(amdgpu_fence_slab, fence);
}

/**
 * amdgpu_fence_release - callback when the fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.wait = dma_fence_default_wait,
	.release = amdgpu_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted        0x%08x\n",
			   ring->fence_drv.sync_seq);

		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
			continue;

		/* set in CP_VMID_PREEMPT and preemption occurred */
		seq_printf(m, "Last preempted      0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
		/* set in CP_VMID_RESET and reset occurred */
		seq_printf(m, "Last reset          0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
		/* Both preemption and reset occurred */
		seq_printf(m, "Last both           0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
	}
	return 0;
}

/**
 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
 *
 * Manually trigger a GPU reset and recovery when this file is read.
 */
static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu recover\n");
	amdgpu_device_gpu_recover(adev, NULL, true);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL}
};

static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	if (amdgpu_sriov_vf(adev))
		return amdgpu_debugfs_add_files(adev,
						amdgpu_debugfs_fence_list_sriov,
						ARRAY_SIZE(amdgpu_debugfs_fence_list_sriov));
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list,
					ARRAY_SIZE(amdgpu_debugfs_fence_list));
#else
	return 0;
#endif
}
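
/*
 * Usage sketch (the path is assumed; it depends on the DRM minor
 * number): once the driver is loaded, the files registered above can
 * be read from debugfs, e.g.
 *
 *	cat /sys/kernel/debug/dri/0/amdgpu_fence_info
 *
 * which prints the last signaled and last emitted sequence number for
 * every initialized ring.
 */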