v3.1
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

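/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 *
 * Writes the fence value to the writeback page when writeback is
 * enabled, or to the driver scratch register otherwise.
 */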
static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
{
	if (rdev->wb.enabled) {
		u32 scratch_index;
		if (rdev->wb.use_event)
			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
		else
			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
		rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
	} else
		WREG32(rdev->fence_drv.scratch_reg, seq);
}

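/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 *
 * Reads the last signaled fence value from the writeback page when
 * writeback is enabled, or from the scratch register otherwise, and
 * returns it.
 */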
static u32 radeon_fence_read(struct radeon_device *rdev)
{
	u32 seq;

	if (rdev->wb.enabled) {
		u32 scratch_index;
		if (rdev->wb.use_event)
			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
		else
			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
		seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
	} else
		seq = RREG32(rdev->fence_drv.scratch_reg);
	return seq;
}

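/**
 * radeon_fence_emit - emit a fence on the ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Assigns the next sequence number to @fence and emits it on the ring,
 * or writes it out directly when the CP is not running. Does nothing
 * if the fence was already emitted. Returns 0.
 */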
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
	unsigned long irq_flags;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (fence->emited) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
	if (!rdev->cp.ready)
		/* FIXME: cp is not running, assume everything is done
		 * right away
		 */
		radeon_fence_write(rdev, fence->seq);
	else
		radeon_fence_ring_emit(rdev, fence);

	trace_radeon_fence_emit(rdev->ddev, fence->seq);
	fence->emited = true;
	list_move_tail(&fence->list, &rdev->fence_drv.emited);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	return 0;
}

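/**
 * radeon_fence_poll_locked - poll for fence activity
 *
 * @rdev: radeon_device pointer
 *
 * Reads the current fence value and moves every emitted fence up to
 * that value onto the signaled list. Called with fence_drv.lock held.
 * Returns true if any fence signaled and waiters should be woken.
 */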
static bool radeon_fence_poll_locked(struct radeon_device *rdev)
{
	struct radeon_fence *fence;
	struct list_head *i, *n;
	uint32_t seq;
	bool wake = false;
	unsigned long cjiffies;

	seq = radeon_fence_read(rdev);
	if (seq != rdev->fence_drv.last_seq) {
		rdev->fence_drv.last_seq = seq;
		rdev->fence_drv.last_jiffies = jiffies;
		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
	} else {
		cjiffies = jiffies;
		if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
			cjiffies -= rdev->fence_drv.last_jiffies;
			if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
				/* update the timeout */
				rdev->fence_drv.last_timeout -= cjiffies;
			} else {
				/* the 500ms timeout has elapsed, we should
				 * test for GPU lockup
				 */
				rdev->fence_drv.last_timeout = 1;
			}
		} else {
			/* jiffies wrapped around; update last_jiffies, we
			 * will just wait a little longer
			 */
			rdev->fence_drv.last_jiffies = cjiffies;
		}
		return false;
	}
	n = NULL;
	list_for_each(i, &rdev->fence_drv.emited) {
		fence = list_entry(i, struct radeon_fence, list);
		if (fence->seq == seq) {
			n = i;
			break;
		}
	}
	/* all fences previous to this one are considered signaled */
	if (n) {
		i = n;
		do {
			n = i->prev;
			list_move_tail(i, &rdev->fence_drv.signaled);
			fence = list_entry(i, struct radeon_fence, list);
			fence->signaled = true;
			i = n;
		} while (i != &rdev->fence_drv.emited);
		wake = true;
	}
	return wake;
}

static void radeon_fence_destroy(struct kref *kref)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
	list_del(&fence->list);
	fence->emited = false;
	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
	kfree(fence);
}

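/**
 * radeon_fence_create - create a fence
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Allocates a new fence, initializes its kref and adds it to the
 * created list. Returns 0 on success, -ENOMEM on allocation failure.
 */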
int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
{
	unsigned long irq_flags;

	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->emited = false;
	(*fence)->signaled = false;
	(*fence)->seq = 0;
	INIT_LIST_HEAD(&(*fence)->list);

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	return 0;
}

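/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Checks whether @fence has signaled, polling the hardware once if it
 * has not. NULL fences, GPU lockup, shutdown and unemitted fences are
 * all reported as signaled.
 */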
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	unsigned long irq_flags;
	bool signaled = false;

	if (!fence)
		return true;

	if (fence->rdev->gpu_lockup)
		return true;

	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
	signaled = fence->signaled;
	/* if we are shutting down, report all fences as signaled */
	if (fence->rdev->shutdown) {
		signaled = true;
	}
	if (!fence->emited) {
		WARN(1, "Querying an unemited fence : %p !\n", fence);
		signaled = true;
	}
	if (!signaled) {
		radeon_fence_poll_locked(fence->rdev);
		signaled = fence->signaled;
	}
	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
	return signaled;
}

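/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Waits for @fence to signal, retrying until the driver timeout
 * elapses and then testing for a GPU lockup. Returns 0 once the fence
 * signaled (or for a NULL fence), a negative error code if the wait
 * was interrupted or a GPU reset after a detected lockup failed.
 */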
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	struct radeon_device *rdev;
	unsigned long irq_flags, timeout;
	u32 seq;
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return 0;
	}
	rdev = fence->rdev;
	if (radeon_fence_signaled(fence)) {
		return 0;
	}
	timeout = rdev->fence_drv.last_timeout;
retry:
	/* save current sequence used to check for GPU lockup */
	seq = rdev->fence_drv.last_seq;
	trace_radeon_fence_wait_begin(rdev->ddev, seq);
	if (intr) {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
				radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
		if (unlikely(r < 0)) {
			return r;
		}
	} else {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_timeout(rdev->fence_drv.queue,
			 radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
	}
	trace_radeon_fence_wait_end(rdev->ddev, seq);
	if (unlikely(!radeon_fence_signaled(fence))) {
		/* we were interrupted for some reason and the fence
		 * isn't signaled yet, resume waiting
		 */
		if (r) {
			timeout = r;
			goto retry;
		}
		/* don't protect read access to rdev->fence_drv.last_seq;
		 * if we are experiencing a lockup the value doesn't change
		 */
		if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
			/* good news we believe it's a lockup */
			WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
			     fence->seq, seq);
			/* FIXME: what should we do ? marking everyone
			 * as signaled for now
			 */
			rdev->gpu_lockup = true;
			r = radeon_gpu_reset(rdev);
			if (r)
				return r;
			radeon_fence_write(rdev, fence->seq);
			rdev->gpu_lockup = false;
		}
		timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		rdev->fence_drv.last_jiffies = jiffies;
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		goto retry;
	}
	return 0;
}

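/**
 * radeon_fence_wait_next - wait for the next fence to signal
 *
 * @rdev: radeon_device pointer
 *
 * Waits for the oldest emitted fence to signal. Returns 0 on success
 * or if there is nothing to wait for, error on failure.
 */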
int radeon_fence_wait_next(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (list_empty(&rdev->fence_drv.emited)) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv.emited.next,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

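/**
 * radeon_fence_wait_last - wait for all fences to signal
 *
 * @rdev: radeon_device pointer
 *
 * Waits for the most recently emitted fence to signal, i.e. for the
 * ring to go idle. Returns 0 on success or if there is nothing to
 * wait for, error on failure.
 */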
int radeon_fence_wait_last(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (list_empty(&rdev->fence_drv.emited)) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv.emited.prev,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}

void radeon_fence_process(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	bool wake;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	wake = radeon_fence_poll_locked(rdev);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	if (wake) {
		wake_up_all(&rdev->fence_drv.queue);
	}
}

int radeon_fence_driver_init(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int r;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
	if (r) {
		dev_err(rdev->dev, "fence failed to get scratch register\n");
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return r;
	}
	radeon_fence_write(rdev, 0);
	atomic_set(&rdev->fence_drv.seq, 0);
	INIT_LIST_HEAD(&rdev->fence_drv.created);
	INIT_LIST_HEAD(&rdev->fence_drv.emited);
	INIT_LIST_HEAD(&rdev->fence_drv.signaled);
	init_waitqueue_head(&rdev->fence_drv.queue);
	rdev->fence_drv.initialized = true;
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	unsigned long irq_flags;

	if (!rdev->fence_drv.initialized)
		return;
	wake_up_all(&rdev->fence_drv.queue);
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	rdev->fence_drv.initialized = false;
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fence *fence;

	seq_printf(m, "Last signaled fence 0x%08X\n",
		   radeon_fence_read(rdev));
	if (!list_empty(&rdev->fence_drv.emited)) {
		fence = list_entry(rdev->fence_drv.emited.prev,
				   struct radeon_fence, list);
		seq_printf(m, "Last emitted fence %p with 0x%08X\n",
			   fence, fence->seq);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}
v4.10.11
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.  Whether
 * we use a scratch register or memory location depends on the asic
 * and whether writeback is enabled.
 */

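/*
 * Typical fence lifecycle, as a sketch (assumes a live rdev and that
 * the caller holds the ring emission mutex while emitting, as noted
 * in radeon_fence_emit below):
 *
 *	struct radeon_fence *fence;
 *	int r;
 *
 *	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
 *	if (!r) {
 *		r = radeon_fence_wait(fence, false);
 *		radeon_fence_unref(&fence);
 *	}
 */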
/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			*drv->cpu_addr = cpu_to_le32(seq);
		}
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			seq = le32_to_cpu(*drv->cpu_addr);
		} else {
			seq = lower_32_bits(atomic64_read(&drv->last_seq));
		}
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}

/**
 * radeon_fence_schedule_check - schedule lockup check
 *
 * @rdev: radeon_device pointer
 * @ring: ring index we should work with
 *
 * Queues a delayed work item to check for lockups.
 */
static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
{
	/*
	 * Do not reset the timer here with mod_delayed_work,
	 * this can livelock in an interaction with TTM delayed destroy.
	 */
	queue_delayed_work(system_power_efficient_wq,
			   &rdev->fence_drv[ring].lockup_work,
			   RADEON_FENCE_JIFFIES_TIMEOUT);
}

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	u64 seq;

	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	(*fence)->rdev = rdev;
	(*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	(*fence)->is_vm_update = false;
	dma_fence_init(&(*fence)->base, &radeon_fence_ops,
		       &rdev->fence_queue.lock,
		       rdev->fence_context + ring,
		       seq);
	radeon_fence_ring_emit(rdev, ring, *fence);
	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
	radeon_fence_schedule_check(rdev, ring);
	return 0;
}

/**
 * radeon_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also
 * used for the fence locking itself, so unlocked variants are used for
 * fence_signal and remove_wait_queue.
 */
static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
{
	struct radeon_fence *fence;
	u64 seq;

	fence = container_of(wait, struct radeon_fence, fence_wake);

	/*
	 * We cannot use radeon_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
	if (seq >= fence->seq) {
		int ret = dma_fence_signal_locked(&fence->base);

		if (!ret)
			DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n");
		else
			DMA_FENCE_TRACE(&fence->base, "was already signaled\n");

		radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
		__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
		dma_fence_put(&fence->base);
	} else
		DMA_FENCE_TRACE(&fence->base, "pending\n");
	return 0;
}

/**
 * radeon_fence_activity - check for fence activity
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and calculates the last
 * signaled fence value. Returns true if activity occurred
 * on the ring, and the fence_queue should be woken up.
 */
static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process and the
	 * other process needs to update last_seq between the atomic read
	 * and xchg of the current process.
	 *
	 * Moreover, for this to turn into an infinite loop there need to
	 * be continuously new fences signaled, ie radeon_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that xchg the last_seq
	 * between the atomic read and xchg of the current process. And
	 * the value the other process set as last seq must be higher
	 * than the seq value we just read. Which means that the current
	 * process needs to be interrupted after radeon_fence_read and
	 * before the atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * bail after 10 loops, just accepting the fact that we might
	 * have temporarily set last_seq not to the true real last
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
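	/* The fence value read from the ring is only 32 bits wide; the
	 * loop below splices it into the 64-bit sequence space using the
	 * upper bits of last_seq, falling back to last_emitted's upper
	 * bits when a 32-bit wraparound is detected.
	 */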
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times, leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (seq < last_emitted)
		radeon_fence_schedule_check(rdev, ring);

	return wake;
}

/**
 * radeon_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and if there is none probes
 * the hardware for whether a lockup occurred.
 */
static void radeon_fence_check_lockup(struct work_struct *work)
{
	struct radeon_fence_driver *fence_drv;
	struct radeon_device *rdev;
	int ring;

	fence_drv = container_of(work, struct radeon_fence_driver,
				 lockup_work.work);
	rdev = fence_drv->rdev;
	ring = fence_drv - &rdev->fence_drv[0];

	if (!down_read_trylock(&rdev->exclusive_lock)) {
		/* just reschedule the check if a reset is going on */
		radeon_fence_schedule_check(rdev, ring);
		return;
	}

	if (fence_drv->delayed_irq && rdev->ddev->irq_enabled) {
		unsigned long irqflags;

		fence_drv->delayed_irq = false;
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}

	if (radeon_fence_activity(rdev, ring))
		wake_up_all(&rdev->fence_queue);

	else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {

		/* good news we believe it's a lockup */
		dev_warn(rdev->dev, "GPU lockup (current fence id "
			 "0x%016llx last fence id 0x%016llx on ring %d)\n",
			 (uint64_t)atomic64_read(&fence_drv->last_seq),
			 fence_drv->sync_seq[ring], ring);

		/* remember that we need a reset */
		rdev->needs_reset = true;
		wake_up_all(&rdev->fence_queue);
	}
	up_read(&rdev->exclusive_lock);
}

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	if (radeon_fence_activity(rdev, ring))
		wake_up_all(&rdev->fence_queue);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value).  Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}
 355
 356static bool radeon_fence_is_signaled(struct dma_fence *f)
 357{
 358	struct radeon_fence *fence = to_radeon_fence(f);
 359	struct radeon_device *rdev = fence->rdev;
 360	unsigned ring = fence->ring;
 361	u64 seq = fence->seq;
 362
 363	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
 364		return true;
 365	}
 366
 367	if (down_read_trylock(&rdev->exclusive_lock)) {
 368		radeon_fence_process(rdev, ring);
 369		up_read(&rdev->exclusive_lock);
 370
 371		if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
 372			return true;
 373		}
 374	}
 375	return false;
 376}
 377
 378/**
 379 * radeon_fence_enable_signaling - enable signalling on fence
 380 * @fence: fence
 381 *
 382 * This function is called with fence_queue lock held, and adds a callback
 383 * to fence_queue that checks if this fence is signaled, and if so it
 384 * signals the fence and removes itself.
 385 */
 386static bool radeon_fence_enable_signaling(struct dma_fence *f)
 387{
 388	struct radeon_fence *fence = to_radeon_fence(f);
 389	struct radeon_device *rdev = fence->rdev;
 390
 391	if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
 392		return false;
 393
 394	if (down_read_trylock(&rdev->exclusive_lock)) {
 395		radeon_irq_kms_sw_irq_get(rdev, fence->ring);
 396
 397		if (radeon_fence_activity(rdev, fence->ring))
 398			wake_up_all_locked(&rdev->fence_queue);
 399
 400		/* did fence get signaled after we enabled the sw irq? */
 401		if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
 402			radeon_irq_kms_sw_irq_put(rdev, fence->ring);
 403			up_read(&rdev->exclusive_lock);
 404			return false;
 405		}
 406
 407		up_read(&rdev->exclusive_lock);
 408	} else {
 409		/* we're probably in a lockup, lets not fiddle too much */
 410		if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
 411			rdev->fence_drv[fence->ring].delayed_irq = true;
 412		radeon_fence_schedule_check(rdev, fence->ring);
 413	}
 414
 415	fence->fence_wake.flags = 0;
 416	fence->fence_wake.private = NULL;
 417	fence->fence_wake.func = radeon_fence_check_signaled;
 418	__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
 419	dma_fence_get(f);
 420
 421	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
 422	return true;
 423}
 424
 425/**
 426 * radeon_fence_signaled - check if a fence has signaled
 427 *
 428 * @fence: radeon fence object
 429 *
 430 * Check if the requested fence has signaled (all asics).
 431 * Returns true if the fence has signaled or false if it has not.
 432 */
 433bool radeon_fence_signaled(struct radeon_fence *fence)
 434{
 435	if (!fence)
 436		return true;
 437
 438	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
 439		int ret;
 440
 441		ret = dma_fence_signal(&fence->base);
 442		if (!ret)
 443			DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
 444		return true;
 
 
 
 
 
 
 
 
 
 445	}
 446	return false;
 
 447}
 448
/**
 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence numbers
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if any has signaled (current value is >= requested value)
 * or false if it has not. Helper function for radeon_fence_wait_seq.
 */
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
			return true;
	}
	return false;
}

/**
 * radeon_fence_wait_seq_timeout - wait for specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics).  Sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number.  Helper function
 * for radeon_fence_wait_*().
 * Returns remaining time if the sequence number has passed, 0 when
 * the wait timed out, or an error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
					  u64 *target_seq, bool intr,
					  long timeout)
{
	long r;
	int i;

	if (radeon_fence_any_seq_signaled(rdev, target_seq))
		return timeout;

	/* enable IRQs and tracing */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
		radeon_irq_kms_sw_irq_get(rdev, i);
	}

	if (intr) {
		r = wait_event_interruptible_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	} else {
		r = wait_event_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	}

	if (rdev->needs_reset)
		r = -EDEADLK;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		radeon_irq_kms_sw_irq_put(rdev, i);
		trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
	}

	return r;
}

 530
 531/**
 532 * radeon_fence_wait_timeout - wait for a fence to signal with timeout
 533 *
 534 * @fence: radeon fence object
 535 * @intr: use interruptible sleep
 536 *
 537 * Wait for the requested fence to signal (all asics).
 538 * @intr selects whether to use interruptable (true) or non-interruptable
 539 * (false) sleep when waiting for the fence.
 540 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 541 * Returns remaining time if the sequence number has passed, 0 when
 542 * the wait timeout, or an error for all other cases.
 543 */
 544long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout)
 545{
 546	uint64_t seq[RADEON_NUM_RINGS] = {};
 547	long r;
 548	int r_sig;
 549
 550	/*
 551	 * This function should not be called on !radeon fences.
 552	 * If this is the case, it would mean this function can
 553	 * also be called on radeon fences belonging to another card.
 554	 * exclusive_lock is not held in that case.
 555	 */
 556	if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
 557		return dma_fence_wait(&fence->base, intr);
 558
 559	seq[fence->ring] = fence->seq;
 560	r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
 561	if (r <= 0) {
 562		return r;
 563	}
 564
 565	r_sig = dma_fence_signal(&fence->base);
 566	if (!r_sig)
 567		DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
 568	return r;
 569}
 570
 571/**
 572 * radeon_fence_wait - wait for a fence to signal
 573 *
 574 * @fence: radeon fence object
 575 * @intr: use interruptible sleep
 576 *
 577 * Wait for the requested fence to signal (all asics).
 578 * @intr selects whether to use interruptable (true) or non-interruptable
 579 * (false) sleep when waiting for the fence.
 580 * Returns 0 if the fence has passed, error for all other cases.
 581 */
 582int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 583{
 584	long r = radeon_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
 585	if (r > 0) {
 586		return 0;
 587	} else {
 588		return r;
 589	}
 590}
 591
 592/**
 593 * radeon_fence_wait_any - wait for a fence to signal on any ring
 594 *
 595 * @rdev: radeon device pointer
 596 * @fences: radeon fence object(s)
 597 * @intr: use interruptable sleep
 598 *
 599 * Wait for any requested fence to signal (all asics).  Fence
 600 * array is indexed by ring id.  @intr selects whether to use
 601 * interruptable (true) or non-interruptable (false) sleep when
 602 * waiting for the fences. Used by the suballocator.
 603 * Returns 0 if any fence has passed, error for all other cases.
 604 */
 605int radeon_fence_wait_any(struct radeon_device *rdev,
 606			  struct radeon_fence **fences,
 607			  bool intr)
 608{
 609	uint64_t seq[RADEON_NUM_RINGS];
 610	unsigned i, num_rings = 0;
 611	long r;
 612
 613	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 614		seq[i] = 0;
 615
 616		if (!fences[i]) {
 617			continue;
 618		}
 619
 620		seq[i] = fences[i]->seq;
 621		++num_rings;
 622	}
 623
 624	/* nothing to wait for ? */
 625	if (num_rings == 0)
 626		return -ENOENT;
 627
 628	r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
 629	if (r < 0) {
 630		return r;
 631	}
 632	return 0;
 633}
 634
 635/**
 636 * radeon_fence_wait_next - wait for the next fence to signal
 637 *
 638 * @rdev: radeon device pointer
 639 * @ring: ring index the fence is associated with
 640 *
 641 * Wait for the next fence on the requested ring to signal (all asics).
 642 * Returns 0 if the next fence has passed, error for all other cases.
 643 * Caller must hold ring lock.
 644 */
 645int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
 646{
 647	uint64_t seq[RADEON_NUM_RINGS] = {};
 648	long r;
 
 649
 650	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
 651	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
 652		/* nothing to wait for, last_seq is
 653		   already the last emited fence */
 654		return -ENOENT;
 655	}
 656	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
 657	if (r < 0)
 658		return r;
 659	return 0;
 660}
 661
 662/**
 663 * radeon_fence_wait_empty - wait for all fences to signal
 664 *
 665 * @rdev: radeon device pointer
 666 * @ring: ring index the fence is associated with
 667 *
 668 * Wait for all fences on the requested ring to signal (all asics).
 669 * Returns 0 if the fences have passed, error for all other cases.
 670 * Caller must hold ring lock.
 671 */
 672int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
 673{
 674	uint64_t seq[RADEON_NUM_RINGS] = {};
 675	long r;
 676
 677	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
 678	if (!seq[ring])
 679		return 0;
 680
 681	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
 682	if (r < 0) {
 683		if (r == -EDEADLK)
 684			return -EDEADLK;
 685
 686		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
 687			ring, r);
 688	}
 689	return 0;
 
 
 
 
 
 
 690}
 691
/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	dma_fence_get(&fence->base);
	return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		dma_fence_put(&tmp->base);
	}
}

/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid a 32-bit wraparound */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics).  If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence) {
		return false;
	}

	if (fence->ring == dst_ring) {
		return false;
	}

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
		return false;
	}

	return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned i;

	if (!fence) {
		return;
	}

	if (fence->ring == dst_ring) {
		return;
	}

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring) {
			continue;
		}
		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}

/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		if (ring != R600_RING_TYPE_UVD_INDEX) {
			index = R600_WB_EVENT_OFFSET + ring * 4;
			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
							 index;

		} else {
			/* put fence directly behind firmware */
			index = ALIGN(rdev->uvd_fw->size, 8);
			rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
		}

	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	}
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].initialized = false;
	INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
			  radeon_fence_check_lockup);
	rdev->fence_drv[ring].rdev = rdev;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		r = radeon_fence_wait_empty(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev, ring);
		}
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 * @ring: the ring to complete
 *
 * In case of GPU reset failure, make sure no process keeps waiting on a
 * fence that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
{
	if (rdev->fence_drv[ring].initialized) {
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
	}
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		radeon_fence_process(rdev, i);

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted        0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

/**
 * radeon_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	down_read(&rdev->exclusive_lock);
	seq_printf(m, "%d\n", rdev->needs_reset);
	rdev->needs_reset = true;
	wake_up_all(&rdev->fence_queue);
	up_read(&rdev->exclusive_lock);

	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
	{"radeon_gpu_reset", &radeon_debugfs_gpu_reset, 0, NULL}
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 2);
#else
	return 0;
#endif
}

static const char *radeon_fence_get_driver_name(struct dma_fence *fence)
{
	return "radeon";
}

static const char *radeon_fence_get_timeline_name(struct dma_fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	switch (fence->ring) {
	case RADEON_RING_TYPE_GFX_INDEX: return "radeon.gfx";
	case CAYMAN_RING_TYPE_CP1_INDEX: return "radeon.cp1";
	case CAYMAN_RING_TYPE_CP2_INDEX: return "radeon.cp2";
	case R600_RING_TYPE_DMA_INDEX: return "radeon.dma";
	case CAYMAN_RING_TYPE_DMA1_INDEX: return "radeon.dma1";
	case R600_RING_TYPE_UVD_INDEX: return "radeon.uvd";
	case TN_RING_TYPE_VCE1_INDEX: return "radeon.vce1";
	case TN_RING_TYPE_VCE2_INDEX: return "radeon.vce2";
	default: WARN_ON_ONCE(1); return "radeon.unk";
	}
}

static inline bool radeon_test_signaled(struct radeon_fence *fence)
{
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}

struct radeon_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct radeon_wait_cb *wait =
		container_of(cb, struct radeon_wait_cb, base);

	wake_up_process(wait->task);
}

static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr,
					     signed long t)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;
	struct radeon_wait_cb cb;

	cb.task = current;

	if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
		return t;

	while (t > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		/*
		 * radeon_test_signaled must be called after
		 * set_current_state to prevent a race with wake_up_process
		 */
		if (radeon_test_signaled(fence))
			break;

		if (rdev->needs_reset) {
			t = -EDEADLK;
			break;
		}

		t = schedule_timeout(t);

		if (t > 0 && intr && signal_pending(current))
			t = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);
	dma_fence_remove_callback(f, &cb.base);

	return t;
}

const struct dma_fence_ops radeon_fence_ops = {
	.get_driver_name = radeon_fence_get_driver_name,
	.get_timeline_name = radeon_fence_get_timeline_name,
	.enable_signaling = radeon_fence_enable_signaling,
	.signaled = radeon_fence_is_signaled,
	.wait = radeon_fence_default_wait,
	.release = NULL,
};