v3.1: drivers/gpu/drm/radeon/radeon_fence.c
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
{
	if (rdev->wb.enabled) {
		u32 scratch_index;
		if (rdev->wb.use_event)
			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
		else
			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
		rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
	} else
		WREG32(rdev->fence_drv.scratch_reg, seq);
}

static u32 radeon_fence_read(struct radeon_device *rdev)
{
	u32 seq;

	if (rdev->wb.enabled) {
		u32 scratch_index;
		if (rdev->wb.use_event)
			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
		else
			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
		seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
	} else
		seq = RREG32(rdev->fence_drv.scratch_reg);
	return seq;
}

int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
	unsigned long irq_flags;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (fence->emited) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
	if (!rdev->cp.ready)
		/* FIXME: cp is not running, assume everything is done right
		 * away
		 */
		radeon_fence_write(rdev, fence->seq);
	else
		radeon_fence_ring_emit(rdev, fence);

	trace_radeon_fence_emit(rdev->ddev, fence->seq);
	fence->emited = true;
	list_move_tail(&fence->list, &rdev->fence_drv.emited);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	return 0;
}

static bool radeon_fence_poll_locked(struct radeon_device *rdev)
{
	struct radeon_fence *fence;
	struct list_head *i, *n;
	uint32_t seq;
	bool wake = false;
	unsigned long cjiffies;

	seq = radeon_fence_read(rdev);
	if (seq != rdev->fence_drv.last_seq) {
		rdev->fence_drv.last_seq = seq;
		rdev->fence_drv.last_jiffies = jiffies;
		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
	} else {
		cjiffies = jiffies;
		if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
			cjiffies -= rdev->fence_drv.last_jiffies;
			if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
				/* update the timeout */
				rdev->fence_drv.last_timeout -= cjiffies;
			} else {
				/* the 500ms timeout has elapsed, we should test
				 * for GPU lockup
				 */
				rdev->fence_drv.last_timeout = 1;
			}
		} else {
			/* wrap around; update last jiffies, we will just wait
			 * a little longer
			 */
			rdev->fence_drv.last_jiffies = cjiffies;
		}
		return false;
	}
	n = NULL;
	list_for_each(i, &rdev->fence_drv.emited) {
		fence = list_entry(i, struct radeon_fence, list);
		if (fence->seq == seq) {
			n = i;
			break;
		}
	}
	/* all fences previous to this one are considered signaled */
	if (n) {
		i = n;
		do {
			n = i->prev;
			list_move_tail(i, &rdev->fence_drv.signaled);
			fence = list_entry(i, struct radeon_fence, list);
			fence->signaled = true;
			i = n;
		} while (i != &rdev->fence_drv.emited);
		wake = true;
	}
	return wake;
}

static void radeon_fence_destroy(struct kref *kref)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
	list_del(&fence->list);
	fence->emited = false;
	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
	kfree(fence);
}

int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
{
	unsigned long irq_flags;

	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->emited = false;
	(*fence)->signaled = false;
	(*fence)->seq = 0;
	INIT_LIST_HEAD(&(*fence)->list);

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	return 0;
}


bool radeon_fence_signaled(struct radeon_fence *fence)
{
	unsigned long irq_flags;
	bool signaled = false;

	if (!fence)
		return true;

	if (fence->rdev->gpu_lockup)
		return true;

	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
	signaled = fence->signaled;
	/* if we are shutting down report all fences as signaled */
	if (fence->rdev->shutdown) {
		signaled = true;
	}
	if (!fence->emited) {
		WARN(1, "Querying an unemited fence : %p !\n", fence);
		signaled = true;
	}
	if (!signaled) {
		radeon_fence_poll_locked(fence->rdev);
		signaled = fence->signaled;
	}
	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
	return signaled;
}

int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	struct radeon_device *rdev;
	unsigned long irq_flags, timeout;
	u32 seq;
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return 0;
	}
	rdev = fence->rdev;
	if (radeon_fence_signaled(fence)) {
		return 0;
	}
	timeout = rdev->fence_drv.last_timeout;
retry:
	/* save current sequence used to check for GPU lockup */
	seq = rdev->fence_drv.last_seq;
	trace_radeon_fence_wait_begin(rdev->ddev, seq);
	if (intr) {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
				radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
		if (unlikely(r < 0)) {
			return r;
		}
	} else {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_timeout(rdev->fence_drv.queue,
			 radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
	}
	trace_radeon_fence_wait_end(rdev->ddev, seq);
	if (unlikely(!radeon_fence_signaled(fence))) {
		/* we were interrupted for some reason and fence
		 * isn't signaled yet, resume wait
		 */
		if (r) {
			timeout = r;
			goto retry;
		}
		/* don't protect read access to rdev->fence_drv.last_seq
		 * if we're experiencing a lockup the value doesn't change
		 */
		if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
			/* good news we believe it's a lockup */
			WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
			     fence->seq, seq);
			/* FIXME: what should we do ? marking everyone
			 * as signaled for now
			 */
			rdev->gpu_lockup = true;
			r = radeon_gpu_reset(rdev);
			if (r)
				return r;
			radeon_fence_write(rdev, fence->seq);
			rdev->gpu_lockup = false;
		}
		timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		rdev->fence_drv.last_jiffies = jiffies;
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		goto retry;
	}
	return 0;
}

int radeon_fence_wait_next(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (list_empty(&rdev->fence_drv.emited)) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv.emited.next,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

int radeon_fence_wait_last(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (list_empty(&rdev->fence_drv.emited)) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv.emited.prev,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}

void radeon_fence_process(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	bool wake;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	wake = radeon_fence_poll_locked(rdev);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	if (wake) {
		wake_up_all(&rdev->fence_drv.queue);
	}
}

int radeon_fence_driver_init(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int r;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
	if (r) {
		dev_err(rdev->dev, "fence failed to get scratch register\n");
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return r;
	}
	radeon_fence_write(rdev, 0);
	atomic_set(&rdev->fence_drv.seq, 0);
	INIT_LIST_HEAD(&rdev->fence_drv.created);
	INIT_LIST_HEAD(&rdev->fence_drv.emited);
	INIT_LIST_HEAD(&rdev->fence_drv.signaled);
	init_waitqueue_head(&rdev->fence_drv.queue);
	rdev->fence_drv.initialized = true;
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	unsigned long irq_flags;

	if (!rdev->fence_drv.initialized)
		return;
	wake_up_all(&rdev->fence_drv.queue);
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	rdev->fence_drv.initialized = false;
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fence *fence;

	seq_printf(m, "Last signaled fence 0x%08X\n",
		   radeon_fence_read(rdev));
	if (!list_empty(&rdev->fence_drv.emited)) {
		   fence = list_entry(rdev->fence_drv.emited.prev,
				      struct radeon_fence, list);
		   seq_printf(m, "Last emited fence %p with 0x%08X\n",
			      fence,  fence->seq);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}
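
For context before the v6.8 version below, here is a minimal sketch of how a v3.1-era caller might drive this API; the helper name is hypothetical, and command submission plus ring locking are abbreviated:

/*
 * Editor's sketch (hypothetical, not part of radeon_fence.c): create a
 * fence, emit it after queuing GPU work, then block until the GPU passes it.
 */
static int example_submit_and_wait(struct radeon_device *rdev)
{
	struct radeon_fence *fence;
	int r;

	r = radeon_fence_create(rdev, &fence);
	if (r)
		return r;
	/* ... write command packets to the ring here ... */
	r = radeon_fence_emit(rdev, fence);	/* queues the fence write */
	if (!r)
		r = radeon_fence_wait(fence, false);	/* uninterruptible */
	radeon_fence_unref(&fence);
	return r;
}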
v6.8: drivers/gpu/drm/radeon/radeon_fence.c
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */

#include <linux/atomic.h>
#include <linux/firmware.h>
#include <linux/kref.h>
#include <linux/sched/signal.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>

#include "radeon.h"
#include "radeon_reg.h"
#include "radeon_trace.h"

/*
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.  Whether
 * we use a scratch register or memory location depends on the asic
 * and whether writeback is enabled.
 */

/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr)
			*drv->cpu_addr = cpu_to_le32(seq);
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr)
			seq = le32_to_cpu(*drv->cpu_addr);
		else
			seq = lower_32_bits(atomic64_read(&drv->last_seq));
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}

/**
 * radeon_fence_schedule_check - schedule lockup check
 *
 * @rdev: radeon_device pointer
 * @ring: ring index we should work with
 *
 * Queues a delayed work item to check for lockups.
 */
static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
{
	/*
	 * Do not reset the timer here with mod_delayed_work,
	 * this can livelock in an interaction with TTM delayed destroy.
	 */
	queue_delayed_work(system_power_efficient_wq,
			   &rdev->fence_drv[ring].lockup_work,
			   RADEON_FENCE_JIFFIES_TIMEOUT);
}

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	u64 seq;

	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL)
		return -ENOMEM;

	(*fence)->rdev = rdev;
	(*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	(*fence)->is_vm_update = false;
	dma_fence_init(&(*fence)->base, &radeon_fence_ops,
		       &rdev->fence_queue.lock,
		       rdev->fence_context + ring,
		       seq);
	radeon_fence_ring_emit(rdev, ring, *fence);
	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
	radeon_fence_schedule_check(rdev, ring);
	return 0;
}
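
/*
 * Editor's sketch (hypothetical helper, not part of this file): in the v6.8
 * API the fence object is allocated by radeon_fence_emit() itself and is
 * bound to a single ring. A caller holding the ring emission mutex would do
 * roughly the following; command submission is abbreviated.
 */
static int example_emit_and_wait(struct radeon_device *rdev, int ring)
{
	struct radeon_fence *fence;
	int r;

	/* ... ring packets for the workload are written here ... */
	r = radeon_fence_emit(rdev, &fence, ring);
	if (r)
		return r;
	r = radeon_fence_wait(fence, false);	/* may return -EDEADLK on lockup */
	radeon_fence_unref(&fence);
	return r;
}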

/*
 * radeon_fence_check_signaled - callback from fence_queue
 *
 * this function is called with fence_queue lock held, which is also used
 * for the fence locking itself, so unlocked variants are used for
 * fence_signal, and remove_wait_queue.
 */
static int radeon_fence_check_signaled(wait_queue_entry_t *wait,
				       unsigned int mode, int flags, void *key)
{
	struct radeon_fence *fence;
	u64 seq;

	fence = container_of(wait, struct radeon_fence, fence_wake);

	/*
	 * We cannot use radeon_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
	if (seq >= fence->seq) {
		dma_fence_signal_locked(&fence->base);
		radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
		__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
		dma_fence_put(&fence->base);
	}
	return 0;
}

/**
 * radeon_fence_activity - check for fence activity
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Returns true if activity occurred
 * on the ring, and the fence_queue should be woken up.
 */
static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned int count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process and another
	 * process needs to update the last_seq between the atomic read and
	 * xchg of the current process.
	 *
	 * Moreover, for this to go into an infinite loop there need to be
	 * continuously new fences signaled, ie. radeon_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that xchg the last_seq
	 * between the atomic read and xchg of the current process. And the
	 * value the other process set as last seq must be higher than
	 * the seq value we just read. Which means that the current process
	 * needs to be interrupted after radeon_fence_read and before the
	 * atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * we bail after 10 loops, just accepting the fact that we might
	 * have temporarily set the last_seq not to the true real last
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted)
			break;

		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times, leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (seq < last_emitted)
		radeon_fence_schedule_check(rdev, ring);

	return wake;
}
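
/*
 * Editor's sketch (hypothetical, not part of this file): the wraparound
 * handling above, isolated. The hardware reports only the low 32 bits of
 * the sequence; the upper half is inherited from the last known 64-bit
 * value, and taken from last_emitted instead when the low half has wrapped.
 */
static inline uint64_t example_extend_seq32(uint32_t hw_seq, uint64_t last_seq,
					    uint64_t last_emitted)
{
	uint64_t seq = hw_seq | (last_seq & 0xffffffff00000000ULL);

	if (seq < last_seq) {
		/* low 32 bits wrapped: borrow the upper half of last_emitted */
		seq &= 0xffffffffULL;
		seq |= last_emitted & 0xffffffff00000000ULL;
	}
	return seq;
}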

/**
 * radeon_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and if there is none probe
 * the hardware if a lockup occurred.
 */
static void radeon_fence_check_lockup(struct work_struct *work)
{
	struct radeon_fence_driver *fence_drv;
	struct radeon_device *rdev;
	int ring;

	fence_drv = container_of(work, struct radeon_fence_driver,
				 lockup_work.work);
	rdev = fence_drv->rdev;
	ring = fence_drv - &rdev->fence_drv[0];

	if (!down_read_trylock(&rdev->exclusive_lock)) {
		/* just reschedule the check if a reset is going on */
		radeon_fence_schedule_check(rdev, ring);
		return;
	}

	if (fence_drv->delayed_irq && rdev->irq.installed) {
		unsigned long irqflags;

		fence_drv->delayed_irq = false;
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}

	if (radeon_fence_activity(rdev, ring))
		wake_up_all(&rdev->fence_queue);

	else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {

		/* good news we believe it's a lockup */
		dev_warn(rdev->dev, "GPU lockup (current fence id 0x%016llx last fence id 0x%016llx on ring %d)\n",
			 (uint64_t)atomic64_read(&fence_drv->last_seq),
			 fence_drv->sync_seq[ring], ring);

		/* remember that we need a reset */
		rdev->needs_reset = true;
		wake_up_all(&rdev->fence_queue);
	}
	up_read(&rdev->exclusive_lock);
}

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	if (radeon_fence_activity(rdev, ring))
		wake_up_all(&rdev->fence_queue);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value).  Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned int ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
		return true;

	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
		return true;

	return false;
}

static bool radeon_fence_is_signaled(struct dma_fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;
	unsigned int ring = fence->ring;
	u64 seq = fence->seq;

	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
		return true;

	if (down_read_trylock(&rdev->exclusive_lock)) {
		radeon_fence_process(rdev, ring);
		up_read(&rdev->exclusive_lock);

		if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
			return true;
	}
	return false;
}

/**
 * radeon_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool radeon_fence_enable_signaling(struct dma_fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;

	if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
		return false;

	if (down_read_trylock(&rdev->exclusive_lock)) {
		radeon_irq_kms_sw_irq_get(rdev, fence->ring);

		if (radeon_fence_activity(rdev, fence->ring))
			wake_up_all_locked(&rdev->fence_queue);

		/* did fence get signaled after we enabled the sw irq? */
		if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
			radeon_irq_kms_sw_irq_put(rdev, fence->ring);
			up_read(&rdev->exclusive_lock);
			return false;
		}

		up_read(&rdev->exclusive_lock);
	} else {
		/* we're probably in a lockup, let's not fiddle too much */
		if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
			rdev->fence_drv[fence->ring].delayed_irq = true;
		radeon_fence_schedule_check(rdev, fence->ring);
	}

	fence->fence_wake.flags = 0;
	fence->fence_wake.private = NULL;
	fence->fence_wake.func = radeon_fence_check_signaled;
	__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
	dma_fence_get(f);
	return true;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence)
		return true;

	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		dma_fence_signal(&fence->base);
		return true;
	}
	return false;
}

/**
 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence numbers
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if any has signaled (current value is >= requested value)
 * or false if it has not. Helper function for radeon_fence_wait_seq.
 */
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
			return true;
	}
	return false;
}

/**
 * radeon_fence_wait_seq_timeout - wait for specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics).  Sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number.  Helper function
 * for radeon_fence_wait_*().
 * Returns remaining time if the sequence number has passed, 0 when
 * the wait timed out, or an error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
					  u64 *target_seq, bool intr,
					  long timeout)
{
	long r;
	int i;

	if (radeon_fence_any_seq_signaled(rdev, target_seq))
		return timeout;

	/* enable IRQs and tracing */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
		radeon_irq_kms_sw_irq_get(rdev, i);
	}

	if (intr) {
		r = wait_event_interruptible_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	} else {
		r = wait_event_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			 || rdev->needs_reset), timeout);
	}

	if (rdev->needs_reset)
		r = -EDEADLK;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		radeon_irq_kms_sw_irq_put(rdev, i);
		trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
	}

	return r;
}

/**
 * radeon_fence_wait_timeout - wait for a fence to signal with timeout
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns remaining time if the sequence number has passed, 0 when
 * the wait timed out, or an error for all other cases.
 */
long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	long r;

	/*
	 * This function should not be called on !radeon fences.
	 * If this is the case, it would mean this function can
	 * also be called on radeon fences belonging to another card.
	 * exclusive_lock is not held in that case.
	 */
	if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
		return dma_fence_wait(&fence->base, intr);

	seq[fence->ring] = fence->seq;
	r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
	if (r <= 0)
		return r;

	dma_fence_signal(&fence->base);
	return r;
}

/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	long r = radeon_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);

	if (r > 0)
		return 0;
	else
		return r;
}

/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics).  Fence
 * array is indexed by ring id.  @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned int i, num_rings = 0;
	long r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i])
			continue;

		seq[i] = fences[i]->seq;
		++num_rings;
	}

	/* nothing to wait for ? */
	if (num_rings == 0)
		return -ENOENT;

	r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	return 0;
}

/**
 * radeon_fence_wait_next - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	long r;

	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is already
		 * the last emitted fence
		 */
		return -ENOENT;
	}

	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	return 0;
}

/**
 * radeon_fence_wait_empty - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	long r;

	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
	if (!seq[ring])
		return 0;

	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		if (r == -EDEADLK)
			return -EDEADLK;

		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
			ring, r);
	}
	return 0;
}

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	dma_fence_get(&fence->base);
	return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp)
		dma_fence_put(&tmp->base);
}

/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid 32-bit wraparound */
	if (emitted > 0x10000000)
		emitted = 0x10000000;

	return (unsigned int)emitted;
}

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics).  If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence)
		return false;

	if (fence->ring == dst_ring)
		return false;

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring])
		return false;

	return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned int i;

	if (!fence)
		return;

	if (fence->ring == dst_ring)
		return;

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring)
			continue;

		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
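
/*
 * Editor's sketch (hypothetical, not part of this file): how the two sync
 * helpers above pair up in a caller that makes dst_ring wait for a fence
 * from another ring. The semaphore emission itself is abbreviated.
 */
static void example_sync_to_ring(struct radeon_fence *fence, int dst_ring)
{
	if (!radeon_fence_need_sync(fence, dst_ring))
		return;		/* NULL, same ring, or already synced far enough */

	/* ... emit a semaphore wait on dst_ring for fence->seq here ... */

	radeon_fence_note_sync(fence, dst_ring);	/* record the sync point */
}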

/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		if (ring != R600_RING_TYPE_UVD_INDEX) {
			index = R600_WB_EVENT_OFFSET + ring * 4;
			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
							 index;

		} else {
			/* put fence directly behind firmware */
			index = ALIGN(rdev->uvd_fw->size, 8);
			rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
		}

	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	}
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx\n",
		 ring, rdev->fence_drv[ring].gpu_addr);
	return 0;
}

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].initialized = false;
	INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
			  radeon_fence_check_lockup);
	rdev->fence_drv[ring].rdev = rdev;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 */
void radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++)
		radeon_fence_driver_init_ring(rdev, ring);

	radeon_debugfs_fence_init(rdev);
}

/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		r = radeon_fence_wait_empty(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev, ring);
		}
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 * @ring: the ring to complete
 *
 * In case of GPU reset failure make sure no process keeps waiting on a fence
 * that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
{
	if (rdev->fence_drv[ring].initialized) {
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
	}
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info_show(struct seq_file *m, void *data)
{
	struct radeon_device *rdev = m->private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		radeon_fence_process(rdev, i);

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted        0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

/*
 * radeon_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int radeon_debugfs_gpu_reset(void *data, u64 *val)
{
	struct radeon_device *rdev = (struct radeon_device *)data;

	down_read(&rdev->exclusive_lock);
	*val = rdev->needs_reset;
	rdev->needs_reset = true;
	wake_up_all(&rdev->fence_queue);
	up_read(&rdev->exclusive_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_fence_info);
DEFINE_DEBUGFS_ATTRIBUTE(radeon_debugfs_gpu_reset_fops,
			 radeon_debugfs_gpu_reset, NULL, "%lld\n");
#endif

void radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	struct dentry *root = rdev->ddev->primary->debugfs_root;

	debugfs_create_file("radeon_gpu_reset", 0444, root, rdev,
			    &radeon_debugfs_gpu_reset_fops);
	debugfs_create_file("radeon_fence_info", 0444, root, rdev,
			    &radeon_debugfs_fence_info_fops);

#endif
}

static const char *radeon_fence_get_driver_name(struct dma_fence *fence)
{
	return "radeon";
}

static const char *radeon_fence_get_timeline_name(struct dma_fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);

	switch (fence->ring) {
	case RADEON_RING_TYPE_GFX_INDEX: return "radeon.gfx";
	case CAYMAN_RING_TYPE_CP1_INDEX: return "radeon.cp1";
	case CAYMAN_RING_TYPE_CP2_INDEX: return "radeon.cp2";
	case R600_RING_TYPE_DMA_INDEX: return "radeon.dma";
	case CAYMAN_RING_TYPE_DMA1_INDEX: return "radeon.dma1";
	case R600_RING_TYPE_UVD_INDEX: return "radeon.uvd";
	case TN_RING_TYPE_VCE1_INDEX: return "radeon.vce1";
	case TN_RING_TYPE_VCE2_INDEX: return "radeon.vce2";
	default:
		WARN_ON_ONCE(1);
		return "radeon.unk";
	}
}

static inline bool radeon_test_signaled(struct radeon_fence *fence)
{
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}

struct radeon_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct radeon_wait_cb *wait =
		container_of(cb, struct radeon_wait_cb, base);

	wake_up_process(wait->task);
}

static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr,
					     signed long t)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;
	struct radeon_wait_cb cb;

	cb.task = current;

	if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
		return t;

	while (t > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		/*
		 * radeon_test_signaled must be called after
		 * set_current_state to prevent a race with wake_up_process
		 */
		if (radeon_test_signaled(fence))
			break;

		if (rdev->needs_reset) {
			t = -EDEADLK;
			break;
		}

		t = schedule_timeout(t);

		if (t > 0 && intr && signal_pending(current))
			t = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);
	dma_fence_remove_callback(f, &cb.base);

	return t;
}

const struct dma_fence_ops radeon_fence_ops = {
	.get_driver_name = radeon_fence_get_driver_name,
	.get_timeline_name = radeon_fence_get_timeline_name,
	.enable_signaling = radeon_fence_enable_signaling,
	.signaled = radeon_fence_is_signaled,
	.wait = radeon_fence_default_wait,
	.release = NULL,
};
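
Since these fences are plain dma_fence objects in v6.8, a consumer outside the driver can wait on them through the generic API; a minimal sketch follows (the helper name is hypothetical, and only the generic dma-fence calls are assumed):

#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Hypothetical consumer: wait up to one second, interruptibly, for any
 * dma_fence, including the radeon fences above. Returns 0 once signaled,
 * -ETIME on timeout, or a negative error such as -ERESTARTSYS.
 */
static int example_wait_one_second(struct dma_fence *f)
{
	long t = dma_fence_wait_timeout(f, true, msecs_to_jiffies(1000));

	if (t < 0)
		return t;
	return t ? 0 : -ETIME;
}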