v4.6 - drivers/gpu/drm/radeon/radeon_fence.c
   1/*
   2 * Copyright 2009 Jerome Glisse.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sub license, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  20 *
  21 * The above copyright notice and this permission notice (including the
  22 * next paragraph) shall be included in all copies or substantial portions
  23 * of the Software.
  24 *
  25 */
  26/*
  27 * Authors:
  28 *    Jerome Glisse <glisse@freedesktop.org>
  29 *    Dave Airlie
  30 */
  31#include <linux/seq_file.h>
  32#include <linux/atomic.h>
  33#include <linux/wait.h>
   34#include <linux/kref.h>
   35#include <linux/slab.h>
  36#include <linux/firmware.h>
  37#include <drm/drmP.h>
   38#include "radeon_reg.h"
   39#include "radeon.h"
   40#include "radeon_trace.h"
  41
  42/*
  43 * Fences
   44 * Fences mark an event in the GPU's pipeline and are used
   45 * for GPU/CPU synchronization.  When the fence is written,
   46 * it is expected that all buffers associated with that fence
   47 * are no longer in use by the associated ring on the GPU and
   48 * that the relevant GPU caches have been flushed.  Whether
  49 * we use a scratch register or memory location depends on the asic
  50 * and whether writeback is enabled.
  51 */
  52
  53/**
  54 * radeon_fence_write - write a fence value
  55 *
  56 * @rdev: radeon_device pointer
  57 * @seq: sequence number to write
  58 * @ring: ring index the fence is associated with
  59 *
  60 * Writes a fence value to memory or a scratch register (all asics).
  61 */
  62static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
  63{
  64	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
  65	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
  66		if (drv->cpu_addr) {
  67			*drv->cpu_addr = cpu_to_le32(seq);
  68		}
  69	} else {
  70		WREG32(drv->scratch_reg, seq);
  71	}
  72}
  73
  74/**
  75 * radeon_fence_read - read a fence value
  76 *
  77 * @rdev: radeon_device pointer
  78 * @ring: ring index the fence is associated with
  79 *
  80 * Reads a fence value from memory or a scratch register (all asics).
  81 * Returns the value of the fence read from memory or register.
  82 */
  83static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
  84{
  85	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
  86	u32 seq = 0;
  87
  88	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
  89		if (drv->cpu_addr) {
  90			seq = le32_to_cpu(*drv->cpu_addr);
  91		} else {
  92			seq = lower_32_bits(atomic64_read(&drv->last_seq));
  93		}
  94	} else {
  95		seq = RREG32(drv->scratch_reg);
  96	}
  97	return seq;
  98}
  99
 100/**
 101 * radeon_fence_schedule_check - schedule lockup check
 102 *
 103 * @rdev: radeon_device pointer
 104 * @ring: ring index we should work with
 105 *
 106 * Queues a delayed work item to check for lockups.
 107 */
 108static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
 109{
 110	/*
 111	 * Do not reset the timer here with mod_delayed_work,
 112	 * this can livelock in an interaction with TTM delayed destroy.
 113	 */
 114	queue_delayed_work(system_power_efficient_wq,
 115			   &rdev->fence_drv[ring].lockup_work,
 116			   RADEON_FENCE_JIFFIES_TIMEOUT);
 117}
 118
 119/**
 120 * radeon_fence_emit - emit a fence on the requested ring
 121 *
 122 * @rdev: radeon_device pointer
 123 * @fence: radeon fence object
 124 * @ring: ring index the fence is associated with
 125 *
 126 * Emits a fence command on the requested ring (all asics).
 127 * Returns 0 on success, -ENOMEM on failure.
 128 */
 129int radeon_fence_emit(struct radeon_device *rdev,
 130		      struct radeon_fence **fence,
 131		      int ring)
 132{
 133	u64 seq;
 134
 135	/* we are protected by the ring emission mutex */
 136	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
 137	if ((*fence) == NULL) {
 138		return -ENOMEM;
 139	}
 140	(*fence)->rdev = rdev;
 141	(*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
 142	(*fence)->ring = ring;
 143	(*fence)->is_vm_update = false;
 144	fence_init(&(*fence)->base, &radeon_fence_ops,
  145		   &rdev->fence_queue.lock, rdev->fence_context + ring, seq);
  146	radeon_fence_ring_emit(rdev, ring, *fence);
 147	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
 148	radeon_fence_schedule_check(rdev, ring);
 149	return 0;
 150}
 151
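/*
 * A minimal sketch of the calling pattern around radeon_fence_emit()
 * (illustrative, not from this file): the caller takes the ring
 * emission mutex via radeon_ring_lock(), writes its commands, emits
 * the fence, and commits. Error handling is trimmed and the
 * command-writing step is elided.
 */
static int example_submit(struct radeon_device *rdev, int ring_idx,
			  struct radeon_fence **fence)
{
	struct radeon_ring *ring = &rdev->ring[ring_idx];
	int r;

	r = radeon_ring_lock(rdev, ring, 64);	/* reserve up to 64 dwords */
	if (r)
		return r;
	/* ... write command packets to the ring here ... */
	r = radeon_fence_emit(rdev, fence, ring_idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}
	radeon_ring_unlock_commit(rdev, ring, false);
	return 0;
}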
 152/**
 153 * radeon_fence_check_signaled - callback from fence_queue
 154 *
  155 * This function is called with the fence_queue lock held, which is also
  156 * used for the fence locking itself, so unlocked variants are used for
  157 * fence_signal and remove_wait_queue.
 158 */
 159static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
 160{
 161	struct radeon_fence *fence;
 162	u64 seq;
 163
 164	fence = container_of(wait, struct radeon_fence, fence_wake);
 165
 166	/*
 167	 * We cannot use radeon_fence_process here because we're already
 168	 * in the waitqueue, in a call from wake_up_all.
 169	 */
 170	seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
 171	if (seq >= fence->seq) {
 172		int ret = fence_signal_locked(&fence->base);
 173
 174		if (!ret)
 175			FENCE_TRACE(&fence->base, "signaled from irq context\n");
 176		else
 177			FENCE_TRACE(&fence->base, "was already signaled\n");
 178
 179		radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
 180		__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
 181		fence_put(&fence->base);
 182	} else
 183		FENCE_TRACE(&fence->base, "pending\n");
 184	return 0;
 185}
 186
 187/**
 188 * radeon_fence_activity - check for fence activity
 189 *
 190 * @rdev: radeon_device pointer
 191 * @ring: ring index the fence is associated with
 192 *
 193 * Checks the current fence value and calculates the last
  194 * signaled fence value. Returns true if activity occurred
  195 * on the ring and the fence_queue should be woken up.
 196 */
 197static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
 198{
 199	uint64_t seq, last_seq, last_emitted;
 200	unsigned count_loop = 0;
 201	bool wake = false;
 202
  203	/* Note there is a scenario here for an infinite loop, but it's
  204	 * very unlikely to happen. For it to happen, the current polling
  205	 * process needs to be interrupted by another process, and that other
  206	 * process needs to update last_seq between the atomic read and the
  207	 * xchg of the current process.
  208	 *
  209	 * Moreover, for this to become an infinite loop, new fences need to
  210	 * be signaled continuously, i.e. radeon_fence_read needs
  211	 * to return a different value each time for both the currently
  212	 * polling process and the other process that updates last_seq
  213	 * between the atomic read and xchg of the current process. And the
  214	 * value the other process sets as last_seq must be higher than
  215	 * the seq value we just read, which means the current process
  216	 * must be interrupted after radeon_fence_read and before the
  217	 * atomic xchg.
  218	 *
  219	 * To be even safer, we count the number of times we loop and
  220	 * bail out after 10 loops, accepting the fact that we might
  221	 * have temporarily set last_seq not to the real last
  222	 * seq but to an older one.
  223	 */
 224	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
 225	do {
 226		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
 227		seq = radeon_fence_read(rdev, ring);
 228		seq |= last_seq & 0xffffffff00000000LL;
 229		if (seq < last_seq) {
 230			seq &= 0xffffffff;
 231			seq |= last_emitted & 0xffffffff00000000LL;
 232		}
 233
 234		if (seq <= last_seq || seq > last_emitted) {
 235			break;
 236		}
  237		/* If we loop again we don't want to return without
  238		 * checking if a fence is signaled, as it means that the
  239		 * seq we just read is different from the previous one.
  240		 */
 241		wake = true;
 242		last_seq = seq;
 243		if ((count_loop++) > 10) {
  244			/* We looped too many times; leave, accepting
  245			 * that we might have set an older fence
  246			 * seq than the current real last seq signaled
  247			 * by the hw.
  248			 */
 249			break;
 250		}
 251	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
 252
 253	if (seq < last_emitted)
 254		radeon_fence_schedule_check(rdev, ring);
 255
 256	return wake;
 257}
 258
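/*
 * The hardware fence location only holds the low 32 bits of the
 * sequence number; radeon_fence_activity() above extends each read to
 * 64 bits using the last known and last emitted values. A
 * self-contained sketch of that extension (illustrative, not from
 * this file):
 */
static uint64_t example_extend_seq(uint32_t hw_seq, uint64_t last_seq,
				   uint64_t last_emitted)
{
	uint64_t seq = hw_seq | (last_seq & 0xffffffff00000000ULL);

	if (seq < last_seq) {
		/* the 32-bit counter wrapped: take the upper word from
		 * the last emitted sequence instead */
		seq = hw_seq | (last_emitted & 0xffffffff00000000ULL);
	}
	return seq;
}
/*
 * For example, with last_seq = 0x1fffffff0, last_emitted = 0x200000010
 * and a hardware read of 0x5, the wrap is detected and the extended
 * result is 0x200000005.
 */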
 259/**
 260 * radeon_fence_check_lockup - check for hardware lockup
 261 *
 262 * @work: delayed work item
 263 *
  264 * Checks for fence activity and, if there is none, probes
  265 * the hardware to see whether a lockup occurred.
 266 */
 267static void radeon_fence_check_lockup(struct work_struct *work)
 268{
 269	struct radeon_fence_driver *fence_drv;
 270	struct radeon_device *rdev;
 271	int ring;
 272
 273	fence_drv = container_of(work, struct radeon_fence_driver,
 274				 lockup_work.work);
 275	rdev = fence_drv->rdev;
 276	ring = fence_drv - &rdev->fence_drv[0];
 277
 278	if (!down_read_trylock(&rdev->exclusive_lock)) {
 279		/* just reschedule the check if a reset is going on */
 280		radeon_fence_schedule_check(rdev, ring);
 281		return;
 282	}
 283
 284	if (fence_drv->delayed_irq && rdev->ddev->irq_enabled) {
 285		unsigned long irqflags;
 286
 287		fence_drv->delayed_irq = false;
 288		spin_lock_irqsave(&rdev->irq.lock, irqflags);
 289		radeon_irq_set(rdev);
 290		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
 291	}
 292
 293	if (radeon_fence_activity(rdev, ring))
 294		wake_up_all(&rdev->fence_queue);
 295
 296	else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
 297
 298		/* good news we believe it's a lockup */
 299		dev_warn(rdev->dev, "GPU lockup (current fence id "
 300			 "0x%016llx last fence id 0x%016llx on ring %d)\n",
 301			 (uint64_t)atomic64_read(&fence_drv->last_seq),
 302			 fence_drv->sync_seq[ring], ring);
 303
  304		/* remember that we need a reset */
 305		rdev->needs_reset = true;
 306		wake_up_all(&rdev->fence_queue);
 307	}
 308	up_read(&rdev->exclusive_lock);
 309}
 310
 311/**
 312 * radeon_fence_process - process a fence
 313 *
 314 * @rdev: radeon_device pointer
 315 * @ring: ring index the fence is associated with
 316 *
 317 * Checks the current fence value and wakes the fence queue
 318 * if the sequence number has increased (all asics).
 319 */
 320void radeon_fence_process(struct radeon_device *rdev, int ring)
 321{
 322	if (radeon_fence_activity(rdev, ring))
 323		wake_up_all(&rdev->fence_queue);
 324}
 325
 326/**
 327 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 328 *
 329 * @rdev: radeon device pointer
 330 * @seq: sequence number
 331 * @ring: ring index the fence is associated with
 332 *
  333 * Check if the last signaled fence sequence number is >= the requested
  334 * sequence number (all asics).
  335 * Returns true if the fence has signaled (current fence value
  336 * is >= requested value) or false if it has not (current fence
  337 * value is < the requested value).  Helper function for
 338 * radeon_fence_signaled().
 339 */
 340static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
 341				      u64 seq, unsigned ring)
 342{
 343	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
 344		return true;
 345	}
 346	/* poll new last sequence at least once */
 347	radeon_fence_process(rdev, ring);
 348	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
 349		return true;
 350	}
 351	return false;
 352}
 353
 354static bool radeon_fence_is_signaled(struct fence *f)
 355{
 356	struct radeon_fence *fence = to_radeon_fence(f);
 357	struct radeon_device *rdev = fence->rdev;
 358	unsigned ring = fence->ring;
 359	u64 seq = fence->seq;
 360
 361	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
 362		return true;
 363	}
 364
 365	if (down_read_trylock(&rdev->exclusive_lock)) {
 366		radeon_fence_process(rdev, ring);
 367		up_read(&rdev->exclusive_lock);
 368
 369		if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
 370			return true;
 371		}
 372	}
 373	return false;
 374}
 375
 376/**
 377 * radeon_fence_enable_signaling - enable signalling on fence
 378 * @fence: fence
 379 *
 380 * This function is called with fence_queue lock held, and adds a callback
 381 * to fence_queue that checks if this fence is signaled, and if so it
 382 * signals the fence and removes itself.
 383 */
 384static bool radeon_fence_enable_signaling(struct fence *f)
 385{
 386	struct radeon_fence *fence = to_radeon_fence(f);
 387	struct radeon_device *rdev = fence->rdev;
 388
 389	if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
 390		return false;
 391
 392	if (down_read_trylock(&rdev->exclusive_lock)) {
 393		radeon_irq_kms_sw_irq_get(rdev, fence->ring);
 394
 395		if (radeon_fence_activity(rdev, fence->ring))
 396			wake_up_all_locked(&rdev->fence_queue);
 397
 398		/* did fence get signaled after we enabled the sw irq? */
 399		if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
 400			radeon_irq_kms_sw_irq_put(rdev, fence->ring);
 401			up_read(&rdev->exclusive_lock);
 402			return false;
 403		}
 404
 405		up_read(&rdev->exclusive_lock);
 406	} else {
  407		/* we're probably in a lockup, let's not fiddle too much */
 408		if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
 409			rdev->fence_drv[fence->ring].delayed_irq = true;
 410		radeon_fence_schedule_check(rdev, fence->ring);
 411	}
 412
 413	fence->fence_wake.flags = 0;
 414	fence->fence_wake.private = NULL;
 415	fence->fence_wake.func = radeon_fence_check_signaled;
 416	__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
 417	fence_get(f);
 418
 419	FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
 420	return true;
 421}
 422
 423/**
 424 * radeon_fence_signaled - check if a fence has signaled
 425 *
 426 * @fence: radeon fence object
 427 *
 428 * Check if the requested fence has signaled (all asics).
 429 * Returns true if the fence has signaled or false if it has not.
 430 */
 431bool radeon_fence_signaled(struct radeon_fence *fence)
 432{
 433	if (!fence)
 434		return true;
 435
 436	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
 437		int ret;
 438
 439		ret = fence_signal(&fence->base);
 440		if (!ret)
 441			FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
 442		return true;
 443	}
 444	return false;
 445}
 446
 447/**
 448 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
 449 *
 450 * @rdev: radeon device pointer
 451 * @seq: sequence numbers
 452 *
  453 * Check if the last signaled fence sequence number is >= the requested
 454 * sequence number (all asics).
 455 * Returns true if any has signaled (current value is >= requested value)
 456 * or false if it has not. Helper function for radeon_fence_wait_seq.
 457 */
 458static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
 459{
 460	unsigned i;
 461
 462	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 463		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
 464			return true;
 465	}
 466	return false;
 467}
 468
 469/**
  470 * radeon_fence_wait_seq_timeout - wait for specific sequence numbers
  471 *
  472 * @rdev: radeon device pointer
  473 * @target_seq: sequence number(s) we want to wait for
  474 * @intr: use interruptible sleep
  475 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
  476 *
  477 * Wait for the requested sequence number(s) to be written by any ring
  478 * (all asics).  Sequence number array is indexed by ring id.
  479 * @intr selects whether to use interruptible (true) or non-interruptible
  480 * (false) sleep when waiting for the sequence number.  Helper function
  481 * for radeon_fence_wait_*().
  482 * Returns the remaining time if the sequence number has passed, 0 if
  483 * the wait timed out, or an error for all other cases.
 484 * -EDEADLK is returned when a GPU lockup has been detected.
 485 */
 486static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
 487					  u64 *target_seq, bool intr,
 488					  long timeout)
 489{
 490	long r;
 491	int i;
 492
 493	if (radeon_fence_any_seq_signaled(rdev, target_seq))
 494		return timeout;
 495
 496	/* enable IRQs and tracing */
 497	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 498		if (!target_seq[i])
 499			continue;
 500
 501		trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
 502		radeon_irq_kms_sw_irq_get(rdev, i);
 503	}
 504
 505	if (intr) {
 506		r = wait_event_interruptible_timeout(rdev->fence_queue, (
 507			radeon_fence_any_seq_signaled(rdev, target_seq)
 508			 || rdev->needs_reset), timeout);
 509	} else {
 510		r = wait_event_timeout(rdev->fence_queue, (
 511			radeon_fence_any_seq_signaled(rdev, target_seq)
 512			 || rdev->needs_reset), timeout);
 513	}
 514
 515	if (rdev->needs_reset)
 516		r = -EDEADLK;
 517
 518	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 519		if (!target_seq[i])
 520			continue;
 521
 522		radeon_irq_kms_sw_irq_put(rdev, i);
 523		trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
 524	}
 525
 526	return r;
 527}
 528
 529/**
 530 * radeon_fence_wait_timeout - wait for a fence to signal with timeout
 531 *
 532 * @fence: radeon fence object
  533 * @intr: use interruptible sleep
  534 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
  535 *
  536 * Wait for the requested fence to signal (all asics).
  537 * @intr selects whether to use interruptible (true) or non-interruptible
  538 * (false) sleep when waiting for the fence.
  539 * Returns the remaining time if the fence has signaled, 0 if
  540 * the wait timed out, or an error for all other cases.
 541 */
 542long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout)
 543{
 544	uint64_t seq[RADEON_NUM_RINGS] = {};
 545	long r;
 546	int r_sig;
 547
 548	/*
 549	 * This function should not be called on !radeon fences.
 550	 * If this is the case, it would mean this function can
 551	 * also be called on radeon fences belonging to another card.
 552	 * exclusive_lock is not held in that case.
 553	 */
 554	if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
 555		return fence_wait(&fence->base, intr);
 556
 557	seq[fence->ring] = fence->seq;
 558	r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
 559	if (r <= 0) {
 560		return r;
 561	}
 562
 563	r_sig = fence_signal(&fence->base);
 564	if (!r_sig)
 565		FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
 566	return r;
 567}
 568
 569/**
 570 * radeon_fence_wait - wait for a fence to signal
 571 *
 572 * @fence: radeon fence object
 573 * @intr: use interruptible sleep
 574 *
 575 * Wait for the requested fence to signal (all asics).
  576 * @intr selects whether to use interruptible (true) or non-interruptible
 577 * (false) sleep when waiting for the fence.
 578 * Returns 0 if the fence has passed, error for all other cases.
 579 */
 580int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 581{
 582	long r = radeon_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
 583	if (r > 0) {
 584		return 0;
 585	} else {
 586		return r;
 587	}
 588}
 589
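/*
 * Sketch of a blocking consumer (illustrative, not from this file):
 * wait interruptibly on a fence obtained from radeon_fence_emit() and
 * drop the reference afterwards. A -EDEADLK return means a lockup was
 * detected and a GPU reset is pending.
 */
static int example_sync_to_fence(struct radeon_fence *fence)
{
	int r;

	r = radeon_fence_wait(fence, true);	/* interruptible sleep */
	radeon_fence_unref(&fence);		/* drop our reference either way */
	return r;				/* 0, -ERESTARTSYS, -EDEADLK, ... */
}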
 590/**
 591 * radeon_fence_wait_any - wait for a fence to signal on any ring
 592 *
 593 * @rdev: radeon device pointer
 594 * @fences: radeon fence object(s)
  595 * @intr: use interruptible sleep
 596 *
 597 * Wait for any requested fence to signal (all asics).  Fence
 598 * array is indexed by ring id.  @intr selects whether to use
  600 * interruptible (true) or non-interruptible (false) sleep when
 600 * waiting for the fences. Used by the suballocator.
 601 * Returns 0 if any fence has passed, error for all other cases.
 602 */
 603int radeon_fence_wait_any(struct radeon_device *rdev,
 604			  struct radeon_fence **fences,
 605			  bool intr)
 606{
 607	uint64_t seq[RADEON_NUM_RINGS];
 608	unsigned i, num_rings = 0;
 609	long r;
 610
 611	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 612		seq[i] = 0;
 613
 614		if (!fences[i]) {
 615			continue;
 616		}
 617
 618		seq[i] = fences[i]->seq;
 619		++num_rings;
 620	}
 621
  622	/* nothing to wait for? */
 623	if (num_rings == 0)
 624		return -ENOENT;
 625
 626	r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
 627	if (r < 0) {
 628		return r;
 629	}
 630	return 0;
 631}
 632
 633/**
 634 * radeon_fence_wait_next - wait for the next fence to signal
 635 *
 636 * @rdev: radeon device pointer
 637 * @ring: ring index the fence is associated with
 638 *
 639 * Wait for the next fence on the requested ring to signal (all asics).
 640 * Returns 0 if the next fence has passed, error for all other cases.
 641 * Caller must hold ring lock.
 642 */
 643int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
 644{
 645	uint64_t seq[RADEON_NUM_RINGS] = {};
 646	long r;
 647
 648	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
 649	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
 650		/* nothing to wait for, last_seq is
  651		   already the last emitted fence */
 652		return -ENOENT;
 653	}
 654	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
 655	if (r < 0)
 656		return r;
 657	return 0;
 658}
 659
 660/**
 661 * radeon_fence_wait_empty - wait for all fences to signal
 662 *
 663 * @rdev: radeon device pointer
 664 * @ring: ring index the fence is associated with
 665 *
 666 * Wait for all fences on the requested ring to signal (all asics).
 667 * Returns 0 if the fences have passed, error for all other cases.
 668 * Caller must hold ring lock.
 669 */
 670int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
 671{
 672	uint64_t seq[RADEON_NUM_RINGS] = {};
 673	long r;
 674
 675	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
 676	if (!seq[ring])
 677		return 0;
 678
 679	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
 680	if (r < 0) {
 681		if (r == -EDEADLK)
 682			return -EDEADLK;
 683
 684		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
 685			ring, r);
 686	}
 687	return 0;
 688}
 689
 690/**
 691 * radeon_fence_ref - take a ref on a fence
 692 *
 693 * @fence: radeon fence object
 694 *
 695 * Take a reference on a fence (all asics).
 696 * Returns the fence.
 697 */
 698struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
 699{
 700	fence_get(&fence->base);
 701	return fence;
 702}
 703
 704/**
 705 * radeon_fence_unref - remove a ref on a fence
 706 *
 707 * @fence: radeon fence object
 708 *
 709 * Remove a reference on a fence (all asics).
 710 */
 711void radeon_fence_unref(struct radeon_fence **fence)
 712{
 713	struct radeon_fence *tmp = *fence;
 714
 715	*fence = NULL;
 716	if (tmp) {
 717		fence_put(&tmp->base);
 718	}
 719}
 720
 721/**
 722 * radeon_fence_count_emitted - get the count of emitted fences
 723 *
 724 * @rdev: radeon device pointer
 725 * @ring: ring index the fence is associated with
 726 *
 727 * Get the number of fences emitted on the requested ring (all asics).
 728 * Returns the number of emitted fences on the ring.  Used by the
  729 * dynpm code to track ring activity.
 730 */
 731unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
 732{
 733	uint64_t emitted;
 734
 735	/* We are not protected by ring lock when reading the last sequence
 736	 * but it's ok to report slightly wrong fence count here.
 737	 */
 738	radeon_fence_process(rdev, ring);
 739	emitted = rdev->fence_drv[ring].sync_seq[ring]
 740		- atomic64_read(&rdev->fence_drv[ring].last_seq);
  741	/* to avoid 32-bit wrap-around */
 742	if (emitted > 0x10000000) {
 743		emitted = 0x10000000;
 744	}
 745	return (unsigned)emitted;
 746}
 747
 748/**
 749 * radeon_fence_need_sync - do we need a semaphore
 750 *
 751 * @fence: radeon fence object
 752 * @dst_ring: which ring to check against
 753 *
 754 * Check if the fence needs to be synced against another ring
 755 * (all asics).  If so, we need to emit a semaphore.
 756 * Returns true if we need to sync with another ring, false if
 757 * not.
 758 */
 759bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
 760{
 761	struct radeon_fence_driver *fdrv;
 762
 763	if (!fence) {
 764		return false;
 765	}
 766
 767	if (fence->ring == dst_ring) {
 768		return false;
 769	}
 770
 771	/* we are protected by the ring mutex */
 772	fdrv = &fence->rdev->fence_drv[dst_ring];
 773	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
 774		return false;
 775	}
 776
 777	return true;
 778}
 779
 780/**
 781 * radeon_fence_note_sync - record the sync point
 782 *
 783 * @fence: radeon fence object
 784 * @dst_ring: which ring to check against
 785 *
 786 * Note the sequence number at which point the fence will
 787 * be synced with the requested ring (all asics).
 788 */
 789void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
 790{
 791	struct radeon_fence_driver *dst, *src;
 792	unsigned i;
 793
 794	if (!fence) {
 795		return;
 796	}
 797
 798	if (fence->ring == dst_ring) {
 799		return;
 800	}
 801
 802	/* we are protected by the ring mutex */
 803	src = &fence->rdev->fence_drv[fence->ring];
 804	dst = &fence->rdev->fence_drv[dst_ring];
 805	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 806		if (i == dst_ring) {
 807			continue;
 808		}
 809		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
 810	}
 811}
 812
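/*
 * Sketch of how the two helpers above pair up when work queued on
 * dst_ring depends on a fence from another ring (illustrative, not
 * from this file): emit a semaphore wait only when needed, then record
 * the sync point so later submissions on dst_ring can skip it.
 */
static void example_sync_rings(struct radeon_fence *fence, int dst_ring)
{
	if (radeon_fence_need_sync(fence, dst_ring)) {
		/* ... emit a semaphore wait on dst_ring here ... */
		radeon_fence_note_sync(fence, dst_ring);
	}
}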
 813/**
 814 * radeon_fence_driver_start_ring - make the fence driver
 815 * ready for use on the requested ring.
 816 *
 817 * @rdev: radeon device pointer
 818 * @ring: ring index to start the fence driver on
 819 *
 820 * Make the fence driver ready for processing (all asics).
 821 * Not all asics have all rings, so each asic will only
 822 * start the fence driver on the rings it has.
 823 * Returns 0 for success, errors for failure.
 824 */
 825int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
 826{
 827	uint64_t index;
 828	int r;
 829
 830	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
 831	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
 832		rdev->fence_drv[ring].scratch_reg = 0;
 833		if (ring != R600_RING_TYPE_UVD_INDEX) {
 834			index = R600_WB_EVENT_OFFSET + ring * 4;
 835			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
 836			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
 837							 index;
 838
 839		} else {
 840			/* put fence directly behind firmware */
 841			index = ALIGN(rdev->uvd_fw->size, 8);
 842			rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
 843			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
 844		}
 845
 846	} else {
 847		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
 848		if (r) {
 849			dev_err(rdev->dev, "fence failed to get scratch register\n");
 850			return r;
 851		}
 852		index = RADEON_WB_SCRATCH_OFFSET +
 853			rdev->fence_drv[ring].scratch_reg -
 854			rdev->scratch.reg_base;
 855		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
 856		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
 857	}
 858	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
 859	rdev->fence_drv[ring].initialized = true;
 860	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
 861		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
 862	return 0;
 863}
 864
 865/**
 866 * radeon_fence_driver_init_ring - init the fence driver
 867 * for the requested ring.
 868 *
 869 * @rdev: radeon device pointer
 870 * @ring: ring index to start the fence driver on
 871 *
 872 * Init the fence driver for the requested ring (all asics).
 873 * Helper function for radeon_fence_driver_init().
 874 */
 875static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
 876{
 877	int i;
 878
 879	rdev->fence_drv[ring].scratch_reg = -1;
 880	rdev->fence_drv[ring].cpu_addr = NULL;
 881	rdev->fence_drv[ring].gpu_addr = 0;
 882	for (i = 0; i < RADEON_NUM_RINGS; ++i)
 883		rdev->fence_drv[ring].sync_seq[i] = 0;
 884	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
 885	rdev->fence_drv[ring].initialized = false;
 886	INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
 887			  radeon_fence_check_lockup);
 888	rdev->fence_drv[ring].rdev = rdev;
 889}
 890
 891/**
 892 * radeon_fence_driver_init - init the fence driver
 893 * for all possible rings.
 894 *
 895 * @rdev: radeon device pointer
 896 *
 897 * Init the fence driver for all possible rings (all asics).
 898 * Not all asics have all rings, so each asic will only
 899 * start the fence driver on the rings it has using
 900 * radeon_fence_driver_start_ring().
 901 * Returns 0 for success.
 902 */
 903int radeon_fence_driver_init(struct radeon_device *rdev)
 904{
 905	int ring;
 906
 907	init_waitqueue_head(&rdev->fence_queue);
 908	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
 909		radeon_fence_driver_init_ring(rdev, ring);
 910	}
 911	if (radeon_debugfs_fence_init(rdev)) {
 912		dev_err(rdev->dev, "fence debugfs file creation failed\n");
 913	}
 914	return 0;
 915}
 916
 917/**
 918 * radeon_fence_driver_fini - tear down the fence driver
 919 * for all possible rings.
 920 *
 921 * @rdev: radeon device pointer
 922 *
 923 * Tear down the fence driver for all possible rings (all asics).
 924 */
 925void radeon_fence_driver_fini(struct radeon_device *rdev)
 926{
 927	int ring, r;
 928
 929	mutex_lock(&rdev->ring_lock);
 930	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
 931		if (!rdev->fence_drv[ring].initialized)
 932			continue;
 933		r = radeon_fence_wait_empty(rdev, ring);
 934		if (r) {
 935			/* no need to trigger GPU reset as we are unloading */
 936			radeon_fence_driver_force_completion(rdev, ring);
 937		}
 938		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
 939		wake_up_all(&rdev->fence_queue);
 940		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
 941		rdev->fence_drv[ring].initialized = false;
 942	}
 943	mutex_unlock(&rdev->ring_lock);
 944}
 945
 946/**
  947 * radeon_fence_driver_force_completion - force all fence waiters to complete
  948 *
  949 * @rdev: radeon device pointer
  950 * @ring: the ring to complete
  951 *
  952 * In case of a GPU reset failure, make sure no process keeps waiting on a
  953 * fence that will never complete.
 954 */
 955void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
 956{
 957	if (rdev->fence_drv[ring].initialized) {
 958		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
 959		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
 960	}
 961}
 962
 963
 964/*
 965 * Fence debugfs
 966 */
 967#if defined(CONFIG_DEBUG_FS)
 968static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
 969{
 970	struct drm_info_node *node = (struct drm_info_node *)m->private;
 971	struct drm_device *dev = node->minor->dev;
 972	struct radeon_device *rdev = dev->dev_private;
 973	int i, j;
 974
 975	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 976		if (!rdev->fence_drv[i].initialized)
 977			continue;
 978
 979		radeon_fence_process(rdev, i);
 980
 981		seq_printf(m, "--- ring %d ---\n", i);
 982		seq_printf(m, "Last signaled fence 0x%016llx\n",
 983			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
 984		seq_printf(m, "Last emitted        0x%016llx\n",
 985			   rdev->fence_drv[i].sync_seq[i]);
 986
 987		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
 988			if (i != j && rdev->fence_drv[j].initialized)
 989				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
 990					   j, rdev->fence_drv[i].sync_seq[j]);
 991		}
 992	}
 993	return 0;
 994}
 995
 996/**
  997 * radeon_debugfs_gpu_reset - manually trigger a GPU reset
  998 *
  999 * Manually trigger a GPU reset at the next fence wait.
1000 */
1001static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data)
1002{
1003	struct drm_info_node *node = (struct drm_info_node *) m->private;
1004	struct drm_device *dev = node->minor->dev;
1005	struct radeon_device *rdev = dev->dev_private;
1006
1007	down_read(&rdev->exclusive_lock);
1008	seq_printf(m, "%d\n", rdev->needs_reset);
1009	rdev->needs_reset = true;
1010	wake_up_all(&rdev->fence_queue);
1011	up_read(&rdev->exclusive_lock);
1012
1013	return 0;
1014}
1015
1016static struct drm_info_list radeon_debugfs_fence_list[] = {
1017	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
1018	{"radeon_gpu_reset", &radeon_debugfs_gpu_reset, 0, NULL}
1019};
1020#endif
1021
1022int radeon_debugfs_fence_init(struct radeon_device *rdev)
1023{
1024#if defined(CONFIG_DEBUG_FS)
1025	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 2);
1026#else
1027	return 0;
1028#endif
1029}
1030
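/*
 * Usage note: once registered, the two files above appear in the DRM
 * debugfs tree, typically /sys/kernel/debug/dri/<minor>/ (path assumes
 * debugfs is mounted in its default location). Reading
 * radeon_fence_info dumps the per-ring sequence numbers; reading
 * radeon_gpu_reset prints the current needs_reset flag and then
 * flags a reset at the next fence wait.
 */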
1031static const char *radeon_fence_get_driver_name(struct fence *fence)
1032{
1033	return "radeon";
1034}
1035
1036static const char *radeon_fence_get_timeline_name(struct fence *f)
1037{
1038	struct radeon_fence *fence = to_radeon_fence(f);
1039	switch (fence->ring) {
1040	case RADEON_RING_TYPE_GFX_INDEX: return "radeon.gfx";
1041	case CAYMAN_RING_TYPE_CP1_INDEX: return "radeon.cp1";
1042	case CAYMAN_RING_TYPE_CP2_INDEX: return "radeon.cp2";
1043	case R600_RING_TYPE_DMA_INDEX: return "radeon.dma";
1044	case CAYMAN_RING_TYPE_DMA1_INDEX: return "radeon.dma1";
1045	case R600_RING_TYPE_UVD_INDEX: return "radeon.uvd";
1046	case TN_RING_TYPE_VCE1_INDEX: return "radeon.vce1";
1047	case TN_RING_TYPE_VCE2_INDEX: return "radeon.vce2";
1048	default: WARN_ON_ONCE(1); return "radeon.unk";
1049	}
1050}
1051
1052static inline bool radeon_test_signaled(struct radeon_fence *fence)
1053{
1054	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
1055}
1056
1057struct radeon_wait_cb {
1058	struct fence_cb base;
1059	struct task_struct *task;
1060};
1061
1062static void
1063radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
1064{
1065	struct radeon_wait_cb *wait =
1066		container_of(cb, struct radeon_wait_cb, base);
1067
1068	wake_up_process(wait->task);
1069}
1070
1071static signed long radeon_fence_default_wait(struct fence *f, bool intr,
1072					     signed long t)
1073{
1074	struct radeon_fence *fence = to_radeon_fence(f);
1075	struct radeon_device *rdev = fence->rdev;
1076	struct radeon_wait_cb cb;
1077
1078	cb.task = current;
1079
1080	if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
1081		return t;
1082
1083	while (t > 0) {
1084		if (intr)
1085			set_current_state(TASK_INTERRUPTIBLE);
1086		else
1087			set_current_state(TASK_UNINTERRUPTIBLE);
1088
1089		/*
1090		 * radeon_test_signaled must be called after
1091		 * set_current_state to prevent a race with wake_up_process
1092		 */
1093		if (radeon_test_signaled(fence))
1094			break;
1095
1096		if (rdev->needs_reset) {
1097			t = -EDEADLK;
1098			break;
1099		}
1100
1101		t = schedule_timeout(t);
1102
1103		if (t > 0 && intr && signal_pending(current))
1104			t = -ERESTARTSYS;
1105	}
1106
1107	__set_current_state(TASK_RUNNING);
1108	fence_remove_callback(f, &cb.base);
1109
1110	return t;
1111}
1112
1113const struct fence_ops radeon_fence_ops = {
1114	.get_driver_name = radeon_fence_get_driver_name,
1115	.get_timeline_name = radeon_fence_get_timeline_name,
1116	.enable_signaling = radeon_fence_enable_signaling,
1117	.signaled = radeon_fence_is_signaled,
1118	.wait = radeon_fence_default_wait,
1119	.release = NULL,
1120};
v5.9 - drivers/gpu/drm/radeon/radeon_fence.c
   1/*
   2 * Copyright 2009 Jerome Glisse.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sub license, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  20 *
  21 * The above copyright notice and this permission notice (including the
  22 * next paragraph) shall be included in all copies or substantial portions
  23 * of the Software.
  24 *
  25 */
  26/*
  27 * Authors:
  28 *    Jerome Glisse <glisse@freedesktop.org>
  29 *    Dave Airlie
  30 */
  31
  32#include <linux/atomic.h>
  33#include <linux/firmware.h>
  34#include <linux/kref.h>
  35#include <linux/sched/signal.h>
  36#include <linux/seq_file.h>
  37#include <linux/slab.h>
  38#include <linux/wait.h>
  39
  40#include <drm/drm_debugfs.h>
  41#include <drm/drm_device.h>
  42#include <drm/drm_file.h>
  43
  44#include "radeon.h"
  45#include "radeon_reg.h"
  46#include "radeon_trace.h"
  47
  48/*
  49 * Fences
   50 * Fences mark an event in the GPU's pipeline and are used
   51 * for GPU/CPU synchronization.  When the fence is written,
   52 * it is expected that all buffers associated with that fence
   53 * are no longer in use by the associated ring on the GPU and
   54 * that the relevant GPU caches have been flushed.  Whether
  55 * we use a scratch register or memory location depends on the asic
  56 * and whether writeback is enabled.
  57 */
  58
  59/**
  60 * radeon_fence_write - write a fence value
  61 *
  62 * @rdev: radeon_device pointer
  63 * @seq: sequence number to write
  64 * @ring: ring index the fence is associated with
  65 *
  66 * Writes a fence value to memory or a scratch register (all asics).
  67 */
  68static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
  69{
  70	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
  71	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
  72		if (drv->cpu_addr) {
  73			*drv->cpu_addr = cpu_to_le32(seq);
  74		}
  75	} else {
  76		WREG32(drv->scratch_reg, seq);
  77	}
  78}
  79
  80/**
  81 * radeon_fence_read - read a fence value
  82 *
  83 * @rdev: radeon_device pointer
  84 * @ring: ring index the fence is associated with
  85 *
  86 * Reads a fence value from memory or a scratch register (all asics).
  87 * Returns the value of the fence read from memory or register.
  88 */
  89static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
  90{
  91	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
  92	u32 seq = 0;
  93
  94	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
  95		if (drv->cpu_addr) {
  96			seq = le32_to_cpu(*drv->cpu_addr);
  97		} else {
  98			seq = lower_32_bits(atomic64_read(&drv->last_seq));
  99		}
 100	} else {
 101		seq = RREG32(drv->scratch_reg);
 102	}
 103	return seq;
 104}
 105
 106/**
 107 * radeon_fence_schedule_check - schedule lockup check
 108 *
 109 * @rdev: radeon_device pointer
 110 * @ring: ring index we should work with
 111 *
 112 * Queues a delayed work item to check for lockups.
 113 */
 114static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
 115{
 116	/*
 117	 * Do not reset the timer here with mod_delayed_work,
 118	 * this can livelock in an interaction with TTM delayed destroy.
 119	 */
 120	queue_delayed_work(system_power_efficient_wq,
 121			   &rdev->fence_drv[ring].lockup_work,
 122			   RADEON_FENCE_JIFFIES_TIMEOUT);
 123}
 124
 125/**
 126 * radeon_fence_emit - emit a fence on the requested ring
 127 *
 128 * @rdev: radeon_device pointer
 129 * @fence: radeon fence object
 130 * @ring: ring index the fence is associated with
 131 *
 132 * Emits a fence command on the requested ring (all asics).
 133 * Returns 0 on success, -ENOMEM on failure.
 134 */
 135int radeon_fence_emit(struct radeon_device *rdev,
 136		      struct radeon_fence **fence,
 137		      int ring)
 138{
 139	u64 seq;
 140
 141	/* we are protected by the ring emission mutex */
 142	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
 143	if ((*fence) == NULL) {
 144		return -ENOMEM;
 145	}
 146	(*fence)->rdev = rdev;
 147	(*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
 148	(*fence)->ring = ring;
 149	(*fence)->is_vm_update = false;
 150	dma_fence_init(&(*fence)->base, &radeon_fence_ops,
 151		       &rdev->fence_queue.lock,
 152		       rdev->fence_context + ring,
 153		       seq);
 154	radeon_fence_ring_emit(rdev, ring, *fence);
 155	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
 156	radeon_fence_schedule_check(rdev, ring);
 157	return 0;
 158}
 159
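/*
 * By v5.9 the generic fence API has been renamed to dma_fence (struct
 * fence became struct dma_fence in v4.10), which is why this version
 * calls dma_fence_init() above. A minimal sketch of a consumer using
 * the embedded base fence through that API (illustrative, not from
 * this file):
 */
static long example_wait_base(struct radeon_fence *fence)
{
	struct dma_fence *f = dma_fence_get(&fence->base);
	long r;

	/* interruptible wait with a one second timeout */
	r = dma_fence_wait_timeout(f, true, msecs_to_jiffies(1000));
	dma_fence_put(f);
	return r;	/* >0: jiffies left, 0: timed out, <0: error */
}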
 160/**
 161 * radeon_fence_check_signaled - callback from fence_queue
 162 *
  163 * This function is called with the fence_queue lock held, which is also
  164 * used for the fence locking itself, so unlocked variants are used for
  165 * dma_fence_signal and remove_wait_queue.
 166 */
 167static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode, int flags, void *key)
 168{
 169	struct radeon_fence *fence;
 170	u64 seq;
 171
 172	fence = container_of(wait, struct radeon_fence, fence_wake);
 173
 174	/*
 175	 * We cannot use radeon_fence_process here because we're already
 176	 * in the waitqueue, in a call from wake_up_all.
 177	 */
 178	seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
 179	if (seq >= fence->seq) {
 180		int ret = dma_fence_signal_locked(&fence->base);
 181
 182		if (!ret)
 183			DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n");
 184		else
 185			DMA_FENCE_TRACE(&fence->base, "was already signaled\n");
 186
 187		radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
 188		__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
 189		dma_fence_put(&fence->base);
 190	} else
 191		DMA_FENCE_TRACE(&fence->base, "pending\n");
 192	return 0;
 193}
 194
 195/**
 196 * radeon_fence_activity - check for fence activity
 197 *
 198 * @rdev: radeon_device pointer
 199 * @ring: ring index the fence is associated with
 200 *
 201 * Checks the current fence value and calculates the last
  202 * signaled fence value. Returns true if activity occurred
  203 * on the ring and the fence_queue should be woken up.
 204 */
 205static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
 206{
 207	uint64_t seq, last_seq, last_emitted;
 208	unsigned count_loop = 0;
 209	bool wake = false;
 210
  211	/* Note there is a scenario here for an infinite loop, but it's
  212	 * very unlikely to happen. For it to happen, the current polling
  213	 * process needs to be interrupted by another process, and that other
  214	 * process needs to update last_seq between the atomic read and the
  215	 * xchg of the current process.
  216	 *
  217	 * Moreover, for this to become an infinite loop, new fences need to
  218	 * be signaled continuously, i.e. radeon_fence_read needs
  219	 * to return a different value each time for both the currently
  220	 * polling process and the other process that updates last_seq
  221	 * between the atomic read and xchg of the current process. And the
  222	 * value the other process sets as last_seq must be higher than
  223	 * the seq value we just read, which means the current process
  224	 * must be interrupted after radeon_fence_read and before the
  225	 * atomic xchg.
  226	 *
  227	 * To be even safer, we count the number of times we loop and
  228	 * bail out after 10 loops, accepting the fact that we might
  229	 * have temporarily set last_seq not to the real last
  230	 * seq but to an older one.
  231	 */
 232	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
 233	do {
 234		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
 235		seq = radeon_fence_read(rdev, ring);
 236		seq |= last_seq & 0xffffffff00000000LL;
 237		if (seq < last_seq) {
 238			seq &= 0xffffffff;
 239			seq |= last_emitted & 0xffffffff00000000LL;
 240		}
 241
 242		if (seq <= last_seq || seq > last_emitted) {
 243			break;
 244		}
  245		/* If we loop again we don't want to return without
  246		 * checking if a fence is signaled, as it means that the
  247		 * seq we just read is different from the previous one.
  248		 */
 249		wake = true;
 250		last_seq = seq;
 251		if ((count_loop++) > 10) {
  252			/* We looped too many times; leave, accepting
  253			 * that we might have set an older fence
  254			 * seq than the current real last seq signaled
  255			 * by the hw.
  256			 */
 257			break;
 258		}
 259	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
 260
 261	if (seq < last_emitted)
 262		radeon_fence_schedule_check(rdev, ring);
 263
 264	return wake;
 265}
 266
 267/**
 268 * radeon_fence_check_lockup - check for hardware lockup
 269 *
 270 * @work: delayed work item
 271 *
  272 * Checks for fence activity and, if there is none, probes
  273 * the hardware to see whether a lockup occurred.
 274 */
 275static void radeon_fence_check_lockup(struct work_struct *work)
 276{
 277	struct radeon_fence_driver *fence_drv;
 278	struct radeon_device *rdev;
 279	int ring;
 280
 281	fence_drv = container_of(work, struct radeon_fence_driver,
 282				 lockup_work.work);
 283	rdev = fence_drv->rdev;
 284	ring = fence_drv - &rdev->fence_drv[0];
 285
 286	if (!down_read_trylock(&rdev->exclusive_lock)) {
 287		/* just reschedule the check if a reset is going on */
 288		radeon_fence_schedule_check(rdev, ring);
 289		return;
 290	}
 291
 292	if (fence_drv->delayed_irq && rdev->ddev->irq_enabled) {
 293		unsigned long irqflags;
 294
 295		fence_drv->delayed_irq = false;
 296		spin_lock_irqsave(&rdev->irq.lock, irqflags);
 297		radeon_irq_set(rdev);
 298		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
 299	}
 300
 301	if (radeon_fence_activity(rdev, ring))
 302		wake_up_all(&rdev->fence_queue);
 303
 304	else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
 305
 306		/* good news we believe it's a lockup */
 307		dev_warn(rdev->dev, "GPU lockup (current fence id "
 308			 "0x%016llx last fence id 0x%016llx on ring %d)\n",
 309			 (uint64_t)atomic64_read(&fence_drv->last_seq),
 310			 fence_drv->sync_seq[ring], ring);
 311
  312		/* remember that we need a reset */
 313		rdev->needs_reset = true;
 314		wake_up_all(&rdev->fence_queue);
 315	}
 316	up_read(&rdev->exclusive_lock);
 317}
 318
 319/**
 320 * radeon_fence_process - process a fence
 321 *
 322 * @rdev: radeon_device pointer
 323 * @ring: ring index the fence is associated with
 324 *
 325 * Checks the current fence value and wakes the fence queue
 326 * if the sequence number has increased (all asics).
 327 */
 328void radeon_fence_process(struct radeon_device *rdev, int ring)
 329{
 330	if (radeon_fence_activity(rdev, ring))
 331		wake_up_all(&rdev->fence_queue);
 332}
 333
 334/**
 335 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 336 *
 337 * @rdev: radeon device pointer
 338 * @seq: sequence number
 339 * @ring: ring index the fence is associated with
 340 *
  341 * Check if the last signaled fence sequence number is >= the requested
  342 * sequence number (all asics).
  343 * Returns true if the fence has signaled (current fence value
  344 * is >= requested value) or false if it has not (current fence
  345 * value is < the requested value).  Helper function for
 346 * radeon_fence_signaled().
 347 */
 348static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
 349				      u64 seq, unsigned ring)
 350{
 351	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
 352		return true;
 353	}
 354	/* poll new last sequence at least once */
 355	radeon_fence_process(rdev, ring);
 356	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
 357		return true;
 358	}
 359	return false;
 360}
 361
 362static bool radeon_fence_is_signaled(struct dma_fence *f)
 363{
 364	struct radeon_fence *fence = to_radeon_fence(f);
 365	struct radeon_device *rdev = fence->rdev;
 366	unsigned ring = fence->ring;
 367	u64 seq = fence->seq;
 368
 369	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
 370		return true;
 371	}
 372
 373	if (down_read_trylock(&rdev->exclusive_lock)) {
 374		radeon_fence_process(rdev, ring);
 375		up_read(&rdev->exclusive_lock);
 376
 377		if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
 378			return true;
 379		}
 380	}
 381	return false;
 382}
 383
 384/**
 385 * radeon_fence_enable_signaling - enable signalling on fence
 386 * @fence: fence
 387 *
 388 * This function is called with fence_queue lock held, and adds a callback
 389 * to fence_queue that checks if this fence is signaled, and if so it
 390 * signals the fence and removes itself.
 391 */
 392static bool radeon_fence_enable_signaling(struct dma_fence *f)
 393{
 394	struct radeon_fence *fence = to_radeon_fence(f);
 395	struct radeon_device *rdev = fence->rdev;
 396
 397	if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
 398		return false;
 399
 400	if (down_read_trylock(&rdev->exclusive_lock)) {
 401		radeon_irq_kms_sw_irq_get(rdev, fence->ring);
 402
 403		if (radeon_fence_activity(rdev, fence->ring))
 404			wake_up_all_locked(&rdev->fence_queue);
 405
 406		/* did fence get signaled after we enabled the sw irq? */
 407		if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
 408			radeon_irq_kms_sw_irq_put(rdev, fence->ring);
 409			up_read(&rdev->exclusive_lock);
 410			return false;
 411		}
 412
 413		up_read(&rdev->exclusive_lock);
 414	} else {
  415		/* we're probably in a lockup, let's not fiddle too much */
 416		if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
 417			rdev->fence_drv[fence->ring].delayed_irq = true;
 418		radeon_fence_schedule_check(rdev, fence->ring);
 419	}
 420
 421	fence->fence_wake.flags = 0;
 422	fence->fence_wake.private = NULL;
 423	fence->fence_wake.func = radeon_fence_check_signaled;
 424	__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
 425	dma_fence_get(f);
 426
 427	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
 428	return true;
 429}
 430
 431/**
 432 * radeon_fence_signaled - check if a fence has signaled
 433 *
 434 * @fence: radeon fence object
 435 *
 436 * Check if the requested fence has signaled (all asics).
 437 * Returns true if the fence has signaled or false if it has not.
 438 */
 439bool radeon_fence_signaled(struct radeon_fence *fence)
 440{
 441	if (!fence)
 442		return true;
 443
 444	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
 445		int ret;
 446
 447		ret = dma_fence_signal(&fence->base);
 448		if (!ret)
 449			DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
 450		return true;
 451	}
 452	return false;
 453}
 454
 455/**
 456 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
 457 *
 458 * @rdev: radeon device pointer
 459 * @seq: sequence numbers
 460 *
  461 * Check if the last signaled fence sequence number is >= the requested
 462 * sequence number (all asics).
 463 * Returns true if any has signaled (current value is >= requested value)
 464 * or false if it has not. Helper function for radeon_fence_wait_seq.
 465 */
 466static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
 467{
 468	unsigned i;
 469
 470	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 471		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
 472			return true;
 473	}
 474	return false;
 475}
 476
 477/**
  478 * radeon_fence_wait_seq_timeout - wait for specific sequence numbers
  479 *
  480 * @rdev: radeon device pointer
  481 * @target_seq: sequence number(s) we want to wait for
  482 * @intr: use interruptible sleep
  483 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
  484 *
  485 * Wait for the requested sequence number(s) to be written by any ring
  486 * (all asics).  Sequence number array is indexed by ring id.
  487 * @intr selects whether to use interruptible (true) or non-interruptible
  488 * (false) sleep when waiting for the sequence number.  Helper function
  489 * for radeon_fence_wait_*().
  490 * Returns the remaining time if the sequence number has passed, 0 if
  491 * the wait timed out, or an error for all other cases.
 492 * -EDEADLK is returned when a GPU lockup has been detected.
 493 */
 494static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
 495					  u64 *target_seq, bool intr,
 496					  long timeout)
 497{
 498	long r;
 499	int i;
 500
 501	if (radeon_fence_any_seq_signaled(rdev, target_seq))
 502		return timeout;
 503
 504	/* enable IRQs and tracing */
 505	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 506		if (!target_seq[i])
 507			continue;
 508
 509		trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
 510		radeon_irq_kms_sw_irq_get(rdev, i);
 511	}
 512
 513	if (intr) {
 514		r = wait_event_interruptible_timeout(rdev->fence_queue, (
 515			radeon_fence_any_seq_signaled(rdev, target_seq)
 516			 || rdev->needs_reset), timeout);
 517	} else {
 518		r = wait_event_timeout(rdev->fence_queue, (
 519			radeon_fence_any_seq_signaled(rdev, target_seq)
 520			 || rdev->needs_reset), timeout);
 521	}
 522
 523	if (rdev->needs_reset)
 524		r = -EDEADLK;
 525
 526	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 527		if (!target_seq[i])
 528			continue;
 529
 530		radeon_irq_kms_sw_irq_put(rdev, i);
 531		trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
 532	}
 533
 534	return r;
 535}
 536
 537/**
 538 * radeon_fence_wait_timeout - wait for a fence to signal with timeout
 539 *
 540 * @fence: radeon fence object
  541 * @intr: use interruptible sleep
  542 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
  543 *
  544 * Wait for the requested fence to signal (all asics).
  545 * @intr selects whether to use interruptible (true) or non-interruptible
  546 * (false) sleep when waiting for the fence.
  547 * Returns the remaining time if the fence has signaled, 0 if
  548 * the wait timed out, or an error for all other cases.
 549 */
 550long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout)
 551{
 552	uint64_t seq[RADEON_NUM_RINGS] = {};
 553	long r;
 554	int r_sig;
 555
 556	/*
 557	 * This function should not be called on !radeon fences.
 558	 * If this is the case, it would mean this function can
 559	 * also be called on radeon fences belonging to another card.
 560	 * exclusive_lock is not held in that case.
 561	 */
 562	if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
 563		return dma_fence_wait(&fence->base, intr);
 564
 565	seq[fence->ring] = fence->seq;
 566	r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
 567	if (r <= 0) {
 568		return r;
 569	}
 570
 571	r_sig = dma_fence_signal(&fence->base);
 572	if (!r_sig)
 573		DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
 574	return r;
 575}
 576
 577/**
 578 * radeon_fence_wait - wait for a fence to signal
 579 *
 580 * @fence: radeon fence object
 581 * @intr: use interruptible sleep
 582 *
 583 * Wait for the requested fence to signal (all asics).
  584 * @intr selects whether to use interruptible (true) or non-interruptible
 585 * (false) sleep when waiting for the fence.
 586 * Returns 0 if the fence has passed, error for all other cases.
 587 */
 588int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 589{
 590	long r = radeon_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
 591	if (r > 0) {
 592		return 0;
 593	} else {
 594		return r;
 595	}
 596}
 597
 598/**
 599 * radeon_fence_wait_any - wait for a fence to signal on any ring
 600 *
 601 * @rdev: radeon device pointer
 602 * @fences: radeon fence object(s)
  603 * @intr: use interruptible sleep
 604 *
 605 * Wait for any requested fence to signal (all asics).  Fence
 606 * array is indexed by ring id.  @intr selects whether to use
 607 * interruptable (true) or non-interruptable (false) sleep when
 608 * waiting for the fences. Used by the suballocator.
 609 * Returns 0 if any fence has passed, error for all other cases.
 610 */
 611int radeon_fence_wait_any(struct radeon_device *rdev,
 612			  struct radeon_fence **fences,
 613			  bool intr)
 614{
 615	uint64_t seq[RADEON_NUM_RINGS];
 616	unsigned i, num_rings = 0;
 617	long r;
 618
 619	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 620		seq[i] = 0;
 621
 622		if (!fences[i]) {
 623			continue;
 624		}
 625
 626		seq[i] = fences[i]->seq;
 627		++num_rings;
 628	}
 629
  630	/* nothing to wait for? */
 631	if (num_rings == 0)
 632		return -ENOENT;
 633
 634	r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
 635	if (r < 0) {
 636		return r;
 637	}
 638	return 0;
 639}
 640
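/*
 * Illustrative sketch (hypothetical caller): the expected pattern is a
 * per-ring array with unused slots left NULL, as the suballocator does.
 * The gfx_fence/dma_fence pointers are assumed to come from earlier
 * fence emission.
 *
 *	struct radeon_fence *fences[RADEON_NUM_RINGS] = {};
 *	int r;
 *
 *	fences[RADEON_RING_TYPE_GFX_INDEX] = gfx_fence;
 *	fences[R600_RING_TYPE_DMA_INDEX] = dma_fence;
 *	r = radeon_fence_wait_any(rdev, fences, true);
 *	if (r)
 *		return r;
 */
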
 641/**
 642 * radeon_fence_wait_next - wait for the next fence to signal
 643 *
 644 * @rdev: radeon device pointer
 645 * @ring: ring index the fence is associated with
 646 *
 647 * Wait for the next fence on the requested ring to signal (all asics).
 648 * Returns 0 if the next fence has passed, error for all other cases.
 649 * Caller must hold ring lock.
 650 */
 651int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
 652{
 653	uint64_t seq[RADEON_NUM_RINGS] = {};
 654	long r;
 655
 656	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
 657	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
  658		/* nothing to wait for, last_seq is
  659		   already the last emitted fence */
 660		return -ENOENT;
 661	}
 662	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
 663	if (r < 0)
 664		return r;
 665	return 0;
 666}
 667
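/*
 * Sketch of a plausible call site (an assumption, not quoted from the
 * ring code): when a ring is full, wait for the oldest outstanding
 * fence to retire, holding the ring lock as required above.
 *
 *	mutex_lock(&rdev->ring_lock);
 *	r = radeon_fence_wait_next(rdev, ring);
 *	mutex_unlock(&rdev->ring_lock);
 */
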
 668/**
 669 * radeon_fence_wait_empty - wait for all fences to signal
 670 *
 671 * @rdev: radeon device pointer
 672 * @ring: ring index the fence is associated with
 673 *
 674 * Wait for all fences on the requested ring to signal (all asics).
 675 * Returns 0 if the fences have passed, error for all other cases.
 676 * Caller must hold ring lock.
 677 */
 678int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
 679{
 680	uint64_t seq[RADEON_NUM_RINGS] = {};
 681	long r;
 682
 683	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
 684	if (!seq[ring])
 685		return 0;
 686
 687	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
 688	if (r < 0) {
 689		if (r == -EDEADLK)
 690			return -EDEADLK;
 691
 692		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
 693			ring, r);
 694	}
 695	return 0;
 696}
 697
 698/**
 699 * radeon_fence_ref - take a ref on a fence
 700 *
 701 * @fence: radeon fence object
 702 *
 703 * Take a reference on a fence (all asics).
 704 * Returns the fence.
 705 */
 706struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
 707{
 708	dma_fence_get(&fence->base);
 709	return fence;
 710}
 711
 712/**
 713 * radeon_fence_unref - remove a ref on a fence
 714 *
 715 * @fence: radeon fence object
 716 *
 717 * Remove a reference on a fence (all asics).
 718 */
 719void radeon_fence_unref(struct radeon_fence **fence)
 720{
 721	struct radeon_fence *tmp = *fence;
 722
 723	*fence = NULL;
 724	if (tmp) {
 725		dma_fence_put(&tmp->base);
 726	}
 727}
 728
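/*
 * Illustrative lifetime sketch (hypothetical caller): a fence shared
 * with another context takes an extra reference, and the unref helper
 * clears the local pointer so it cannot dangle.
 *
 *	struct radeon_fence *f = radeon_fence_ref(fence);
 *	... use f ...
 *	radeon_fence_unref(&f);		f is NULL afterwards
 */
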
 729/**
 730 * radeon_fence_count_emitted - get the count of emitted fences
 731 *
 732 * @rdev: radeon device pointer
 733 * @ring: ring index the fence is associated with
 734 *
 735 * Get the number of fences emitted on the requested ring (all asics).
 736 * Returns the number of emitted fences on the ring.  Used by the
  737 * dynpm code to track ring activity.
 738 */
 739unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
 740{
 741	uint64_t emitted;
 742
  743	/* We are not protected by the ring lock when reading the last sequence
  744	 * number, but it's ok to report a slightly wrong fence count here.
  745	 */
 746	radeon_fence_process(rdev, ring);
 747	emitted = rdev->fence_drv[ring].sync_seq[ring]
 748		- atomic64_read(&rdev->fence_drv[ring].last_seq);
  749	/* to avoid 32-bit wrap around */
 750	if (emitted > 0x10000000) {
 751		emitted = 0x10000000;
 752	}
 753	return (unsigned)emitted;
 754}
 755
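/*
 * Sketch of a dynpm-style activity check (hypothetical, not quoted from
 * the pm code): treat the GPU as busy while any ring still has emitted
 * fences outstanding.
 *
 *	bool busy = false;
 *	int i;
 *
 *	for (i = 0; i < RADEON_NUM_RINGS; ++i)
 *		busy |= radeon_fence_count_emitted(rdev, i) != 0;
 */
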
 756/**
 757 * radeon_fence_need_sync - do we need a semaphore
 758 *
 759 * @fence: radeon fence object
 760 * @dst_ring: which ring to check against
 761 *
 762 * Check if the fence needs to be synced against another ring
 763 * (all asics).  If so, we need to emit a semaphore.
 764 * Returns true if we need to sync with another ring, false if
 765 * not.
 766 */
 767bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
 768{
 769	struct radeon_fence_driver *fdrv;
 770
 771	if (!fence) {
 772		return false;
 773	}
 774
 775	if (fence->ring == dst_ring) {
 776		return false;
 777	}
 778
 779	/* we are protected by the ring mutex */
 780	fdrv = &fence->rdev->fence_drv[dst_ring];
 781	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
 782		return false;
 783	}
 784
 785	return true;
 786}
 787
 788/**
 789 * radeon_fence_note_sync - record the sync point
 790 *
 791 * @fence: radeon fence object
 792 * @dst_ring: which ring to check against
 793 *
  794 * Note the sequence number at which the fence will be
  795 * synced with the requested ring (all asics).
 796 */
 797void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
 798{
 799	struct radeon_fence_driver *dst, *src;
 800	unsigned i;
 801
 802	if (!fence) {
 803		return;
 804	}
 805
 806	if (fence->ring == dst_ring) {
 807		return;
 808	}
 809
 810	/* we are protected by the ring mutex */
 811	src = &fence->rdev->fence_drv[fence->ring];
 812	dst = &fence->rdev->fence_drv[dst_ring];
 813	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 814		if (i == dst_ring) {
 815			continue;
 816		}
 817		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
 818	}
 819}
 820
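/*
 * Sketch of the intended pairing (assumed caller; the semaphore emit is
 * illustrative): before dst_ring consumes buffers guarded by @fence,
 * check whether a semaphore wait is needed, emit it, then record the
 * sync point so the same fence is not synced against twice.
 *
 *	if (radeon_fence_need_sync(fence, dst_ring)) {
 *		radeon_semaphore_emit_wait(rdev, dst_ring, semaphore);
 *		radeon_fence_note_sync(fence, dst_ring);
 *	}
 */
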
 821/**
 822 * radeon_fence_driver_start_ring - make the fence driver
 823 * ready for use on the requested ring.
 824 *
 825 * @rdev: radeon device pointer
 826 * @ring: ring index to start the fence driver on
 827 *
 828 * Make the fence driver ready for processing (all asics).
 829 * Not all asics have all rings, so each asic will only
 830 * start the fence driver on the rings it has.
 831 * Returns 0 for success, errors for failure.
 832 */
 833int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
 834{
 835	uint64_t index;
 836	int r;
 837
 838	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
 839	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
 840		rdev->fence_drv[ring].scratch_reg = 0;
 841		if (ring != R600_RING_TYPE_UVD_INDEX) {
 842			index = R600_WB_EVENT_OFFSET + ring * 4;
 843			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
 844			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
 845							 index;
 846
 847		} else {
 848			/* put fence directly behind firmware */
 849			index = ALIGN(rdev->uvd_fw->size, 8);
 850			rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
 851			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
 852		}
 853
 854	} else {
 855		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
 856		if (r) {
 857			dev_err(rdev->dev, "fence failed to get scratch register\n");
 858			return r;
 859		}
 860		index = RADEON_WB_SCRATCH_OFFSET +
 861			rdev->fence_drv[ring].scratch_reg -
 862			rdev->scratch.reg_base;
 863		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
 864		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
 865	}
 866	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
 867	rdev->fence_drv[ring].initialized = true;
 868	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx\n",
 869		 ring, rdev->fence_drv[ring].gpu_addr);
 870	return 0;
 871}
 872
 873/**
 874 * radeon_fence_driver_init_ring - init the fence driver
 875 * for the requested ring.
 876 *
 877 * @rdev: radeon device pointer
 878 * @ring: ring index to start the fence driver on
 879 *
 880 * Init the fence driver for the requested ring (all asics).
 881 * Helper function for radeon_fence_driver_init().
 882 */
 883static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
 884{
 885	int i;
 886
 887	rdev->fence_drv[ring].scratch_reg = -1;
 888	rdev->fence_drv[ring].cpu_addr = NULL;
 889	rdev->fence_drv[ring].gpu_addr = 0;
 890	for (i = 0; i < RADEON_NUM_RINGS; ++i)
 891		rdev->fence_drv[ring].sync_seq[i] = 0;
 892	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
 893	rdev->fence_drv[ring].initialized = false;
 894	INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
 895			  radeon_fence_check_lockup);
 896	rdev->fence_drv[ring].rdev = rdev;
 897}
 898
 899/**
 900 * radeon_fence_driver_init - init the fence driver
 901 * for all possible rings.
 902 *
 903 * @rdev: radeon device pointer
 904 *
 905 * Init the fence driver for all possible rings (all asics).
 906 * Not all asics have all rings, so each asic will only
 907 * start the fence driver on the rings it has using
 908 * radeon_fence_driver_start_ring().
 909 * Returns 0 for success.
 910 */
 911int radeon_fence_driver_init(struct radeon_device *rdev)
 912{
 913	int ring;
 914
 915	init_waitqueue_head(&rdev->fence_queue);
 916	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
 917		radeon_fence_driver_init_ring(rdev, ring);
 918	}
 919	if (radeon_debugfs_fence_init(rdev)) {
 920		dev_err(rdev->dev, "fence debugfs file creation failed\n");
 921	}
 922	return 0;
 923}
 924
 925/**
 926 * radeon_fence_driver_fini - tear down the fence driver
 927 * for all possible rings.
 928 *
 929 * @rdev: radeon device pointer
 930 *
 931 * Tear down the fence driver for all possible rings (all asics).
 932 */
 933void radeon_fence_driver_fini(struct radeon_device *rdev)
 934{
 935	int ring, r;
 936
 937	mutex_lock(&rdev->ring_lock);
 938	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
 939		if (!rdev->fence_drv[ring].initialized)
 940			continue;
 941		r = radeon_fence_wait_empty(rdev, ring);
 942		if (r) {
 943			/* no need to trigger GPU reset as we are unloading */
 944			radeon_fence_driver_force_completion(rdev, ring);
 945		}
 946		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
 947		wake_up_all(&rdev->fence_queue);
 948		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
 949		rdev->fence_drv[ring].initialized = false;
 950	}
 951	mutex_unlock(&rdev->ring_lock);
 952}
 953
 954/**
  955 * radeon_fence_driver_force_completion - force all fence waiters to complete
 956 *
 957 * @rdev: radeon device pointer
 958 * @ring: the ring to complete
 959 *
  960 * In case of GPU reset failure, make sure no process keeps waiting on a
  961 * fence that will never complete.
 962 */
 963void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
 964{
 965	if (rdev->fence_drv[ring].initialized) {
 966		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
 967		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
 968	}
 969}
 970
 971
 972/*
 973 * Fence debugfs
 974 */
 975#if defined(CONFIG_DEBUG_FS)
 976static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
 977{
 978	struct drm_info_node *node = (struct drm_info_node *)m->private;
 979	struct drm_device *dev = node->minor->dev;
 980	struct radeon_device *rdev = dev->dev_private;
 981	int i, j;
 982
 983	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 984		if (!rdev->fence_drv[i].initialized)
 985			continue;
 986
 987		radeon_fence_process(rdev, i);
 988
 989		seq_printf(m, "--- ring %d ---\n", i);
 990		seq_printf(m, "Last signaled fence 0x%016llx\n",
 991			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
 992		seq_printf(m, "Last emitted        0x%016llx\n",
 993			   rdev->fence_drv[i].sync_seq[i]);
 994
 995		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
 996			if (i != j && rdev->fence_drv[j].initialized)
 997				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
 998					   j, rdev->fence_drv[i].sync_seq[j]);
 999		}
1000	}
1001	return 0;
1002}
1003
1004/**
1005 * radeon_debugfs_gpu_reset - manually trigger a gpu reset
1006 *
1007 * Manually trigger a gpu reset at the next fence wait.
1008 */
1009static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data)
1010{
1011	struct drm_info_node *node = (struct drm_info_node *) m->private;
1012	struct drm_device *dev = node->minor->dev;
1013	struct radeon_device *rdev = dev->dev_private;
1014
1015	down_read(&rdev->exclusive_lock);
1016	seq_printf(m, "%d\n", rdev->needs_reset);
1017	rdev->needs_reset = true;
1018	wake_up_all(&rdev->fence_queue);
1019	up_read(&rdev->exclusive_lock);
1020
1021	return 0;
1022}
1023
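/*
 * Usage sketch (the path is an assumption based on the standard DRM
 * debugfs layout): reading radeon_gpu_reset prints the current
 * needs_reset value and arms a reset at the next fence wait, while
 * radeon_fence_info dumps the per-ring state above.
 *
 *	# cat /sys/kernel/debug/dri/0/radeon_gpu_reset
 *	# cat /sys/kernel/debug/dri/0/radeon_fence_info
 */
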
1024static struct drm_info_list radeon_debugfs_fence_list[] = {
1025	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
1026	{"radeon_gpu_reset", &radeon_debugfs_gpu_reset, 0, NULL}
1027};
1028#endif
1029
1030int radeon_debugfs_fence_init(struct radeon_device *rdev)
1031{
1032#if defined(CONFIG_DEBUG_FS)
1033	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 2);
1034#else
1035	return 0;
1036#endif
1037}
1038
1039static const char *radeon_fence_get_driver_name(struct dma_fence *fence)
1040{
1041	return "radeon";
1042}
1043
1044static const char *radeon_fence_get_timeline_name(struct dma_fence *f)
1045{
1046	struct radeon_fence *fence = to_radeon_fence(f);
1047	switch (fence->ring) {
1048	case RADEON_RING_TYPE_GFX_INDEX: return "radeon.gfx";
1049	case CAYMAN_RING_TYPE_CP1_INDEX: return "radeon.cp1";
1050	case CAYMAN_RING_TYPE_CP2_INDEX: return "radeon.cp2";
1051	case R600_RING_TYPE_DMA_INDEX: return "radeon.dma";
1052	case CAYMAN_RING_TYPE_DMA1_INDEX: return "radeon.dma1";
1053	case R600_RING_TYPE_UVD_INDEX: return "radeon.uvd";
1054	case TN_RING_TYPE_VCE1_INDEX: return "radeon.vce1";
1055	case TN_RING_TYPE_VCE2_INDEX: return "radeon.vce2";
1056	default: WARN_ON_ONCE(1); return "radeon.unk";
1057	}
1058}
1059
1060static inline bool radeon_test_signaled(struct radeon_fence *fence)
1061{
1062	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
1063}
1064
1065struct radeon_wait_cb {
1066	struct dma_fence_cb base;
1067	struct task_struct *task;
1068};
1069
1070static void
1071radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
1072{
1073	struct radeon_wait_cb *wait =
1074		container_of(cb, struct radeon_wait_cb, base);
1075
1076	wake_up_process(wait->task);
1077}
1078
1079static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr,
1080					     signed long t)
1081{
1082	struct radeon_fence *fence = to_radeon_fence(f);
1083	struct radeon_device *rdev = fence->rdev;
1084	struct radeon_wait_cb cb;
1085
1086	cb.task = current;
1087
1088	if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
1089		return t;
1090
1091	while (t > 0) {
1092		if (intr)
1093			set_current_state(TASK_INTERRUPTIBLE);
1094		else
1095			set_current_state(TASK_UNINTERRUPTIBLE);
1096
1097		/*
1098		 * radeon_test_signaled must be called after
1099		 * set_current_state to prevent a race with wake_up_process
1100		 */
1101		if (radeon_test_signaled(fence))
1102			break;
1103
1104		if (rdev->needs_reset) {
1105			t = -EDEADLK;
1106			break;
1107		}
1108
1109		t = schedule_timeout(t);
1110
1111		if (t > 0 && intr && signal_pending(current))
1112			t = -ERESTARTSYS;
1113	}
1114
1115	__set_current_state(TASK_RUNNING);
1116	dma_fence_remove_callback(f, &cb.base);
1117
1118	return t;
1119}
1120
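/*
 * Sketch (an assumption about the dma_fence core, not radeon code): a
 * generic wait on the embedded dma_fence is routed into
 * radeon_fence_default_wait() above through radeon_fence_ops.wait, e.g.:
 *
 *	long r = dma_fence_wait_timeout(&fence->base, true,
 *					msecs_to_jiffies(100));
 */
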
1121const struct dma_fence_ops radeon_fence_ops = {
1122	.get_driver_name = radeon_fence_get_driver_name,
1123	.get_timeline_name = radeon_fence_get_timeline_name,
1124	.enable_signaling = radeon_fence_enable_signaling,
1125	.signaled = radeon_fence_is_signaled,
1126	.wait = radeon_fence_default_wait,
1127	.release = NULL,
1128};