/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/debugobjects.h>

#include "gt/intel_context.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_active.h"

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct kmem_cache *slab_cache;

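/*
 * One active_node tracks the last fence on one timeline; the nodes of a
 * tracker are kept in the i915_active.tree rbtree, keyed by timeline.
 *
 * Caller-side lifecycle, as a minimal sketch (assuming the usual
 * i915_active_init() and i915_active_wait() wrappers from i915_active.h,
 * which supply the lockdep keys and wait state for the __-prefixed
 * functions below):
 *
 *	i915_active_init(&ref, my_active, my_retire, 0);
 *	err = i915_active_add_request(&ref, rq);  // track a request
 *	...
 *	err = i915_active_wait(&ref);  // flush and wait for idle
 *	i915_active_fini(&ref);
 *
 * my_active() runs when @ref first becomes busy; my_retire() runs after
 * the last tracked fence has signaled and the last reference is dropped.
 */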
struct active_node {
	struct rb_node node;
	struct i915_active_fence base;
	struct i915_active *ref;
	u64 timeline;
};

#define fetch_node(x) rb_entry(READ_ONCE(x), typeof(struct active_node), node)

static inline struct active_node *
node_from_active(struct i915_active_fence *active)
{
	return container_of(active, struct active_node, base);
}

#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)

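/*
 * Barrier (proto-)nodes: until a barrier is attached to a request by
 * i915_request_add_active_barriers(), its fence slot holds ERR_PTR(-EAGAIN)
 * instead of a real fence, which is what is_barrier() tests. While in that
 * state the embedded dma_fence_cb is reused for bookkeeping: cb.node acts
 * as the llist link on engine->barrier_tasks, and cb.node.prev stashes the
 * owning engine. The casts below convert between those representations.
 */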
static inline bool is_barrier(const struct i915_active_fence *active)
{
	return IS_ERR(rcu_access_pointer(active->fence));
}

static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return (struct llist_node *)&node->base.cb.node;
}

static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
}

static inline struct intel_engine_cs *
barrier_to_engine(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return __barrier_to_engine(node);
}

static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
	return container_of((struct list_head *)x,
			    struct active_node, base.cb.node);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}

static const struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

static void debug_active_activate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* before the first inc */
		debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* after the last dec */
		debug_object_deactivate(ref, &active_debug_desc);
}

static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

static inline void debug_active_init(struct i915_active *ref) { }
static inline void debug_active_activate(struct i915_active *ref) { }
static inline void debug_active_deactivate(struct i915_active *ref) { }
static inline void debug_active_fini(struct i915_active *ref) { }
static inline void debug_active_assert(struct i915_active *ref) { }

#endif

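/*
 * Called once the last reference is dropped: with the tree_lock held we
 * detach every node bar the MRU cache entry, rebuild the tree around that
 * single node, invoke the optional retire() callback, wake any waiters
 * and finally return the discarded nodes to the slab cache.
 */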
static void
__active_retire(struct i915_active *ref)
{
	struct rb_root root = RB_ROOT;
	struct active_node *it, *n;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* return the unused nodes to our slabcache -- flushing the allocator */
	if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
		return;

	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
	debug_active_deactivate(ref);

	/* Even if we have not used the cache, we may still have a barrier */
	if (!ref->cache)
		ref->cache = fetch_node(ref->tree.rb_node);

	/* Keep the MRU cached node for reuse */
	if (ref->cache) {
		/* Discard all other nodes in the tree */
		rb_erase(&ref->cache->node, &ref->tree);
		root = ref->tree;

		/* Rebuild the tree with only the cached node */
		rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);
		rb_insert_color(&ref->cache->node, &ref->tree);
		GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);

		/* Make the cached node available for reuse with any timeline */
		ref->cache->timeline = 0; /* needs cmpxchg(u64) */
	}

	spin_unlock_irqrestore(&ref->tree_lock, flags);

	/* After the final retire, the entire struct may be freed */
	if (ref->retire)
		ref->retire(ref);

	/* ... except if you wait on it, you must manage your own references! */
	wake_up_var(ref);

	/* Finally free the discarded timeline tree */
	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
		GEM_BUG_ON(i915_active_fence_isset(&it->base));
		kmem_cache_free(slab_cache, it);
	}
}

static void
active_work(struct work_struct *wrk)
{
	struct i915_active *ref = container_of(wrk, typeof(*ref), work);

	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	__active_retire(ref);
}

static void
active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
		queue_work(system_unbound_wq, &ref->work);
		return;
	}

	__active_retire(ref);
}

static inline struct dma_fence **
__active_fence_slot(struct i915_active_fence *active)
{
	return (struct dma_fence ** __force)&active->fence;
}

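/*
 * Common fence-callback entrypoint: reclaim the slot by atomically
 * swapping the fence pointer back to NULL. Only the winner of the race
 * against a concurrent __i915_active_fence_set() (which replaces the
 * fence instead) returns true, so each slot is retired exactly once.
 */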
static inline bool
active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_active_fence *active =
		container_of(cb, typeof(*active), cb);

	return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
}

static void
node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct active_node, base.cb)->ref);
}

static void
excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct i915_active, excl.cb));
}

static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
{
	struct active_node *it;

	GEM_BUG_ON(idx == 0); /* 0 is the unordered timeline, rsvd for cache */

	/*
	 * We track the most recently used timeline to skip a rbtree search
	 * for the common case, under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is
	 * after the previous activity has been retired, or if it matches the
	 * current timeline.
	 */
	it = READ_ONCE(ref->cache);
	if (it) {
		u64 cached = READ_ONCE(it->timeline);

		/* Once claimed, this slot will only belong to this idx */
		if (cached == idx)
			return it;

		/*
		 * An unclaimed cache [.timeline=0] can only be claimed once.
		 *
		 * If the value is already non-zero, some other thread has
		 * claimed the cache and we know that it does not match our
		 * idx. If, and only if, the timeline is currently zero is it
		 * worth competing to claim it atomically for ourselves (for
		 * only the winner of that race will cmpxchg return the old
		 * value of 0).
		 */
		if (!cached && !cmpxchg64(&it->timeline, 0, idx))
			return it;
	}

	BUILD_BUG_ON(offsetof(typeof(*it), node));

	/* While active, the tree can only be built; not destroyed */
	GEM_BUG_ON(i915_active_is_idle(ref));

	it = fetch_node(ref->tree.rb_node);
	while (it) {
		if (it->timeline < idx) {
			it = fetch_node(it->node.rb_right);
		} else if (it->timeline > idx) {
			it = fetch_node(it->node.rb_left);
		} else {
			WRITE_ONCE(ref->cache, it);
			break;
		}
	}

	/* NB: If the tree rotated beneath us, we may miss our target. */
	return it;
}

static struct i915_active_fence *
active_instance(struct i915_active *ref, u64 idx)
{
	struct active_node *node;
	struct rb_node **p, *parent;

	node = __active_lookup(ref, idx);
	if (likely(node))
		return &node->base;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx)
			goto out;

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	/*
	 * XXX: We should preallocate this before i915_active_ref() is ever
	 * called, but we cannot call into fs_reclaim() anyway, so use GFP_ATOMIC.
	 */
	node = kmem_cache_alloc(slab_cache, GFP_ATOMIC);
	if (!node)
		goto out;

	__i915_active_fence_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

out:
	WRITE_ONCE(ref->cache, node);
	spin_unlock_irq(&ref->tree_lock);

	return node ? &node->base : NULL; /* rb_node is at offset 0 */
}

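/*
 * __i915_active_init: Initialise an active tracker
 * @ref: the active tracker
 * @active: optional callback invoked when @ref transitions from idle to busy
 * @retire: optional callback invoked after @ref retires back to idle
 * @flags: I915_ACTIVE_RETIRE_SLEEPS if @retire needs process context
 *	   (retirement is then deferred to a worker)
 * @mkey: lockdep class key for the activation mutex
 * @wkey: lockdep class key for the retirement worker
 *
 * Callers normally use the i915_active_init() wrapper from i915_active.h,
 * which supplies the two lockdep class keys automatically.
 */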
void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			unsigned long flags,
			struct lock_class_key *mkey,
			struct lock_class_key *wkey)
{
	debug_active_init(ref);

	ref->flags = flags;
	ref->active = active;
	ref->retire = retire;

	spin_lock_init(&ref->tree_lock);
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	init_llist_head(&ref->preallocated_barriers);
	atomic_set(&ref->count, 0);
	__mutex_init(&ref->mutex, "i915_active", mkey);
	__i915_active_fence_init(&ref->excl, NULL, excl_retire);
	INIT_WORK(&ref->work, active_work);
#if IS_ENABLED(CONFIG_LOCKDEP)
	lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
#endif
}

static bool ____active_del_barrier(struct i915_active *ref,
				   struct active_node *node,
				   struct intel_engine_cs *engine)
{
	struct llist_node *head = NULL, *tail = NULL;
	struct llist_node *pos, *next;

	GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);

	/*
	 * Rebuild the llist excluding our node. We may perform this
	 * outside of the kernel_context timeline mutex and so someone
	 * else may be manipulating the engine->barrier_tasks, in
	 * which case either we or they will be upset :)
	 *
	 * A second __active_del_barrier() will report failure to claim
	 * the active_node and the caller will just shrug and know not to
	 * claim ownership of its node.
	 *
	 * A concurrent i915_request_add_active_barriers() will miss adding
	 * any of the tasks, but we will try again on the next -- and since
	 * we are actively using the barrier, we know that there will be
	 * at least another opportunity when we idle.
	 */
	llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
		if (node == barrier_from_ll(pos)) {
			node = NULL;
			continue;
		}

		pos->next = head;
		head = pos;
		if (!tail)
			tail = pos;
	}
	if (head)
		llist_add_batch(head, tail, &engine->barrier_tasks);

	return !node;
}

static bool
__active_del_barrier(struct i915_active *ref, struct active_node *node)
{
	return ____active_del_barrier(ref, node, barrier_to_engine(node));
}

static bool
replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
{
	if (!is_barrier(active)) /* proto-node used by our idle barrier? */
		return false;

	/*
	 * This request is on the kernel_context timeline, and so
	 * we can use it to substitute for the pending idle-barrier
	 * request that we want to emit on the kernel_context.
	 */
	__active_del_barrier(ref, node_from_active(active));
	return true;
}

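/*
 * i915_active_add_request: Record a request in the active tracker
 * @ref: the active tracker
 * @rq: the request to track
 *
 * Tracks @rq as the most recent activity on its timeline within @ref,
 * holding a temporary acquire so the tree cannot be reaped while the node
 * is looked up or allocated. If the slot was occupied by a pending idle
 * barrier, the barrier is cancelled and replaced by the request. Returns
 * 0 on success or a negative error code (e.g. -ENOMEM) on failure.
 */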
int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
{
	struct dma_fence *fence = &rq->fence;
	struct i915_active_fence *active;
	int err;

	/* Prevent reaping in case we malloc/wait while building the tree */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, i915_request_timeline(rq)->fence_context);
	if (!active) {
		err = -ENOMEM;
		goto out;
	}

	if (replace_barrier(ref, active)) {
		RCU_INIT_POINTER(active->fence, NULL);
		atomic_dec(&ref->count);
	}
	if (!__i915_active_fence_set(active, fence))
		__i915_active_acquire(ref);

out:
	i915_active_release(ref);
	return err;
}

static struct dma_fence *
__i915_active_set_fence(struct i915_active *ref,
			struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;

	if (replace_barrier(ref, active)) {
		RCU_INIT_POINTER(active->fence, fence);
		return NULL;
	}

	rcu_read_lock();
	prev = __i915_active_fence_set(active, fence);
	if (prev)
		prev = dma_fence_get_rcu(prev);
	else
		__i915_active_acquire(ref);
	rcu_read_unlock();

	return prev;
}

struct dma_fence *
i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
	/* We expect the caller to manage the exclusive timeline ordering */
	return __i915_active_set_fence(ref, &ref->excl, f);
}

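/*
 * i915_active_acquire_if_busy: Take a reference only if already active
 * @ref: the active tracker
 *
 * An opportunistic acquire that never calls the ref->active() hook: the
 * count is incremented only if @ref is not idle. Returns true if a
 * reference was taken and a matching i915_active_release() is required.
 */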
bool i915_active_acquire_if_busy(struct i915_active *ref)
{
	debug_active_assert(ref);
	return atomic_add_unless(&ref->count, 1, 0);
}

static void __i915_active_activate(struct i915_active *ref)
{
	spin_lock_irq(&ref->tree_lock); /* __active_retire() */
	if (!atomic_fetch_inc(&ref->count))
		debug_active_activate(ref);
	spin_unlock_irq(&ref->tree_lock);
}

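/*
 * i915_active_acquire: Acquire a reference, activating the tracker on
 * first use
 * @ref: the active tracker
 *
 * The slow path: on the idle-to-busy transition the optional ref->active()
 * hook runs once under ref->mutex before the tracker is marked active, so
 * any setup happens exactly once per busy cycle. May sleep; returns 0 on
 * success, or the error from the hook or an interrupted mutex wait.
 */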
int i915_active_acquire(struct i915_active *ref)
{
	int err;

	if (i915_active_acquire_if_busy(ref))
		return 0;

	if (!ref->active) {
		__i915_active_activate(ref);
		return 0;
	}

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (likely(!i915_active_acquire_if_busy(ref))) {
		err = ref->active(ref);
		if (!err)
			__i915_active_activate(ref);
	}

	mutex_unlock(&ref->mutex);

	return err;
}

int i915_active_acquire_for_context(struct i915_active *ref, u64 idx)
{
	struct i915_active_fence *active;
	int err;

	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, idx);
	if (!active) {
		i915_active_release(ref);
		return -ENOMEM;
	}

	return 0; /* return with active ref */
}

void i915_active_release(struct i915_active *ref)
{
	debug_active_assert(ref);
	active_retire(ref);
}

static void enable_signaling(struct i915_active_fence *active)
{
	struct dma_fence *fence;

	if (unlikely(is_barrier(active)))
		return;

	fence = i915_active_fence_get(active);
	if (!fence)
		return;

	dma_fence_enable_sw_signaling(fence);
	dma_fence_put(fence);
}

static int flush_barrier(struct active_node *it)
{
	struct intel_engine_cs *engine;

	if (likely(!is_barrier(&it->base)))
		return 0;

	engine = __barrier_to_engine(it);
	smp_rmb(); /* serialise with add_active_barriers */
	if (!is_barrier(&it->base))
		return 0;

	return intel_engine_flush_barriers(engine);
}

static int flush_lazy_signals(struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	enable_signaling(&ref->excl);
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		err = flush_barrier(it); /* unconnected idle barrier? */
		if (err)
			break;

		enable_signaling(&it->base);
	}

	return err;
}

int __i915_active_wait(struct i915_active *ref, int state)
{
	might_sleep();

	/* Any fence added after the wait begins will not be auto-signaled */
	if (i915_active_acquire_if_busy(ref)) {
		int err;

		err = flush_lazy_signals(ref);
		i915_active_release(ref);
		if (err)
			return err;

		if (___wait_var_event(ref, i915_active_is_idle(ref),
				      state, 0, 0, schedule()))
			return -EINTR;
	}

	/*
	 * After the wait is complete, the caller may free the active.
	 * We have to flush any concurrent retirement before returning.
	 */
	flush_work(&ref->work);
	return 0;
}

static int __await_active(struct i915_active_fence *active,
			  int (*fn)(void *arg, struct dma_fence *fence),
			  void *arg)
{
	struct dma_fence *fence;

	if (is_barrier(active)) /* XXX flush the barrier? */
		return 0;

	fence = i915_active_fence_get(active);
	if (fence) {
		int err;

		err = fn(arg, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}

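/*
 * Barriers carry no dma_fence of their own, so to await them we hook a
 * custom entry onto the wake_up_var() waitqueue used by __active_retire():
 * barrier_wake() completes the listener's i915_sw_fence once the whole
 * i915_active has idled.
 */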
struct wait_barrier {
	struct wait_queue_entry base;
	struct i915_active *ref;
};

static int
barrier_wake(wait_queue_entry_t *wq, unsigned int mode, int flags, void *key)
{
	struct wait_barrier *wb = container_of(wq, typeof(*wb), base);

	if (i915_active_is_idle(wb->ref)) {
		list_del(&wq->entry);
		i915_sw_fence_complete(wq->private);
		kfree(wq);
	}

	return 0;
}

static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence)
{
	struct wait_barrier *wb;

	wb = kmalloc(sizeof(*wb), GFP_KERNEL);
	if (unlikely(!wb))
		return -ENOMEM;

	GEM_BUG_ON(i915_active_is_idle(ref));
	if (!i915_sw_fence_await(fence)) {
		kfree(wb);
		return -EINVAL;
	}

	wb->base.flags = 0;
	wb->base.func = barrier_wake;
	wb->base.private = fence;
	wb->ref = ref;

	add_wait_queue(__var_waitqueue(ref), &wb->base);
	return 0;
}

static int await_active(struct i915_active *ref,
			unsigned int flags,
			int (*fn)(void *arg, struct dma_fence *fence),
			void *arg, struct i915_sw_fence *barrier)
{
	int err = 0;

	if (!i915_active_acquire_if_busy(ref))
		return 0;

	if (flags & I915_ACTIVE_AWAIT_EXCL &&
	    rcu_access_pointer(ref->excl.fence)) {
		err = __await_active(&ref->excl, fn, arg);
		if (err)
			goto out;
	}

	if (flags & I915_ACTIVE_AWAIT_ACTIVE) {
		struct active_node *it, *n;

		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
			err = __await_active(&it->base, fn, arg);
			if (err)
				goto out;
		}
	}

	if (flags & I915_ACTIVE_AWAIT_BARRIER) {
		err = flush_lazy_signals(ref);
		if (err)
			goto out;

		err = __await_barrier(ref, barrier);
		if (err)
			goto out;
	}

out:
	i915_active_release(ref);
	return err;
}

static int rq_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_request_await_dma_fence(arg, fence);
}

int i915_request_await_active(struct i915_request *rq,
			      struct i915_active *ref,
			      unsigned int flags)
{
	return await_active(ref, flags, rq_await_fence, rq, &rq->submit);
}

static int sw_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_sw_fence_await_dma_fence(arg, fence, 0,
					     GFP_NOWAIT | __GFP_NOWARN);
}

int i915_sw_fence_await_active(struct i915_sw_fence *fence,
			       struct i915_active *ref,
			       unsigned int flags)
{
	return await_active(ref, flags, sw_await_fence, fence, fence);
}

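/*
 * i915_active_fini: Release the resources of an idle active tracker
 * @ref: the active tracker
 *
 * @ref must already be idle: no references, no pending retirement work.
 * All that can remain is the cached MRU node kept by __active_retire()
 * for reuse, which is returned to the slab cache here.
 */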
void i915_active_fini(struct i915_active *ref)
{
	debug_active_fini(ref);
	GEM_BUG_ON(atomic_read(&ref->count));
	GEM_BUG_ON(work_pending(&ref->work));
	mutex_destroy(&ref->mutex);

	if (ref->cache)
		kmem_cache_free(slab_cache, ref->cache);
}

static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
	return node->timeline == idx && !i915_active_fence_isset(&node->base);
}

static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
{
	struct rb_node *prev, *p;

	if (RB_EMPTY_ROOT(&ref->tree))
		return NULL;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Try to reuse any existing barrier nodes already allocated for this
	 * i915_active, due to overlapping active phases there is likely a
	 * node kept alive (as we reuse before parking). We prefer to reuse
	 * completely idle barriers (less hassle in manipulating the llists),
	 * but otherwise any will do.
	 */
	if (ref->cache && is_idle_barrier(ref->cache, idx)) {
		p = &ref->cache->node;
		goto match;
	}

	prev = NULL;
	p = ref->tree.rb_node;
	while (p) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);

		if (is_idle_barrier(node, idx))
			goto match;

		prev = p;
		if (node->timeline < idx)
			p = READ_ONCE(p->rb_right);
		else
			p = READ_ONCE(p->rb_left);
	}

	/*
	 * No quick match, but we did find the leftmost rb_node for the
	 * kernel_context. Walk the rb_tree in-order to see if there were
	 * any idle-barriers on this timeline that we missed, or just use
	 * the first pending barrier.
	 */
	for (p = prev; p; p = rb_next(p)) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);
		struct intel_engine_cs *engine;

		if (node->timeline > idx)
			break;

		if (node->timeline < idx)
			continue;

		if (is_idle_barrier(node, idx))
			goto match;

		/*
		 * The list of pending barriers is protected by the
		 * kernel_context timeline, which notably we do not hold
		 * here. i915_request_add_active_barriers() may consume
		 * the barrier before we claim it, so we have to check
		 * for success.
		 */
		engine = __barrier_to_engine(node);
		smp_rmb(); /* serialise with add_active_barriers */
		if (is_barrier(&node->base) &&
		    ____active_del_barrier(ref, node, engine))
			goto match;
	}

	return NULL;

match:
	spin_lock_irq(&ref->tree_lock);
	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
	if (p == &ref->cache->node)
		WRITE_ONCE(ref->cache, NULL);
	spin_unlock_irq(&ref->tree_lock);

	return rb_entry(p, struct active_node, node);
}

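/*
 * Barriers are installed in three phases; a minimal sketch of the usual
 * sequence, as driven by the engine-pm code elsewhere in the driver:
 *
 *	err = i915_active_acquire_preallocate_barrier(ref, engine);
 *	if (err)
 *		return err;
 *	i915_active_acquire_barrier(ref);  // publish the proto-nodes
 *	...
 *	// later, on the kernel_context request used to park the engine:
 *	i915_request_add_active_barriers(rq);
 *
 * Preallocation reserves one node per physical sibling of the target
 * engine so that the later steps cannot fail due to memory pressure.
 */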
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{
	intel_engine_mask_t tmp, mask = engine->mask;
	struct llist_node *first = NULL, *last = NULL;
	struct intel_gt *gt = engine->gt;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* Wait until the previous preallocation is completed */
	while (!llist_empty(&ref->preallocated_barriers))
		cond_resched();

	/*
	 * Preallocate a node for each physical engine supporting the target
	 * engine (remember virtual engines have more than one sibling).
	 * We can then use the preallocated nodes in
	 * i915_active_acquire_barrier()
	 */
	GEM_BUG_ON(!mask);
	for_each_engine_masked(engine, gt, mask, tmp) {
		u64 idx = engine->kernel_context->timeline->fence_context;
		struct llist_node *prev = first;
		struct active_node *node;

		rcu_read_lock();
		node = reuse_idle_barrier(ref, idx);
		rcu_read_unlock();
		if (!node) {
			node = kmem_cache_alloc(slab_cache, GFP_KERNEL);
			if (!node)
				goto unwind;

			RCU_INIT_POINTER(node->base.fence, NULL);
			node->base.cb.func = node_retire;
			node->timeline = idx;
			node->ref = ref;
		}

		if (!i915_active_fence_isset(&node->base)) {
			/*
			 * Mark this as being *our* unconnected proto-node.
			 *
			 * Since this node is not in any list, and we have
			 * decoupled it from the rbtree, we can reuse the
			 * request to indicate this is an idle-barrier node
			 * and then we can use the rb_node and list pointers
			 * for our tracking of the pending barrier.
			 */
			RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
			node->base.cb.node.prev = (void *)engine;
			__i915_active_acquire(ref);
		}
		GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));

		GEM_BUG_ON(barrier_to_engine(node) != engine);
		first = barrier_to_ll(node);
		first->next = prev;
		if (!last)
			last = first;
		intel_engine_pm_get(engine);
	}

	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
	llist_add_batch(first, last, &ref->preallocated_barriers);

	return 0;

unwind:
	while (first) {
		struct active_node *node = barrier_from_ll(first);

		first = first->next;

		atomic_dec(&ref->count);
		intel_engine_pm_put(barrier_to_engine(node));

		kmem_cache_free(slab_cache, node);
	}
	return -ENOMEM;
}

void i915_active_acquire_barrier(struct i915_active *ref)
{
	struct llist_node *pos, *next;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Transfer the list of preallocated barriers into the
	 * i915_active rbtree, but only as proto-nodes. They will be
	 * populated by i915_request_add_active_barriers() to point to the
	 * request that will eventually release them.
	 */
	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
		struct active_node *node = barrier_from_ll(pos);
		struct intel_engine_cs *engine = barrier_to_engine(node);
		struct rb_node **p, *parent;

		spin_lock_irqsave_nested(&ref->tree_lock, flags,
					 SINGLE_DEPTH_NESTING);
		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
			struct active_node *it;

			parent = *p;

			it = rb_entry(parent, struct active_node, node);
			if (it->timeline < node->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);
		spin_unlock_irqrestore(&ref->tree_lock, flags);

		GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
		intel_engine_pm_put_delay(engine, 2);
	}
}

static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
{
	return __active_fence_slot(&barrier_from_ll(node)->base);
}

void i915_request_add_active_barriers(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct llist_node *node, *next;
	unsigned long flags;

	GEM_BUG_ON(!intel_context_is_barrier(rq->context));
	GEM_BUG_ON(intel_engine_is_virtual(engine));
	GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);

	node = llist_del_all(&engine->barrier_tasks);
	if (!node)
		return;
	/*
	 * Attach the list of proto-fences to the in-flight request such
	 * that the parent i915_active will be released when this request
	 * is retired.
	 */
	spin_lock_irqsave(&rq->lock, flags);
	llist_for_each_safe(node, next, node) {
		/* serialise with reuse_idle_barrier */
		smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
	}
	spin_unlock_irqrestore(&rq->lock, flags);
}

/*
 * __i915_active_fence_set: Update the last active fence along its timeline
 * @active: the active tracker
 * @fence: the new fence (under construction)
 *
 * Records the new @fence as the last active fence along its timeline in
 * this active tracker, moving the tracking callbacks from the previous
 * fence onto this one. Returns the previous fence (if not already completed),
 * which the caller must ensure is executed before the new fence. To ensure
 * that the order of fences within the timeline of the i915_active_fence is
 * understood, it should be locked by the caller.
 */
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;
	unsigned long flags;

	if (fence == rcu_access_pointer(active->fence))
		return fence;

	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));

	/*
	 * Consider that we have two threads arriving (A and B), with
	 * C already resident as the active->fence.
	 *
	 * A does the xchg first, and so it sees C or NULL depending
	 * on the timing of the interrupt handler. If it is NULL, the
	 * previous fence must have been signaled and we know that
	 * we are first on the timeline. If it is still present,
	 * we acquire the lock on that fence and serialise with the interrupt
	 * handler, in the process removing it from any future interrupt
	 * callback. A will then wait on C before executing (if present).
	 *
	 * As B is second, it sees A as the previous fence and so waits for
	 * it to complete its transition and takes over the occupancy for
	 * itself -- remembering that it needs to wait on A before executing.
	 *
	 * Note the strong ordering of the timeline also provides consistent
	 * nesting rules for the fence->lock; the inner lock is always the
	 * older lock.
	 */
	spin_lock_irqsave(fence->lock, flags);
	prev = xchg(__active_fence_slot(active), fence);
	if (prev) {
		GEM_BUG_ON(prev == fence);
		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
		__list_del_entry(&active->cb.node);
		spin_unlock(prev->lock); /* serialise with prev->cb_list */
	}
	list_add_tail(&active->cb.node, &fence->cb_list);
	spin_unlock_irqrestore(fence->lock, flags);

	return prev;
}

int i915_active_fence_set(struct i915_active_fence *active,
			  struct i915_request *rq)
{
	struct dma_fence *fence;
	int err = 0;

	/* Must maintain timeline ordering wrt previous active requests */
	rcu_read_lock();
	fence = __i915_active_fence_set(active, &rq->fence);
	if (fence) /* but the previous fence may not belong to that timeline! */
		fence = dma_fence_get_rcu(fence);
	rcu_read_unlock();
	if (fence) {
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	return err;
}

void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	active_fence_cb(fence, cb);
}

struct auto_active {
	struct i915_active base;
	struct kref ref;
};

struct i915_active *i915_active_get(struct i915_active *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), base);

	kref_get(&aa->ref);
	return &aa->base;
}

static void auto_release(struct kref *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), ref);

	i915_active_fini(&aa->base);
	kfree(aa);
}

void i915_active_put(struct i915_active *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), base);

	kref_put(&aa->ref, auto_release);
}

static int auto_active(struct i915_active *ref)
{
	i915_active_get(ref);
	return 0;
}

static void auto_retire(struct i915_active *ref)
{
	i915_active_put(ref);
}

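/*
 * i915_active_create: Allocate a self-contained, reference-counted tracker
 *
 * Wraps an i915_active in a kref so heap users need not supply their own
 * lifetime management: auto_active()/auto_retire() pin and unpin the
 * wrapper across each busy cycle, and i915_active_put() frees it once the
 * last reference is dropped.
 */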
struct i915_active *i915_active_create(void)
{
	struct auto_active *aa;

	aa = kmalloc(sizeof(*aa), GFP_KERNEL);
	if (!aa)
		return NULL;

	kref_init(&aa->ref);
	i915_active_init(&aa->base, auto_active, auto_retire, 0);

	return &aa->base;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

void i915_active_module_exit(void)
{
	kmem_cache_destroy(slab_cache);
}

int __init i915_active_module_init(void)
{
	slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!slab_cache)
		return -ENOMEM;

	return 0;
}