v5.14.15
   1/*
   2 * Copyright © 2008-2015 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 */
  24
  25#include <linux/dma-fence-array.h>
  26#include <linux/dma-fence-chain.h>
  27#include <linux/irq_work.h>
  28#include <linux/prefetch.h>
  29#include <linux/sched.h>
  30#include <linux/sched/clock.h>
  31#include <linux/sched/signal.h>
  32
  33#include "gem/i915_gem_context.h"
  34#include "gt/intel_breadcrumbs.h"
  35#include "gt/intel_context.h"
  36#include "gt/intel_engine.h"
  37#include "gt/intel_engine_heartbeat.h"
  38#include "gt/intel_gpu_commands.h"
  39#include "gt/intel_reset.h"
  40#include "gt/intel_ring.h"
  41#include "gt/intel_rps.h"
  42
  43#include "i915_active.h"
  44#include "i915_drv.h"
  45#include "i915_globals.h"
  46#include "i915_trace.h"
  47#include "intel_pm.h"
  48
  49struct execute_cb {
  50	struct irq_work work;
  51	struct i915_sw_fence *fence;
  52	void (*hook)(struct i915_request *rq, struct dma_fence *signal);
  53	struct i915_request *signal;
  54};
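/*
 * One execute_cb is allocated per __await_execution() and chained onto the
 * signaler's rq->execute_cb llist. When the signaler is submitted to HW (or
 * found to be already inflight), the callback runs via irq_work: it releases
 * the await placed on the waiter's submit fence and, if a hook was supplied,
 * invokes it with the signaling request (see irq_execute_cb[_hook] below).
 */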
  55
  56static struct i915_global_request {
  57	struct i915_global base;
  58	struct kmem_cache *slab_requests;
  59	struct kmem_cache *slab_execute_cbs;
  60} global;
  61
  62static const char *i915_fence_get_driver_name(struct dma_fence *fence)
  63{
  64	return dev_name(to_request(fence)->engine->i915->drm.dev);
  65}
  66
  67static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
  68{
  69	const struct i915_gem_context *ctx;
  70
  71	/*
  72	 * The timeline struct (as part of the ppgtt underneath a context)
  73	 * may be freed when the request is no longer in use by the GPU.
  74	 * We could extend the life of a context to beyond that of all
  75	 * fences, possibly keeping the hw resource around indefinitely,
  76	 * or we just give them a false name. Since
  77	 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
  78	 * lie seems justifiable.
  79	 */
  80	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
  81		return "signaled";
  82
  83	ctx = i915_request_gem_context(to_request(fence));
  84	if (!ctx)
  85		return "[" DRIVER_NAME "]";
  86
  87	return ctx->name;
  88}
  89
  90static bool i915_fence_signaled(struct dma_fence *fence)
  91{
  92	return i915_request_completed(to_request(fence));
  93}
  94
  95static bool i915_fence_enable_signaling(struct dma_fence *fence)
  96{
  97	return i915_request_enable_breadcrumb(to_request(fence));
  98}
  99
 100static signed long i915_fence_wait(struct dma_fence *fence,
 101				   bool interruptible,
 102				   signed long timeout)
 103{
 104	return i915_request_wait(to_request(fence),
 105				 interruptible | I915_WAIT_PRIORITY,
 106				 timeout);
 107}
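/*
 * Note: the dma-fence "interruptible" bool is OR'ed directly into the i915
 * wait flags above; this relies on I915_WAIT_INTERRUPTIBLE being bit 0
 * (see i915_request.h).
 */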
 108
 109struct kmem_cache *i915_request_slab_cache(void)
 110{
 111	return global.slab_requests;
 112}
 113
 114static void i915_fence_release(struct dma_fence *fence)
 115{
 116	struct i915_request *rq = to_request(fence);
 117
 118	/*
 119	 * The request is put onto a RCU freelist (i.e. the address
 120	 * is immediately reused), mark the fences as being freed now.
 121	 * Otherwise the debugobjects for the fences are only marked as
 122	 * freed when the slab cache itself is freed, and so we would get
 123	 * caught trying to reuse dead objects.
 124	 */
 125	i915_sw_fence_fini(&rq->submit);
 126	i915_sw_fence_fini(&rq->semaphore);
 127
 128	/*
 129	 * Keep one request on each engine for reserved use under mempressure
 130	 *
 131	 * We do not hold a reference to the engine here and so have to be
 132	 * very careful in what rq->engine we poke. The virtual engine is
 133	 * referenced via the rq->context and we released that ref during
 134	 * i915_request_retire(), ergo we must not dereference a virtual
 135	 * engine here. Not that we would want to, as the only consumer of
 136	 * the reserved engine->request_pool is the power management parking,
 137	 * which must-not-fail, and that is only run on the physical engines.
 138	 *
  139	 * Since the request must have been executed to have completed,
 140	 * we know that it will have been processed by the HW and will
 141	 * not be unsubmitted again, so rq->engine and rq->execution_mask
 142	 * at this point is stable. rq->execution_mask will be a single
  143	 * bit if the last and _only_ engine it could execute on was a
  144	 * physical engine; if it's multiple bits then it started on, and
  145	 * could still be on, a virtual engine. Thus if the mask is not a
 146	 * power-of-two we assume that rq->engine may still be a virtual
  147	 * engine and so a dangling invalid pointer that we cannot dereference.
 148	 *
 149	 * For example, consider the flow of a bonded request through a virtual
 150	 * engine. The request is created with a wide engine mask (all engines
 151	 * that we might execute on). On processing the bond, the request mask
 152	 * is reduced to one or more engines. If the request is subsequently
 153	 * bound to a single engine, it will then be constrained to only
 154	 * execute on that engine and never returned to the virtual engine
 155	 * after timeslicing away, see __unwind_incomplete_requests(). Thus we
 156	 * know that if the rq->execution_mask is a single bit, rq->engine
 157	 * can be a physical engine with the exact corresponding mask.
 158	 */
 159	if (is_power_of_2(rq->execution_mask) &&
 160	    !cmpxchg(&rq->engine->request_pool, NULL, rq))
 161		return;
 162
 163	kmem_cache_free(global.slab_requests, rq);
 164}
 165
 166const struct dma_fence_ops i915_fence_ops = {
 167	.get_driver_name = i915_fence_get_driver_name,
 168	.get_timeline_name = i915_fence_get_timeline_name,
 169	.enable_signaling = i915_fence_enable_signaling,
 170	.signaled = i915_fence_signaled,
 171	.wait = i915_fence_wait,
 172	.release = i915_fence_release,
 173};
 174
 175static void irq_execute_cb(struct irq_work *wrk)
 176{
 177	struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
 178
 179	i915_sw_fence_complete(cb->fence);
 180	kmem_cache_free(global.slab_execute_cbs, cb);
 181}
 182
 183static void irq_execute_cb_hook(struct irq_work *wrk)
 184{
 185	struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
 186
 187	cb->hook(container_of(cb->fence, struct i915_request, submit),
 188		 &cb->signal->fence);
 189	i915_request_put(cb->signal);
 190
 191	irq_execute_cb(wrk);
 192}
 193
 194static __always_inline void
 195__notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk))
 196{
 197	struct execute_cb *cb, *cn;
 198
 199	if (llist_empty(&rq->execute_cb))
 200		return;
 201
 202	llist_for_each_entry_safe(cb, cn,
 203				  llist_del_all(&rq->execute_cb),
 204				  work.node.llist)
 205		fn(&cb->work);
 206}
 207
 208static void __notify_execute_cb_irq(struct i915_request *rq)
 209{
 210	__notify_execute_cb(rq, irq_work_queue);
 211}
 212
 213static bool irq_work_imm(struct irq_work *wrk)
 214{
 215	wrk->func(wrk);
 216	return false;
 217}
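/*
 * irq_work_imm() runs the execute_cb synchronously in the caller's context
 * instead of queueing it; its bool return only mirrors irq_work_queue()'s
 * signature so that either can be passed to __notify_execute_cb().
 */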
 218
 219static void __notify_execute_cb_imm(struct i915_request *rq)
 220{
 221	__notify_execute_cb(rq, irq_work_imm);
 222}
 223
 224static void free_capture_list(struct i915_request *request)
 225{
 226	struct i915_capture_list *capture;
 227
 228	capture = fetch_and_zero(&request->capture_list);
 229	while (capture) {
 230		struct i915_capture_list *next = capture->next;
 231
 232		kfree(capture);
 233		capture = next;
 234	}
 235}
 236
 237static void __i915_request_fill(struct i915_request *rq, u8 val)
 238{
 239	void *vaddr = rq->ring->vaddr;
 240	u32 head;
 241
 242	head = rq->infix;
 243	if (rq->postfix < head) {
 244		memset(vaddr + head, val, rq->ring->size - head);
 245		head = 0;
 246	}
 247	memset(vaddr + head, val, rq->postfix - head);
 248}
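/*
 * __i915_request_fill() overwrites the request's payload in the ring, from
 * rq->infix up to rq->postfix, with @val; when the payload wraps past the
 * end of the ring buffer the fill is split into two memsets.
 */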
 249
 250/**
 251 * i915_request_active_engine
 252 * @rq: request to inspect
 253 * @active: pointer in which to return the active engine
 254 *
  255 * Returns the currently active engine via the @active pointer if the request
 256 * is active and still not completed.
 257 *
  258 * Returns true if the request was active, false otherwise.
 259 */
 260bool
 261i915_request_active_engine(struct i915_request *rq,
 262			   struct intel_engine_cs **active)
 263{
 264	struct intel_engine_cs *engine, *locked;
 265	bool ret = false;
 266
 267	/*
 268	 * Serialise with __i915_request_submit() so that it sees
  269	 * the is-banned state, or we know the request is already inflight.
 270	 *
 271	 * Note that rq->engine is unstable, and so we double
 272	 * check that we have acquired the lock on the final engine.
 273	 */
 274	locked = READ_ONCE(rq->engine);
 275	spin_lock_irq(&locked->active.lock);
 276	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
 277		spin_unlock(&locked->active.lock);
 278		locked = engine;
 279		spin_lock(&locked->active.lock);
 280	}
 281
 282	if (i915_request_is_active(rq)) {
 283		if (!__i915_request_is_complete(rq))
 284			*active = locked;
 285		ret = true;
 286	}
 287
 288	spin_unlock_irq(&locked->active.lock);
 289
 290	return ret;
 291}
 292
 293
 294static void remove_from_engine(struct i915_request *rq)
 295{
 296	struct intel_engine_cs *engine, *locked;
 297
 298	/*
 299	 * Virtual engines complicate acquiring the engine timeline lock,
 300	 * as their rq->engine pointer is not stable until under that
 301	 * engine lock. The simple ploy we use is to take the lock then
 302	 * check that the rq still belongs to the newly locked engine.
 303	 */
 304	locked = READ_ONCE(rq->engine);
 305	spin_lock_irq(&locked->active.lock);
 306	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
 307		spin_unlock(&locked->active.lock);
 308		spin_lock(&engine->active.lock);
 309		locked = engine;
 310	}
 311	list_del_init(&rq->sched.link);
 312
 313	clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 314	clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
 315
 316	/* Prevent further __await_execution() registering a cb, then flush */
 317	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
 318
 319	spin_unlock_irq(&locked->active.lock);
 320
 321	__notify_execute_cb_imm(rq);
 322}
 323
 324static void __rq_init_watchdog(struct i915_request *rq)
 325{
 326	rq->watchdog.timer.function = NULL;
 327}
 328
 329static enum hrtimer_restart __rq_watchdog_expired(struct hrtimer *hrtimer)
 330{
 331	struct i915_request *rq =
 332		container_of(hrtimer, struct i915_request, watchdog.timer);
 333	struct intel_gt *gt = rq->engine->gt;
 334
 335	if (!i915_request_completed(rq)) {
 336		if (llist_add(&rq->watchdog.link, &gt->watchdog.list))
 337			schedule_work(&gt->watchdog.work);
 338	} else {
 339		i915_request_put(rq);
 340	}
 341
 342	return HRTIMER_NORESTART;
 343}
 344
 345static void __rq_arm_watchdog(struct i915_request *rq)
 346{
 347	struct i915_request_watchdog *wdg = &rq->watchdog;
 348	struct intel_context *ce = rq->context;
 349
 350	if (!ce->watchdog.timeout_us)
 351		return;
 352
 353	i915_request_get(rq);
 354
 355	hrtimer_init(&wdg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 356	wdg->timer.function = __rq_watchdog_expired;
 357	hrtimer_start_range_ns(&wdg->timer,
 358			       ns_to_ktime(ce->watchdog.timeout_us *
 359					   NSEC_PER_USEC),
 360			       NSEC_PER_MSEC,
 361			       HRTIMER_MODE_REL);
 362}
 363
 364static void __rq_cancel_watchdog(struct i915_request *rq)
 365{
 366	struct i915_request_watchdog *wdg = &rq->watchdog;
 367
 368	if (wdg->timer.function && hrtimer_try_to_cancel(&wdg->timer) > 0)
 369		i915_request_put(rq);
 370}
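/*
 * The reference taken in __rq_arm_watchdog() is dropped either here, when
 * the timer is cancelled before it fires, or by __rq_watchdog_expired() if
 * the request has already completed; otherwise ownership of the reference
 * passes to the gt->watchdog worker together with the llist entry.
 */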
 371
 372bool i915_request_retire(struct i915_request *rq)
 373{
 374	if (!__i915_request_is_complete(rq))
 375		return false;
 376
 377	RQ_TRACE(rq, "\n");
 378
 379	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
 380	trace_i915_request_retire(rq);
 381	i915_request_mark_complete(rq);
 382
 383	__rq_cancel_watchdog(rq);
 384
 385	/*
 386	 * We know the GPU must have read the request to have
 387	 * sent us the seqno + interrupt, so use the position
 388	 * of tail of the request to update the last known position
  389	 * of the tail of the request to update the last known position
 390	 *
 391	 * Note this requires that we are always called in request
 392	 * completion order.
 393	 */
 394	GEM_BUG_ON(!list_is_first(&rq->link,
 395				  &i915_request_timeline(rq)->requests));
 396	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
 397		/* Poison before we release our space in the ring */
 398		__i915_request_fill(rq, POISON_FREE);
 399	rq->ring->head = rq->postfix;
 400
 401	if (!i915_request_signaled(rq)) {
 402		spin_lock_irq(&rq->lock);
 403		dma_fence_signal_locked(&rq->fence);
 404		spin_unlock_irq(&rq->lock);
 405	}
 406
 407	if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags))
 408		atomic_dec(&rq->engine->gt->rps.num_waiters);
 409
 410	/*
 411	 * We only loosely track inflight requests across preemption,
 412	 * and so we may find ourselves attempting to retire a _completed_
 413	 * request that we have removed from the HW and put back on a run
 414	 * queue.
 415	 *
 416	 * As we set I915_FENCE_FLAG_ACTIVE on the request, this should be
 417	 * after removing the breadcrumb and signaling it, so that we do not
 418	 * inadvertently attach the breadcrumb to a completed request.
 419	 */
 420	if (!list_empty(&rq->sched.link))
 421		remove_from_engine(rq);
 422	GEM_BUG_ON(!llist_empty(&rq->execute_cb));
 423
 424	__list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */
 425
 426	intel_context_exit(rq->context);
 427	intel_context_unpin(rq->context);
 428
 429	free_capture_list(rq);
 430	i915_sched_node_fini(&rq->sched);
 431	i915_request_put(rq);
 432
 433	return true;
 434}
 435
 436void i915_request_retire_upto(struct i915_request *rq)
 437{
 438	struct intel_timeline * const tl = i915_request_timeline(rq);
 439	struct i915_request *tmp;
 440
 441	RQ_TRACE(rq, "\n");
 442	GEM_BUG_ON(!__i915_request_is_complete(rq));
 443
 444	do {
 445		tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
 446	} while (i915_request_retire(tmp) && tmp != rq);
 447}
 448
 449static struct i915_request * const *
 450__engine_active(struct intel_engine_cs *engine)
 451{
 452	return READ_ONCE(engine->execlists.active);
 453}
 454
 455static bool __request_in_flight(const struct i915_request *signal)
 456{
 457	struct i915_request * const *port, *rq;
 458	bool inflight = false;
 459
 460	if (!i915_request_is_ready(signal))
 461		return false;
 462
 463	/*
 464	 * Even if we have unwound the request, it may still be on
 465	 * the GPU (preempt-to-busy). If that request is inside an
 466	 * unpreemptible critical section, it will not be removed. Some
 467	 * GPU functions may even be stuck waiting for the paired request
 468	 * (__await_execution) to be submitted and cannot be preempted
 469	 * until the bond is executing.
 470	 *
 471	 * As we know that there are always preemption points between
 472	 * requests, we know that only the currently executing request
 473	 * may be still active even though we have cleared the flag.
 474	 * However, we can't rely on our tracking of ELSP[0] to know
  475	 * which request is currently active and so may be stuck, as
  476	 * the tracking may be an event behind. Instead assume that
 477	 * if the context is still inflight, then it is still active
 478	 * even if the active flag has been cleared.
 479	 *
  480	 * To further complicate matters, if there is a pending promotion, the HW
 481	 * may either perform a context switch to the second inflight execlists,
 482	 * or it may switch to the pending set of execlists. In the case of the
 483	 * latter, it may send the ACK and we process the event copying the
 484	 * pending[] over top of inflight[], _overwriting_ our *active. Since
  485	 * this implies the HW is arbitrating and not stuck in *active, we do
 486	 * not worry about complete accuracy, but we do require no read/write
 487	 * tearing of the pointer [the read of the pointer must be valid, even
 488	 * as the array is being overwritten, for which we require the writes
 489	 * to avoid tearing.]
 490	 *
 491	 * Note that the read of *execlists->active may race with the promotion
  492	 * of execlists->pending[] to execlists->inflight[], overwriting
 493	 * the value at *execlists->active. This is fine. The promotion implies
 494	 * that we received an ACK from the HW, and so the context is not
 495	 * stuck -- if we do not see ourselves in *active, the inflight status
 496	 * is valid. If instead we see ourselves being copied into *active,
 497	 * we are inflight and may signal the callback.
 498	 */
 499	if (!intel_context_inflight(signal->context))
 500		return false;
 501
 502	rcu_read_lock();
 503	for (port = __engine_active(signal->engine);
 504	     (rq = READ_ONCE(*port)); /* may race with promotion of pending[] */
 505	     port++) {
 506		if (rq->context == signal->context) {
 507			inflight = i915_seqno_passed(rq->fence.seqno,
 508						     signal->fence.seqno);
 509			break;
 510		}
 511	}
 512	rcu_read_unlock();
 513
 514	return inflight;
 515}
 516
 517static int
 518__await_execution(struct i915_request *rq,
 519		  struct i915_request *signal,
 520		  void (*hook)(struct i915_request *rq,
 521			       struct dma_fence *signal),
 522		  gfp_t gfp)
 523{
 524	struct execute_cb *cb;
 525
 526	if (i915_request_is_active(signal)) {
 527		if (hook)
 528			hook(rq, &signal->fence);
 529		return 0;
 530	}
 531
 532	cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
 533	if (!cb)
 534		return -ENOMEM;
 535
 536	cb->fence = &rq->submit;
 537	i915_sw_fence_await(cb->fence);
 538	init_irq_work(&cb->work, irq_execute_cb);
 539
 540	if (hook) {
 541		cb->hook = hook;
 542		cb->signal = i915_request_get(signal);
 543		cb->work.func = irq_execute_cb_hook;
 544	}
 545
 546	/*
 547	 * Register the callback first, then see if the signaler is already
 548	 * active. This ensures that if we race with the
 549	 * __notify_execute_cb from i915_request_submit() and we are not
 550	 * included in that list, we get a second bite of the cherry and
 551	 * execute it ourselves. After this point, a future
 552	 * i915_request_submit() will notify us.
 553	 *
 554	 * In i915_request_retire() we set the ACTIVE bit on a completed
 555	 * request (then flush the execute_cb). So by registering the
 556	 * callback first, then checking the ACTIVE bit, we serialise with
 557	 * the completed/retired request.
 558	 */
 559	if (llist_add(&cb->work.node.llist, &signal->execute_cb)) {
 560		if (i915_request_is_active(signal) ||
 561		    __request_in_flight(signal))
 562			__notify_execute_cb_imm(signal);
 563	}
 564
 565	return 0;
 566}
 567
 568static bool fatal_error(int error)
 569{
 570	switch (error) {
 571	case 0: /* not an error! */
 572	case -EAGAIN: /* innocent victim of a GT reset (__i915_request_reset) */
 573	case -ETIMEDOUT: /* waiting for Godot (timer_i915_sw_fence_wake) */
 574		return false;
 575	default:
 576		return true;
 577	}
 578}
 579
 580void __i915_request_skip(struct i915_request *rq)
 581{
 582	GEM_BUG_ON(!fatal_error(rq->fence.error));
 583
 584	if (rq->infix == rq->postfix)
 585		return;
 586
 587	RQ_TRACE(rq, "error: %d\n", rq->fence.error);
 588
 589	/*
 590	 * As this request likely depends on state from the lost
 591	 * context, clear out all the user operations leaving the
 592	 * breadcrumb at the end (so we get the fence notifications).
 593	 */
 594	__i915_request_fill(rq, 0);
 595	rq->infix = rq->postfix;
 596}
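/*
 * Filling the payload with zeroes leaves a run of MI_NOOPs in the ring
 * (MI_NOOP is the all-zeroes command), so only the fini breadcrumb emitted
 * after rq->postfix will execute for a skipped request.
 */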
 597
 598bool i915_request_set_error_once(struct i915_request *rq, int error)
 599{
 600	int old;
 601
 602	GEM_BUG_ON(!IS_ERR_VALUE((long)error));
 603
 604	if (i915_request_signaled(rq))
 605		return false;
 606
 607	old = READ_ONCE(rq->fence.error);
 608	do {
 609		if (fatal_error(old))
 610			return false;
 611	} while (!try_cmpxchg(&rq->fence.error, &old, error));
 612
 613	return true;
 614}
 615
 616struct i915_request *i915_request_mark_eio(struct i915_request *rq)
 617{
 618	if (__i915_request_is_complete(rq))
 619		return NULL;
 620
 621	GEM_BUG_ON(i915_request_signaled(rq));
 622
 623	/* As soon as the request is completed, it may be retired */
 624	rq = i915_request_get(rq);
 625
 626	i915_request_set_error_once(rq, -EIO);
 627	i915_request_mark_complete(rq);
 628
 629	return rq;
 630}
 631
 632bool __i915_request_submit(struct i915_request *request)
 633{
 634	struct intel_engine_cs *engine = request->engine;
 635	bool result = false;
 636
 637	RQ_TRACE(request, "\n");
 638
 639	GEM_BUG_ON(!irqs_disabled());
 640	lockdep_assert_held(&engine->active.lock);
 641
 642	/*
 643	 * With the advent of preempt-to-busy, we frequently encounter
 644	 * requests that we have unsubmitted from HW, but left running
 645	 * until the next ack and so have completed in the meantime. On
 646	 * resubmission of that completed request, we can skip
 647	 * updating the payload, and execlists can even skip submitting
 648	 * the request.
 649	 *
 650	 * We must remove the request from the caller's priority queue,
 651	 * and the caller must only call us when the request is in their
 652	 * priority queue, under the active.lock. This ensures that the
 653	 * request has *not* yet been retired and we can safely move
 654	 * the request into the engine->active.list where it will be
  655	 * dropped upon retiring. (Otherwise if we resubmit a *retired*
 656	 * request, this would be a horrible use-after-free.)
 657	 */
 658	if (__i915_request_is_complete(request)) {
 659		list_del_init(&request->sched.link);
 660		goto active;
 661	}
 662
 663	if (unlikely(intel_context_is_banned(request->context)))
 664		i915_request_set_error_once(request, -EIO);
 665
 666	if (unlikely(fatal_error(request->fence.error)))
 667		__i915_request_skip(request);
 668
 669	/*
 670	 * Are we using semaphores when the gpu is already saturated?
 671	 *
 672	 * Using semaphores incurs a cost in having the GPU poll a
 673	 * memory location, busywaiting for it to change. The continual
 674	 * memory reads can have a noticeable impact on the rest of the
 675	 * system with the extra bus traffic, stalling the cpu as it too
 676	 * tries to access memory across the bus (perf stat -e bus-cycles).
 677	 *
 678	 * If we installed a semaphore on this request and we only submit
 679	 * the request after the signaler completed, that indicates the
 680	 * system is overloaded and using semaphores at this time only
 681	 * increases the amount of work we are doing. If so, we disable
 682	 * further use of semaphores until we are idle again, whence we
 683	 * optimistically try again.
 684	 */
 685	if (request->sched.semaphores &&
 686	    i915_sw_fence_signaled(&request->semaphore))
 687		engine->saturated |= request->sched.semaphores;
 688
 689	engine->emit_fini_breadcrumb(request,
 690				     request->ring->vaddr + request->postfix);
 691
 692	trace_i915_request_execute(request);
 693	engine->serial++;
 694	result = true;
 695
 696	GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
 697	list_move_tail(&request->sched.link, &engine->active.requests);
 698active:
 699	clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
 700	set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
 701
 702	/*
 703	 * XXX Rollback bonded-execution on __i915_request_unsubmit()?
 704	 *
 705	 * In the future, perhaps when we have an active time-slicing scheduler,
 706	 * it will be interesting to unsubmit parallel execution and remove
 707	 * busywaits from the GPU until their master is restarted. This is
 708	 * quite hairy, we have to carefully rollback the fence and do a
 709	 * preempt-to-idle cycle on the target engine, all the while the
 710	 * master execute_cb may refire.
 711	 */
 712	__notify_execute_cb_irq(request);
 713
 714	/* We may be recursing from the signal callback of another i915 fence */
 715	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
 716		i915_request_enable_breadcrumb(request);
 717
 718	return result;
 719}
 720
 721void i915_request_submit(struct i915_request *request)
 722{
 723	struct intel_engine_cs *engine = request->engine;
 724	unsigned long flags;
 725
 726	/* Will be called from irq-context when using foreign fences. */
 727	spin_lock_irqsave(&engine->active.lock, flags);
 728
 729	__i915_request_submit(request);
 730
 731	spin_unlock_irqrestore(&engine->active.lock, flags);
 732}
 733
 734void __i915_request_unsubmit(struct i915_request *request)
 735{
 736	struct intel_engine_cs *engine = request->engine;
 737
 738	/*
 739	 * Only unwind in reverse order, required so that the per-context list
 740	 * is kept in seqno/ring order.
 741	 */
 742	RQ_TRACE(request, "\n");
 743
 744	GEM_BUG_ON(!irqs_disabled());
 745	lockdep_assert_held(&engine->active.lock);
 746
 747	/*
 748	 * Before we remove this breadcrumb from the signal list, we have
 749	 * to ensure that a concurrent dma_fence_enable_signaling() does not
 750	 * attach itself. We first mark the request as no longer active and
 751	 * make sure that is visible to other cores, and then remove the
 752	 * breadcrumb if attached.
 753	 */
 754	GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
 755	clear_bit_unlock(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
 756	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
 757		i915_request_cancel_breadcrumb(request);
 758
 759	/* We've already spun, don't charge on resubmitting. */
 760	if (request->sched.semaphores && __i915_request_has_started(request))
 761		request->sched.semaphores = 0;
 762
 763	/*
 764	 * We don't need to wake_up any waiters on request->execute, they
 765	 * will get woken by any other event or us re-adding this request
 766	 * to the engine timeline (__i915_request_submit()). The waiters
  767	 * should be quite adept at finding that the request now has a new
  768	 * global_seqno compared to the one they went to sleep on.
 769	 */
 770}
 771
 772void i915_request_unsubmit(struct i915_request *request)
 773{
 774	struct intel_engine_cs *engine = request->engine;
 775	unsigned long flags;
 776
 777	/* Will be called from irq-context when using foreign fences. */
 778	spin_lock_irqsave(&engine->active.lock, flags);
 779
 780	__i915_request_unsubmit(request);
 781
 782	spin_unlock_irqrestore(&engine->active.lock, flags);
 783}
 784
 785static void __cancel_request(struct i915_request *rq)
 786{
 787	struct intel_engine_cs *engine = NULL;
 788
 789	i915_request_active_engine(rq, &engine);
 790
 791	if (engine && intel_engine_pulse(engine))
 792		intel_gt_handle_error(engine->gt, engine->mask, 0,
 793				      "request cancellation by %s",
 794				      current->comm);
 795}
 796
 797void i915_request_cancel(struct i915_request *rq, int error)
 798{
 799	if (!i915_request_set_error_once(rq, error))
 800		return;
 801
 802	set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
 803
 804	__cancel_request(rq);
 805}
 806
 807static int __i915_sw_fence_call
 808submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 809{
 810	struct i915_request *request =
 811		container_of(fence, typeof(*request), submit);
 812
 813	switch (state) {
 814	case FENCE_COMPLETE:
 815		trace_i915_request_submit(request);
 816
 817		if (unlikely(fence->error))
 818			i915_request_set_error_once(request, fence->error);
 819		else
 820			__rq_arm_watchdog(request);
 821
 822		/*
 823		 * We need to serialize use of the submit_request() callback
 824		 * with its hotplugging performed during an emergency
 825		 * i915_gem_set_wedged().  We use the RCU mechanism to mark the
 826		 * critical section in order to force i915_gem_set_wedged() to
 827		 * wait until the submit_request() is completed before
 828		 * proceeding.
 829		 */
 830		rcu_read_lock();
 831		request->engine->submit_request(request);
 832		rcu_read_unlock();
 833		break;
 834
 835	case FENCE_FREE:
 836		i915_request_put(request);
 837		break;
 838	}
 839
 840	return NOTIFY_DONE;
 841}
 842
 843static int __i915_sw_fence_call
 844semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 845{
 846	struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
 847
 848	switch (state) {
 849	case FENCE_COMPLETE:
 850		break;
 851
 852	case FENCE_FREE:
 853		i915_request_put(rq);
 854		break;
 855	}
 856
 857	return NOTIFY_DONE;
 858}
 859
 860static void retire_requests(struct intel_timeline *tl)
 861{
 862	struct i915_request *rq, *rn;
 863
 864	list_for_each_entry_safe(rq, rn, &tl->requests, link)
 865		if (!i915_request_retire(rq))
 866			break;
 867}
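/*
 * retire_requests() walks the timeline's request list in submission order,
 * retiring each completed request and stopping at the first one that is
 * still busy (i915_request_retire() requires completion order).
 */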
 868
 869static noinline struct i915_request *
 870request_alloc_slow(struct intel_timeline *tl,
 871		   struct i915_request **rsvd,
 872		   gfp_t gfp)
 873{
 874	struct i915_request *rq;
 875
 876	/* If we cannot wait, dip into our reserves */
 877	if (!gfpflags_allow_blocking(gfp)) {
 878		rq = xchg(rsvd, NULL);
 879		if (!rq) /* Use the normal failure path for one final WARN */
 880			goto out;
 881
 882		return rq;
 883	}
 884
 885	if (list_empty(&tl->requests))
 886		goto out;
 887
 888	/* Move our oldest request to the slab-cache (if not in use!) */
 889	rq = list_first_entry(&tl->requests, typeof(*rq), link);
 890	i915_request_retire(rq);
 891
 892	rq = kmem_cache_alloc(global.slab_requests,
 893			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 894	if (rq)
 895		return rq;
 896
 897	/* Ratelimit ourselves to prevent oom from malicious clients */
 898	rq = list_last_entry(&tl->requests, typeof(*rq), link);
 899	cond_synchronize_rcu(rq->rcustate);
 900
 901	/* Retire our old requests in the hope that we free some */
 902	retire_requests(tl);
 903
 904out:
 905	return kmem_cache_alloc(global.slab_requests, gfp);
 906}
 907
 908static void __i915_request_ctor(void *arg)
 909{
 910	struct i915_request *rq = arg;
 911
 912	spin_lock_init(&rq->lock);
 913	i915_sched_node_init(&rq->sched);
 914	i915_sw_fence_init(&rq->submit, submit_notify);
 915	i915_sw_fence_init(&rq->semaphore, semaphore_notify);
 916
 917	rq->capture_list = NULL;
 918
 919	init_llist_head(&rq->execute_cb);
 920}
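/*
 * The constructor above only runs when the slab allocates fresh backing
 * pages for the request cache, not on every kmem_cache_alloc(). Requests
 * are recycled under RCU (see the "Dragons" comment below), so any per-use
 * state must be reset explicitly in __i915_request_create() rather than
 * relying on zeroed memory.
 */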
 921
 922struct i915_request *
 923__i915_request_create(struct intel_context *ce, gfp_t gfp)
 924{
 925	struct intel_timeline *tl = ce->timeline;
 926	struct i915_request *rq;
 927	u32 seqno;
 928	int ret;
 929
 930	might_alloc(gfp);
 931
 932	/* Check that the caller provided an already pinned context */
 933	__intel_context_pin(ce);
 934
 935	/*
 936	 * Beware: Dragons be flying overhead.
 937	 *
 938	 * We use RCU to look up requests in flight. The lookups may
 939	 * race with the request being allocated from the slab freelist.
 940	 * That is the request we are writing to here, may be in the process
 941	 * of being read by __i915_active_request_get_rcu(). As such,
 942	 * we have to be very careful when overwriting the contents. During
  943	 * the RCU lookup, we chase the request->engine pointer,
 944	 * read the request->global_seqno and increment the reference count.
 945	 *
 946	 * The reference count is incremented atomically. If it is zero,
 947	 * the lookup knows the request is unallocated and complete. Otherwise,
 948	 * it is either still in use, or has been reallocated and reset
 949	 * with dma_fence_init(). This increment is safe for release as we
  950	 * check that the request we have a reference to matches the active
 951	 * request.
 952	 *
 953	 * Before we increment the refcount, we chase the request->engine
 954	 * pointer. We must not call kmem_cache_zalloc() or else we set
 955	 * that pointer to NULL and cause a crash during the lookup. If
 956	 * we see the request is completed (based on the value of the
 957	 * old engine and seqno), the lookup is complete and reports NULL.
 958	 * If we decide the request is not completed (new engine or seqno),
 959	 * then we grab a reference and double check that it is still the
 960	 * active request - which it won't be and restart the lookup.
 961	 *
 962	 * Do not use kmem_cache_zalloc() here!
 963	 */
 964	rq = kmem_cache_alloc(global.slab_requests,
 965			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 966	if (unlikely(!rq)) {
 967		rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
 968		if (!rq) {
 969			ret = -ENOMEM;
 970			goto err_unreserve;
 971		}
 972	}
 973
 974	rq->context = ce;
 975	rq->engine = ce->engine;
 976	rq->ring = ce->ring;
 977	rq->execution_mask = ce->engine->mask;
 978
 979	ret = intel_timeline_get_seqno(tl, rq, &seqno);
 980	if (ret)
 981		goto err_free;
 982
 983	dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
 984		       tl->fence_context, seqno);
 985
 986	RCU_INIT_POINTER(rq->timeline, tl);
 987	rq->hwsp_seqno = tl->hwsp_seqno;
 988	GEM_BUG_ON(__i915_request_is_complete(rq));
 989
 990	rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
 991
 992	/* We bump the ref for the fence chain */
 993	i915_sw_fence_reinit(&i915_request_get(rq)->submit);
 994	i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);
 995
 996	i915_sched_node_reinit(&rq->sched);
 997
 998	/* No zalloc, everything must be cleared after use */
 999	rq->batch = NULL;
1000	__rq_init_watchdog(rq);
1001	GEM_BUG_ON(rq->capture_list);
1002	GEM_BUG_ON(!llist_empty(&rq->execute_cb));
1003
1004	/*
1005	 * Reserve space in the ring buffer for all the commands required to
1006	 * eventually emit this request. This is to guarantee that the
1007	 * i915_request_add() call can't fail. Note that the reserve may need
1008	 * to be redone if the request is not actually submitted straight
1009	 * away, e.g. because a GPU scheduler has deferred it.
1010	 *
1011	 * Note that due to how we add reserved_space to intel_ring_begin()
 1012	 * we need to double our reservation to ensure that if we need to wrap
1013	 * around inside i915_request_add() there is sufficient space at
1014	 * the beginning of the ring as well.
1015	 */
1016	rq->reserved_space =
1017		2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);
1018
1019	/*
1020	 * Record the position of the start of the request so that
1021	 * should we detect the updated seqno part-way through the
1022	 * GPU processing the request, we never over-estimate the
1023	 * position of the head.
1024	 */
1025	rq->head = rq->ring->emit;
1026
1027	ret = rq->engine->request_alloc(rq);
1028	if (ret)
1029		goto err_unwind;
1030
1031	rq->infix = rq->ring->emit; /* end of header; start of user payload */
1032
1033	intel_context_mark_active(ce);
1034	list_add_tail_rcu(&rq->link, &tl->requests);
1035
1036	return rq;
1037
1038err_unwind:
1039	ce->ring->emit = rq->head;
1040
1041	/* Make sure we didn't add ourselves to external state before freeing */
1042	GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
1043	GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
1044
1045err_free:
1046	kmem_cache_free(global.slab_requests, rq);
1047err_unreserve:
1048	intel_context_unpin(ce);
1049	return ERR_PTR(ret);
1050}
1051
1052struct i915_request *
1053i915_request_create(struct intel_context *ce)
1054{
1055	struct i915_request *rq;
1056	struct intel_timeline *tl;
1057
1058	tl = intel_context_timeline_lock(ce);
1059	if (IS_ERR(tl))
1060		return ERR_CAST(tl);
1061
1062	/* Move our oldest request to the slab-cache (if not in use!) */
1063	rq = list_first_entry(&tl->requests, typeof(*rq), link);
1064	if (!list_is_last(&rq->link, &tl->requests))
1065		i915_request_retire(rq);
1066
1067	intel_context_enter(ce);
1068	rq = __i915_request_create(ce, GFP_KERNEL);
1069	intel_context_exit(ce); /* active reference transferred to request */
1070	if (IS_ERR(rq))
1071		goto err_unlock;
1072
1073	/* Check that we do not interrupt ourselves with a new request */
1074	rq->cookie = lockdep_pin_lock(&tl->mutex);
1075
1076	return rq;
1077
1078err_unlock:
1079	intel_context_timeline_unlock(tl);
1080	return rq;
1081}
1082
1083static int
1084i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
1085{
1086	struct dma_fence *fence;
1087	int err;
1088
1089	if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
1090		return 0;
1091
1092	if (i915_request_started(signal))
1093		return 0;
1094
1095	/*
1096	 * The caller holds a reference on @signal, but we do not serialise
1097	 * against it being retired and removed from the lists.
1098	 *
1099	 * We do not hold a reference to the request before @signal, and
1100	 * so must be very careful to ensure that it is not _recycled_ as
1101	 * we follow the link backwards.
1102	 */
1103	fence = NULL;
1104	rcu_read_lock();
1105	do {
1106		struct list_head *pos = READ_ONCE(signal->link.prev);
1107		struct i915_request *prev;
1108
1109		/* Confirm signal has not been retired, the link is valid */
1110		if (unlikely(__i915_request_has_started(signal)))
1111			break;
1112
1113		/* Is signal the earliest request on its timeline? */
1114		if (pos == &rcu_dereference(signal->timeline)->requests)
1115			break;
1116
1117		/*
1118		 * Peek at the request before us in the timeline. That
1119		 * request will only be valid before it is retired, so
1120		 * after acquiring a reference to it, confirm that it is
1121		 * still part of the signaler's timeline.
1122		 */
1123		prev = list_entry(pos, typeof(*prev), link);
1124		if (!i915_request_get_rcu(prev))
1125			break;
1126
1127		/* After the strong barrier, confirm prev is still attached */
1128		if (unlikely(READ_ONCE(prev->link.next) != &signal->link)) {
1129			i915_request_put(prev);
1130			break;
1131		}
1132
1133		fence = &prev->fence;
1134	} while (0);
1135	rcu_read_unlock();
1136	if (!fence)
1137		return 0;
1138
1139	err = 0;
1140	if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
1141		err = i915_sw_fence_await_dma_fence(&rq->submit,
1142						    fence, 0,
1143						    I915_FENCE_GFP);
1144	dma_fence_put(fence);
1145
1146	return err;
1147}
1148
1149static intel_engine_mask_t
1150already_busywaiting(struct i915_request *rq)
1151{
1152	/*
1153	 * Polling a semaphore causes bus traffic, delaying other users of
1154	 * both the GPU and CPU. We want to limit the impact on others,
1155	 * while taking advantage of early submission to reduce GPU
1156	 * latency. Therefore we restrict ourselves to not using more
1157	 * than one semaphore from each source, and not using a semaphore
1158	 * if we have detected the engine is saturated (i.e. would not be
1159	 * submitted early and cause bus traffic reading an already passed
1160	 * semaphore).
1161	 *
1162	 * See the are-we-too-late? check in __i915_request_submit().
1163	 */
1164	return rq->sched.semaphores | READ_ONCE(rq->engine->saturated);
1165}
1166
1167static int
1168__emit_semaphore_wait(struct i915_request *to,
1169		      struct i915_request *from,
1170		      u32 seqno)
1171{
1172	const int has_token = GRAPHICS_VER(to->engine->i915) >= 12;
1173	u32 hwsp_offset;
1174	int len, err;
1175	u32 *cs;
1176
1177	GEM_BUG_ON(GRAPHICS_VER(to->engine->i915) < 8);
1178	GEM_BUG_ON(i915_request_has_initial_breadcrumb(to));
1179
1180	/* We need to pin the signaler's HWSP until we are finished reading. */
1181	err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
1182	if (err)
1183		return err;
1184
1185	len = 4;
1186	if (has_token)
1187		len += 2;
1188
1189	cs = intel_ring_begin(to, len);
1190	if (IS_ERR(cs))
1191		return PTR_ERR(cs);
1192
1193	/*
1194	 * Using greater-than-or-equal here means we have to worry
1195	 * about seqno wraparound. To side step that issue, we swap
1196	 * the timeline HWSP upon wrapping, so that everyone listening
 1197	 * for the old (pre-wrap) values does not see much smaller
1198	 * (post-wrap) values than they were expecting (and so wait
1199	 * forever).
1200	 */
1201	*cs++ = (MI_SEMAPHORE_WAIT |
1202		 MI_SEMAPHORE_GLOBAL_GTT |
1203		 MI_SEMAPHORE_POLL |
1204		 MI_SEMAPHORE_SAD_GTE_SDD) +
1205		has_token;
1206	*cs++ = seqno;
1207	*cs++ = hwsp_offset;
1208	*cs++ = 0;
1209	if (has_token) {
1210		*cs++ = 0;
1211		*cs++ = MI_NOOP;
1212	}
1213
1214	intel_ring_advance(to, cs);
1215	return 0;
1216}
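/*
 * The commands emitted above make the command streamer busywait until the
 * 32-bit value at hwsp_offset is >= seqno: a MI_SEMAPHORE_WAIT header with
 * POLL and SAD_GTE_SDD set, followed by the semaphore data (seqno), the
 * GGTT address low/high dwords, and on Gen12+ an extra token dword plus an
 * MI_NOOP pad.
 */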
1217
1218static int
1219emit_semaphore_wait(struct i915_request *to,
1220		    struct i915_request *from,
1221		    gfp_t gfp)
1222{
1223	const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
1224	struct i915_sw_fence *wait = &to->submit;
1225
1226	if (!intel_context_use_semaphores(to->context))
1227		goto await_fence;
1228
1229	if (i915_request_has_initial_breadcrumb(to))
1230		goto await_fence;
1231
1232	/*
1233	 * If this or its dependents are waiting on an external fence
1234	 * that may fail catastrophically, then we want to avoid using
 1235	 * semaphores as they bypass the fence signaling metadata, and we
1236	 * lose the fence->error propagation.
1237	 */
1238	if (from->sched.flags & I915_SCHED_HAS_EXTERNAL_CHAIN)
1239		goto await_fence;
1240
1241	/* Just emit the first semaphore we see as request space is limited. */
1242	if (already_busywaiting(to) & mask)
1243		goto await_fence;
1244
1245	if (i915_request_await_start(to, from) < 0)
1246		goto await_fence;
1247
1248	/* Only submit our spinner after the signaler is running! */
1249	if (__await_execution(to, from, NULL, gfp))
1250		goto await_fence;
1251
1252	if (__emit_semaphore_wait(to, from, from->fence.seqno))
1253		goto await_fence;
1254
1255	to->sched.semaphores |= mask;
1256	wait = &to->semaphore;
1257
1258await_fence:
1259	return i915_sw_fence_await_dma_fence(wait,
1260					     &from->fence, 0,
1261					     I915_FENCE_GFP);
1262}
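/*
 * Every early exit above falls back to a plain dma-fence wait on the
 * signaler's completion; only when all the conditions hold do we also emit
 * the busywait, account for it in to->sched.semaphores, and attach the
 * dma-fence await to the semaphore fence instead of the submit fence.
 */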
1263
1264static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
1265					  struct dma_fence *fence)
1266{
1267	return __intel_timeline_sync_is_later(tl,
1268					      fence->context,
1269					      fence->seqno - 1);
1270}
1271
1272static int intel_timeline_sync_set_start(struct intel_timeline *tl,
1273					 const struct dma_fence *fence)
1274{
1275	return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1);
1276}
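/*
 * These helpers record awaits against fence->seqno - 1, i.e. against the
 * start of @fence rather than its completion (everything before @fence on
 * its timeline having been signaled). __i915_request_await_execution() uses
 * them to squash duplicate start-awaits on the same timeline.
 */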
1277
1278static int
1279__i915_request_await_execution(struct i915_request *to,
1280			       struct i915_request *from,
1281			       void (*hook)(struct i915_request *rq,
1282					    struct dma_fence *signal))
1283{
1284	int err;
1285
1286	GEM_BUG_ON(intel_context_is_barrier(from->context));
1287
1288	/* Submit both requests at the same time */
1289	err = __await_execution(to, from, hook, I915_FENCE_GFP);
1290	if (err)
1291		return err;
1292
 1293	/* Squash repeated dependencies to the same timelines */
1294	if (intel_timeline_sync_has_start(i915_request_timeline(to),
1295					  &from->fence))
1296		return 0;
1297
1298	/*
1299	 * Wait until the start of this request.
1300	 *
1301	 * The execution cb fires when we submit the request to HW. But in
1302	 * many cases this may be long before the request itself is ready to
1303	 * run (consider that we submit 2 requests for the same context, where
1304	 * the request of interest is behind an indefinite spinner). So we hook
1305	 * up to both to reduce our queues and keep the execution lag minimised
1306	 * in the worst case, though we hope that the await_start is elided.
1307	 */
1308	err = i915_request_await_start(to, from);
1309	if (err < 0)
1310		return err;
1311
1312	/*
1313	 * Ensure both start together [after all semaphores in signal]
1314	 *
1315	 * Now that we are queued to the HW at roughly the same time (thanks
1316	 * to the execute cb) and are ready to run at roughly the same time
1317	 * (thanks to the await start), our signaler may still be indefinitely
1318	 * delayed by waiting on a semaphore from a remote engine. If our
1319	 * signaler depends on a semaphore, so indirectly do we, and we do not
1320	 * want to start our payload until our signaler also starts theirs.
1321	 * So we wait.
1322	 *
1323	 * However, there is also a second condition for which we need to wait
1324	 * for the precise start of the signaler. Consider that the signaler
1325	 * was submitted in a chain of requests following another context
1326	 * (with just an ordinary intra-engine fence dependency between the
1327	 * two). In this case the signaler is queued to HW, but not for
1328	 * immediate execution, and so we must wait until it reaches the
1329	 * active slot.
1330	 */
1331	if (intel_engine_has_semaphores(to->engine) &&
1332	    !i915_request_has_initial_breadcrumb(to)) {
1333		err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
1334		if (err < 0)
1335			return err;
1336	}
1337
1338	/* Couple the dependency tree for PI on this exposed to->fence */
1339	if (to->engine->schedule) {
1340		err = i915_sched_node_add_dependency(&to->sched,
1341						     &from->sched,
1342						     I915_DEPENDENCY_WEAK);
1343		if (err < 0)
1344			return err;
1345	}
1346
1347	return intel_timeline_sync_set_start(i915_request_timeline(to),
1348					     &from->fence);
1349}
1350
1351static void mark_external(struct i915_request *rq)
1352{
1353	/*
1354	 * The downside of using semaphores is that we lose metadata passing
1355	 * along the signaling chain. This is particularly nasty when we
1356	 * need to pass along a fatal error such as EFAULT or EDEADLK. For
1357	 * fatal errors we want to scrub the request before it is executed,
1358	 * which means that we cannot preload the request onto HW and have
1359	 * it wait upon a semaphore.
1360	 */
1361	rq->sched.flags |= I915_SCHED_HAS_EXTERNAL_CHAIN;
1362}
1363
1364static int
1365__i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
1366{
1367	mark_external(rq);
1368	return i915_sw_fence_await_dma_fence(&rq->submit, fence,
1369					     i915_fence_context_timeout(rq->engine->i915,
1370									fence->context),
1371					     I915_FENCE_GFP);
1372}
1373
1374static int
1375i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
1376{
1377	struct dma_fence *iter;
1378	int err = 0;
1379
1380	if (!to_dma_fence_chain(fence))
1381		return __i915_request_await_external(rq, fence);
1382
1383	dma_fence_chain_for_each(iter, fence) {
1384		struct dma_fence_chain *chain = to_dma_fence_chain(iter);
1385
1386		if (!dma_fence_is_i915(chain->fence)) {
1387			err = __i915_request_await_external(rq, iter);
1388			break;
1389		}
1390
1391		err = i915_request_await_dma_fence(rq, chain->fence);
1392		if (err < 0)
1393			break;
1394	}
1395
1396	dma_fence_put(iter);
1397	return err;
1398}
1399
1400int
1401i915_request_await_execution(struct i915_request *rq,
1402			     struct dma_fence *fence,
1403			     void (*hook)(struct i915_request *rq,
1404					  struct dma_fence *signal))
1405{
1406	struct dma_fence **child = &fence;
1407	unsigned int nchild = 1;
1408	int ret;
1409
1410	if (dma_fence_is_array(fence)) {
1411		struct dma_fence_array *array = to_dma_fence_array(fence);
1412
1413		/* XXX Error for signal-on-any fence arrays */
1414
1415		child = array->fences;
1416		nchild = array->num_fences;
1417		GEM_BUG_ON(!nchild);
1418	}
1419
1420	do {
1421		fence = *child++;
1422		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1423			continue;
1424
1425		if (fence->context == rq->fence.context)
1426			continue;
1427
1428		/*
1429		 * We don't squash repeated fence dependencies here as we
1430		 * want to run our callback in all cases.
1431		 */
1432
1433		if (dma_fence_is_i915(fence))
1434			ret = __i915_request_await_execution(rq,
1435							     to_request(fence),
1436							     hook);
1437		else
1438			ret = i915_request_await_external(rq, fence);
1439		if (ret < 0)
1440			return ret;
1441	} while (--nchild);
1442
1443	return 0;
1444}
1445
1446static int
1447await_request_submit(struct i915_request *to, struct i915_request *from)
1448{
1449	/*
1450	 * If we are waiting on a virtual engine, then it may be
1451	 * constrained to execute on a single engine *prior* to submission.
1452	 * When it is submitted, it will be first submitted to the virtual
1453	 * engine and then passed to the physical engine. We cannot allow
1454	 * the waiter to be submitted immediately to the physical engine
1455	 * as it may then bypass the virtual request.
1456	 */
1457	if (to->engine == READ_ONCE(from->engine))
1458		return i915_sw_fence_await_sw_fence_gfp(&to->submit,
1459							&from->submit,
1460							I915_FENCE_GFP);
1461	else
1462		return __i915_request_await_execution(to, from, NULL);
1463}
1464
1465static int
1466i915_request_await_request(struct i915_request *to, struct i915_request *from)
1467{
1468	int ret;
1469
1470	GEM_BUG_ON(to == from);
1471	GEM_BUG_ON(to->timeline == from->timeline);
1472
1473	if (i915_request_completed(from)) {
1474		i915_sw_fence_set_error_once(&to->submit, from->fence.error);
1475		return 0;
1476	}
1477
1478	if (to->engine->schedule) {
1479		ret = i915_sched_node_add_dependency(&to->sched,
1480						     &from->sched,
1481						     I915_DEPENDENCY_EXTERNAL);
1482		if (ret < 0)
1483			return ret;
1484	}
1485
1486	if (is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
1487		ret = await_request_submit(to, from);
1488	else
1489		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
1490	if (ret < 0)
1491		return ret;
1492
1493	return 0;
1494}
1495
1496int
1497i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
1498{
1499	struct dma_fence **child = &fence;
1500	unsigned int nchild = 1;
1501	int ret;
1502
1503	/*
1504	 * Note that if the fence-array was created in signal-on-any mode,
1505	 * we should *not* decompose it into its individual fences. However,
1506	 * we don't currently store which mode the fence-array is operating
1507	 * in. Fortunately, the only user of signal-on-any is private to
1508	 * amdgpu and we should not see any incoming fence-array from
1509	 * sync-file being in signal-on-any mode.
1510	 */
1511	if (dma_fence_is_array(fence)) {
1512		struct dma_fence_array *array = to_dma_fence_array(fence);
1513
1514		child = array->fences;
1515		nchild = array->num_fences;
1516		GEM_BUG_ON(!nchild);
1517	}
1518
1519	do {
1520		fence = *child++;
1521		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1522			continue;
1523
1524		/*
1525		 * Requests on the same timeline are explicitly ordered, along
1526		 * with their dependencies, by i915_request_add() which ensures
1527		 * that requests are submitted in-order through each ring.
1528		 */
1529		if (fence->context == rq->fence.context)
1530			continue;
1531
1532		/* Squash repeated waits to the same timelines */
1533		if (fence->context &&
1534		    intel_timeline_sync_is_later(i915_request_timeline(rq),
1535						 fence))
1536			continue;
1537
1538		if (dma_fence_is_i915(fence))
1539			ret = i915_request_await_request(rq, to_request(fence));
1540		else
1541			ret = i915_request_await_external(rq, fence);
1542		if (ret < 0)
1543			return ret;
1544
1545		/* Record the latest fence used against each timeline */
1546		if (fence->context)
1547			intel_timeline_sync_set(i915_request_timeline(rq),
1548						fence);
1549	} while (--nchild);
1550
1551	return 0;
1552}
1553
1554/**
1555 * i915_request_await_object - set this request to (async) wait upon a bo
1556 * @to: request we are wishing to use
1557 * @obj: object which may be in use on another ring.
1558 * @write: whether the wait is on behalf of a writer
1559 *
1560 * This code is meant to abstract object synchronization with the GPU.
1561 * Conceptually we serialise writes between engines inside the GPU.
1562 * We only allow one engine to write into a buffer at any time, but
1563 * multiple readers. To ensure each has a coherent view of memory, we must:
1564 *
1565 * - If there is an outstanding write request to the object, the new
1566 *   request must wait for it to complete (either CPU or in hw, requests
1567 *   on the same ring will be naturally ordered).
1568 *
1569 * - If we are a write request (pending_write_domain is set), the new
1570 *   request must wait for outstanding read requests to complete.
1571 *
1572 * Returns 0 if successful, else propagates up the lower layer error.
1573 */
1574int
1575i915_request_await_object(struct i915_request *to,
1576			  struct drm_i915_gem_object *obj,
1577			  bool write)
1578{
1579	struct dma_fence *excl;
1580	int ret = 0;
1581
1582	if (write) {
1583		struct dma_fence **shared;
1584		unsigned int count, i;
1585
1586		ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
1587					  &shared);
1588		if (ret)
1589			return ret;
1590
1591		for (i = 0; i < count; i++) {
1592			ret = i915_request_await_dma_fence(to, shared[i]);
1593			if (ret)
1594				break;
1595
1596			dma_fence_put(shared[i]);
1597		}
1598
1599		for (; i < count; i++)
1600			dma_fence_put(shared[i]);
1601		kfree(shared);
1602	} else {
1603		excl = dma_resv_get_excl_unlocked(obj->base.resv);
1604	}
1605
1606	if (excl) {
1607		if (ret == 0)
1608			ret = i915_request_await_dma_fence(to, excl);
1609
1610		dma_fence_put(excl);
1611	}
1612
1613	return ret;
1614}
1615
1616static struct i915_request *
1617__i915_request_add_to_timeline(struct i915_request *rq)
1618{
1619	struct intel_timeline *timeline = i915_request_timeline(rq);
1620	struct i915_request *prev;
1621
1622	/*
1623	 * Dependency tracking and request ordering along the timeline
1624	 * is special cased so that we can eliminate redundant ordering
1625	 * operations while building the request (we know that the timeline
1626	 * itself is ordered, and here we guarantee it).
1627	 *
1628	 * As we know we will need to emit tracking along the timeline,
1629	 * we embed the hooks into our request struct -- at the cost of
1630	 * having to have specialised no-allocation interfaces (which will
1631	 * be beneficial elsewhere).
1632	 *
1633	 * A second benefit to open-coding i915_request_await_request is
1634	 * that we can apply a slight variant of the rules specialised
1635	 * for timelines that jump between engines (such as virtual engines).
1636	 * If we consider the case of virtual engine, we must emit a dma-fence
1637	 * to prevent scheduling of the second request until the first is
1638	 * complete (to maximise our greedy late load balancing) and this
1639	 * precludes optimising to use semaphores serialisation of a single
1640	 * timeline across engines.
1641	 */
1642	prev = to_request(__i915_active_fence_set(&timeline->last_request,
1643						  &rq->fence));
1644	if (prev && !__i915_request_is_complete(prev)) {
1645		/*
1646		 * The requests are supposed to be kept in order. However,
1647		 * we need to be wary in case the timeline->last_request
1648		 * is used as a barrier for external modification to this
1649		 * context.
1650		 */
1651		GEM_BUG_ON(prev->context == rq->context &&
1652			   i915_seqno_passed(prev->fence.seqno,
1653					     rq->fence.seqno));
1654
1655		if (is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask))
1656			i915_sw_fence_await_sw_fence(&rq->submit,
1657						     &prev->submit,
1658						     &rq->submitq);
1659		else
1660			__i915_sw_fence_await_dma_fence(&rq->submit,
1661							&prev->fence,
1662							&rq->dmaq);
1663		if (rq->engine->schedule)
1664			__i915_sched_node_add_dependency(&rq->sched,
1665							 &prev->sched,
1666							 &rq->dep,
1667							 0);
1668	}
1669
1670	/*
1671	 * Make sure that no request gazumped us - if it was allocated after
1672	 * our i915_request_alloc() and called __i915_request_add() before
1673	 * us, the timeline will hold its seqno which is later than ours.
1674	 */
1675	GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
1676
1677	return prev;
1678}
1679
1680/*
 1681 * NB: This function is not allowed to fail. Doing so would mean that the
1682 * request is not being tracked for completion but the work itself is
1683 * going to happen on the hardware. This would be a Bad Thing(tm).
1684 */
1685struct i915_request *__i915_request_commit(struct i915_request *rq)
1686{
1687	struct intel_engine_cs *engine = rq->engine;
1688	struct intel_ring *ring = rq->ring;
1689	u32 *cs;
1690
1691	RQ_TRACE(rq, "\n");
1692
1693	/*
1694	 * To ensure that this call will not fail, space for its emissions
1695	 * should already have been reserved in the ring buffer. Let the ring
1696	 * know that it is time to use that space up.
1697	 */
1698	GEM_BUG_ON(rq->reserved_space > ring->space);
1699	rq->reserved_space = 0;
1700	rq->emitted_jiffies = jiffies;
1701
1702	/*
1703	 * Record the position of the start of the breadcrumb so that
1704	 * should we detect the updated seqno part-way through the
1705	 * GPU processing the request, we never over-estimate the
1706	 * position of the ring's HEAD.
1707	 */
1708	cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
1709	GEM_BUG_ON(IS_ERR(cs));
1710	rq->postfix = intel_ring_offset(rq, cs);
1711
1712	return __i915_request_add_to_timeline(rq);
1713}
1714
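/*
 * The _bh suffix denotes that the caller has already disabled softirqs;
 * committing the semaphore and submit fences here may schedule the engine's
 * submission tasklet, which then runs as soon as softirqs are re-enabled.
 */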
1715void __i915_request_queue_bh(struct i915_request *rq)
1716{
1717	i915_sw_fence_commit(&rq->semaphore);
1718	i915_sw_fence_commit(&rq->submit);
1719}
1720
1721void __i915_request_queue(struct i915_request *rq,
1722			  const struct i915_sched_attr *attr)
1723{
1724	/*
1725	 * Let the backend know a new request has arrived that may need
1726	 * to adjust the existing execution schedule due to a high priority
1727	 * request - i.e. we may want to preempt the current request in order
1728	 * to run a high priority dependency chain *before* we can execute this
1729	 * request.
1730	 *
1731	 * This is called before the request is ready to run so that we can
1732	 * decide whether to preempt the entire chain so that it is ready to
1733	 * run at the earliest possible convenience.
1734	 */
1735	if (attr && rq->engine->schedule)
1736		rq->engine->schedule(rq, attr);
1737
1738	local_bh_disable();
1739	__i915_request_queue_bh(rq);
1740	local_bh_enable(); /* kick tasklets */
1741}
1742
1743void i915_request_add(struct i915_request *rq)
1744{
1745	struct intel_timeline * const tl = i915_request_timeline(rq);
1746	struct i915_sched_attr attr = {};
1747	struct i915_gem_context *ctx;
1748
1749	lockdep_assert_held(&tl->mutex);
1750	lockdep_unpin_lock(&tl->mutex, rq->cookie);
1751
1752	trace_i915_request_add(rq);
1753	__i915_request_commit(rq);
1754
1755	/* XXX placeholder for selftests */
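	/*
	 * The GEM context may be closed (and freed) at any time, so take a
	 * snapshot of its scheduling attributes under RCU protection.
	 */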
1756	rcu_read_lock();
1757	ctx = rcu_dereference(rq->context->gem_context);
1758	if (ctx)
1759		attr = ctx->sched;
1760	rcu_read_unlock();
1761
1762	__i915_request_queue(rq, &attr);
1763
1764	mutex_unlock(&tl->mutex);
1765}
1766
1767static unsigned long local_clock_ns(unsigned int *cpu)
1768{
1769	unsigned long t;
1770
1771	/*
1772	 * The value returned here, and the busywait budget added to it by
1773	 * the caller, are both expressed in nanoseconds; local_clock() is
1774	 * only an approximate clock, which is the principal source of
1775	 * timing error here.
1776	 *
1777	 * Note that local_clock() is only defined wrt the current CPU;
1778	 * the comparisons are no longer valid if we switch CPUs. Instead of
1779	 * blocking preemption for the entire busywait, we can detect the CPU
1780	 * switch and use that as indicator of system load and a reason to
1781	 * stop busywaiting, see busywait_stop().
1782	 */
1783	*cpu = get_cpu();
1784	t = local_clock();
1785	put_cpu();
1786
1787	return t;
1788}
1789
1790static bool busywait_stop(unsigned long timeout, unsigned int cpu)
1791{
1792	unsigned int this_cpu;
1793
1794	if (time_after(local_clock_ns(&this_cpu), timeout))
1795		return true;
1796
1797	return this_cpu != cpu;
1798}
1799
1800static bool __i915_spin_request(struct i915_request * const rq, int state)
1801{
1802	unsigned long timeout_ns;
1803	unsigned int cpu;
1804
1805	/*
1806	 * Only wait for the request if we know it is likely to complete.
1807	 *
1808	 * We don't track the timestamps around requests, nor the average
1809	 * request length, so we do not have a good indicator that this
1810	 * request will complete within the timeout. What we do know is the
1811	 * order in which requests are executed by the context and so we can
1812	 * tell if the request has been started. If the request is not even
1813	 * running yet, it is a fair assumption that it will not complete
1814	 * within our relatively short timeout.
1815	 */
1816	if (!i915_request_is_running(rq))
1817		return false;
1818
1819	/*
1820	 * When waiting for high frequency requests, e.g. during synchronous
1821	 * rendering split between the CPU and GPU, the finite amount of time
1822	 * required to set up the irq and wait upon it limits the response
1823	 * rate. By busywaiting on the request completion for a short while we
1824	 * can service the high frequency waits as quick as possible. However,
1825	 * can service the high frequency waits as quickly as possible. However,
1826	 * The tradeoff between waiting and sleeping is roughly the time it
1827	 * takes to sleep on a request, on the order of a microsecond.
1828	 */
1829
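	/*
	 * The busywait budget is a per-engine, Kconfig-tunable property
	 * (CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT), by default on the order
	 * of a few microseconds, comparable to the cost of sleeping and
	 * being woken by the interrupt.
	 */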
1830	timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns);
1831	timeout_ns += local_clock_ns(&cpu);
1832	do {
1833		if (dma_fence_is_signaled(&rq->fence))
1834			return true;
1835
1836		if (signal_pending_state(state, current))
1837			break;
1838
1839		if (busywait_stop(timeout_ns, cpu))
1840			break;
1841
1842		cpu_relax();
1843	} while (!need_resched());
1844
1845	return false;
1846}
1847
1848struct request_wait {
1849	struct dma_fence_cb cb;
1850	struct task_struct *tsk;
1851};
1852
1853static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
1854{
1855	struct request_wait *wait = container_of(cb, typeof(*wait), cb);
1856
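	/*
	 * Clearing wait->tsk as we wake the waiter doubles as a flag: the
	 * waiter only calls dma_fence_remove_callback() if it still sees
	 * wait.tsk set after its wait loop, i.e. if this callback never ran.
	 */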
1857	wake_up_process(fetch_and_zero(&wait->tsk));
1858}
1859
1860/**
1861 * i915_request_wait - wait until execution of request has finished
1862 * @rq: the request to wait upon
1863 * @flags: how to wait
1864 * @timeout: how long to wait in jiffies
1865 *
1866 * i915_request_wait() waits for the request to be completed, for a
1867 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
1868 * unbounded wait).
1869 *
1870 * Returns the remaining time (in jiffies) if the request completed, which may
1871 * be zero or -ETIME if the request is unfinished after the timeout expires.
1872 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
1873 * pending before the request completes.
1874 */
1875long i915_request_wait(struct i915_request *rq,
1876		       unsigned int flags,
1877		       long timeout)
1878{
1879	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
1880		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1881	struct request_wait wait;
1882
1883	might_sleep();
1884	GEM_BUG_ON(timeout < 0);
1885
1886	if (dma_fence_is_signaled(&rq->fence))
1887		return timeout;
1888
1889	if (!timeout)
1890		return -ETIME;
1891
1892	trace_i915_request_wait_begin(rq, flags);
1893
1894	/*
1895	 * We must never wait on the GPU while holding a lock as we
1896	 * may need to perform a GPU reset. So while we don't need to
1897	 * serialise wait/reset with an explicit lock, we do want
1898	 * lockdep to detect potential dependency cycles.
1899	 */
1900	mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);
1901
1902	/*
1903	 * Optimistic spin before touching IRQs.
1904	 *
1905	 * We may use a rather large value here to offset the penalty of
1906	 * switching away from the active task. Frequently, the client will
1907	 * wait upon an old swapbuffer to throttle itself to remain within a
1908	 * frame of the gpu. If the client is running in lockstep with the gpu,
1909	 * then it should not be waiting long at all, and a sleep now will incur
1910	 * extra scheduler latency in producing the next frame. To try to
1911	 * avoid adding the cost of enabling/disabling the interrupt to the
1912	 * short wait, we first spin to see if the request would have completed
1913	 * in the time taken to set up the interrupt.
1914	 *
1915	 * We need up to 5us to enable the irq, and up to 20us to hide the
1916	 * scheduler latency of a context switch, ignoring the secondary
1917	 * impacts from a context switch such as cache eviction.
1918	 *
1919	 * The scheme used for low-latency IO is called "hybrid interrupt
1920	 * polling". The suggestion there is to sleep until just before you
1921	 * expect to be woken by the device interrupt and then poll for its
1922	 * completion. That requires having a good predictor for the request
1923	 * duration, which we currently lack.
1924	 */
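	/*
	 * IS_ACTIVE() is true only for a non-zero Kconfig value, so building
	 * with CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT=0 disables the
	 * optimistic spin altogether.
	 */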
1925	if (IS_ACTIVE(CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT) &&
1926	    __i915_spin_request(rq, state))
1927		goto out;
1928
1929	/*
1930	 * This client is about to stall waiting for the GPU. In many cases
1931	 * this is undesirable and limits the throughput of the system, as
1932	 * many clients cannot continue processing user input/output whilst
1933	 * blocked. RPS autotuning may take tens of milliseconds to respond
1934	 * to the GPU load and thus incurs additional latency for the client.
1935	 * We can circumvent that by promoting the GPU frequency to maximum
1936	 * before we sleep. This makes the GPU throttle up much more quickly
1937	 * (good for benchmarks and user experience, e.g. window animations),
1938	 * but at a cost of spending more power processing the workload
1939	 * (bad for battery).
1940	 */
1941	if (flags & I915_WAIT_PRIORITY && !i915_request_started(rq))
1942		intel_rps_boost(rq);
1943
1944	wait.tsk = current;
1945	if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
1946		goto out;
1947
1948	/*
1949	 * Flush the submission tasklet, but only if it may help this request.
1950	 *
1951	 * We sometimes experience some latency between the HW interrupts and
1952	 * tasklet execution (mostly due to ksoftirqd latency, but it can also
1953	 * be due to lazy CS events), so let's run the tasklet manually if there
1954	 * is a chance it may submit this request. If the request is not ready
1955	 * to run, as it is waiting for other fences to be signaled, flushing
1956	 * the tasklet is busy work without any advantage for this client.
1957	 *
1958	 * If the HW is being lazy, this is the last chance before we go to
1959	 * sleep to catch any pending events. We will check periodically in
1960	 * the heartbeat to flush the submission tasklets as a last resort
1961	 * for unhappy HW.
1962	 */
1963	if (i915_request_is_ready(rq))
1964		__intel_engine_flush_submission(rq->engine, false);
1965
1966	for (;;) {
1967		set_current_state(state);
1968
1969		if (dma_fence_is_signaled(&rq->fence))
1970			break;
1971
1972		if (signal_pending_state(state, current)) {
1973			timeout = -ERESTARTSYS;
1974			break;
1975		}
1976
1977		if (!timeout) {
1978			timeout = -ETIME;
1979			break;
1980		}
1981
1982		timeout = io_schedule_timeout(timeout);
1983	}
1984	__set_current_state(TASK_RUNNING);
1985
1986	if (READ_ONCE(wait.tsk))
1987		dma_fence_remove_callback(&rq->fence, &wait.cb);
1988	GEM_BUG_ON(!list_empty(&wait.cb.node));
1989
1990out:
1991	mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_);
1992	trace_i915_request_wait_end(rq);
1993	return timeout;
1994}
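/*
 * Illustrative example (not taken from an in-tree caller): a typical user
 * bounds the wait and simply propagates the error, assuming it already
 * holds its own reference to the request:
 *
 *	long ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				     msecs_to_jiffies(100));
 *	if (ret < 0)
 *		return ret;
 */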
1995
1996static int print_sched_attr(const struct i915_sched_attr *attr,
1997			    char *buf, int x, int len)
1998{
1999	if (attr->priority == I915_PRIORITY_INVALID)
2000		return x;
2001
2002	x += snprintf(buf + x, len - x,
2003		      " prio=%d", attr->priority);
2004
2005	return x;
2006}
2007
2008static char queue_status(const struct i915_request *rq)
2009{
2010	if (i915_request_is_active(rq))
2011		return 'E';
2012
2013	if (i915_request_is_ready(rq))
2014		return intel_engine_is_virtual(rq->engine) ? 'V' : 'R';
2015
2016	return 'U';
2017}
2018
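/*
 * Secondary marker appended after the queue status: "!" the request has
 * already completed, "*" it has started executing on the HW, "&" it is
 * still waiting on its internal semaphore fence.
 */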
2019static const char *run_status(const struct i915_request *rq)
2020{
2021	if (__i915_request_is_complete(rq))
2022		return "!";
2023
2024	if (__i915_request_has_started(rq))
2025		return "*";
2026
2027	if (!i915_sw_fence_signaled(&rq->semaphore))
2028		return "&";
2029
2030	return "";
2031}
2032
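/*
 * Final marker: "+" the dma-fence has been signaled, "-" signaling has
 * been enabled (a waiter is listening) but the fence has not yet signaled.
 */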
2033static const char *fence_status(const struct i915_request *rq)
2034{
2035	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
2036		return "+";
2037
2038	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
2039		return "-";
2040
2041	return "";
2042}
2043
2044void i915_request_show(struct drm_printer *m,
2045		       const struct i915_request *rq,
2046		       const char *prefix,
2047		       int indent)
2048{
2049	const char *name = rq->fence.ops->get_timeline_name((struct dma_fence *)&rq->fence);
2050	char buf[80] = "";
2051	int x = 0;
2052
2053	/*
2054	 * The prefix is used to show the queue status, for which we use
2055	 * the following flags:
2056	 *
2057	 *  U [Unready]
2058	 *    - initial status upon being submitted by the user
2059	 *
2060	 *    - the request is not ready for execution as it is waiting
2061	 *      for external fences
2062	 *
2063	 *  R [Ready]
2064	 *    - all fences the request was waiting on have been signaled,
2065	 *      and the request is now ready for execution and will be
2066	 *      in a backend queue
2067	 *
2068	 *    - a ready request may still need to wait on semaphores
2069	 *      [internal fences]
2070	 *
2071	 *  V [Ready/virtual]
2072	 *    - same as ready, but queued over multiple backends
2073	 *
2074	 *  E [Executing]
2075	 *    - the request has been transferred from the backend queue and
2076	 *      submitted for execution on HW
2077	 *
2078	 *    - a completed request may still be regarded as executing; its
2079	 *      status may not be updated until it is retired and removed
2080	 *      from the lists
2081	 */
2082
2083	x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
2084
2085	drm_printf(m, "%s%.*s%c %llx:%lld%s%s %s @ %dms: %s\n",
2086		   prefix, indent, "                ",
2087		   queue_status(rq),
2088		   rq->fence.context, rq->fence.seqno,
2089		   run_status(rq),
2090		   fence_status(rq),
2091		   buf,
2092		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
2093		   name);
2094}
2095
2096#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2097#include "selftests/mock_request.c"
2098#include "selftests/i915_request.c"
2099#endif
2100
2101static void i915_global_request_shrink(void)
2102{
2103	kmem_cache_shrink(global.slab_execute_cbs);
2104	kmem_cache_shrink(global.slab_requests);
2105}
2106
2107static void i915_global_request_exit(void)
2108{
2109	kmem_cache_destroy(global.slab_execute_cbs);
2110	kmem_cache_destroy(global.slab_requests);
2111}
2112
2113static struct i915_global_request global = { {
2114	.shrink = i915_global_request_shrink,
2115	.exit = i915_global_request_exit,
2116} };
2117
2118int __init i915_global_request_init(void)
2119{
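	/*
	 * Request lookups are performed under RCU and may race with the
	 * request being freed and immediately reused; SLAB_TYPESAFE_BY_RCU
	 * keeps the memory valid as a request (though possibly a recycled
	 * one) until a grace period has elapsed, allowing the lookup to
	 * detect the reuse and retry.
	 */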
2120	global.slab_requests =
2121		kmem_cache_create("i915_request",
2122				  sizeof(struct i915_request),
2123				  __alignof__(struct i915_request),
2124				  SLAB_HWCACHE_ALIGN |
2125				  SLAB_RECLAIM_ACCOUNT |
2126				  SLAB_TYPESAFE_BY_RCU,
2127				  __i915_request_ctor);
2128	if (!global.slab_requests)
2129		return -ENOMEM;
2130
2131	global.slab_execute_cbs = KMEM_CACHE(execute_cb,
2132					     SLAB_HWCACHE_ALIGN |
2133					     SLAB_RECLAIM_ACCOUNT |
2134					     SLAB_TYPESAFE_BY_RCU);
2135	if (!global.slab_execute_cbs)
2136		goto err_requests;
2137
2138	i915_global_register(&global.base);
2139	return 0;
2140
2141err_requests:
2142	kmem_cache_destroy(global.slab_requests);
2143	return -ENOMEM;
2144}
v4.17
   1/*
   2 * Copyright © 2008-2015 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 */
  24
 
 
 
  25#include <linux/prefetch.h>
  26#include <linux/dma-fence-array.h>
  27#include <linux/sched.h>
  28#include <linux/sched/clock.h>
  29#include <linux/sched/signal.h>
  30
 
 
 
 
 
 
 
 
 
 
 
  31#include "i915_drv.h"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  32
  33static const char *i915_fence_get_driver_name(struct dma_fence *fence)
  34{
  35	return "i915";
  36}
  37
  38static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
  39{
 
 
  40	/*
  41	 * The timeline struct (as part of the ppgtt underneath a context)
  42	 * may be freed when the request is no longer in use by the GPU.
  43	 * We could extend the life of a context to beyond that of all
  44	 * fences, possibly keeping the hw resource around indefinitely,
  45	 * or we just give them a false name. Since
  46	 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
  47	 * lie seems justifiable.
  48	 */
  49	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
  50		return "signaled";
  51
  52	return to_request(fence)->timeline->common->name;
 
 
 
 
  53}
  54
  55static bool i915_fence_signaled(struct dma_fence *fence)
  56{
  57	return i915_request_completed(to_request(fence));
  58}
  59
  60static bool i915_fence_enable_signaling(struct dma_fence *fence)
  61{
  62	if (i915_fence_signaled(fence))
  63		return false;
  64
  65	intel_engine_enable_signaling(to_request(fence), true);
  66	return !i915_fence_signaled(fence);
  67}
  68
  69static signed long i915_fence_wait(struct dma_fence *fence,
  70				   bool interruptible,
  71				   signed long timeout)
  72{
  73	return i915_request_wait(to_request(fence), interruptible, timeout);
 
 
 
 
 
 
 
  74}
  75
  76static void i915_fence_release(struct dma_fence *fence)
  77{
  78	struct i915_request *rq = to_request(fence);
  79
  80	/*
  81	 * The request is put onto a RCU freelist (i.e. the address
  82	 * is immediately reused), mark the fences as being freed now.
  83	 * Otherwise the debugobjects for the fences are only marked as
  84	 * freed when the slab cache itself is freed, and so we would get
  85	 * caught trying to reuse dead objects.
  86	 */
  87	i915_sw_fence_fini(&rq->submit);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  88
  89	kmem_cache_free(rq->i915->requests, rq);
  90}
  91
  92const struct dma_fence_ops i915_fence_ops = {
  93	.get_driver_name = i915_fence_get_driver_name,
  94	.get_timeline_name = i915_fence_get_timeline_name,
  95	.enable_signaling = i915_fence_enable_signaling,
  96	.signaled = i915_fence_signaled,
  97	.wait = i915_fence_wait,
  98	.release = i915_fence_release,
  99};
 100
 101static inline void
 102i915_request_remove_from_client(struct i915_request *request)
 103{
 104	struct drm_i915_file_private *file_priv;
 105
 106	file_priv = request->file_priv;
 107	if (!file_priv)
 108		return;
 
 
 
 
 
 
 
 
 109
 110	spin_lock(&file_priv->mm.lock);
 111	if (request->file_priv) {
 112		list_del(&request->client_link);
 113		request->file_priv = NULL;
 114	}
 115	spin_unlock(&file_priv->mm.lock);
 116}
 117
 118static struct i915_dependency *
 119i915_dependency_alloc(struct drm_i915_private *i915)
 120{
 121	return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
 
 
 
 
 
 
 
 
 122}
 123
 124static void
 125i915_dependency_free(struct drm_i915_private *i915,
 126		     struct i915_dependency *dep)
 127{
 128	kmem_cache_free(i915->dependencies, dep);
 129}
 130
 131static void
 132__i915_priotree_add_dependency(struct i915_priotree *pt,
 133			       struct i915_priotree *signal,
 134			       struct i915_dependency *dep,
 135			       unsigned long flags)
 136{
 137	INIT_LIST_HEAD(&dep->dfs_link);
 138	list_add(&dep->wait_link, &signal->waiters_list);
 139	list_add(&dep->signal_link, &pt->signalers_list);
 140	dep->signaler = signal;
 141	dep->flags = flags;
 142}
 143
 144static int
 145i915_priotree_add_dependency(struct drm_i915_private *i915,
 146			     struct i915_priotree *pt,
 147			     struct i915_priotree *signal)
 148{
 149	struct i915_dependency *dep;
 150
 151	dep = i915_dependency_alloc(i915);
 152	if (!dep)
 153		return -ENOMEM;
 154
 155	__i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC);
 156	return 0;
 157}
 158
 159static void
 160i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
 161{
 162	struct i915_dependency *dep, *next;
 163
 164	GEM_BUG_ON(!list_empty(&pt->link));
 
 
 165
 166	/*
 167	 * Everyone we depended upon (the fences we wait to be signaled)
 168	 * should retire before us and remove themselves from our list.
 169	 * However, retirement is run independently on each timeline and
 170	 * so we may be called out-of-order.
 171	 */
 172	list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
 173		GEM_BUG_ON(!i915_priotree_signaled(dep->signaler));
 174		GEM_BUG_ON(!list_empty(&dep->dfs_link));
 175
 176		list_del(&dep->wait_link);
 177		if (dep->flags & I915_DEPENDENCY_ALLOC)
 178			i915_dependency_free(i915, dep);
 179	}
 
 180
 181	/* Remove ourselves from everyone who depends upon us */
 182	list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
 183		GEM_BUG_ON(dep->signaler != pt);
 184		GEM_BUG_ON(!list_empty(&dep->dfs_link));
 185
 186		list_del(&dep->signal_link);
 187		if (dep->flags & I915_DEPENDENCY_ALLOC)
 188			i915_dependency_free(i915, dep);
 
 189	}
 
 190}
 191
 192static void
 193i915_priotree_init(struct i915_priotree *pt)
 
 
 
 
 
 
 
 
 
 
 
 194{
 195	INIT_LIST_HEAD(&pt->signalers_list);
 196	INIT_LIST_HEAD(&pt->waiters_list);
 197	INIT_LIST_HEAD(&pt->link);
 198	pt->priority = I915_PRIORITY_INVALID;
 199}
 200
 201static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 202{
 203	struct intel_engine_cs *engine;
 204	enum intel_engine_id id;
 205	int ret;
 
 
 
 
 
 
 
 
 
 206
 207	/* Carefully retire all requests without writing to the rings */
 208	ret = i915_gem_wait_for_idle(i915,
 209				     I915_WAIT_INTERRUPTIBLE |
 210				     I915_WAIT_LOCKED);
 211	if (ret)
 212		return ret;
 213
 214	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
 215	for_each_engine(engine, i915, id) {
 216		struct i915_gem_timeline *timeline;
 217		struct intel_timeline *tl = engine->timeline;
 218
 219		if (!i915_seqno_passed(seqno, tl->seqno)) {
 220			/* Flush any waiters before we reuse the seqno */
 221			intel_engine_disarm_breadcrumbs(engine);
 222			GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals));
 223		}
 224
 225		/* Check we are idle before we fiddle with hw state! */
 226		GEM_BUG_ON(!intel_engine_is_idle(engine));
 227		GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
 228
 229		/* Finally reset hw state */
 230		intel_engine_init_global_seqno(engine, seqno);
 231		tl->seqno = seqno;
 232
 233		list_for_each_entry(timeline, &i915->gt.timelines, link)
 234			memset(timeline->engine[id].global_sync, 0,
 235			       sizeof(timeline->engine[id].global_sync));
 236	}
 237
 238	return 0;
 239}
 240
 241int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
 242{
 243	struct drm_i915_private *i915 = to_i915(dev);
 244
 245	lockdep_assert_held(&i915->drm.struct_mutex);
 246
 247	if (seqno == 0)
 248		return -EINVAL;
 249
 250	/* HWS page needs to be set less than what we will inject to ring */
 251	return reset_all_global_seqno(i915, seqno - 1);
 252}
 253
 254static void mark_busy(struct drm_i915_private *i915)
 255{
 256	if (i915->gt.awake)
 257		return;
 258
 259	GEM_BUG_ON(!i915->gt.active_requests);
 260
 261	intel_runtime_pm_get_noresume(i915);
 262
 263	/*
 264	 * It seems that the DMC likes to transition between the DC states a lot
 265	 * when there are no connected displays (no active power domains) during
 266	 * command submission.
 267	 *
 268	 * This activity has negative impact on the performance of the chip with
 269	 * huge latencies observed in the interrupt handler and elsewhere.
 270	 *
 271	 * Work around it by grabbing a GT IRQ power domain whilst there is any
 272	 * GT activity, preventing any DC state transitions.
 273	 */
 274	intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
 
 
 
 
 
 
 
 275
 276	i915->gt.awake = true;
 277	if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
 278		i915->gt.epoch = 1;
 279
 280	intel_enable_gt_powersave(i915);
 281	i915_update_gfx_val(i915);
 282	if (INTEL_GEN(i915) >= 6)
 283		gen6_rps_busy(i915);
 284	i915_pmu_gt_unparked(i915);
 285
 286	intel_engines_unpark(i915);
 287
 288	i915_queue_hangcheck(i915);
 
 289
 290	queue_delayed_work(i915->wq,
 291			   &i915->gt.retire_work,
 292			   round_jiffies_up_relative(HZ));
 293}
 294
 295static int reserve_engine(struct intel_engine_cs *engine)
 296{
 297	struct drm_i915_private *i915 = engine->i915;
 298	u32 active = ++engine->timeline->inflight_seqnos;
 299	u32 seqno = engine->timeline->seqno;
 300	int ret;
 301
 302	/* Reservation is fine until we need to wrap around */
 303	if (unlikely(add_overflows(seqno, active))) {
 304		ret = reset_all_global_seqno(i915, 0);
 305		if (ret) {
 306			engine->timeline->inflight_seqnos--;
 307			return ret;
 308		}
 309	}
 310
 311	if (!i915->gt.active_requests++)
 312		mark_busy(i915);
 313
 314	return 0;
 315}
 316
 317static void unreserve_engine(struct intel_engine_cs *engine)
 318{
 319	struct drm_i915_private *i915 = engine->i915;
 
 320
 321	if (!--i915->gt.active_requests) {
 322		/* Cancel the mark_busy() from our reserve_engine() */
 323		GEM_BUG_ON(!i915->gt.awake);
 324		mod_delayed_work(i915->wq,
 325				 &i915->gt.idle_work,
 326				 msecs_to_jiffies(100));
 327	}
 328
 329	GEM_BUG_ON(!engine->timeline->inflight_seqnos);
 330	engine->timeline->inflight_seqnos--;
 
 
 
 
 
 331}
 332
 333void i915_gem_retire_noop(struct i915_gem_active *active,
 334			  struct i915_request *request)
 335{
 336	/* Space left intentionally blank */
 
 
 
 337}
 338
 339static void advance_ring(struct i915_request *request)
 340{
 341	unsigned int tail;
 
 
 
 
 
 
 
 
 
 342
 343	/*
 344	 * We know the GPU must have read the request to have
 345	 * sent us the seqno + interrupt, so use the position
 346	 * of tail of the request to update the last known position
 347	 * of the GPU head.
 348	 *
 349	 * Note this requires that we are always called in request
 350	 * completion order.
 351	 */
 352	if (list_is_last(&request->ring_link, &request->ring->request_list)) {
 353		/*
 354		 * We may race here with execlists resubmitting this request
 355		 * as we retire it. The resubmission will move the ring->tail
 356		 * forwards (to request->wa_tail). We either read the
 357		 * current value that was written to hw, or the value that
 358		 * is just about to be. Either works, if we miss the last two
 359		 * noops - they are safe to be replayed on a reset.
 360		 */
 361		tail = READ_ONCE(request->ring->tail);
 362	} else {
 363		tail = request->postfix;
 364	}
 365	list_del(&request->ring_link);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 366
 367	request->ring->head = tail;
 368}
 369
 370static void free_capture_list(struct i915_request *request)
 371{
 372	struct i915_capture_list *capture;
 
 
 
 
 373
 374	capture = request->capture_list;
 375	while (capture) {
 376		struct i915_capture_list *next = capture->next;
 
 377
 378		kfree(capture);
 379		capture = next;
 380	}
 
 381}
 382
 383static void i915_request_retire(struct i915_request *request)
 384{
 385	struct intel_engine_cs *engine = request->engine;
 386	struct i915_gem_active *active, *next;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 387
 388	lockdep_assert_held(&request->i915->drm.struct_mutex);
 389	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
 390	GEM_BUG_ON(!i915_request_completed(request));
 391	GEM_BUG_ON(!request->i915->gt.active_requests);
 
 
 
 
 
 
 
 392
 393	trace_i915_request_retire(request);
 
 394
 395	spin_lock_irq(&engine->timeline->lock);
 396	list_del_init(&request->link);
 397	spin_unlock_irq(&engine->timeline->lock);
 
 
 
 
 
 
 
 
 
 
 
 398
 399	unreserve_engine(request->engine);
 400	advance_ring(request);
 
 401
 402	free_capture_list(request);
 
 
 
 
 
 
 
 
 403
 404	/*
 405	 * Walk through the active list, calling retire on each. This allows
 406	 * objects to track their GPU activity and mark themselves as idle
 407	 * when their *last* active request is completed (updating state
 408	 * tracking lists for eviction, active references for GEM, etc).
 
 
 409	 *
 410	 * As the ->retire() may free the node, we decouple it first and
 411	 * pass along the auxiliary information (to avoid dereferencing
 412	 * the node after the callback).
 413	 */
 414	list_for_each_entry_safe(active, next, &request->active_list, link) {
 415		/*
 416		 * In microbenchmarks or focusing upon time inside the kernel,
 417		 * we may spend an inordinate amount of time simply handling
 418		 * the retirement of requests and processing their callbacks.
 419		 * Of which, this loop itself is particularly hot due to the
 420		 * cache misses when jumping around the list of i915_gem_active.
 421		 * So we try to keep this loop as streamlined as possible and
 422		 * also prefetch the next i915_gem_active to try and hide
 423		 * the likely cache miss.
 424		 */
 425		prefetchw(next);
 426
 427		INIT_LIST_HEAD(&active->link);
 428		RCU_INIT_POINTER(active->request, NULL);
 429
 430		active->retire(active, request);
 
 
 
 
 
 
 
 
 431	}
 
 432
 433	i915_request_remove_from_client(request);
 
 
 434
 435	/* Retirement decays the ban score as it is a sign of ctx progress */
 436	atomic_dec_if_positive(&request->ctx->ban_score);
 
 
 437
 438	/*
 439	 * The backing object for the context is done after switching to the
 440	 * *next* context. Therefore we cannot retire the previous context until
 441	 * the next context has already started running. However, since we
 442	 * cannot take the required locks at i915_request_submit() we
 443	 * defer the unpinning of the active context to now, retirement of
 444	 * the subsequent request.
 445	 */
 446	if (engine->last_retired_context)
 447		engine->context_unpin(engine, engine->last_retired_context);
 448	engine->last_retired_context = request->ctx;
 449
 450	spin_lock_irq(&request->lock);
 451	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags))
 452		dma_fence_signal_locked(&request->fence);
 453	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
 454		intel_engine_cancel_signaling(request);
 455	if (request->waitboost) {
 456		GEM_BUG_ON(!atomic_read(&request->i915->gt_pm.rps.num_waiters));
 457		atomic_dec(&request->i915->gt_pm.rps.num_waiters);
 458	}
 459	spin_unlock_irq(&request->lock);
 460
 461	i915_priotree_fini(request->i915, &request->priotree);
 462	i915_request_put(request);
 463}
 464
 465void i915_request_retire_upto(struct i915_request *rq)
 466{
 467	struct intel_engine_cs *engine = rq->engine;
 468	struct i915_request *tmp;
 469
 470	lockdep_assert_held(&rq->i915->drm.struct_mutex);
 471	GEM_BUG_ON(!i915_request_completed(rq));
 472
 473	if (list_empty(&rq->link))
 474		return;
 475
 
 476	do {
 477		tmp = list_first_entry(&engine->timeline->requests,
 478				       typeof(*tmp), link);
 
 479
 480		i915_request_retire(tmp);
 481	} while (tmp != rq);
 482}
 483
 484static u32 timeline_get_seqno(struct intel_timeline *tl)
 485{
 486	return ++tl->seqno;
 
 
 
 
 
 
 
 
 
 
 
 487}
 488
 489void __i915_request_submit(struct i915_request *request)
 490{
 491	struct intel_engine_cs *engine = request->engine;
 492	struct intel_timeline *timeline;
 493	u32 seqno;
 
 494
 495	GEM_BUG_ON(!irqs_disabled());
 496	lockdep_assert_held(&engine->timeline->lock);
 497
 498	/* Transfer from per-context onto the global per-engine timeline */
 499	timeline = engine->timeline;
 500	GEM_BUG_ON(timeline == request->timeline);
 501	GEM_BUG_ON(request->global_seqno);
 502
 503	seqno = timeline_get_seqno(timeline);
 504	GEM_BUG_ON(!seqno);
 505	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
 
 
 
 
 
 
 
 
 
 
 
 
 506
 507	/* We may be recursing from the signal callback of another i915 fence */
 508	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
 509	request->global_seqno = seqno;
 510	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
 511		intel_engine_enable_signaling(request, false);
 512	spin_unlock(&request->lock);
 513
 514	engine->emit_breadcrumb(request,
 515				request->ring->vaddr + request->postfix);
 516
 517	spin_lock(&request->timeline->lock);
 518	list_move_tail(&request->link, &timeline->requests);
 519	spin_unlock(&request->timeline->lock);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 520
 521	trace_i915_request_execute(request);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 522
 523	wake_up_all(&request->execute);
 524}
 525
 526void i915_request_submit(struct i915_request *request)
 527{
 528	struct intel_engine_cs *engine = request->engine;
 529	unsigned long flags;
 530
 531	/* Will be called from irq-context when using foreign fences. */
 532	spin_lock_irqsave(&engine->timeline->lock, flags);
 533
 534	__i915_request_submit(request);
 535
 536	spin_unlock_irqrestore(&engine->timeline->lock, flags);
 537}
 538
 539void __i915_request_unsubmit(struct i915_request *request)
 540{
 541	struct intel_engine_cs *engine = request->engine;
 542	struct intel_timeline *timeline;
 543
 544	GEM_BUG_ON(!irqs_disabled());
 545	lockdep_assert_held(&engine->timeline->lock);
 546
 547	/*
 548	 * Only unwind in reverse order, required so that the per-context list
 549	 * is kept in seqno/ring order.
 550	 */
 551	GEM_BUG_ON(!request->global_seqno);
 552	GEM_BUG_ON(request->global_seqno != engine->timeline->seqno);
 553	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine),
 554				     request->global_seqno));
 555	engine->timeline->seqno--;
 556
 557	/* We may be recursing from the signal callback of another i915 fence */
 558	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
 559	request->global_seqno = 0;
 
 
 
 
 
 
 560	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
 561		intel_engine_cancel_signaling(request);
 562	spin_unlock(&request->lock);
 563
 564	/* Transfer back from the global per-engine timeline to per-context */
 565	timeline = request->timeline;
 566	GEM_BUG_ON(timeline == engine->timeline);
 567
 568	spin_lock(&timeline->lock);
 569	list_move(&request->link, &timeline->requests);
 570	spin_unlock(&timeline->lock);
 571
 572	/*
 573	 * We don't need to wake_up any waiters on request->execute, they
 574	 * will get woken by any other event or us re-adding this request
 575	 * to the engine timeline (__i915_request_submit()). The waiters
 576	 * should be quite adapt at finding that the request now has a new
 577	 * global_seqno to the one they went to sleep on.
 578	 */
 579}
 580
 581void i915_request_unsubmit(struct i915_request *request)
 582{
 583	struct intel_engine_cs *engine = request->engine;
 584	unsigned long flags;
 585
 586	/* Will be called from irq-context when using foreign fences. */
 587	spin_lock_irqsave(&engine->timeline->lock, flags);
 588
 589	__i915_request_unsubmit(request);
 590
 591	spin_unlock_irqrestore(&engine->timeline->lock, flags);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 592}
 593
 594static int __i915_sw_fence_call
 595submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 596{
 597	struct i915_request *request =
 598		container_of(fence, typeof(*request), submit);
 599
 600	switch (state) {
 601	case FENCE_COMPLETE:
 602		trace_i915_request_submit(request);
 
 
 
 
 
 
 603		/*
 604		 * We need to serialize use of the submit_request() callback
 605		 * with its hotplugging performed during an emergency
 606		 * i915_gem_set_wedged().  We use the RCU mechanism to mark the
 607		 * critical section in order to force i915_gem_set_wedged() to
 608		 * wait until the submit_request() is completed before
 609		 * proceeding.
 610		 */
 611		rcu_read_lock();
 612		request->engine->submit_request(request);
 613		rcu_read_unlock();
 614		break;
 615
 616	case FENCE_FREE:
 617		i915_request_put(request);
 618		break;
 619	}
 620
 621	return NOTIFY_DONE;
 622}
 623
 624/**
 625 * i915_request_alloc - allocate a request structure
 626 *
 627 * @engine: engine that we wish to issue the request on.
 628 * @ctx: context that the request will be associated with.
 629 *
 630 * Returns a pointer to the allocated request if successful,
 631 * or an error code if not.
 632 */
 633struct i915_request *
 634i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 635{
 636	struct drm_i915_private *i915 = engine->i915;
 637	struct i915_request *rq;
 638	struct intel_ring *ring;
 639	int ret;
 640
 641	lockdep_assert_held(&i915->drm.struct_mutex);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 642
 643	/*
 644	 * Preempt contexts are reserved for exclusive use to inject a
 645	 * preemption context switch. They are never to be used for any trivial
 646	 * request!
 647	 */
 648	GEM_BUG_ON(ctx == i915->preempt_context);
 649
 650	/*
 651	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
 652	 * EIO if the GPU is already wedged.
 653	 */
 654	if (i915_terminally_wedged(&i915->gpu_error))
 655		return ERR_PTR(-EIO);
 656
 657	/*
 658	 * Pinning the contexts may generate requests in order to acquire
 659	 * GGTT space, so do this first before we reserve a seqno for
 660	 * ourselves.
 661	 */
 662	ring = engine->context_pin(engine, ctx);
 663	if (IS_ERR(ring))
 664		return ERR_CAST(ring);
 665	GEM_BUG_ON(!ring);
 666
 667	ret = reserve_engine(engine);
 668	if (ret)
 669		goto err_unpin;
 
 
 
 
 670
 671	ret = intel_ring_wait_for_space(ring, MIN_SPACE_FOR_ADD_REQUEST);
 672	if (ret)
 673		goto err_unreserve;
 674
 675	/* Move the oldest request to the slab-cache (if not in use!) */
 676	rq = list_first_entry_or_null(&engine->timeline->requests,
 677				      typeof(*rq), link);
 678	if (rq && i915_request_completed(rq))
 679		i915_request_retire(rq);
 680
 681	/*
 682	 * Beware: Dragons be flying overhead.
 683	 *
 684	 * We use RCU to look up requests in flight. The lookups may
 685	 * race with the request being allocated from the slab freelist.
 686	 * That is the request we are writing to here, may be in the process
 687	 * of being read by __i915_gem_active_get_rcu(). As such,
 688	 * we have to be very careful when overwriting the contents. During
 689	 * the RCU lookup, we change chase the request->engine pointer,
 690	 * read the request->global_seqno and increment the reference count.
 691	 *
 692	 * The reference count is incremented atomically. If it is zero,
 693	 * the lookup knows the request is unallocated and complete. Otherwise,
 694	 * it is either still in use, or has been reallocated and reset
 695	 * with dma_fence_init(). This increment is safe for release as we
 696	 * check that the request we have a reference to and matches the active
 697	 * request.
 698	 *
 699	 * Before we increment the refcount, we chase the request->engine
 700	 * pointer. We must not call kmem_cache_zalloc() or else we set
 701	 * that pointer to NULL and cause a crash during the lookup. If
 702	 * we see the request is completed (based on the value of the
 703	 * old engine and seqno), the lookup is complete and reports NULL.
 704	 * If we decide the request is not completed (new engine or seqno),
 705	 * then we grab a reference and double check that it is still the
 706	 * active request - which it won't be and restart the lookup.
 707	 *
 708	 * Do not use kmem_cache_zalloc() here!
 709	 */
 710	rq = kmem_cache_alloc(i915->requests,
 711			      GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 712	if (unlikely(!rq)) {
 713		/* Ratelimit ourselves to prevent oom from malicious clients */
 714		ret = i915_gem_wait_for_idle(i915,
 715					     I915_WAIT_LOCKED |
 716					     I915_WAIT_INTERRUPTIBLE);
 717		if (ret)
 718			goto err_unreserve;
 719
 720		/*
 721		 * We've forced the client to stall and catch up with whatever
 722		 * backlog there might have been. As we are assuming that we
 723		 * caused the mempressure, now is an opportune time to
 724		 * recover as much memory from the request pool as is possible.
 725		 * Having already penalized the client to stall, we spend
 726		 * a little extra time to re-optimise page allocation.
 727		 */
 728		kmem_cache_shrink(i915->requests);
 729		rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */
 730
 731		rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
 732		if (!rq) {
 733			ret = -ENOMEM;
 734			goto err_unreserve;
 735		}
 736	}
 737
 738	rq->timeline = i915_gem_context_lookup_timeline(ctx, engine);
 739	GEM_BUG_ON(rq->timeline == engine->timeline);
 
 
 
 
 
 
 740
 741	spin_lock_init(&rq->lock);
 742	dma_fence_init(&rq->fence,
 743		       &i915_fence_ops,
 744		       &rq->lock,
 745		       rq->timeline->fence_context,
 746		       timeline_get_seqno(rq->timeline));
 
 
 747
 748	/* We bump the ref for the fence chain */
 749	i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
 750	init_waitqueue_head(&rq->execute);
 751
 752	i915_priotree_init(&rq->priotree);
 753
 754	INIT_LIST_HEAD(&rq->active_list);
 755	rq->i915 = i915;
 756	rq->engine = engine;
 757	rq->ctx = ctx;
 758	rq->ring = ring;
 759
 760	/* No zalloc, must clear what we need by hand */
 761	rq->global_seqno = 0;
 762	rq->signaling.wait.seqno = 0;
 763	rq->file_priv = NULL;
 764	rq->batch = NULL;
 765	rq->capture_list = NULL;
 766	rq->waitboost = false;
 
 767
 768	/*
 769	 * Reserve space in the ring buffer for all the commands required to
 770	 * eventually emit this request. This is to guarantee that the
 771	 * i915_request_add() call can't fail. Note that the reserve may need
 772	 * to be redone if the request is not actually submitted straight
 773	 * away, e.g. because a GPU scheduler has deferred it.
 
 
 
 
 
 774	 */
 775	rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
 776	GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz);
 777
 778	/*
 779	 * Record the position of the start of the request so that
 780	 * should we detect the updated seqno part-way through the
 781	 * GPU processing the request, we never over-estimate the
 782	 * position of the head.
 783	 */
 784	rq->head = rq->ring->emit;
 785
 786	/* Unconditionally invalidate GPU caches and TLBs. */
 787	ret = engine->emit_flush(rq, EMIT_INVALIDATE);
 788	if (ret)
 789		goto err_unwind;
 790
 791	ret = engine->request_alloc(rq);
 792	if (ret)
 793		goto err_unwind;
 
 794
 795	/* Check that we didn't interrupt ourselves with a new request */
 796	GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
 797	return rq;
 798
 799err_unwind:
 800	rq->ring->emit = rq->head;
 801
 802	/* Make sure we didn't add ourselves to external state before freeing */
 803	GEM_BUG_ON(!list_empty(&rq->active_list));
 804	GEM_BUG_ON(!list_empty(&rq->priotree.signalers_list));
 805	GEM_BUG_ON(!list_empty(&rq->priotree.waiters_list));
 806
 807	kmem_cache_free(i915->requests, rq);
 
 808err_unreserve:
 809	unreserve_engine(engine);
 810err_unpin:
 811	engine->context_unpin(engine, ctx);
 812	return ERR_PTR(ret);
 813}
 814
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 815static int
 816i915_request_await_request(struct i915_request *to, struct i915_request *from)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 817{
 818	int ret;
 819
 820	GEM_BUG_ON(to == from);
 821	GEM_BUG_ON(to->timeline == from->timeline);
 822
 823	if (i915_request_completed(from))
 
 
 
 
 
 
 
 824		return 0;
 825
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 826	if (to->engine->schedule) {
 827		ret = i915_priotree_add_dependency(to->i915,
 828						   &to->priotree,
 829						   &from->priotree);
 830		if (ret < 0)
 831			return ret;
 832	}
 833
 834	if (to->engine == from->engine) {
 835		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
 836						       &from->submit,
 837						       I915_FENCE_GFP);
 838		return ret < 0 ? ret : 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 839	}
 840
 841	if (to->engine->semaphore.sync_to) {
 842		u32 seqno;
 
 843
 844		GEM_BUG_ON(!from->engine->semaphore.signal);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 845
 846		seqno = i915_request_global_seqno(from);
 847		if (!seqno)
 848			goto await_dma_fence;
 
 849
 850		if (seqno <= to->timeline->global_sync[from->engine->id])
 851			return 0;
 852
 853		trace_i915_gem_ring_sync_to(to, from);
 854		ret = to->engine->semaphore.sync_to(to, from);
 855		if (ret)
 
 
 
 
 
 
 
 
 
 856			return ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 857
 858		to->timeline->global_sync[from->engine->id] = seqno;
 
 
 
 
 859		return 0;
 860	}
 861
 862await_dma_fence:
 863	ret = i915_sw_fence_await_dma_fence(&to->submit,
 864					    &from->fence, 0,
 865					    I915_FENCE_GFP);
 866	return ret < 0 ? ret : 0;
 
 
 
 
 
 
 
 
 
 
 
 867}
 868
 869int
 870i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
 871{
 872	struct dma_fence **child = &fence;
 873	unsigned int nchild = 1;
 874	int ret;
 875
 876	/*
 877	 * Note that if the fence-array was created in signal-on-any mode,
 878	 * we should *not* decompose it into its individual fences. However,
 879	 * we don't currently store which mode the fence-array is operating
 880	 * in. Fortunately, the only user of signal-on-any is private to
 881	 * amdgpu and we should not see any incoming fence-array from
 882	 * sync-file being in signal-on-any mode.
 883	 */
 884	if (dma_fence_is_array(fence)) {
 885		struct dma_fence_array *array = to_dma_fence_array(fence);
 886
 887		child = array->fences;
 888		nchild = array->num_fences;
 889		GEM_BUG_ON(!nchild);
 890	}
 891
 892	do {
 893		fence = *child++;
 894		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 895			continue;
 896
 897		/*
 898		 * Requests on the same timeline are explicitly ordered, along
 899		 * with their dependencies, by i915_request_add() which ensures
 900		 * that requests are submitted in-order through each ring.
 901		 */
 902		if (fence->context == rq->fence.context)
 903			continue;
 904
 905		/* Squash repeated waits to the same timelines */
 906		if (fence->context != rq->i915->mm.unordered_timeline &&
 907		    intel_timeline_sync_is_later(rq->timeline, fence))
 
 908			continue;
 909
 910		if (dma_fence_is_i915(fence))
 911			ret = i915_request_await_request(rq, to_request(fence));
 912		else
 913			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
 914							    I915_FENCE_TIMEOUT,
 915							    I915_FENCE_GFP);
 916		if (ret < 0)
 917			return ret;
 918
 919		/* Record the latest fence used against each timeline */
 920		if (fence->context != rq->i915->mm.unordered_timeline)
 921			intel_timeline_sync_set(rq->timeline, fence);
 
 922	} while (--nchild);
 923
 924	return 0;
 925}
 926
 927/**
 928 * i915_request_await_object - set this request to (async) wait upon a bo
 929 * @to: request we are wishing to use
 930 * @obj: object which may be in use on another ring.
 931 * @write: whether the wait is on behalf of a writer
 932 *
 933 * This code is meant to abstract object synchronization with the GPU.
 934 * Conceptually we serialise writes between engines inside the GPU.
 935 * We only allow one engine to write into a buffer at any time, but
 936 * multiple readers. To ensure each has a coherent view of memory, we must:
 937 *
 938 * - If there is an outstanding write request to the object, the new
 939 *   request must wait for it to complete (either CPU or in hw, requests
 940 *   on the same ring will be naturally ordered).
 941 *
 942 * - If we are a write request (pending_write_domain is set), the new
 943 *   request must wait for outstanding read requests to complete.
 944 *
 945 * Returns 0 if successful, else propagates up the lower layer error.
 946 */
 947int
 948i915_request_await_object(struct i915_request *to,
 949			  struct drm_i915_gem_object *obj,
 950			  bool write)
 951{
 952	struct dma_fence *excl;
 953	int ret = 0;
 954
 955	if (write) {
 956		struct dma_fence **shared;
 957		unsigned int count, i;
 958
 959		ret = reservation_object_get_fences_rcu(obj->resv,
 960							&excl, &count, &shared);
 961		if (ret)
 962			return ret;
 963
 964		for (i = 0; i < count; i++) {
 965			ret = i915_request_await_dma_fence(to, shared[i]);
 966			if (ret)
 967				break;
 968
 969			dma_fence_put(shared[i]);
 970		}
 971
 972		for (; i < count; i++)
 973			dma_fence_put(shared[i]);
 974		kfree(shared);
 975	} else {
 976		excl = reservation_object_get_excl_rcu(obj->resv);
 977	}
 978
 979	if (excl) {
 980		if (ret == 0)
 981			ret = i915_request_await_dma_fence(to, excl);
 982
 983		dma_fence_put(excl);
 984	}
 985
 986	return ret;
 987}
 988
 989/*
 990 * NB: This function is not allowed to fail. Doing so would mean the the
 991 * request is not being tracked for completion but the work itself is
 992 * going to happen on the hardware. This would be a Bad Thing(tm).
 993 */
 994void __i915_request_add(struct i915_request *request, bool flush_caches)
 995{
 996	struct intel_engine_cs *engine = request->engine;
 997	struct intel_ring *ring = request->ring;
 998	struct intel_timeline *timeline = request->timeline;
 999	struct i915_request *prev;
1000	u32 *cs;
1001	int err;
1002
1003	lockdep_assert_held(&request->i915->drm.struct_mutex);
1004	trace_i915_request_add(request);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1005
1006	/*
1007	 * Make sure that no request gazumped us - if it was allocated after
1008	 * our i915_request_alloc() and called __i915_request_add() before
1009	 * us, the timeline will hold its seqno which is later than ours.
1010	 */
1011	GEM_BUG_ON(timeline->seqno != request->fence.seqno);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1012
1013	/*
1014	 * To ensure that this call will not fail, space for its emissions
1015	 * should already have been reserved in the ring buffer. Let the ring
1016	 * know that it is time to use that space up.
1017	 */
1018	request->reserved_space = 0;
1019
1020	/*
1021	 * Emit any outstanding flushes - execbuf can fail to emit the flush
1022	 * after having emitted the batchbuffer command. Hence we need to fix
1023	 * things up similar to emitting the lazy request. The difference here
1024	 * is that the flush _must_ happen before the next request, no matter
1025	 * what.
1026	 */
1027	if (flush_caches) {
1028		err = engine->emit_flush(request, EMIT_FLUSH);
1029
1030		/* Not allowed to fail! */
1031		WARN(err, "engine->emit_flush() failed: %d!\n", err);
1032	}
1033
1034	/*
1035	 * Record the position of the start of the breadcrumb so that
1036	 * should we detect the updated seqno part-way through the
1037	 * GPU processing the request, we never over-estimate the
1038	 * position of the ring's HEAD.
1039	 */
1040	cs = intel_ring_begin(request, engine->emit_breadcrumb_sz);
1041	GEM_BUG_ON(IS_ERR(cs));
1042	request->postfix = intel_ring_offset(request, cs);
1043
1044	/*
1045	 * Seal the request and mark it as pending execution. Note that
1046	 * we may inspect this state, without holding any locks, during
1047	 * hangcheck. Hence we apply the barrier to ensure that we do not
1048	 * see a more recent value in the hws than we are tracking.
1049	 */
1050
1051	prev = i915_gem_active_raw(&timeline->last_request,
1052				   &request->i915->drm.struct_mutex);
1053	if (prev && !i915_request_completed(prev)) {
1054		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
1055					     &request->submitq);
1056		if (engine->schedule)
1057			__i915_priotree_add_dependency(&request->priotree,
1058						       &prev->priotree,
1059						       &request->dep,
1060						       0);
1061	}
1062
1063	spin_lock_irq(&timeline->lock);
1064	list_add_tail(&request->link, &timeline->requests);
1065	spin_unlock_irq(&timeline->lock);
1066
1067	GEM_BUG_ON(timeline->seqno != request->fence.seqno);
1068	i915_gem_active_set(&timeline->last_request, request);
1069
1070	list_add_tail(&request->ring_link, &ring->request_list);
1071	request->emitted_jiffies = jiffies;
1072
 
 
 
1073	/*
1074	 * Let the backend know a new request has arrived that may need
1075	 * to adjust the existing execution schedule due to a high priority
1076	 * request - i.e. we may want to preempt the current request in order
1077	 * to run a high priority dependency chain *before* we can execute this
1078	 * request.
1079	 *
1080	 * This is called before the request is ready to run so that we can
1081	 * decide whether to preempt the entire chain so that it is ready to
1082	 * run at the earliest possible convenience.
1083	 */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1084	rcu_read_lock();
1085	if (engine->schedule)
1086		engine->schedule(request, request->ctx->priority);
 
1087	rcu_read_unlock();
1088
1089	local_bh_disable();
1090	i915_sw_fence_commit(&request->submit);
1091	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
1092
1093	/*
1094	 * In typical scenarios, we do not expect the previous request on
1095	 * the timeline to be still tracked by timeline->last_request if it
1096	 * has been completed. If the completed request is still here, that
1097	 * implies that request retirement is a long way behind submission,
1098	 * suggesting that we haven't been retiring frequently enough from
1099	 * the combination of retire-before-alloc, waiters and the background
1100	 * retirement worker. So if the last request on this timeline was
1101	 * already completed, do a catch up pass, flushing the retirement queue
1102	 * up to this client. Since we have now moved the heaviest operations
1103	 * during retirement onto secondary workers, such as freeing objects
1104	 * or contexts, retiring a bunch of requests is mostly list management
1105	 * (and cache misses), and so we should not be overly penalizing this
1106	 * client by performing excess work, though we may still performing
1107	 * work on behalf of others -- but instead we should benefit from
1108	 * improved resource management. (Well, that's the theory at least.)
1109	 */
1110	if (prev && i915_request_completed(prev))
1111		i915_request_retire_upto(prev);
1112}
1113
1114static unsigned long local_clock_us(unsigned int *cpu)
1115{
1116	unsigned long t;
1117
1118	/*
1119	 * Cheaply and approximately convert from nanoseconds to microseconds.
1120	 * The result and subsequent calculations are also defined in the same
1121	 * approximate microseconds units. The principal source of timing
1122	 * error here is from the simple truncation.
1123	 *
1124	 * Note that local_clock() is only defined wrt to the current CPU;
1125	 * the comparisons are no longer valid if we switch CPUs. Instead of
1126	 * blocking preemption for the entire busywait, we can detect the CPU
1127	 * switch and use that as indicator of system load and a reason to
1128	 * stop busywaiting, see busywait_stop().
1129	 */
1130	*cpu = get_cpu();
1131	t = local_clock() >> 10;
1132	put_cpu();
1133
1134	return t;
1135}
1136
1137static bool busywait_stop(unsigned long timeout, unsigned int cpu)
1138{
1139	unsigned int this_cpu;
1140
1141	if (time_after(local_clock_us(&this_cpu), timeout))
1142		return true;
1143
1144	return this_cpu != cpu;
1145}
1146
1147static bool __i915_spin_request(const struct i915_request *rq,
1148				u32 seqno, int state, unsigned long timeout_us)
1149{
1150	struct intel_engine_cs *engine = rq->engine;
1151	unsigned int irq, cpu;
1152
1153	GEM_BUG_ON(!seqno);
1154
1155	/*
1156	 * Only wait for the request if we know it is likely to complete.
1157	 *
1158	 * We don't track the timestamps around requests, nor the average
1159	 * request length, so we do not have a good indicator that this
1160	 * request will complete within the timeout. What we do know is the
1161	 * order in which requests are executed by the engine and so we can
1162	 * tell if the request has started. If the request hasn't started yet,
1163	 * it is a fair assumption that it will not complete within our
1164	 * relatively short timeout.
1165	 */
1166	if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1))
1167		return false;
1168
1169	/*
1170	 * When waiting for high frequency requests, e.g. during synchronous
1171	 * rendering split between the CPU and GPU, the finite amount of time
1172	 * required to set up the irq and wait upon it limits the response
1173	 * rate. By busywaiting on the request completion for a short while we
1174	 * can service the high frequency waits as quickly as possible. However,
1175	 * if it is a slow request, we want to sleep as quickly as possible.
1176	 * The tradeoff between waiting and sleeping is roughly the time it
1177	 * takes to sleep on a request, on the order of a microsecond.
1178	 */
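	/*
	 * The budgets chosen by the callers below reflect this tradeoff:
	 * roughly 5us of spinning before we commit to setting up the
	 * interrupt wait, and roughly 2us when merely rechecking after a
	 * wakeup.
	 */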
1179
1180	irq = atomic_read(&engine->irq_count);
1181	timeout_us += local_clock_us(&cpu);
1182	do {
1183		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
1184			return seqno == i915_request_global_seqno(rq);
1185
1186		/*
1187		 * Seqnos are meant to be ordered *before* the interrupt. If
1188		 * we see an interrupt without a corresponding seqno advance,
1189		 * assume we won't see one in the near future and instead require
1190		 * the engine->seqno_barrier() to fix up coherency.
1191		 */
1192		if (atomic_read(&engine->irq_count) != irq)
1193			break;
1194
1195		if (signal_pending_state(state, current))
1196			break;
1197
1198		if (busywait_stop(timeout_us, cpu))
1199			break;
1200
1201		cpu_relax();
1202	} while (!need_resched());
1203
1204	return false;
1205}
1206
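/*
 * A pending GPU reset may be handed off to a waiter holding struct_mutex.
 * If so, run the reset here (after returning to TASK_RUNNING) rather than
 * sleeping indefinitely behind a wedged GPU; returns true if a reset was
 * performed.
 */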
1207static bool __i915_wait_request_check_and_reset(struct i915_request *request)
1208{
1209	if (likely(!i915_reset_handoff(&request->i915->gpu_error)))
1210		return false;
1211
1212	__set_current_state(TASK_RUNNING);
1213	i915_reset(request->i915, 0);
1214	return true;
1215}
1216
1217/**
1218 * i915_request_wait - wait until execution of request has finished
1219 * @rq: the request to wait upon
1220 * @flags: how to wait
1221 * @timeout: how long to wait in jiffies
1222 *
1223 * i915_request_wait() waits for the request to be completed, for a
1224 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
1225 * unbounded wait).
1226 *
1227 * If the caller holds the struct_mutex, it must pass I915_WAIT_LOCKED in
1228 * @flags; conversely, if the struct_mutex is not held, the caller must not
1229 * specify that the wait is locked.
1230 *
1231 * Returns the remaining time (in jiffies) if the request completed, which may
1232 * be zero, or -ETIME if the request is unfinished after the timeout expires.
1233 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
1234 * pending before the request completes.
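 *
 * Illustrative usage (hypothetical caller, error handling trimmed):
 *
 *	timeout = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				    MAX_SCHEDULE_TIMEOUT);
 *	if (timeout < 0)
 *		return timeout;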
1235 */
1236long i915_request_wait(struct i915_request *rq,
1237		       unsigned int flags,
1238		       long timeout)
1239{
1240	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
1241		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1242	wait_queue_head_t *errq = &rq->i915->gpu_error.wait_queue;
1243	DEFINE_WAIT_FUNC(reset, default_wake_function);
1244	DEFINE_WAIT_FUNC(exec, default_wake_function);
1245	struct intel_wait wait;
1246
1247	might_sleep();
1248#if IS_ENABLED(CONFIG_LOCKDEP)
1249	GEM_BUG_ON(debug_locks &&
1250		   !!lockdep_is_held(&rq->i915->drm.struct_mutex) !=
1251		   !!(flags & I915_WAIT_LOCKED));
1252#endif
1253	GEM_BUG_ON(timeout < 0);
1254
1255	if (i915_request_completed(rq))
1256		return timeout;
1257
1258	if (!timeout)
1259		return -ETIME;
1260
1261	trace_i915_request_wait_begin(rq, flags);
1262
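	/*
	 * Queue ourselves on the request's execute waitqueue (woken once the
	 * request is actually submitted to hw and owns a seqno) and, when we
	 * hold struct_mutex, on the global error waitqueue so that a pending
	 * GPU reset can kick us out of the wait below.
	 */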
1263	add_wait_queue(&rq->execute, &exec);
1264	if (flags & I915_WAIT_LOCKED)
1265		add_wait_queue(errq, &reset);
1266
1267	intel_wait_init(&wait, rq);
1268
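	/*
	 * First wait for the request to be submitted to hw and assigned its
	 * breadcrumb seqno; only then can we busywait on, or enable the
	 * interrupt for, that seqno. Should the request later be resubmitted
	 * with a different seqno (see intel_wait_check_request() below), we
	 * jump back to restart: and begin again.
	 */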
1269restart:
1270	do {
1271		set_current_state(state);
1272		if (intel_wait_update_request(&wait, rq))
1273			break;
1274
1275		if (flags & I915_WAIT_LOCKED &&
1276		    __i915_wait_request_check_and_reset(rq))
1277			continue;
1278
1279		if (signal_pending_state(state, current)) {
1280			timeout = -ERESTARTSYS;
1281			goto complete;
1282		}
1283
1284		if (!timeout) {
1285			timeout = -ETIME;
1286			goto complete;
1287		}
1288
1289		timeout = io_schedule_timeout(timeout);
1290	} while (1);
1291
1292	GEM_BUG_ON(!intel_wait_has_seqno(&wait));
1293	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
1294
1295	/* Optimistic short spin before touching IRQs */
1296	if (__i915_spin_request(rq, wait.seqno, state, 5))
1297		goto complete;
1298
1299	set_current_state(state);
1300	if (intel_engine_add_wait(rq->engine, &wait))
1301		/*
1302		 * In order to check that we haven't missed the interrupt
1303		 * as we enabled it, we need to kick ourselves to do a
1304		 * coherent check on the seqno before we sleep.
1305		 */
1306		goto wakeup;
1307
1308	if (flags & I915_WAIT_LOCKED)
1309		__i915_wait_request_check_and_reset(rq);
1310
1311	for (;;) {
1312		if (signal_pending_state(state, current)) {
1313			timeout = -ERESTARTSYS;
1314			break;
1315		}
1316
1317		if (!timeout) {
1318			timeout = -ETIME;
1319			break;
1320		}
1321
1322		timeout = io_schedule_timeout(timeout);
1323
1324		if (intel_wait_complete(&wait) &&
1325		    intel_wait_check_request(&wait, rq))
1326			break;
1327
1328		set_current_state(state);
1329
1330wakeup:
1331		/*
1332		 * Carefully check if the request is complete, giving time
1333		 * for the seqno to be visible following the interrupt.
1334		 * We also have to check in case we are kicked by the GPU
1335		 * reset in order to drop the struct_mutex.
1336		 */
1337		if (__i915_request_irq_complete(rq))
1338			break;
1339
1340		/*
1341		 * If the GPU is hung, and we hold the lock, reset the GPU
1342		 * and then check for completion. On a full reset, the engine's
1343		 * HW seqno will be advanced past us and we are complete.
1344		 * If we do a partial reset, we have to wait for the GPU to
1345		 * resume and update the breadcrumb.
1346		 *
1347		 * If we don't hold the mutex, we can just wait for the worker
1348		 * to come along and update the breadcrumb (either directly
1349		 * itself, or indirectly by recovering the GPU).
1350		 */
1351		if (flags & I915_WAIT_LOCKED &&
1352		    __i915_wait_request_check_and_reset(rq))
1353			continue;
1354
1355		/* Only spin if we know the GPU is processing this request */
1356		if (__i915_spin_request(rq, wait.seqno, state, 2))
1357			break;
1358
1359		if (!intel_wait_check_request(&wait, rq)) {
1360			intel_engine_remove_wait(rq->engine, &wait);
1361			goto restart;
1362		}
1363	}
1364
1365	intel_engine_remove_wait(rq->engine, &wait);
1366complete:
1367	__set_current_state(TASK_RUNNING);
1368	if (flags & I915_WAIT_LOCKED)
1369		remove_wait_queue(errq, &reset);
1370	remove_wait_queue(&rq->execute, &exec);
1371	trace_i915_request_wait_end(rq);
1372
1373	return timeout;
1374}
1375
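/*
 * Under the engine's timeline lock, move every request whose global seqno
 * the hw has already passed onto a local list, then retire them with the
 * lock dropped so that the irq-off critical section stays short.
 */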
1376static void engine_retire_requests(struct intel_engine_cs *engine)
1377{
1378	struct i915_request *request, *next;
1379	u32 seqno = intel_engine_get_seqno(engine);
1380	LIST_HEAD(retire);
1381
1382	spin_lock_irq(&engine->timeline->lock);
1383	list_for_each_entry_safe(request, next,
1384				 &engine->timeline->requests, link) {
1385		if (!i915_seqno_passed(seqno, request->global_seqno))
1386			break;
1387
1388		list_move_tail(&request->link, &retire);
1389	}
1390	spin_unlock_irq(&engine->timeline->lock);
1391
1392	list_for_each_entry_safe(request, next, &retire, link)
1393		i915_request_retire(request);
1394}
1395
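/*
 * Retire completed requests on every engine. The caller must hold
 * struct_mutex; if no requests are in flight this returns immediately.
 */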
1396void i915_retire_requests(struct drm_i915_private *i915)
1397{
1398	struct intel_engine_cs *engine;
1399	enum intel_engine_id id;
1400
1401	lockdep_assert_held(&i915->drm.struct_mutex);
1402
1403	if (!i915->gt.active_requests)
1404		return;
1405
1406	for_each_engine(engine, i915, id)
1407		engine_retire_requests(engine);
1408}
1409
1410#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1411#include "selftests/mock_request.c"
1412#include "selftests/i915_request.c"
1413#endif