Linux Audio

Check our new training course

Loading...
Note: File does not exist in v3.1.
   1/*
   2 * SPDX-License-Identifier: MIT
   3 *
   4 * Copyright © 2011-2012 Intel Corporation
   5 */
   6
   7/*
   8 * This file implements HW context support. On gen5+ a HW context consists of an
   9 * opaque GPU object which is referenced at times of context saves and restores.
  10 * With RC6 enabled, the context is also referenced as the GPU enters and exists
  11 * from RC6 (GPU has it's own internal power context, except on gen5). Though
  12 * something like a context does exist for the media ring, the code only
  13 * supports contexts for the render ring.
  14 *
  15 * In software, there is a distinction between contexts created by the user,
  16 * and the default HW context. The default HW context is used by GPU clients
  17 * that do not request setup of their own hardware context. The default
  18 * context's state is never restored to help prevent programming errors. This
  19 * would happen if a client ran and piggy-backed off another clients GPU state.
  20 * The default context only exists to give the GPU some offset to load as the
  21 * current to invoke a save of the context we actually care about. In fact, the
  22 * code could likely be constructed, albeit in a more complicated fashion, to
  23 * never use the default context, though that limits the driver's ability to
  24 * swap out, and/or destroy other contexts.
  25 *
  26 * All other contexts are created as a request by the GPU client. These contexts
  27 * store GPU state, and thus allow GPU clients to not re-emit state (and
  28 * potentially query certain state) at any time. The kernel driver makes
  29 * certain that the appropriate commands are inserted.
  30 *
  31 * The context life cycle is semi-complicated in that context BOs may live
  32 * longer than the context itself because of the way the hardware, and object
  33 * tracking works. Below is a very crude representation of the state machine
  34 * describing the context life.
  35 *                                         refcount     pincount     active
  36 * S0: initial state                          0            0           0
  37 * S1: context created                        1            0           0
  38 * S2: context is currently running           2            1           X
  39 * S3: GPU referenced, but not current        2            0           1
  40 * S4: context is current, but destroyed      1            1           0
  41 * S5: like S3, but destroyed                 1            0           1
  42 *
  43 * The most common (but not all) transitions:
  44 * S0->S1: client creates a context
  45 * S1->S2: client submits execbuf with context
  46 * S2->S3: other clients submits execbuf with context
  47 * S3->S1: context object was retired
  48 * S3->S2: clients submits another execbuf
  49 * S2->S4: context destroy called with current context
  50 * S3->S5->S0: destroy path
  51 * S4->S5->S0: destroy path on current context
  52 *
  53 * There are two confusing terms used above:
  54 *  The "current context" means the context which is currently running on the
  55 *  GPU. The GPU has loaded its state already and has stored away the gtt
  56 *  offset of the BO. The GPU is not actively referencing the data at this
  57 *  offset, but it will on the next context switch. The only way to avoid this
  58 *  is to do a GPU reset.
  59 *
  60 *  An "active context' is one which was previously the "current context" and is
  61 *  on the active list waiting for the next context switch to occur. Until this
  62 *  happens, the object must remain at the same gtt offset. It is therefore
  63 *  possible to destroy a context, but it is still active.
  64 *
  65 */
  66
  67#include <linux/highmem.h>
  68#include <linux/log2.h>
  69#include <linux/nospec.h>
  70
  71#include <drm/drm_cache.h>
  72#include <drm/drm_syncobj.h>
  73
  74#include "gt/gen6_ppgtt.h"
  75#include "gt/intel_context.h"
  76#include "gt/intel_context_param.h"
  77#include "gt/intel_engine_heartbeat.h"
  78#include "gt/intel_engine_user.h"
  79#include "gt/intel_gpu_commands.h"
  80#include "gt/intel_ring.h"
  81
  82#include "pxp/intel_pxp.h"
  83
  84#include "i915_file_private.h"
  85#include "i915_gem_context.h"
  86#include "i915_trace.h"
  87#include "i915_user_extensions.h"
  88
  89#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
  90
  91static struct kmem_cache *slab_luts;
  92
  93struct i915_lut_handle *i915_lut_handle_alloc(void)
  94{
  95	return kmem_cache_alloc(slab_luts, GFP_KERNEL);
  96}
  97
  98void i915_lut_handle_free(struct i915_lut_handle *lut)
  99{
 100	return kmem_cache_free(slab_luts, lut);
 101}
 102
 103static void lut_close(struct i915_gem_context *ctx)
 104{
 105	struct radix_tree_iter iter;
 106	void __rcu **slot;
 107
 108	mutex_lock(&ctx->lut_mutex);
 109	rcu_read_lock();
 110	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
 111		struct i915_vma *vma = rcu_dereference_raw(*slot);
 112		struct drm_i915_gem_object *obj = vma->obj;
 113		struct i915_lut_handle *lut;
 114
 115		if (!kref_get_unless_zero(&obj->base.refcount))
 116			continue;
 117
 118		spin_lock(&obj->lut_lock);
 119		list_for_each_entry(lut, &obj->lut_list, obj_link) {
 120			if (lut->ctx != ctx)
 121				continue;
 122
 123			if (lut->handle != iter.index)
 124				continue;
 125
 126			list_del(&lut->obj_link);
 127			break;
 128		}
 129		spin_unlock(&obj->lut_lock);
 130
 131		if (&lut->obj_link != &obj->lut_list) {
 132			i915_lut_handle_free(lut);
 133			radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
 134			i915_vma_close(vma);
 135			i915_gem_object_put(obj);
 136		}
 137
 138		i915_gem_object_put(obj);
 139	}
 140	rcu_read_unlock();
 141	mutex_unlock(&ctx->lut_mutex);
 142}
 143
 144static struct intel_context *
 145lookup_user_engine(struct i915_gem_context *ctx,
 146		   unsigned long flags,
 147		   const struct i915_engine_class_instance *ci)
 148#define LOOKUP_USER_INDEX BIT(0)
 149{
 150	int idx;
 151
 152	if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
 153		return ERR_PTR(-EINVAL);
 154
 155	if (!i915_gem_context_user_engines(ctx)) {
 156		struct intel_engine_cs *engine;
 157
 158		engine = intel_engine_lookup_user(ctx->i915,
 159						  ci->engine_class,
 160						  ci->engine_instance);
 161		if (!engine)
 162			return ERR_PTR(-EINVAL);
 163
 164		idx = engine->legacy_idx;
 165	} else {
 166		idx = ci->engine_instance;
 167	}
 168
 169	return i915_gem_context_get_engine(ctx, idx);
 170}
 171
 172static int validate_priority(struct drm_i915_private *i915,
 173			     const struct drm_i915_gem_context_param *args)
 174{
 175	s64 priority = args->value;
 176
 177	if (args->size)
 178		return -EINVAL;
 179
 180	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
 181		return -ENODEV;
 182
 183	if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
 184	    priority < I915_CONTEXT_MIN_USER_PRIORITY)
 185		return -EINVAL;
 186
 187	if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
 188	    !capable(CAP_SYS_NICE))
 189		return -EPERM;
 190
 191	return 0;
 192}
 193
 194static void proto_context_close(struct drm_i915_private *i915,
 195				struct i915_gem_proto_context *pc)
 196{
 197	int i;
 198
 199	if (pc->pxp_wakeref)
 200		intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref);
 201	if (pc->vm)
 202		i915_vm_put(pc->vm);
 203	if (pc->user_engines) {
 204		for (i = 0; i < pc->num_user_engines; i++)
 205			kfree(pc->user_engines[i].siblings);
 206		kfree(pc->user_engines);
 207	}
 208	kfree(pc);
 209}
 210
 211static int proto_context_set_persistence(struct drm_i915_private *i915,
 212					 struct i915_gem_proto_context *pc,
 213					 bool persist)
 214{
 215	if (persist) {
 216		/*
 217		 * Only contexts that are short-lived [that will expire or be
 218		 * reset] are allowed to survive past termination. We require
 219		 * hangcheck to ensure that the persistent requests are healthy.
 220		 */
 221		if (!i915->params.enable_hangcheck)
 222			return -EINVAL;
 223
 224		pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
 225	} else {
 226		/* To cancel a context we use "preempt-to-idle" */
 227		if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
 228			return -ENODEV;
 229
 230		/*
 231		 * If the cancel fails, we then need to reset, cleanly!
 232		 *
 233		 * If the per-engine reset fails, all hope is lost! We resort
 234		 * to a full GPU reset in that unlikely case, but realistically
 235		 * if the engine could not reset, the full reset does not fare
 236		 * much better. The damage has been done.
 237		 *
 238		 * However, if we cannot reset an engine by itself, we cannot
 239		 * cleanup a hanging persistent context without causing
 240		 * colateral damage, and we should not pretend we can by
 241		 * exposing the interface.
 242		 */
 243		if (!intel_has_reset_engine(to_gt(i915)))
 244			return -ENODEV;
 245
 246		pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
 247	}
 248
 249	return 0;
 250}
 251
 252static int proto_context_set_protected(struct drm_i915_private *i915,
 253				       struct i915_gem_proto_context *pc,
 254				       bool protected)
 255{
 256	int ret = 0;
 257
 258	if (!protected) {
 259		pc->uses_protected_content = false;
 260	} else if (!intel_pxp_is_enabled(&to_gt(i915)->pxp)) {
 261		ret = -ENODEV;
 262	} else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
 263		   !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
 264		ret = -EPERM;
 265	} else {
 266		pc->uses_protected_content = true;
 267
 268		/*
 269		 * protected context usage requires the PXP session to be up,
 270		 * which in turn requires the device to be active.
 271		 */
 272		pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 273
 274		if (!intel_pxp_is_active(&to_gt(i915)->pxp))
 275			ret = intel_pxp_start(&to_gt(i915)->pxp);
 276	}
 277
 278	return ret;
 279}
 280
 281static struct i915_gem_proto_context *
 282proto_context_create(struct drm_i915_private *i915, unsigned int flags)
 283{
 284	struct i915_gem_proto_context *pc, *err;
 285
 286	pc = kzalloc(sizeof(*pc), GFP_KERNEL);
 287	if (!pc)
 288		return ERR_PTR(-ENOMEM);
 289
 290	pc->num_user_engines = -1;
 291	pc->user_engines = NULL;
 292	pc->user_flags = BIT(UCONTEXT_BANNABLE) |
 293			 BIT(UCONTEXT_RECOVERABLE);
 294	if (i915->params.enable_hangcheck)
 295		pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
 296	pc->sched.priority = I915_PRIORITY_NORMAL;
 297
 298	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
 299		if (!HAS_EXECLISTS(i915)) {
 300			err = ERR_PTR(-EINVAL);
 301			goto proto_close;
 302		}
 303		pc->single_timeline = true;
 304	}
 305
 306	return pc;
 307
 308proto_close:
 309	proto_context_close(i915, pc);
 310	return err;
 311}
 312
 313static int proto_context_register_locked(struct drm_i915_file_private *fpriv,
 314					 struct i915_gem_proto_context *pc,
 315					 u32 *id)
 316{
 317	int ret;
 318	void *old;
 319
 320	lockdep_assert_held(&fpriv->proto_context_lock);
 321
 322	ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL);
 323	if (ret)
 324		return ret;
 325
 326	old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL);
 327	if (xa_is_err(old)) {
 328		xa_erase(&fpriv->context_xa, *id);
 329		return xa_err(old);
 330	}
 331	WARN_ON(old);
 332
 333	return 0;
 334}
 335
 336static int proto_context_register(struct drm_i915_file_private *fpriv,
 337				  struct i915_gem_proto_context *pc,
 338				  u32 *id)
 339{
 340	int ret;
 341
 342	mutex_lock(&fpriv->proto_context_lock);
 343	ret = proto_context_register_locked(fpriv, pc, id);
 344	mutex_unlock(&fpriv->proto_context_lock);
 345
 346	return ret;
 347}
 348
 349static struct i915_address_space *
 350i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
 351{
 352	struct i915_address_space *vm;
 353
 354	xa_lock(&file_priv->vm_xa);
 355	vm = xa_load(&file_priv->vm_xa, id);
 356	if (vm)
 357		kref_get(&vm->ref);
 358	xa_unlock(&file_priv->vm_xa);
 359
 360	return vm;
 361}
 362
 363static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
 364			    struct i915_gem_proto_context *pc,
 365			    const struct drm_i915_gem_context_param *args)
 366{
 367	struct drm_i915_private *i915 = fpriv->dev_priv;
 368	struct i915_address_space *vm;
 369
 370	if (args->size)
 371		return -EINVAL;
 372
 373	if (!HAS_FULL_PPGTT(i915))
 374		return -ENODEV;
 375
 376	if (upper_32_bits(args->value))
 377		return -ENOENT;
 378
 379	vm = i915_gem_vm_lookup(fpriv, args->value);
 380	if (!vm)
 381		return -ENOENT;
 382
 383	if (pc->vm)
 384		i915_vm_put(pc->vm);
 385	pc->vm = vm;
 386
 387	return 0;
 388}
 389
 390struct set_proto_ctx_engines {
 391	struct drm_i915_private *i915;
 392	unsigned num_engines;
 393	struct i915_gem_proto_engine *engines;
 394};
 395
 396static int
 397set_proto_ctx_engines_balance(struct i915_user_extension __user *base,
 398			      void *data)
 399{
 400	struct i915_context_engines_load_balance __user *ext =
 401		container_of_user(base, typeof(*ext), base);
 402	const struct set_proto_ctx_engines *set = data;
 403	struct drm_i915_private *i915 = set->i915;
 404	struct intel_engine_cs **siblings;
 405	u16 num_siblings, idx;
 406	unsigned int n;
 407	int err;
 408
 409	if (!HAS_EXECLISTS(i915))
 410		return -ENODEV;
 411
 412	if (get_user(idx, &ext->engine_index))
 413		return -EFAULT;
 414
 415	if (idx >= set->num_engines) {
 416		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
 417			idx, set->num_engines);
 418		return -EINVAL;
 419	}
 420
 421	idx = array_index_nospec(idx, set->num_engines);
 422	if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) {
 423		drm_dbg(&i915->drm,
 424			"Invalid placement[%d], already occupied\n", idx);
 425		return -EEXIST;
 426	}
 427
 428	if (get_user(num_siblings, &ext->num_siblings))
 429		return -EFAULT;
 430
 431	err = check_user_mbz(&ext->flags);
 432	if (err)
 433		return err;
 434
 435	err = check_user_mbz(&ext->mbz64);
 436	if (err)
 437		return err;
 438
 439	if (num_siblings == 0)
 440		return 0;
 441
 442	siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL);
 443	if (!siblings)
 444		return -ENOMEM;
 445
 446	for (n = 0; n < num_siblings; n++) {
 447		struct i915_engine_class_instance ci;
 448
 449		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
 450			err = -EFAULT;
 451			goto err_siblings;
 452		}
 453
 454		siblings[n] = intel_engine_lookup_user(i915,
 455						       ci.engine_class,
 456						       ci.engine_instance);
 457		if (!siblings[n]) {
 458			drm_dbg(&i915->drm,
 459				"Invalid sibling[%d]: { class:%d, inst:%d }\n",
 460				n, ci.engine_class, ci.engine_instance);
 461			err = -EINVAL;
 462			goto err_siblings;
 463		}
 464	}
 465
 466	if (num_siblings == 1) {
 467		set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
 468		set->engines[idx].engine = siblings[0];
 469		kfree(siblings);
 470	} else {
 471		set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED;
 472		set->engines[idx].num_siblings = num_siblings;
 473		set->engines[idx].siblings = siblings;
 474	}
 475
 476	return 0;
 477
 478err_siblings:
 479	kfree(siblings);
 480
 481	return err;
 482}
 483
 484static int
 485set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
 486{
 487	struct i915_context_engines_bond __user *ext =
 488		container_of_user(base, typeof(*ext), base);
 489	const struct set_proto_ctx_engines *set = data;
 490	struct drm_i915_private *i915 = set->i915;
 491	struct i915_engine_class_instance ci;
 492	struct intel_engine_cs *master;
 493	u16 idx, num_bonds;
 494	int err, n;
 495
 496	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) &&
 497	    !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) {
 498		drm_dbg(&i915->drm,
 499			"Bonding not supported on this platform\n");
 500		return -ENODEV;
 501	}
 502
 503	if (get_user(idx, &ext->virtual_index))
 504		return -EFAULT;
 505
 506	if (idx >= set->num_engines) {
 507		drm_dbg(&i915->drm,
 508			"Invalid index for virtual engine: %d >= %d\n",
 509			idx, set->num_engines);
 510		return -EINVAL;
 511	}
 512
 513	idx = array_index_nospec(idx, set->num_engines);
 514	if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) {
 515		drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
 516		return -EINVAL;
 517	}
 518
 519	if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) {
 520		drm_dbg(&i915->drm,
 521			"Bonding with virtual engines not allowed\n");
 522		return -EINVAL;
 523	}
 524
 525	err = check_user_mbz(&ext->flags);
 526	if (err)
 527		return err;
 528
 529	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
 530		err = check_user_mbz(&ext->mbz64[n]);
 531		if (err)
 532			return err;
 533	}
 534
 535	if (copy_from_user(&ci, &ext->master, sizeof(ci)))
 536		return -EFAULT;
 537
 538	master = intel_engine_lookup_user(i915,
 539					  ci.engine_class,
 540					  ci.engine_instance);
 541	if (!master) {
 542		drm_dbg(&i915->drm,
 543			"Unrecognised master engine: { class:%u, instance:%u }\n",
 544			ci.engine_class, ci.engine_instance);
 545		return -EINVAL;
 546	}
 547
 548	if (intel_engine_uses_guc(master)) {
 549		drm_dbg(&i915->drm, "bonding extension not supported with GuC submission");
 550		return -ENODEV;
 551	}
 552
 553	if (get_user(num_bonds, &ext->num_bonds))
 554		return -EFAULT;
 555
 556	for (n = 0; n < num_bonds; n++) {
 557		struct intel_engine_cs *bond;
 558
 559		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
 560			return -EFAULT;
 561
 562		bond = intel_engine_lookup_user(i915,
 563						ci.engine_class,
 564						ci.engine_instance);
 565		if (!bond) {
 566			drm_dbg(&i915->drm,
 567				"Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
 568				n, ci.engine_class, ci.engine_instance);
 569			return -EINVAL;
 570		}
 571	}
 572
 573	return 0;
 574}
 575
 576static int
 577set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
 578				      void *data)
 579{
 580	struct i915_context_engines_parallel_submit __user *ext =
 581		container_of_user(base, typeof(*ext), base);
 582	const struct set_proto_ctx_engines *set = data;
 583	struct drm_i915_private *i915 = set->i915;
 584	struct i915_engine_class_instance prev_engine;
 585	u64 flags;
 586	int err = 0, n, i, j;
 587	u16 slot, width, num_siblings;
 588	struct intel_engine_cs **siblings = NULL;
 589	intel_engine_mask_t prev_mask;
 590
 591	if (get_user(slot, &ext->engine_index))
 592		return -EFAULT;
 593
 594	if (get_user(width, &ext->width))
 595		return -EFAULT;
 596
 597	if (get_user(num_siblings, &ext->num_siblings))
 598		return -EFAULT;
 599
 600	if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc) &&
 601	    num_siblings != 1) {
 602		drm_dbg(&i915->drm, "Only 1 sibling (%d) supported in non-GuC mode\n",
 603			num_siblings);
 604		return -EINVAL;
 605	}
 606
 607	if (slot >= set->num_engines) {
 608		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
 609			slot, set->num_engines);
 610		return -EINVAL;
 611	}
 612
 613	if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) {
 614		drm_dbg(&i915->drm,
 615			"Invalid placement[%d], already occupied\n", slot);
 616		return -EINVAL;
 617	}
 618
 619	if (get_user(flags, &ext->flags))
 620		return -EFAULT;
 621
 622	if (flags) {
 623		drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags);
 624		return -EINVAL;
 625	}
 626
 627	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
 628		err = check_user_mbz(&ext->mbz64[n]);
 629		if (err)
 630			return err;
 631	}
 632
 633	if (width < 2) {
 634		drm_dbg(&i915->drm, "Width (%d) < 2\n", width);
 635		return -EINVAL;
 636	}
 637
 638	if (num_siblings < 1) {
 639		drm_dbg(&i915->drm, "Number siblings (%d) < 1\n",
 640			num_siblings);
 641		return -EINVAL;
 642	}
 643
 644	siblings = kmalloc_array(num_siblings * width,
 645				 sizeof(*siblings),
 646				 GFP_KERNEL);
 647	if (!siblings)
 648		return -ENOMEM;
 649
 650	/* Create contexts / engines */
 651	for (i = 0; i < width; ++i) {
 652		intel_engine_mask_t current_mask = 0;
 653
 654		for (j = 0; j < num_siblings; ++j) {
 655			struct i915_engine_class_instance ci;
 656
 657			n = i * num_siblings + j;
 658			if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
 659				err = -EFAULT;
 660				goto out_err;
 661			}
 662
 663			siblings[n] =
 664				intel_engine_lookup_user(i915, ci.engine_class,
 665							 ci.engine_instance);
 666			if (!siblings[n]) {
 667				drm_dbg(&i915->drm,
 668					"Invalid sibling[%d]: { class:%d, inst:%d }\n",
 669					n, ci.engine_class, ci.engine_instance);
 670				err = -EINVAL;
 671				goto out_err;
 672			}
 673
 674			/*
 675			 * We don't support breadcrumb handshake on these
 676			 * classes
 677			 */
 678			if (siblings[n]->class == RENDER_CLASS ||
 679			    siblings[n]->class == COMPUTE_CLASS) {
 680				err = -EINVAL;
 681				goto out_err;
 682			}
 683
 684			if (n) {
 685				if (prev_engine.engine_class !=
 686				    ci.engine_class) {
 687					drm_dbg(&i915->drm,
 688						"Mismatched class %d, %d\n",
 689						prev_engine.engine_class,
 690						ci.engine_class);
 691					err = -EINVAL;
 692					goto out_err;
 693				}
 694			}
 695
 696			prev_engine = ci;
 697			current_mask |= siblings[n]->logical_mask;
 698		}
 699
 700		if (i > 0) {
 701			if (current_mask != prev_mask << 1) {
 702				drm_dbg(&i915->drm,
 703					"Non contiguous logical mask 0x%x, 0x%x\n",
 704					prev_mask, current_mask);
 705				err = -EINVAL;
 706				goto out_err;
 707			}
 708		}
 709		prev_mask = current_mask;
 710	}
 711
 712	set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL;
 713	set->engines[slot].num_siblings = num_siblings;
 714	set->engines[slot].width = width;
 715	set->engines[slot].siblings = siblings;
 716
 717	return 0;
 718
 719out_err:
 720	kfree(siblings);
 721
 722	return err;
 723}
 724
 725static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = {
 726	[I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance,
 727	[I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond,
 728	[I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] =
 729		set_proto_ctx_engines_parallel_submit,
 730};
 731
 732static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
 733			         struct i915_gem_proto_context *pc,
 734			         const struct drm_i915_gem_context_param *args)
 735{
 736	struct drm_i915_private *i915 = fpriv->dev_priv;
 737	struct set_proto_ctx_engines set = { .i915 = i915 };
 738	struct i915_context_param_engines __user *user =
 739		u64_to_user_ptr(args->value);
 740	unsigned int n;
 741	u64 extensions;
 742	int err;
 743
 744	if (pc->num_user_engines >= 0) {
 745		drm_dbg(&i915->drm, "Cannot set engines twice");
 746		return -EINVAL;
 747	}
 748
 749	if (args->size < sizeof(*user) ||
 750	    !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) {
 751		drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
 752			args->size);
 753		return -EINVAL;
 754	}
 755
 756	set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
 757	/* RING_MASK has no shift so we can use it directly here */
 758	if (set.num_engines > I915_EXEC_RING_MASK + 1)
 759		return -EINVAL;
 760
 761	set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL);
 762	if (!set.engines)
 763		return -ENOMEM;
 764
 765	for (n = 0; n < set.num_engines; n++) {
 766		struct i915_engine_class_instance ci;
 767		struct intel_engine_cs *engine;
 768
 769		if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
 770			kfree(set.engines);
 771			return -EFAULT;
 772		}
 773
 774		memset(&set.engines[n], 0, sizeof(set.engines[n]));
 775
 776		if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
 777		    ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE)
 778			continue;
 779
 780		engine = intel_engine_lookup_user(i915,
 781						  ci.engine_class,
 782						  ci.engine_instance);
 783		if (!engine) {
 784			drm_dbg(&i915->drm,
 785				"Invalid engine[%d]: { class:%d, instance:%d }\n",
 786				n, ci.engine_class, ci.engine_instance);
 787			kfree(set.engines);
 788			return -ENOENT;
 789		}
 790
 791		set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
 792		set.engines[n].engine = engine;
 793	}
 794
 795	err = -EFAULT;
 796	if (!get_user(extensions, &user->extensions))
 797		err = i915_user_extensions(u64_to_user_ptr(extensions),
 798					   set_proto_ctx_engines_extensions,
 799					   ARRAY_SIZE(set_proto_ctx_engines_extensions),
 800					   &set);
 801	if (err) {
 802		kfree(set.engines);
 803		return err;
 804	}
 805
 806	pc->num_user_engines = set.num_engines;
 807	pc->user_engines = set.engines;
 808
 809	return 0;
 810}
 811
 812static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
 813			      struct i915_gem_proto_context *pc,
 814			      struct drm_i915_gem_context_param *args)
 815{
 816	struct drm_i915_private *i915 = fpriv->dev_priv;
 817	struct drm_i915_gem_context_param_sseu user_sseu;
 818	struct intel_sseu *sseu;
 819	int ret;
 820
 821	if (args->size < sizeof(user_sseu))
 822		return -EINVAL;
 823
 824	if (GRAPHICS_VER(i915) != 11)
 825		return -ENODEV;
 826
 827	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
 828			   sizeof(user_sseu)))
 829		return -EFAULT;
 830
 831	if (user_sseu.rsvd)
 832		return -EINVAL;
 833
 834	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
 835		return -EINVAL;
 836
 837	if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0))
 838		return -EINVAL;
 839
 840	if (pc->num_user_engines >= 0) {
 841		int idx = user_sseu.engine.engine_instance;
 842		struct i915_gem_proto_engine *pe;
 843
 844		if (idx >= pc->num_user_engines)
 845			return -EINVAL;
 846
 847		pe = &pc->user_engines[idx];
 848
 849		/* Only render engine supports RPCS configuration. */
 850		if (pe->engine->class != RENDER_CLASS)
 851			return -EINVAL;
 852
 853		sseu = &pe->sseu;
 854	} else {
 855		/* Only render engine supports RPCS configuration. */
 856		if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER)
 857			return -EINVAL;
 858
 859		/* There is only one render engine */
 860		if (user_sseu.engine.engine_instance != 0)
 861			return -EINVAL;
 862
 863		sseu = &pc->legacy_rcs_sseu;
 864	}
 865
 866	ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu);
 867	if (ret)
 868		return ret;
 869
 870	args->size = sizeof(user_sseu);
 871
 872	return 0;
 873}
 874
 875static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
 876			       struct i915_gem_proto_context *pc,
 877			       struct drm_i915_gem_context_param *args)
 878{
 879	int ret = 0;
 880
 881	switch (args->param) {
 882	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
 883		if (args->size)
 884			ret = -EINVAL;
 885		else if (args->value)
 886			pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE);
 887		else
 888			pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE);
 889		break;
 890
 891	case I915_CONTEXT_PARAM_BANNABLE:
 892		if (args->size)
 893			ret = -EINVAL;
 894		else if (!capable(CAP_SYS_ADMIN) && !args->value)
 895			ret = -EPERM;
 896		else if (args->value)
 897			pc->user_flags |= BIT(UCONTEXT_BANNABLE);
 898		else if (pc->uses_protected_content)
 899			ret = -EPERM;
 900		else
 901			pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
 902		break;
 903
 904	case I915_CONTEXT_PARAM_RECOVERABLE:
 905		if (args->size)
 906			ret = -EINVAL;
 907		else if (!args->value)
 908			pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE);
 909		else if (pc->uses_protected_content)
 910			ret = -EPERM;
 911		else
 912			pc->user_flags |= BIT(UCONTEXT_RECOVERABLE);
 913		break;
 914
 915	case I915_CONTEXT_PARAM_PRIORITY:
 916		ret = validate_priority(fpriv->dev_priv, args);
 917		if (!ret)
 918			pc->sched.priority = args->value;
 919		break;
 920
 921	case I915_CONTEXT_PARAM_SSEU:
 922		ret = set_proto_ctx_sseu(fpriv, pc, args);
 923		break;
 924
 925	case I915_CONTEXT_PARAM_VM:
 926		ret = set_proto_ctx_vm(fpriv, pc, args);
 927		break;
 928
 929	case I915_CONTEXT_PARAM_ENGINES:
 930		ret = set_proto_ctx_engines(fpriv, pc, args);
 931		break;
 932
 933	case I915_CONTEXT_PARAM_PERSISTENCE:
 934		if (args->size)
 935			ret = -EINVAL;
 936		else
 937			ret = proto_context_set_persistence(fpriv->dev_priv, pc,
 938							    args->value);
 939		break;
 940
 941	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
 942		ret = proto_context_set_protected(fpriv->dev_priv, pc,
 943						  args->value);
 944		break;
 945
 946	case I915_CONTEXT_PARAM_NO_ZEROMAP:
 947	case I915_CONTEXT_PARAM_BAN_PERIOD:
 948	case I915_CONTEXT_PARAM_RINGSIZE:
 949	default:
 950		ret = -EINVAL;
 951		break;
 952	}
 953
 954	return ret;
 955}
 956
 957static int intel_context_set_gem(struct intel_context *ce,
 958				 struct i915_gem_context *ctx,
 959				 struct intel_sseu sseu)
 960{
 961	int ret = 0;
 962
 963	GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
 964	RCU_INIT_POINTER(ce->gem_context, ctx);
 965
 966	GEM_BUG_ON(intel_context_is_pinned(ce));
 967	ce->ring_size = SZ_16K;
 968
 969	i915_vm_put(ce->vm);
 970	ce->vm = i915_gem_context_get_eb_vm(ctx);
 971
 972	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
 973	    intel_engine_has_timeslices(ce->engine) &&
 974	    intel_engine_has_semaphores(ce->engine))
 975		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
 976
 977	if (CONFIG_DRM_I915_REQUEST_TIMEOUT &&
 978	    ctx->i915->params.request_timeout_ms) {
 979		unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;
 980
 981		intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
 982	}
 983
 984	/* A valid SSEU has no zero fields */
 985	if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
 986		ret = intel_context_reconfigure_sseu(ce, sseu);
 987
 988	return ret;
 989}
 990
 991static void __unpin_engines(struct i915_gem_engines *e, unsigned int count)
 992{
 993	while (count--) {
 994		struct intel_context *ce = e->engines[count], *child;
 995
 996		if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags))
 997			continue;
 998
 999		for_each_child(ce, child)
1000			intel_context_unpin(child);
1001		intel_context_unpin(ce);
1002	}
1003}
1004
1005static void unpin_engines(struct i915_gem_engines *e)
1006{
1007	__unpin_engines(e, e->num_engines);
1008}
1009
1010static void __free_engines(struct i915_gem_engines *e, unsigned int count)
1011{
1012	while (count--) {
1013		if (!e->engines[count])
1014			continue;
1015
1016		intel_context_put(e->engines[count]);
1017	}
1018	kfree(e);
1019}
1020
1021static void free_engines(struct i915_gem_engines *e)
1022{
1023	__free_engines(e, e->num_engines);
1024}
1025
1026static void free_engines_rcu(struct rcu_head *rcu)
1027{
1028	struct i915_gem_engines *engines =
1029		container_of(rcu, struct i915_gem_engines, rcu);
1030
1031	i915_sw_fence_fini(&engines->fence);
1032	free_engines(engines);
1033}
1034
1035static void accumulate_runtime(struct i915_drm_client *client,
1036			       struct i915_gem_engines *engines)
1037{
1038	struct i915_gem_engines_iter it;
1039	struct intel_context *ce;
1040
1041	if (!client)
1042		return;
1043
1044	/* Transfer accumulated runtime to the parent GEM context. */
1045	for_each_gem_engine(ce, engines, it) {
1046		unsigned int class = ce->engine->uabi_class;
1047
1048		GEM_BUG_ON(class >= ARRAY_SIZE(client->past_runtime));
1049		atomic64_add(intel_context_get_total_runtime_ns(ce),
1050			     &client->past_runtime[class]);
1051	}
1052}
1053
1054static int
1055engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
1056{
1057	struct i915_gem_engines *engines =
1058		container_of(fence, typeof(*engines), fence);
1059	struct i915_gem_context *ctx = engines->ctx;
1060
1061	switch (state) {
1062	case FENCE_COMPLETE:
1063		if (!list_empty(&engines->link)) {
1064			unsigned long flags;
1065
1066			spin_lock_irqsave(&ctx->stale.lock, flags);
1067			list_del(&engines->link);
1068			spin_unlock_irqrestore(&ctx->stale.lock, flags);
1069		}
1070		accumulate_runtime(ctx->client, engines);
1071		i915_gem_context_put(ctx);
1072
1073		break;
1074
1075	case FENCE_FREE:
1076		init_rcu_head(&engines->rcu);
1077		call_rcu(&engines->rcu, free_engines_rcu);
1078		break;
1079	}
1080
1081	return NOTIFY_DONE;
1082}
1083
1084static struct i915_gem_engines *alloc_engines(unsigned int count)
1085{
1086	struct i915_gem_engines *e;
1087
1088	e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
1089	if (!e)
1090		return NULL;
1091
1092	i915_sw_fence_init(&e->fence, engines_notify);
1093	return e;
1094}
1095
1096static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
1097						struct intel_sseu rcs_sseu)
1098{
1099	const struct intel_gt *gt = to_gt(ctx->i915);
1100	struct intel_engine_cs *engine;
1101	struct i915_gem_engines *e, *err;
1102	enum intel_engine_id id;
1103
1104	e = alloc_engines(I915_NUM_ENGINES);
1105	if (!e)
1106		return ERR_PTR(-ENOMEM);
1107
1108	for_each_engine(engine, gt, id) {
1109		struct intel_context *ce;
1110		struct intel_sseu sseu = {};
1111		int ret;
1112
1113		if (engine->legacy_idx == INVALID_ENGINE)
1114			continue;
1115
1116		GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
1117		GEM_BUG_ON(e->engines[engine->legacy_idx]);
1118
1119		ce = intel_context_create(engine);
1120		if (IS_ERR(ce)) {
1121			err = ERR_CAST(ce);
1122			goto free_engines;
1123		}
1124
1125		e->engines[engine->legacy_idx] = ce;
1126		e->num_engines = max(e->num_engines, engine->legacy_idx + 1);
1127
1128		if (engine->class == RENDER_CLASS)
1129			sseu = rcs_sseu;
1130
1131		ret = intel_context_set_gem(ce, ctx, sseu);
1132		if (ret) {
1133			err = ERR_PTR(ret);
1134			goto free_engines;
1135		}
1136
1137	}
1138
1139	return e;
1140
1141free_engines:
1142	free_engines(e);
1143	return err;
1144}
1145
1146static int perma_pin_contexts(struct intel_context *ce)
1147{
1148	struct intel_context *child;
1149	int i = 0, j = 0, ret;
1150
1151	GEM_BUG_ON(!intel_context_is_parent(ce));
1152
1153	ret = intel_context_pin(ce);
1154	if (unlikely(ret))
1155		return ret;
1156
1157	for_each_child(ce, child) {
1158		ret = intel_context_pin(child);
1159		if (unlikely(ret))
1160			goto unwind;
1161		++i;
1162	}
1163
1164	set_bit(CONTEXT_PERMA_PIN, &ce->flags);
1165
1166	return 0;
1167
1168unwind:
1169	intel_context_unpin(ce);
1170	for_each_child(ce, child) {
1171		if (j++ < i)
1172			intel_context_unpin(child);
1173		else
1174			break;
1175	}
1176
1177	return ret;
1178}
1179
1180static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
1181					     unsigned int num_engines,
1182					     struct i915_gem_proto_engine *pe)
1183{
1184	struct i915_gem_engines *e, *err;
1185	unsigned int n;
1186
1187	e = alloc_engines(num_engines);
1188	if (!e)
1189		return ERR_PTR(-ENOMEM);
1190	e->num_engines = num_engines;
1191
1192	for (n = 0; n < num_engines; n++) {
1193		struct intel_context *ce, *child;
1194		int ret;
1195
1196		switch (pe[n].type) {
1197		case I915_GEM_ENGINE_TYPE_PHYSICAL:
1198			ce = intel_context_create(pe[n].engine);
1199			break;
1200
1201		case I915_GEM_ENGINE_TYPE_BALANCED:
1202			ce = intel_engine_create_virtual(pe[n].siblings,
1203							 pe[n].num_siblings, 0);
1204			break;
1205
1206		case I915_GEM_ENGINE_TYPE_PARALLEL:
1207			ce = intel_engine_create_parallel(pe[n].siblings,
1208							  pe[n].num_siblings,
1209							  pe[n].width);
1210			break;
1211
1212		case I915_GEM_ENGINE_TYPE_INVALID:
1213		default:
1214			GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID);
1215			continue;
1216		}
1217
1218		if (IS_ERR(ce)) {
1219			err = ERR_CAST(ce);
1220			goto free_engines;
1221		}
1222
1223		e->engines[n] = ce;
1224
1225		ret = intel_context_set_gem(ce, ctx, pe->sseu);
1226		if (ret) {
1227			err = ERR_PTR(ret);
1228			goto free_engines;
1229		}
1230		for_each_child(ce, child) {
1231			ret = intel_context_set_gem(child, ctx, pe->sseu);
1232			if (ret) {
1233				err = ERR_PTR(ret);
1234				goto free_engines;
1235			}
1236		}
1237
1238		/*
1239		 * XXX: Must be done after calling intel_context_set_gem as that
1240		 * function changes the ring size. The ring is allocated when
1241		 * the context is pinned. If the ring size is changed after
1242		 * allocation we have a mismatch of the ring size and will cause
1243		 * the context to hang. Presumably with a bit of reordering we
1244		 * could move the perma-pin step to the backend function
1245		 * intel_engine_create_parallel.
1246		 */
1247		if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) {
1248			ret = perma_pin_contexts(ce);
1249			if (ret) {
1250				err = ERR_PTR(ret);
1251				goto free_engines;
1252			}
1253		}
1254	}
1255
1256	return e;
1257
1258free_engines:
1259	free_engines(e);
1260	return err;
1261}
1262
1263static void i915_gem_context_release_work(struct work_struct *work)
1264{
1265	struct i915_gem_context *ctx = container_of(work, typeof(*ctx),
1266						    release_work);
1267	struct i915_address_space *vm;
1268
1269	trace_i915_context_free(ctx);
1270	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
1271
1272	spin_lock(&ctx->i915->gem.contexts.lock);
1273	list_del(&ctx->link);
1274	spin_unlock(&ctx->i915->gem.contexts.lock);
1275
1276	if (ctx->syncobj)
1277		drm_syncobj_put(ctx->syncobj);
1278
1279	vm = ctx->vm;
1280	if (vm)
1281		i915_vm_put(vm);
1282
1283	if (ctx->pxp_wakeref)
1284		intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref);
1285
1286	if (ctx->client)
1287		i915_drm_client_put(ctx->client);
1288
1289	mutex_destroy(&ctx->engines_mutex);
1290	mutex_destroy(&ctx->lut_mutex);
1291
1292	put_pid(ctx->pid);
1293	mutex_destroy(&ctx->mutex);
1294
1295	kfree_rcu(ctx, rcu);
1296}
1297
1298void i915_gem_context_release(struct kref *ref)
1299{
1300	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
1301
1302	queue_work(ctx->i915->wq, &ctx->release_work);
1303}
1304
1305static inline struct i915_gem_engines *
1306__context_engines_static(const struct i915_gem_context *ctx)
1307{
1308	return rcu_dereference_protected(ctx->engines, true);
1309}
1310
1311static void __reset_context(struct i915_gem_context *ctx,
1312			    struct intel_engine_cs *engine)
1313{
1314	intel_gt_handle_error(engine->gt, engine->mask, 0,
1315			      "context closure in %s", ctx->name);
1316}
1317
1318static bool __cancel_engine(struct intel_engine_cs *engine)
1319{
1320	/*
1321	 * Send a "high priority pulse" down the engine to cause the
1322	 * current request to be momentarily preempted. (If it fails to
1323	 * be preempted, it will be reset). As we have marked our context
1324	 * as banned, any incomplete request, including any running, will
1325	 * be skipped following the preemption.
1326	 *
1327	 * If there is no hangchecking (one of the reasons why we try to
1328	 * cancel the context) and no forced preemption, there may be no
1329	 * means by which we reset the GPU and evict the persistent hog.
1330	 * Ergo if we are unable to inject a preemptive pulse that can
1331	 * kill the banned context, we fallback to doing a local reset
1332	 * instead.
1333	 */
1334	return intel_engine_pulse(engine) == 0;
1335}
1336
1337static struct intel_engine_cs *active_engine(struct intel_context *ce)
1338{
1339	struct intel_engine_cs *engine = NULL;
1340	struct i915_request *rq;
1341
1342	if (intel_context_has_inflight(ce))
1343		return intel_context_inflight(ce);
1344
1345	if (!ce->timeline)
1346		return NULL;
1347
1348	/*
1349	 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
1350	 * to the request to prevent it being transferred to a new timeline
1351	 * (and onto a new timeline->requests list).
1352	 */
1353	rcu_read_lock();
1354	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
1355		bool found;
1356
1357		/* timeline is already completed upto this point? */
1358		if (!i915_request_get_rcu(rq))
1359			break;
1360
1361		/* Check with the backend if the request is inflight */
1362		found = true;
1363		if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
1364			found = i915_request_active_engine(rq, &engine);
1365
1366		i915_request_put(rq);
1367		if (found)
1368			break;
1369	}
1370	rcu_read_unlock();
1371
1372	return engine;
1373}
1374
1375static void
1376kill_engines(struct i915_gem_engines *engines, bool exit, bool persistent)
1377{
1378	struct i915_gem_engines_iter it;
1379	struct intel_context *ce;
1380
1381	/*
1382	 * Map the user's engine back to the actual engines; one virtual
1383	 * engine will be mapped to multiple engines, and using ctx->engine[]
1384	 * the same engine may be have multiple instances in the user's map.
1385	 * However, we only care about pending requests, so only include
1386	 * engines on which there are incomplete requests.
1387	 */
1388	for_each_gem_engine(ce, engines, it) {
1389		struct intel_engine_cs *engine;
1390
1391		if ((exit || !persistent) && intel_context_revoke(ce))
1392			continue; /* Already marked. */
1393
1394		/*
1395		 * Check the current active state of this context; if we
1396		 * are currently executing on the GPU we need to evict
1397		 * ourselves. On the other hand, if we haven't yet been
1398		 * submitted to the GPU or if everything is complete,
1399		 * we have nothing to do.
1400		 */
1401		engine = active_engine(ce);
1402
1403		/* First attempt to gracefully cancel the context */
1404		if (engine && !__cancel_engine(engine) && (exit || !persistent))
1405			/*
1406			 * If we are unable to send a preemptive pulse to bump
1407			 * the context from the GPU, we have to resort to a full
1408			 * reset. We hope the collateral damage is worth it.
1409			 */
1410			__reset_context(engines->ctx, engine);
1411	}
1412}
1413
1414static void kill_context(struct i915_gem_context *ctx)
1415{
1416	struct i915_gem_engines *pos, *next;
1417
1418	spin_lock_irq(&ctx->stale.lock);
1419	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
1420	list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
1421		if (!i915_sw_fence_await(&pos->fence)) {
1422			list_del_init(&pos->link);
1423			continue;
1424		}
1425
1426		spin_unlock_irq(&ctx->stale.lock);
1427
1428		kill_engines(pos, !ctx->i915->params.enable_hangcheck,
1429			     i915_gem_context_is_persistent(ctx));
1430
1431		spin_lock_irq(&ctx->stale.lock);
1432		GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
1433		list_safe_reset_next(pos, next, link);
1434		list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */
1435
1436		i915_sw_fence_complete(&pos->fence);
1437	}
1438	spin_unlock_irq(&ctx->stale.lock);
1439}
1440
1441static void engines_idle_release(struct i915_gem_context *ctx,
1442				 struct i915_gem_engines *engines)
1443{
1444	struct i915_gem_engines_iter it;
1445	struct intel_context *ce;
1446
1447	INIT_LIST_HEAD(&engines->link);
1448
1449	engines->ctx = i915_gem_context_get(ctx);
1450
1451	for_each_gem_engine(ce, engines, it) {
1452		int err;
1453
1454		/* serialises with execbuf */
1455		intel_context_close(ce);
1456		if (!intel_context_pin_if_active(ce))
1457			continue;
1458
1459		/* Wait until context is finally scheduled out and retired */
1460		err = i915_sw_fence_await_active(&engines->fence,
1461						 &ce->active,
1462						 I915_ACTIVE_AWAIT_BARRIER);
1463		intel_context_unpin(ce);
1464		if (err)
1465			goto kill;
1466	}
1467
1468	spin_lock_irq(&ctx->stale.lock);
1469	if (!i915_gem_context_is_closed(ctx))
1470		list_add_tail(&engines->link, &ctx->stale.engines);
1471	spin_unlock_irq(&ctx->stale.lock);
1472
1473kill:
1474	if (list_empty(&engines->link)) /* raced, already closed */
1475		kill_engines(engines, true,
1476			     i915_gem_context_is_persistent(ctx));
1477
1478	i915_sw_fence_commit(&engines->fence);
1479}
1480
1481static void set_closed_name(struct i915_gem_context *ctx)
1482{
1483	char *s;
1484
1485	/* Replace '[]' with '<>' to indicate closed in debug prints */
1486
1487	s = strrchr(ctx->name, '[');
1488	if (!s)
1489		return;
1490
1491	*s = '<';
1492
1493	s = strchr(s + 1, ']');
1494	if (s)
1495		*s = '>';
1496}
1497
1498static void context_close(struct i915_gem_context *ctx)
1499{
1500	struct i915_drm_client *client;
1501
1502	/* Flush any concurrent set_engines() */
1503	mutex_lock(&ctx->engines_mutex);
1504	unpin_engines(__context_engines_static(ctx));
1505	engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
1506	i915_gem_context_set_closed(ctx);
1507	mutex_unlock(&ctx->engines_mutex);
1508
1509	mutex_lock(&ctx->mutex);
1510
1511	set_closed_name(ctx);
1512
1513	/*
1514	 * The LUT uses the VMA as a backpointer to unref the object,
1515	 * so we need to clear the LUT before we close all the VMA (inside
1516	 * the ppgtt).
1517	 */
1518	lut_close(ctx);
1519
1520	ctx->file_priv = ERR_PTR(-EBADF);
1521
1522	client = ctx->client;
1523	if (client) {
1524		spin_lock(&client->ctx_lock);
1525		list_del_rcu(&ctx->client_link);
1526		spin_unlock(&client->ctx_lock);
1527	}
1528
1529	mutex_unlock(&ctx->mutex);
1530
1531	/*
1532	 * If the user has disabled hangchecking, we can not be sure that
1533	 * the batches will ever complete after the context is closed,
1534	 * keeping the context and all resources pinned forever. So in this
1535	 * case we opt to forcibly kill off all remaining requests on
1536	 * context close.
1537	 */
1538	kill_context(ctx);
1539
1540	i915_gem_context_put(ctx);
1541}
1542
1543static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
1544{
1545	if (i915_gem_context_is_persistent(ctx) == state)
1546		return 0;
1547
1548	if (state) {
1549		/*
1550		 * Only contexts that are short-lived [that will expire or be
1551		 * reset] are allowed to survive past termination. We require
1552		 * hangcheck to ensure that the persistent requests are healthy.
1553		 */
1554		if (!ctx->i915->params.enable_hangcheck)
1555			return -EINVAL;
1556
1557		i915_gem_context_set_persistence(ctx);
1558	} else {
1559		/* To cancel a context we use "preempt-to-idle" */
1560		if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
1561			return -ENODEV;
1562
1563		/*
1564		 * If the cancel fails, we then need to reset, cleanly!
1565		 *
1566		 * If the per-engine reset fails, all hope is lost! We resort
1567		 * to a full GPU reset in that unlikely case, but realistically
1568		 * if the engine could not reset, the full reset does not fare
1569		 * much better. The damage has been done.
1570		 *
1571		 * However, if we cannot reset an engine by itself, we cannot
1572		 * cleanup a hanging persistent context without causing
1573		 * colateral damage, and we should not pretend we can by
1574		 * exposing the interface.
1575		 */
1576		if (!intel_has_reset_engine(to_gt(ctx->i915)))
1577			return -ENODEV;
1578
1579		i915_gem_context_clear_persistence(ctx);
1580	}
1581
1582	return 0;
1583}
1584
1585static struct i915_gem_context *
1586i915_gem_create_context(struct drm_i915_private *i915,
1587			const struct i915_gem_proto_context *pc)
1588{
1589	struct i915_gem_context *ctx;
1590	struct i915_address_space *vm = NULL;
1591	struct i915_gem_engines *e;
1592	int err;
1593	int i;
1594
1595	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1596	if (!ctx)
1597		return ERR_PTR(-ENOMEM);
1598
1599	kref_init(&ctx->ref);
1600	ctx->i915 = i915;
1601	ctx->sched = pc->sched;
1602	mutex_init(&ctx->mutex);
1603	INIT_LIST_HEAD(&ctx->link);
1604	INIT_WORK(&ctx->release_work, i915_gem_context_release_work);
1605
1606	spin_lock_init(&ctx->stale.lock);
1607	INIT_LIST_HEAD(&ctx->stale.engines);
1608
1609	if (pc->vm) {
1610		vm = i915_vm_get(pc->vm);
1611	} else if (HAS_FULL_PPGTT(i915)) {
1612		struct i915_ppgtt *ppgtt;
1613
1614		ppgtt = i915_ppgtt_create(to_gt(i915), 0);
1615		if (IS_ERR(ppgtt)) {
1616			drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
1617				PTR_ERR(ppgtt));
1618			err = PTR_ERR(ppgtt);
1619			goto err_ctx;
1620		}
1621		vm = &ppgtt->vm;
1622	}
1623	if (vm)
1624		ctx->vm = vm;
1625
1626	mutex_init(&ctx->engines_mutex);
1627	if (pc->num_user_engines >= 0) {
1628		i915_gem_context_set_user_engines(ctx);
1629		e = user_engines(ctx, pc->num_user_engines, pc->user_engines);
1630	} else {
1631		i915_gem_context_clear_user_engines(ctx);
1632		e = default_engines(ctx, pc->legacy_rcs_sseu);
1633	}
1634	if (IS_ERR(e)) {
1635		err = PTR_ERR(e);
1636		goto err_vm;
1637	}
1638	RCU_INIT_POINTER(ctx->engines, e);
1639
1640	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
1641	mutex_init(&ctx->lut_mutex);
1642
1643	/* NB: Mark all slices as needing a remap so that when the context first
1644	 * loads it will restore whatever remap state already exists. If there
1645	 * is no remap info, it will be a NOP. */
1646	ctx->remap_slice = ALL_L3_SLICES(i915);
1647
1648	ctx->user_flags = pc->user_flags;
1649
1650	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
1651		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
1652
1653	if (pc->single_timeline) {
1654		err = drm_syncobj_create(&ctx->syncobj,
1655					 DRM_SYNCOBJ_CREATE_SIGNALED,
1656					 NULL);
1657		if (err)
1658			goto err_engines;
1659	}
1660
1661	if (pc->uses_protected_content) {
1662		ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1663		ctx->uses_protected_content = true;
1664	}
1665
1666	trace_i915_context_create(ctx);
1667
1668	return ctx;
1669
1670err_engines:
1671	free_engines(e);
1672err_vm:
1673	if (ctx->vm)
1674		i915_vm_put(ctx->vm);
1675err_ctx:
1676	kfree(ctx);
1677	return ERR_PTR(err);
1678}
1679
1680static void init_contexts(struct i915_gem_contexts *gc)
1681{
1682	spin_lock_init(&gc->lock);
1683	INIT_LIST_HEAD(&gc->list);
1684}
1685
1686void i915_gem_init__contexts(struct drm_i915_private *i915)
1687{
1688	init_contexts(&i915->gem.contexts);
1689}
1690
1691/*
1692 * Note that this implicitly consumes the ctx reference, by placing
1693 * the ctx in the context_xa.
1694 */
1695static void gem_context_register(struct i915_gem_context *ctx,
1696				 struct drm_i915_file_private *fpriv,
1697				 u32 id)
1698{
1699	struct drm_i915_private *i915 = ctx->i915;
1700	void *old;
1701
1702	ctx->file_priv = fpriv;
1703
1704	ctx->pid = get_task_pid(current, PIDTYPE_PID);
1705	ctx->client = i915_drm_client_get(fpriv->client);
1706
1707	snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
1708		 current->comm, pid_nr(ctx->pid));
1709
1710	spin_lock(&ctx->client->ctx_lock);
1711	list_add_tail_rcu(&ctx->client_link, &ctx->client->ctx_list);
1712	spin_unlock(&ctx->client->ctx_lock);
1713
1714	spin_lock(&i915->gem.contexts.lock);
1715	list_add_tail(&ctx->link, &i915->gem.contexts.list);
1716	spin_unlock(&i915->gem.contexts.lock);
1717
1718	/* And finally expose ourselves to userspace via the idr */
1719	old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
1720	WARN_ON(old);
1721}
1722
1723int i915_gem_context_open(struct drm_i915_private *i915,
1724			  struct drm_file *file)
1725{
1726	struct drm_i915_file_private *file_priv = file->driver_priv;
1727	struct i915_gem_proto_context *pc;
1728	struct i915_gem_context *ctx;
1729	int err;
1730
1731	mutex_init(&file_priv->proto_context_lock);
1732	xa_init_flags(&file_priv->proto_context_xa, XA_FLAGS_ALLOC);
1733
1734	/* 0 reserved for the default context */
1735	xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC1);
1736
1737	/* 0 reserved for invalid/unassigned ppgtt */
1738	xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
1739
1740	pc = proto_context_create(i915, 0);
1741	if (IS_ERR(pc)) {
1742		err = PTR_ERR(pc);
1743		goto err;
1744	}
1745
1746	ctx = i915_gem_create_context(i915, pc);
1747	proto_context_close(i915, pc);
1748	if (IS_ERR(ctx)) {
1749		err = PTR_ERR(ctx);
1750		goto err;
1751	}
1752
1753	gem_context_register(ctx, file_priv, 0);
1754
1755	return 0;
1756
1757err:
1758	xa_destroy(&file_priv->vm_xa);
1759	xa_destroy(&file_priv->context_xa);
1760	xa_destroy(&file_priv->proto_context_xa);
1761	mutex_destroy(&file_priv->proto_context_lock);
1762	return err;
1763}
1764
1765void i915_gem_context_close(struct drm_file *file)
1766{
1767	struct drm_i915_file_private *file_priv = file->driver_priv;
1768	struct i915_gem_proto_context *pc;
1769	struct i915_address_space *vm;
1770	struct i915_gem_context *ctx;
1771	unsigned long idx;
1772
1773	xa_for_each(&file_priv->proto_context_xa, idx, pc)
1774		proto_context_close(file_priv->dev_priv, pc);
1775	xa_destroy(&file_priv->proto_context_xa);
1776	mutex_destroy(&file_priv->proto_context_lock);
1777
1778	xa_for_each(&file_priv->context_xa, idx, ctx)
1779		context_close(ctx);
1780	xa_destroy(&file_priv->context_xa);
1781
1782	xa_for_each(&file_priv->vm_xa, idx, vm)
1783		i915_vm_put(vm);
1784	xa_destroy(&file_priv->vm_xa);
1785}
1786
1787int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
1788			     struct drm_file *file)
1789{
1790	struct drm_i915_private *i915 = to_i915(dev);
1791	struct drm_i915_gem_vm_control *args = data;
1792	struct drm_i915_file_private *file_priv = file->driver_priv;
1793	struct i915_ppgtt *ppgtt;
1794	u32 id;
1795	int err;
1796
1797	if (!HAS_FULL_PPGTT(i915))
1798		return -ENODEV;
1799
1800	if (args->flags)
1801		return -EINVAL;
1802
1803	ppgtt = i915_ppgtt_create(to_gt(i915), 0);
1804	if (IS_ERR(ppgtt))
1805		return PTR_ERR(ppgtt);
1806
1807	if (args->extensions) {
1808		err = i915_user_extensions(u64_to_user_ptr(args->extensions),
1809					   NULL, 0,
1810					   ppgtt);
1811		if (err)
1812			goto err_put;
1813	}
1814
1815	err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
1816		       xa_limit_32b, GFP_KERNEL);
1817	if (err)
1818		goto err_put;
1819
1820	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1821	args->vm_id = id;
1822	return 0;
1823
1824err_put:
1825	i915_vm_put(&ppgtt->vm);
1826	return err;
1827}
1828
1829int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
1830			      struct drm_file *file)
1831{
1832	struct drm_i915_file_private *file_priv = file->driver_priv;
1833	struct drm_i915_gem_vm_control *args = data;
1834	struct i915_address_space *vm;
1835
1836	if (args->flags)
1837		return -EINVAL;
1838
1839	if (args->extensions)
1840		return -EINVAL;
1841
1842	vm = xa_erase(&file_priv->vm_xa, args->vm_id);
1843	if (!vm)
1844		return -ENOENT;
1845
1846	i915_vm_put(vm);
1847	return 0;
1848}
1849
1850static int get_ppgtt(struct drm_i915_file_private *file_priv,
1851		     struct i915_gem_context *ctx,
1852		     struct drm_i915_gem_context_param *args)
1853{
1854	struct i915_address_space *vm;
1855	int err;
1856	u32 id;
1857
1858	if (!i915_gem_context_has_full_ppgtt(ctx))
1859		return -ENODEV;
1860
1861	vm = ctx->vm;
1862	GEM_BUG_ON(!vm);
1863
1864	/*
1865	 * Get a reference for the allocated handle.  Once the handle is
1866	 * visible in the vm_xa table, userspace could try to close it
1867	 * from under our feet, so we need to hold the extra reference
1868	 * first.
1869	 */
1870	i915_vm_get(vm);
1871
1872	err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1873	if (err) {
1874		i915_vm_put(vm);
1875		return err;
1876	}
1877
1878	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1879	args->value = id;
1880	args->size = 0;
1881
1882	return err;
1883}
1884
1885int
1886i915_gem_user_to_context_sseu(struct intel_gt *gt,
1887			      const struct drm_i915_gem_context_param_sseu *user,
1888			      struct intel_sseu *context)
1889{
1890	const struct sseu_dev_info *device = &gt->info.sseu;
1891	struct drm_i915_private *i915 = gt->i915;
1892	unsigned int dev_subslice_mask = intel_sseu_get_hsw_subslices(device, 0);
1893
1894	/* No zeros in any field. */
1895	if (!user->slice_mask || !user->subslice_mask ||
1896	    !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1897		return -EINVAL;
1898
1899	/* Max > min. */
1900	if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1901		return -EINVAL;
1902
1903	/*
1904	 * Some future proofing on the types since the uAPI is wider than the
1905	 * current internal implementation.
1906	 */
1907	if (overflows_type(user->slice_mask, context->slice_mask) ||
1908	    overflows_type(user->subslice_mask, context->subslice_mask) ||
1909	    overflows_type(user->min_eus_per_subslice,
1910			   context->min_eus_per_subslice) ||
1911	    overflows_type(user->max_eus_per_subslice,
1912			   context->max_eus_per_subslice))
1913		return -EINVAL;
1914
1915	/* Check validity against hardware. */
1916	if (user->slice_mask & ~device->slice_mask)
1917		return -EINVAL;
1918
1919	if (user->subslice_mask & ~dev_subslice_mask)
1920		return -EINVAL;
1921
1922	if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1923		return -EINVAL;
1924
1925	context->slice_mask = user->slice_mask;
1926	context->subslice_mask = user->subslice_mask;
1927	context->min_eus_per_subslice = user->min_eus_per_subslice;
1928	context->max_eus_per_subslice = user->max_eus_per_subslice;
1929
1930	/* Part specific restrictions. */
1931	if (GRAPHICS_VER(i915) == 11) {
1932		unsigned int hw_s = hweight8(device->slice_mask);
1933		unsigned int hw_ss_per_s = hweight8(dev_subslice_mask);
1934		unsigned int req_s = hweight8(context->slice_mask);
1935		unsigned int req_ss = hweight8(context->subslice_mask);
1936
1937		/*
1938		 * Only full subslice enablement is possible if more than one
1939		 * slice is turned on.
1940		 */
1941		if (req_s > 1 && req_ss != hw_ss_per_s)
1942			return -EINVAL;
1943
1944		/*
1945		 * If more than four (SScount bitfield limit) subslices are
1946		 * requested then the number has to be even.
1947		 */
1948		if (req_ss > 4 && (req_ss & 1))
1949			return -EINVAL;
1950
1951		/*
1952		 * If only one slice is enabled and subslice count is below the
1953		 * device full enablement, it must be at most half of the all
1954		 * available subslices.
1955		 */
1956		if (req_s == 1 && req_ss < hw_ss_per_s &&
1957		    req_ss > (hw_ss_per_s / 2))
1958			return -EINVAL;
1959
1960		/* ABI restriction - VME use case only. */
1961
1962		/* All slices or one slice only. */
1963		if (req_s != 1 && req_s != hw_s)
1964			return -EINVAL;
1965
1966		/*
1967		 * Half subslices or full enablement only when one slice is
1968		 * enabled.
1969		 */
1970		if (req_s == 1 &&
1971		    (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1972			return -EINVAL;
1973
1974		/* No EU configuration changes. */
1975		if ((user->min_eus_per_subslice !=
1976		     device->max_eus_per_subslice) ||
1977		    (user->max_eus_per_subslice !=
1978		     device->max_eus_per_subslice))
1979			return -EINVAL;
1980	}
1981
1982	return 0;
1983}
1984
1985static int set_sseu(struct i915_gem_context *ctx,
1986		    struct drm_i915_gem_context_param *args)
1987{
1988	struct drm_i915_private *i915 = ctx->i915;
1989	struct drm_i915_gem_context_param_sseu user_sseu;
1990	struct intel_context *ce;
1991	struct intel_sseu sseu;
1992	unsigned long lookup;
1993	int ret;
1994
1995	if (args->size < sizeof(user_sseu))
1996		return -EINVAL;
1997
1998	if (GRAPHICS_VER(i915) != 11)
1999		return -ENODEV;
2000
2001	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2002			   sizeof(user_sseu)))
2003		return -EFAULT;
2004
2005	if (user_sseu.rsvd)
2006		return -EINVAL;
2007
2008	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2009		return -EINVAL;
2010
2011	lookup = 0;
2012	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2013		lookup |= LOOKUP_USER_INDEX;
2014
2015	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2016	if (IS_ERR(ce))
2017		return PTR_ERR(ce);
2018
2019	/* Only render engine supports RPCS configuration. */
2020	if (ce->engine->class != RENDER_CLASS) {
2021		ret = -ENODEV;
2022		goto out_ce;
2023	}
2024
2025	ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
2026	if (ret)
2027		goto out_ce;
2028
2029	ret = intel_context_reconfigure_sseu(ce, sseu);
2030	if (ret)
2031		goto out_ce;
2032
2033	args->size = sizeof(user_sseu);
2034
2035out_ce:
2036	intel_context_put(ce);
2037	return ret;
2038}
2039
2040static int
2041set_persistence(struct i915_gem_context *ctx,
2042		const struct drm_i915_gem_context_param *args)
2043{
2044	if (args->size)
2045		return -EINVAL;
2046
2047	return __context_set_persistence(ctx, args->value);
2048}
2049
2050static int set_priority(struct i915_gem_context *ctx,
2051			const struct drm_i915_gem_context_param *args)
2052{
2053	struct i915_gem_engines_iter it;
2054	struct intel_context *ce;
2055	int err;
2056
2057	err = validate_priority(ctx->i915, args);
2058	if (err)
2059		return err;
2060
2061	ctx->sched.priority = args->value;
2062
2063	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2064		if (!intel_engine_has_timeslices(ce->engine))
2065			continue;
2066
2067		if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
2068		    intel_engine_has_semaphores(ce->engine))
2069			intel_context_set_use_semaphores(ce);
2070		else
2071			intel_context_clear_use_semaphores(ce);
2072	}
2073	i915_gem_context_unlock_engines(ctx);
2074
2075	return 0;
2076}
2077
2078static int get_protected(struct i915_gem_context *ctx,
2079			 struct drm_i915_gem_context_param *args)
2080{
2081	args->size = 0;
2082	args->value = i915_gem_context_uses_protected_content(ctx);
2083
2084	return 0;
2085}
2086
2087static int ctx_setparam(struct drm_i915_file_private *fpriv,
2088			struct i915_gem_context *ctx,
2089			struct drm_i915_gem_context_param *args)
2090{
2091	int ret = 0;
2092
2093	switch (args->param) {
2094	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2095		if (args->size)
2096			ret = -EINVAL;
2097		else if (args->value)
2098			i915_gem_context_set_no_error_capture(ctx);
2099		else
2100			i915_gem_context_clear_no_error_capture(ctx);
2101		break;
2102
2103	case I915_CONTEXT_PARAM_BANNABLE:
2104		if (args->size)
2105			ret = -EINVAL;
2106		else if (!capable(CAP_SYS_ADMIN) && !args->value)
2107			ret = -EPERM;
2108		else if (args->value)
2109			i915_gem_context_set_bannable(ctx);
2110		else if (i915_gem_context_uses_protected_content(ctx))
2111			ret = -EPERM; /* can't clear this for protected contexts */
2112		else
2113			i915_gem_context_clear_bannable(ctx);
2114		break;
2115
2116	case I915_CONTEXT_PARAM_RECOVERABLE:
2117		if (args->size)
2118			ret = -EINVAL;
2119		else if (!args->value)
2120			i915_gem_context_clear_recoverable(ctx);
2121		else if (i915_gem_context_uses_protected_content(ctx))
2122			ret = -EPERM; /* can't set this for protected contexts */
2123		else
2124			i915_gem_context_set_recoverable(ctx);
2125		break;
2126
2127	case I915_CONTEXT_PARAM_PRIORITY:
2128		ret = set_priority(ctx, args);
2129		break;
2130
2131	case I915_CONTEXT_PARAM_SSEU:
2132		ret = set_sseu(ctx, args);
2133		break;
2134
2135	case I915_CONTEXT_PARAM_PERSISTENCE:
2136		ret = set_persistence(ctx, args);
2137		break;
2138
2139	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
2140	case I915_CONTEXT_PARAM_NO_ZEROMAP:
2141	case I915_CONTEXT_PARAM_BAN_PERIOD:
2142	case I915_CONTEXT_PARAM_RINGSIZE:
2143	case I915_CONTEXT_PARAM_VM:
2144	case I915_CONTEXT_PARAM_ENGINES:
2145	default:
2146		ret = -EINVAL;
2147		break;
2148	}
2149
2150	return ret;
2151}
2152
2153struct create_ext {
2154	struct i915_gem_proto_context *pc;
2155	struct drm_i915_file_private *fpriv;
2156};
2157
2158static int create_setparam(struct i915_user_extension __user *ext, void *data)
2159{
2160	struct drm_i915_gem_context_create_ext_setparam local;
2161	const struct create_ext *arg = data;
2162
2163	if (copy_from_user(&local, ext, sizeof(local)))
2164		return -EFAULT;
2165
2166	if (local.param.ctx_id)
2167		return -EINVAL;
2168
2169	return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param);
2170}
2171
2172static int invalid_ext(struct i915_user_extension __user *ext, void *data)
2173{
2174	return -EINVAL;
2175}
2176
2177static const i915_user_extension_fn create_extensions[] = {
2178	[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
2179	[I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext,
2180};
2181
2182static bool client_is_banned(struct drm_i915_file_private *file_priv)
2183{
2184	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
2185}
2186
2187static inline struct i915_gem_context *
2188__context_lookup(struct drm_i915_file_private *file_priv, u32 id)
2189{
2190	struct i915_gem_context *ctx;
2191
2192	rcu_read_lock();
2193	ctx = xa_load(&file_priv->context_xa, id);
2194	if (ctx && !kref_get_unless_zero(&ctx->ref))
2195		ctx = NULL;
2196	rcu_read_unlock();
2197
2198	return ctx;
2199}
2200
2201static struct i915_gem_context *
2202finalize_create_context_locked(struct drm_i915_file_private *file_priv,
2203			       struct i915_gem_proto_context *pc, u32 id)
2204{
2205	struct i915_gem_context *ctx;
2206	void *old;
2207
2208	lockdep_assert_held(&file_priv->proto_context_lock);
2209
2210	ctx = i915_gem_create_context(file_priv->dev_priv, pc);
2211	if (IS_ERR(ctx))
2212		return ctx;
2213
2214	/*
2215	 * One for the xarray and one for the caller.  We need to grab
2216	 * the reference *prior* to making the ctx visble to userspace
2217	 * in gem_context_register(), as at any point after that
2218	 * userspace can try to race us with another thread destroying
2219	 * the context under our feet.
2220	 */
2221	i915_gem_context_get(ctx);
2222
2223	gem_context_register(ctx, file_priv, id);
2224
2225	old = xa_erase(&file_priv->proto_context_xa, id);
2226	GEM_BUG_ON(old != pc);
2227	proto_context_close(file_priv->dev_priv, pc);
2228
2229	return ctx;
2230}
2231
2232struct i915_gem_context *
2233i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
2234{
2235	struct i915_gem_proto_context *pc;
2236	struct i915_gem_context *ctx;
2237
2238	ctx = __context_lookup(file_priv, id);
2239	if (ctx)
2240		return ctx;
2241
2242	mutex_lock(&file_priv->proto_context_lock);
2243	/* Try one more time under the lock */
2244	ctx = __context_lookup(file_priv, id);
2245	if (!ctx) {
2246		pc = xa_load(&file_priv->proto_context_xa, id);
2247		if (!pc)
2248			ctx = ERR_PTR(-ENOENT);
2249		else
2250			ctx = finalize_create_context_locked(file_priv, pc, id);
2251	}
2252	mutex_unlock(&file_priv->proto_context_lock);
2253
2254	return ctx;
2255}
2256
2257int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2258				  struct drm_file *file)
2259{
2260	struct drm_i915_private *i915 = to_i915(dev);
2261	struct drm_i915_gem_context_create_ext *args = data;
2262	struct create_ext ext_data;
2263	int ret;
2264	u32 id;
2265
2266	if (!DRIVER_CAPS(i915)->has_logical_contexts)
2267		return -ENODEV;
2268
2269	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
2270		return -EINVAL;
2271
2272	ret = intel_gt_terminally_wedged(to_gt(i915));
2273	if (ret)
2274		return ret;
2275
2276	ext_data.fpriv = file->driver_priv;
2277	if (client_is_banned(ext_data.fpriv)) {
2278		drm_dbg(&i915->drm,
2279			"client %s[%d] banned from creating ctx\n",
2280			current->comm, task_pid_nr(current));
2281		return -EIO;
2282	}
2283
2284	ext_data.pc = proto_context_create(i915, args->flags);
2285	if (IS_ERR(ext_data.pc))
2286		return PTR_ERR(ext_data.pc);
2287
2288	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
2289		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
2290					   create_extensions,
2291					   ARRAY_SIZE(create_extensions),
2292					   &ext_data);
2293		if (ret)
2294			goto err_pc;
2295	}
2296
2297	if (GRAPHICS_VER(i915) > 12) {
2298		struct i915_gem_context *ctx;
2299
2300		/* Get ourselves a context ID */
2301		ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL,
2302			       xa_limit_32b, GFP_KERNEL);
2303		if (ret)
2304			goto err_pc;
2305
2306		ctx = i915_gem_create_context(i915, ext_data.pc);
2307		if (IS_ERR(ctx)) {
2308			ret = PTR_ERR(ctx);
2309			goto err_pc;
2310		}
2311
2312		proto_context_close(i915, ext_data.pc);
2313		gem_context_register(ctx, ext_data.fpriv, id);
2314	} else {
2315		ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id);
2316		if (ret < 0)
2317			goto err_pc;
2318	}
2319
2320	args->ctx_id = id;
2321
2322	return 0;
2323
2324err_pc:
2325	proto_context_close(i915, ext_data.pc);
2326	return ret;
2327}
2328
2329int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2330				   struct drm_file *file)
2331{
2332	struct drm_i915_gem_context_destroy *args = data;
2333	struct drm_i915_file_private *file_priv = file->driver_priv;
2334	struct i915_gem_proto_context *pc;
2335	struct i915_gem_context *ctx;
2336
2337	if (args->pad != 0)
2338		return -EINVAL;
2339
2340	if (!args->ctx_id)
2341		return -ENOENT;
2342
2343	/* We need to hold the proto-context lock here to prevent races
2344	 * with finalize_create_context_locked().
2345	 */
2346	mutex_lock(&file_priv->proto_context_lock);
2347	ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
2348	pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id);
2349	mutex_unlock(&file_priv->proto_context_lock);
2350
2351	if (!ctx && !pc)
2352		return -ENOENT;
2353	GEM_WARN_ON(ctx && pc);
2354
2355	if (pc)
2356		proto_context_close(file_priv->dev_priv, pc);
2357
2358	if (ctx)
2359		context_close(ctx);
2360
2361	return 0;
2362}
2363
2364static int get_sseu(struct i915_gem_context *ctx,
2365		    struct drm_i915_gem_context_param *args)
2366{
2367	struct drm_i915_gem_context_param_sseu user_sseu;
2368	struct intel_context *ce;
2369	unsigned long lookup;
2370	int err;
2371
2372	if (args->size == 0)
2373		goto out;
2374	else if (args->size < sizeof(user_sseu))
2375		return -EINVAL;
2376
2377	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2378			   sizeof(user_sseu)))
2379		return -EFAULT;
2380
2381	if (user_sseu.rsvd)
2382		return -EINVAL;
2383
2384	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2385		return -EINVAL;
2386
2387	lookup = 0;
2388	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2389		lookup |= LOOKUP_USER_INDEX;
2390
2391	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2392	if (IS_ERR(ce))
2393		return PTR_ERR(ce);
2394
2395	err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2396	if (err) {
2397		intel_context_put(ce);
2398		return err;
2399	}
2400
2401	user_sseu.slice_mask = ce->sseu.slice_mask;
2402	user_sseu.subslice_mask = ce->sseu.subslice_mask;
2403	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2404	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2405
2406	intel_context_unlock_pinned(ce);
2407	intel_context_put(ce);
2408
2409	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2410			 sizeof(user_sseu)))
2411		return -EFAULT;
2412
2413out:
2414	args->size = sizeof(user_sseu);
2415
2416	return 0;
2417}
2418
2419int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2420				    struct drm_file *file)
2421{
2422	struct drm_i915_file_private *file_priv = file->driver_priv;
2423	struct drm_i915_gem_context_param *args = data;
2424	struct i915_gem_context *ctx;
2425	struct i915_address_space *vm;
2426	int ret = 0;
2427
2428	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2429	if (IS_ERR(ctx))
2430		return PTR_ERR(ctx);
2431
2432	switch (args->param) {
2433	case I915_CONTEXT_PARAM_GTT_SIZE:
2434		args->size = 0;
2435		vm = i915_gem_context_get_eb_vm(ctx);
2436		args->value = vm->total;
2437		i915_vm_put(vm);
2438
2439		break;
2440
2441	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2442		args->size = 0;
2443		args->value = i915_gem_context_no_error_capture(ctx);
2444		break;
2445
2446	case I915_CONTEXT_PARAM_BANNABLE:
2447		args->size = 0;
2448		args->value = i915_gem_context_is_bannable(ctx);
2449		break;
2450
2451	case I915_CONTEXT_PARAM_RECOVERABLE:
2452		args->size = 0;
2453		args->value = i915_gem_context_is_recoverable(ctx);
2454		break;
2455
2456	case I915_CONTEXT_PARAM_PRIORITY:
2457		args->size = 0;
2458		args->value = ctx->sched.priority;
2459		break;
2460
2461	case I915_CONTEXT_PARAM_SSEU:
2462		ret = get_sseu(ctx, args);
2463		break;
2464
2465	case I915_CONTEXT_PARAM_VM:
2466		ret = get_ppgtt(file_priv, ctx, args);
2467		break;
2468
2469	case I915_CONTEXT_PARAM_PERSISTENCE:
2470		args->size = 0;
2471		args->value = i915_gem_context_is_persistent(ctx);
2472		break;
2473
2474	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
2475		ret = get_protected(ctx, args);
2476		break;
2477
2478	case I915_CONTEXT_PARAM_NO_ZEROMAP:
2479	case I915_CONTEXT_PARAM_BAN_PERIOD:
2480	case I915_CONTEXT_PARAM_ENGINES:
2481	case I915_CONTEXT_PARAM_RINGSIZE:
2482	default:
2483		ret = -EINVAL;
2484		break;
2485	}
2486
2487	i915_gem_context_put(ctx);
2488	return ret;
2489}
2490
2491int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2492				    struct drm_file *file)
2493{
2494	struct drm_i915_file_private *file_priv = file->driver_priv;
2495	struct drm_i915_gem_context_param *args = data;
2496	struct i915_gem_proto_context *pc;
2497	struct i915_gem_context *ctx;
2498	int ret = 0;
2499
2500	mutex_lock(&file_priv->proto_context_lock);
2501	ctx = __context_lookup(file_priv, args->ctx_id);
2502	if (!ctx) {
2503		pc = xa_load(&file_priv->proto_context_xa, args->ctx_id);
2504		if (pc) {
2505			/* Contexts should be finalized inside
2506			 * GEM_CONTEXT_CREATE starting with graphics
2507			 * version 13.
2508			 */
2509			WARN_ON(GRAPHICS_VER(file_priv->dev_priv) > 12);
2510			ret = set_proto_ctx_param(file_priv, pc, args);
2511		} else {
2512			ret = -ENOENT;
2513		}
2514	}
2515	mutex_unlock(&file_priv->proto_context_lock);
2516
2517	if (ctx) {
2518		ret = ctx_setparam(file_priv, ctx, args);
2519		i915_gem_context_put(ctx);
2520	}
2521
2522	return ret;
2523}
2524
2525int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2526				       void *data, struct drm_file *file)
2527{
2528	struct drm_i915_private *i915 = to_i915(dev);
2529	struct drm_i915_reset_stats *args = data;
2530	struct i915_gem_context *ctx;
2531
2532	if (args->flags || args->pad)
2533		return -EINVAL;
2534
2535	ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
2536	if (IS_ERR(ctx))
2537		return PTR_ERR(ctx);
2538
2539	/*
2540	 * We opt for unserialised reads here. This may result in tearing
2541	 * in the extremely unlikely event of a GPU hang on this context
2542	 * as we are querying them. If we need that extra layer of protection,
2543	 * we should wrap the hangstats with a seqlock.
2544	 */
2545
2546	if (capable(CAP_SYS_ADMIN))
2547		args->reset_count = i915_reset_count(&i915->gpu_error);
2548	else
2549		args->reset_count = 0;
2550
2551	args->batch_active = atomic_read(&ctx->guilty_count);
2552	args->batch_pending = atomic_read(&ctx->active_count);
2553
2554	i915_gem_context_put(ctx);
2555	return 0;
2556}
2557
2558/* GEM context-engines iterator: for_each_gem_engine() */
2559struct intel_context *
2560i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2561{
2562	const struct i915_gem_engines *e = it->engines;
2563	struct intel_context *ctx;
2564
2565	if (unlikely(!e))
2566		return NULL;
2567
2568	do {
2569		if (it->idx >= e->num_engines)
2570			return NULL;
2571
2572		ctx = e->engines[it->idx++];
2573	} while (!ctx);
2574
2575	return ctx;
2576}
2577
2578#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2579#include "selftests/mock_context.c"
2580#include "selftests/i915_gem_context.c"
2581#endif
2582
2583void i915_gem_context_module_exit(void)
2584{
2585	kmem_cache_destroy(slab_luts);
2586}
2587
2588int __init i915_gem_context_module_init(void)
2589{
2590	slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2591	if (!slab_luts)
2592		return -ENOMEM;
2593
2594	return 0;
2595}