1/*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright © 2011-2012 Intel Corporation
5 */
6
7/*
8 * This file implements HW context support. On gen5+ a HW context consists of an
9 * opaque GPU object which is referenced at times of context saves and restores.
10 * With RC6 enabled, the context is also referenced as the GPU enters and exits
11 * from RC6 (the GPU has its own internal power context, except on gen5). Though
12 * something like a context does exist for the media ring, the code only
13 * supports contexts for the render ring.
14 *
15 * In software, there is a distinction between contexts created by the user,
16 * and the default HW context. The default HW context is used by GPU clients
17 * that do not request setup of their own hardware context. The default
18 * context's state is never restored to help prevent programming errors. This
19 * would happen if a client ran and piggy-backed off another client's GPU state.
20 * The default context only exists to give the GPU some offset to load as the
21 * current context, which in turn triggers a save of the context we actually
22 * care about. In fact, the code could likely be constructed, albeit in a more
23 * complicated fashion, to never use the default context, though that limits
24 * the driver's ability to swap out, and/or destroy other contexts.
25 *
26 * All other contexts are created as a request by the GPU client. These contexts
27 * store GPU state, and thus allow GPU clients to not re-emit state (and
28 * potentially query certain state) at any time. The kernel driver makes
29 * certain that the appropriate commands are inserted.
30 *
31 * The context life cycle is semi-complicated in that context BOs may live
32 * longer than the context itself because of the way the hardware, and object
33 * tracking works. Below is a very crude representation of the state machine
34 * describing the context life.
35 * refcount pincount active
36 * S0: initial state 0 0 0
37 * S1: context created 1 0 0
38 * S2: context is currently running 2 1 X
39 * S3: GPU referenced, but not current 2 0 1
40 * S4: context is current, but destroyed 1 1 0
41 * S5: like S3, but destroyed 1 0 1
42 *
43 * The most common (but not all) transitions:
44 * S0->S1: client creates a context
45 * S1->S2: client submits execbuf with context
46 * S2->S3: another client submits an execbuf with a different context
47 * S3->S1: context object was retired
48 * S3->S2: client submits another execbuf
49 * S2->S4: context destroy called with current context
50 * S3->S5->S0: destroy path
51 * S4->S5->S0: destroy path on current context
52 *
53 * There are two confusing terms used above:
54 * The "current context" means the context which is currently running on the
55 * GPU. The GPU has loaded its state already and has stored away the gtt
56 * offset of the BO. The GPU is not actively referencing the data at this
57 * offset, but it will on the next context switch. The only way to avoid this
58 * is to do a GPU reset.
59 *
60 * An "active context" is one which was previously the "current context" and is
61 * on the active list waiting for the next context switch to occur. Until this
62 * happens, the object must remain at the same gtt offset. It is therefore
63 * possible to destroy a context while it is still active.
64 *
65 */
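
/*
 * Illustrative userspace sketch (not part of this file): the state machine
 * above, seen from the uAPI side. Assumes an open DRM fd plus <sys/ioctl.h>
 * and <drm/i915_drm.h>; batch/buffer setup and error handling are elided.
 *
 *	struct drm_i915_gem_context_create create = {};
 *	struct drm_i915_gem_context_destroy destroy = {};
 *	struct drm_i915_gem_execbuffer2 execbuf = {};	// buffers_ptr etc. omitted
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);		// S0->S1
 *	i915_execbuffer2_set_context_id(execbuf, create.ctx_id);
 *	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);		// S1->S2
 *	destroy.ctx_id = create.ctx_id;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);	// S2->S4 (or S3->S5)
 */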
66
67#include <linux/log2.h>
68#include <linux/nospec.h>
69
70#include "gt/gen6_ppgtt.h"
71#include "gt/intel_context.h"
72#include "gt/intel_context_param.h"
73#include "gt/intel_engine_heartbeat.h"
74#include "gt/intel_engine_user.h"
75#include "gt/intel_execlists_submission.h" /* virtual_engine */
76#include "gt/intel_gpu_commands.h"
77#include "gt/intel_ring.h"
78
79#include "i915_gem_context.h"
80#include "i915_globals.h"
81#include "i915_trace.h"
82#include "i915_user_extensions.h"
83
84#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
85
86static struct i915_global_gem_context {
87 struct i915_global base;
88 struct kmem_cache *slab_luts;
89} global;
90
91struct i915_lut_handle *i915_lut_handle_alloc(void)
92{
93 return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
94}
95
96void i915_lut_handle_free(struct i915_lut_handle *lut)
97{
98 kmem_cache_free(global.slab_luts, lut);
99}
100
101static void lut_close(struct i915_gem_context *ctx)
102{
103 struct radix_tree_iter iter;
104 void __rcu **slot;
105
106 mutex_lock(&ctx->lut_mutex);
107 rcu_read_lock();
108 radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
109 struct i915_vma *vma = rcu_dereference_raw(*slot);
110 struct drm_i915_gem_object *obj = vma->obj;
111 struct i915_lut_handle *lut;
112
113 if (!kref_get_unless_zero(&obj->base.refcount))
114 continue;
115
116 spin_lock(&obj->lut_lock);
117 list_for_each_entry(lut, &obj->lut_list, obj_link) {
118 if (lut->ctx != ctx)
119 continue;
120
121 if (lut->handle != iter.index)
122 continue;
123
124 list_del(&lut->obj_link);
125 break;
126 }
127 spin_unlock(&obj->lut_lock);
128
129 if (&lut->obj_link != &obj->lut_list) {
130 i915_lut_handle_free(lut);
131 radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
132 i915_vma_close(vma);
133 i915_gem_object_put(obj);
134 }
135
136 i915_gem_object_put(obj);
137 }
138 rcu_read_unlock();
139 mutex_unlock(&ctx->lut_mutex);
140}
141
142static struct intel_context *
143lookup_user_engine(struct i915_gem_context *ctx,
144 unsigned long flags,
145 const struct i915_engine_class_instance *ci)
146#define LOOKUP_USER_INDEX BIT(0)
147{
148 int idx;
149
150 if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
151 return ERR_PTR(-EINVAL);
152
153 if (!i915_gem_context_user_engines(ctx)) {
154 struct intel_engine_cs *engine;
155
156 engine = intel_engine_lookup_user(ctx->i915,
157 ci->engine_class,
158 ci->engine_instance);
159 if (!engine)
160 return ERR_PTR(-EINVAL);
161
162 idx = engine->legacy_idx;
163 } else {
164 idx = ci->engine_instance;
165 }
166
167 return i915_gem_context_get_engine(ctx, idx);
168}
169
170static struct i915_address_space *
171context_get_vm_rcu(struct i915_gem_context *ctx)
172{
173 GEM_BUG_ON(!rcu_access_pointer(ctx->vm));
174
175 do {
176 struct i915_address_space *vm;
177
178 /*
179 * We do not allow downgrading from full-ppgtt [to a shared
180 * global gtt], so ctx->vm cannot become NULL.
181 */
182 vm = rcu_dereference(ctx->vm);
183 if (!kref_get_unless_zero(&vm->ref))
184 continue;
185
186 /*
187 * This ppgtt may have been reallocated between
188 * the read and the kref, and reassigned to a third
189 * context. In order to avoid inadvertent sharing
190 * of this ppgtt with that third context (and not
191 * ctx), we have to confirm that we have the same
192 * ppgtt after passing through the strong memory
193 * barrier implied by a successful
194 * kref_get_unless_zero().
195 *
196 * Once we have acquired the current ppgtt of ctx,
197 * we no longer care if it is released from ctx, as
198 * it cannot be reallocated elsewhere.
199 */
200
201 if (vm == rcu_access_pointer(ctx->vm))
202 return rcu_pointer_handoff(vm);
203
204 i915_vm_put(vm);
205 } while (1);
206}
207
208static void intel_context_set_gem(struct intel_context *ce,
209 struct i915_gem_context *ctx)
210{
211 GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
212 RCU_INIT_POINTER(ce->gem_context, ctx);
213
214 if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
215 ce->ring = __intel_context_ring_size(SZ_16K);
216
217 if (rcu_access_pointer(ctx->vm)) {
218 struct i915_address_space *vm;
219
220 rcu_read_lock();
221 vm = context_get_vm_rcu(ctx); /* hmm */
222 rcu_read_unlock();
223
224 i915_vm_put(ce->vm);
225 ce->vm = vm;
226 }
227
228 GEM_BUG_ON(ce->timeline);
229 if (ctx->timeline)
230 ce->timeline = intel_timeline_get(ctx->timeline);
231
232 if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
233 intel_engine_has_timeslices(ce->engine))
234 __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
235
236 intel_context_set_watchdog_us(ce, ctx->watchdog.timeout_us);
237}
238
239static void __free_engines(struct i915_gem_engines *e, unsigned int count)
240{
241 while (count--) {
242 if (!e->engines[count])
243 continue;
244
245 intel_context_put(e->engines[count]);
246 }
247 kfree(e);
248}
249
250static void free_engines(struct i915_gem_engines *e)
251{
252 __free_engines(e, e->num_engines);
253}
254
255static void free_engines_rcu(struct rcu_head *rcu)
256{
257 struct i915_gem_engines *engines =
258 container_of(rcu, struct i915_gem_engines, rcu);
259
260 i915_sw_fence_fini(&engines->fence);
261 free_engines(engines);
262}
263
264static int __i915_sw_fence_call
265engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
266{
267 struct i915_gem_engines *engines =
268 container_of(fence, typeof(*engines), fence);
269
270 switch (state) {
271 case FENCE_COMPLETE:
272 if (!list_empty(&engines->link)) {
273 struct i915_gem_context *ctx = engines->ctx;
274 unsigned long flags;
275
276 spin_lock_irqsave(&ctx->stale.lock, flags);
277 list_del(&engines->link);
278 spin_unlock_irqrestore(&ctx->stale.lock, flags);
279 }
280 i915_gem_context_put(engines->ctx);
281 break;
282
283 case FENCE_FREE:
284 init_rcu_head(&engines->rcu);
285 call_rcu(&engines->rcu, free_engines_rcu);
286 break;
287 }
288
289 return NOTIFY_DONE;
290}
291
292static struct i915_gem_engines *alloc_engines(unsigned int count)
293{
294 struct i915_gem_engines *e;
295
296 e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
297 if (!e)
298 return NULL;
299
300 i915_sw_fence_init(&e->fence, engines_notify);
301 return e;
302}
303
304static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
305{
306 const struct intel_gt *gt = &ctx->i915->gt;
307 struct intel_engine_cs *engine;
308 struct i915_gem_engines *e;
309 enum intel_engine_id id;
310
311 e = alloc_engines(I915_NUM_ENGINES);
312 if (!e)
313 return ERR_PTR(-ENOMEM);
314
315 for_each_engine(engine, gt, id) {
316 struct intel_context *ce;
317
318 if (engine->legacy_idx == INVALID_ENGINE)
319 continue;
320
321 GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
322 GEM_BUG_ON(e->engines[engine->legacy_idx]);
323
324 ce = intel_context_create(engine);
325 if (IS_ERR(ce)) {
326 __free_engines(e, e->num_engines + 1);
327 return ERR_CAST(ce);
328 }
329
330 intel_context_set_gem(ce, ctx);
331
332 e->engines[engine->legacy_idx] = ce;
333 e->num_engines = max(e->num_engines, engine->legacy_idx);
334 }
335 e->num_engines++;
336
337 return e;
338}
339
340void i915_gem_context_release(struct kref *ref)
341{
342 struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
343
344 trace_i915_context_free(ctx);
345 GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
346
347 mutex_destroy(&ctx->engines_mutex);
348 mutex_destroy(&ctx->lut_mutex);
349
350 if (ctx->timeline)
351 intel_timeline_put(ctx->timeline);
352
353 put_pid(ctx->pid);
354 mutex_destroy(&ctx->mutex);
355
356 kfree_rcu(ctx, rcu);
357}
358
359static inline struct i915_gem_engines *
360__context_engines_static(const struct i915_gem_context *ctx)
361{
362 return rcu_dereference_protected(ctx->engines, true);
363}
364
365static void __reset_context(struct i915_gem_context *ctx,
366 struct intel_engine_cs *engine)
367{
368 intel_gt_handle_error(engine->gt, engine->mask, 0,
369 "context closure in %s", ctx->name);
370}
371
372static bool __cancel_engine(struct intel_engine_cs *engine)
373{
374 /*
375 * Send a "high priority pulse" down the engine to cause the
376 * current request to be momentarily preempted. (If it fails to
377 * be preempted, it will be reset). As we have marked our context
378 * as banned, any incomplete request, including any that are running, will
379 * be skipped following the preemption.
380 *
381 * If there is no hangchecking (one of the reasons why we try to
382 * cancel the context) and no forced preemption, there may be no
383 * means by which we reset the GPU and evict the persistent hog.
384 * Ergo if we are unable to inject a preemptive pulse that can
385 * kill the banned context, we fall back to doing a local reset
386 * instead.
387 */
388 return intel_engine_pulse(engine) == 0;
389}
390
391static struct intel_engine_cs *active_engine(struct intel_context *ce)
392{
393 struct intel_engine_cs *engine = NULL;
394 struct i915_request *rq;
395
396 if (intel_context_has_inflight(ce))
397 return intel_context_inflight(ce);
398
399 if (!ce->timeline)
400 return NULL;
401
402 /*
403 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
404 * to the request to prevent it being transferred to a new timeline
405 * (and onto a new timeline->requests list).
406 */
407 rcu_read_lock();
408 list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
409 bool found;
410
411 /* timeline is already completed up to this point? */
412 if (!i915_request_get_rcu(rq))
413 break;
414
415 /* Check with the backend if the request is inflight */
416 found = true;
417 if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
418 found = i915_request_active_engine(rq, &engine);
419
420 i915_request_put(rq);
421 if (found)
422 break;
423 }
424 rcu_read_unlock();
425
426 return engine;
427}
428
429static void kill_engines(struct i915_gem_engines *engines, bool ban)
430{
431 struct i915_gem_engines_iter it;
432 struct intel_context *ce;
433
434 /*
435 * Map the user's engine back to the actual engines; one virtual
436 * engine will be mapped to multiple engines, and using ctx->engine[]
437 * the same engine may have multiple instances in the user's map.
438 * However, we only care about pending requests, so only include
439 * engines on which there are incomplete requests.
440 */
441 for_each_gem_engine(ce, engines, it) {
442 struct intel_engine_cs *engine;
443
444 if (ban && intel_context_set_banned(ce))
445 continue;
446
447 /*
448 * Check the current active state of this context; if we
449 * are currently executing on the GPU we need to evict
450 * ourselves. On the other hand, if we haven't yet been
451 * submitted to the GPU or if everything is complete,
452 * we have nothing to do.
453 */
454 engine = active_engine(ce);
455
456 /* First attempt to gracefully cancel the context */
457 if (engine && !__cancel_engine(engine) && ban)
458 /*
459 * If we are unable to send a preemptive pulse to bump
460 * the context from the GPU, we have to resort to a full
461 * reset. We hope the collateral damage is worth it.
462 */
463 __reset_context(engines->ctx, engine);
464 }
465}
466
467static void kill_context(struct i915_gem_context *ctx)
468{
469 bool ban = (!i915_gem_context_is_persistent(ctx) ||
470 !ctx->i915->params.enable_hangcheck);
471 struct i915_gem_engines *pos, *next;
472
473 spin_lock_irq(&ctx->stale.lock);
474 GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
475 list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
476 if (!i915_sw_fence_await(&pos->fence)) {
477 list_del_init(&pos->link);
478 continue;
479 }
480
481 spin_unlock_irq(&ctx->stale.lock);
482
483 kill_engines(pos, ban);
484
485 spin_lock_irq(&ctx->stale.lock);
486 GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
487 list_safe_reset_next(pos, next, link);
488 list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */
489
490 i915_sw_fence_complete(&pos->fence);
491 }
492 spin_unlock_irq(&ctx->stale.lock);
493}
494
495static void engines_idle_release(struct i915_gem_context *ctx,
496 struct i915_gem_engines *engines)
497{
498 struct i915_gem_engines_iter it;
499 struct intel_context *ce;
500
501 INIT_LIST_HEAD(&engines->link);
502
503 engines->ctx = i915_gem_context_get(ctx);
504
505 for_each_gem_engine(ce, engines, it) {
506 int err;
507
508 /* serialises with execbuf */
509 set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
510 if (!intel_context_pin_if_active(ce))
511 continue;
512
513 /* Wait until context is finally scheduled out and retired */
514 err = i915_sw_fence_await_active(&engines->fence,
515 &ce->active,
516 I915_ACTIVE_AWAIT_BARRIER);
517 intel_context_unpin(ce);
518 if (err)
519 goto kill;
520 }
521
522 spin_lock_irq(&ctx->stale.lock);
523 if (!i915_gem_context_is_closed(ctx))
524 list_add_tail(&engines->link, &ctx->stale.engines);
525 spin_unlock_irq(&ctx->stale.lock);
526
527kill:
528 if (list_empty(&engines->link)) /* raced, already closed */
529 kill_engines(engines, true);
530
531 i915_sw_fence_commit(&engines->fence);
532}
533
534static void set_closed_name(struct i915_gem_context *ctx)
535{
536 char *s;
537
538 /* Replace '[]' with '<>' to indicate closed in debug prints */
539
540 s = strrchr(ctx->name, '[');
541 if (!s)
542 return;
543
544 *s = '<';
545
546 s = strchr(s + 1, ']');
547 if (s)
548 *s = '>';
549}
550
551static void context_close(struct i915_gem_context *ctx)
552{
553 struct i915_address_space *vm;
554
555 /* Flush any concurrent set_engines() */
556 mutex_lock(&ctx->engines_mutex);
557 engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
558 i915_gem_context_set_closed(ctx);
559 mutex_unlock(&ctx->engines_mutex);
560
561 mutex_lock(&ctx->mutex);
562
563 set_closed_name(ctx);
564
565 vm = i915_gem_context_vm(ctx);
566 if (vm)
567 i915_vm_close(vm);
568
569 ctx->file_priv = ERR_PTR(-EBADF);
570
571 /*
572 * The LUT uses the VMA as a backpointer to unref the object,
573 * so we need to clear the LUT before we close all the VMA (inside
574 * the ppgtt).
575 */
576 lut_close(ctx);
577
578 spin_lock(&ctx->i915->gem.contexts.lock);
579 list_del(&ctx->link);
580 spin_unlock(&ctx->i915->gem.contexts.lock);
581
582 mutex_unlock(&ctx->mutex);
583
584 /*
585 * If the user has disabled hangchecking, we cannot be sure that
586 * the batches will ever complete after the context is closed,
587 * keeping the context and all resources pinned forever. So in this
588 * case we opt to forcibly kill off all remaining requests on
589 * context close.
590 */
591 kill_context(ctx);
592
593 i915_gem_context_put(ctx);
594}
595
596static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
597{
598 if (i915_gem_context_is_persistent(ctx) == state)
599 return 0;
600
601 if (state) {
602 /*
603 * Only contexts that are short-lived [that will expire or be
604 * reset] are allowed to survive past termination. We require
605 * hangcheck to ensure that the persistent requests are healthy.
606 */
607 if (!ctx->i915->params.enable_hangcheck)
608 return -EINVAL;
609
610 i915_gem_context_set_persistence(ctx);
611 } else {
612 /* To cancel a context we use "preempt-to-idle" */
613 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
614 return -ENODEV;
615
616 /*
617 * If the cancel fails, we then need to reset, cleanly!
618 *
619 * If the per-engine reset fails, all hope is lost! We resort
620 * to a full GPU reset in that unlikely case, but realistically
621 * if the engine could not reset, the full reset does not fare
622 * much better. The damage has been done.
623 *
624 * However, if we cannot reset an engine by itself, we cannot
625 * cleanup a hanging persistent context without causing
626 * collateral damage, and we should not pretend we can by
627 * exposing the interface.
628 */
629 if (!intel_has_reset_engine(&ctx->i915->gt))
630 return -ENODEV;
631
632 i915_gem_context_clear_persistence(ctx);
633 }
634
635 return 0;
636}
637
638static struct i915_gem_context *
639__create_context(struct drm_i915_private *i915)
640{
641 struct i915_gem_context *ctx;
642 struct i915_gem_engines *e;
643 int err;
644 int i;
645
646 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
647 if (!ctx)
648 return ERR_PTR(-ENOMEM);
649
650 kref_init(&ctx->ref);
651 ctx->i915 = i915;
652 ctx->sched.priority = I915_PRIORITY_NORMAL;
653 mutex_init(&ctx->mutex);
654 INIT_LIST_HEAD(&ctx->link);
655
656 spin_lock_init(&ctx->stale.lock);
657 INIT_LIST_HEAD(&ctx->stale.engines);
658
659 mutex_init(&ctx->engines_mutex);
660 e = default_engines(ctx);
661 if (IS_ERR(e)) {
662 err = PTR_ERR(e);
663 goto err_free;
664 }
665 RCU_INIT_POINTER(ctx->engines, e);
666
667 INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
668 mutex_init(&ctx->lut_mutex);
669
670 /* NB: Mark all slices as needing a remap so that when the context first
671 * loads it will restore whatever remap state already exists. If there
672 * is no remap info, it will be a NOP. */
673 ctx->remap_slice = ALL_L3_SLICES(i915);
674
675 i915_gem_context_set_bannable(ctx);
676 i915_gem_context_set_recoverable(ctx);
677 __context_set_persistence(ctx, true /* cgroup hook? */);
678
679 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
680 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
681
682 return ctx;
683
684err_free:
685 kfree(ctx);
686 return ERR_PTR(err);
687}
688
689static inline struct i915_gem_engines *
690__context_engines_await(const struct i915_gem_context *ctx,
691 bool *user_engines)
692{
693 struct i915_gem_engines *engines;
694
695 rcu_read_lock();
696 do {
697 engines = rcu_dereference(ctx->engines);
698 GEM_BUG_ON(!engines);
699
700 if (user_engines)
701 *user_engines = i915_gem_context_user_engines(ctx);
702
703 /* successful await => strong mb */
704 if (unlikely(!i915_sw_fence_await(&engines->fence)))
705 continue;
706
707 if (likely(engines == rcu_access_pointer(ctx->engines)))
708 break;
709
710 i915_sw_fence_complete(&engines->fence);
711 } while (1);
712 rcu_read_unlock();
713
714 return engines;
715}
716
717static int
718context_apply_all(struct i915_gem_context *ctx,
719 int (*fn)(struct intel_context *ce, void *data),
720 void *data)
721{
722 struct i915_gem_engines_iter it;
723 struct i915_gem_engines *e;
724 struct intel_context *ce;
725 int err = 0;
726
727 e = __context_engines_await(ctx, NULL);
728 for_each_gem_engine(ce, e, it) {
729 err = fn(ce, data);
730 if (err)
731 break;
732 }
733 i915_sw_fence_complete(&e->fence);
734
735 return err;
736}
737
738static int __apply_ppgtt(struct intel_context *ce, void *vm)
739{
740 i915_vm_put(ce->vm);
741 ce->vm = i915_vm_get(vm);
742 return 0;
743}
744
745static struct i915_address_space *
746__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
747{
748 struct i915_address_space *old;
749
750 old = rcu_replace_pointer(ctx->vm,
751 i915_vm_open(vm),
752 lockdep_is_held(&ctx->mutex));
753 GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
754
755 context_apply_all(ctx, __apply_ppgtt, vm);
756
757 return old;
758}
759
760static void __assign_ppgtt(struct i915_gem_context *ctx,
761 struct i915_address_space *vm)
762{
763 if (vm == rcu_access_pointer(ctx->vm))
764 return;
765
766 vm = __set_ppgtt(ctx, vm);
767 if (vm)
768 i915_vm_close(vm);
769}
770
771static void __set_timeline(struct intel_timeline **dst,
772 struct intel_timeline *src)
773{
774 struct intel_timeline *old = *dst;
775
776 *dst = src ? intel_timeline_get(src) : NULL;
777
778 if (old)
779 intel_timeline_put(old);
780}
781
782static int __apply_timeline(struct intel_context *ce, void *timeline)
783{
784 __set_timeline(&ce->timeline, timeline);
785 return 0;
786}
787
788static void __assign_timeline(struct i915_gem_context *ctx,
789 struct intel_timeline *timeline)
790{
791 __set_timeline(&ctx->timeline, timeline);
792 context_apply_all(ctx, __apply_timeline, timeline);
793}
794
795static int __apply_watchdog(struct intel_context *ce, void *timeout_us)
796{
797 return intel_context_set_watchdog_us(ce, (uintptr_t)timeout_us);
798}
799
800static int
801__set_watchdog(struct i915_gem_context *ctx, unsigned long timeout_us)
802{
803 int ret;
804
805 ret = context_apply_all(ctx, __apply_watchdog,
806 (void *)(uintptr_t)timeout_us);
807 if (!ret)
808 ctx->watchdog.timeout_us = timeout_us;
809
810 return ret;
811}
812
813static void __set_default_fence_expiry(struct i915_gem_context *ctx)
814{
815 struct drm_i915_private *i915 = ctx->i915;
816 int ret;
817
818 if (!IS_ACTIVE(CONFIG_DRM_I915_REQUEST_TIMEOUT) ||
819 !i915->params.request_timeout_ms)
820 return;
821
822 /* Default expiry for user fences. */
823 ret = __set_watchdog(ctx, i915->params.request_timeout_ms * 1000);
824 if (ret)
825 drm_notice(&i915->drm,
826 "Failed to configure default fence expiry! (%d)",
827 ret);
828}
829
830static struct i915_gem_context *
831i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
832{
833 struct i915_gem_context *ctx;
834
835 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
836 !HAS_EXECLISTS(i915))
837 return ERR_PTR(-EINVAL);
838
839 ctx = __create_context(i915);
840 if (IS_ERR(ctx))
841 return ctx;
842
843 if (HAS_FULL_PPGTT(i915)) {
844 struct i915_ppgtt *ppgtt;
845
846 ppgtt = i915_ppgtt_create(&i915->gt);
847 if (IS_ERR(ppgtt)) {
848 drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
849 PTR_ERR(ppgtt));
850 context_close(ctx);
851 return ERR_CAST(ppgtt);
852 }
853
854 mutex_lock(&ctx->mutex);
855 __assign_ppgtt(ctx, &ppgtt->vm);
856 mutex_unlock(&ctx->mutex);
857
858 i915_vm_put(&ppgtt->vm);
859 }
860
861 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
862 struct intel_timeline *timeline;
863
864 timeline = intel_timeline_create(&i915->gt);
865 if (IS_ERR(timeline)) {
866 context_close(ctx);
867 return ERR_CAST(timeline);
868 }
869
870 __assign_timeline(ctx, timeline);
871 intel_timeline_put(timeline);
872 }
873
874 __set_default_fence_expiry(ctx);
875
876 trace_i915_context_create(ctx);
877
878 return ctx;
879}
880
881static void init_contexts(struct i915_gem_contexts *gc)
882{
883 spin_lock_init(&gc->lock);
884 INIT_LIST_HEAD(&gc->list);
885}
886
887void i915_gem_init__contexts(struct drm_i915_private *i915)
888{
889 init_contexts(&i915->gem.contexts);
890}
891
892static int gem_context_register(struct i915_gem_context *ctx,
893 struct drm_i915_file_private *fpriv,
894 u32 *id)
895{
896 struct drm_i915_private *i915 = ctx->i915;
897 struct i915_address_space *vm;
898 int ret;
899
900 ctx->file_priv = fpriv;
901
902 mutex_lock(&ctx->mutex);
903 vm = i915_gem_context_vm(ctx);
904 if (vm)
905 WRITE_ONCE(vm->file, fpriv); /* XXX */
906 mutex_unlock(&ctx->mutex);
907
908 ctx->pid = get_task_pid(current, PIDTYPE_PID);
909 snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
910 current->comm, pid_nr(ctx->pid));
911
912 /* And finally expose ourselves to userspace via the context xarray */
913 ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
914 if (ret)
915 goto err_pid;
916
917 spin_lock(&i915->gem.contexts.lock);
918 list_add_tail(&ctx->link, &i915->gem.contexts.list);
919 spin_unlock(&i915->gem.contexts.lock);
920
921 return 0;
922
923err_pid:
924 put_pid(fetch_and_zero(&ctx->pid));
925 return ret;
926}
927
928int i915_gem_context_open(struct drm_i915_private *i915,
929 struct drm_file *file)
930{
931 struct drm_i915_file_private *file_priv = file->driver_priv;
932 struct i915_gem_context *ctx;
933 int err;
934 u32 id;
935
936 xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);
937
938 /* 0 reserved for invalid/unassigned ppgtt */
939 xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
940
941 ctx = i915_gem_create_context(i915, 0);
942 if (IS_ERR(ctx)) {
943 err = PTR_ERR(ctx);
944 goto err;
945 }
946
947 err = gem_context_register(ctx, file_priv, &id);
948 if (err < 0)
949 goto err_ctx;
950
951 GEM_BUG_ON(id);
952 return 0;
953
954err_ctx:
955 context_close(ctx);
956err:
957 xa_destroy(&file_priv->vm_xa);
958 xa_destroy(&file_priv->context_xa);
959 return err;
960}
961
962void i915_gem_context_close(struct drm_file *file)
963{
964 struct drm_i915_file_private *file_priv = file->driver_priv;
965 struct i915_address_space *vm;
966 struct i915_gem_context *ctx;
967 unsigned long idx;
968
969 xa_for_each(&file_priv->context_xa, idx, ctx)
970 context_close(ctx);
971 xa_destroy(&file_priv->context_xa);
972
973 xa_for_each(&file_priv->vm_xa, idx, vm)
974 i915_vm_put(vm);
975 xa_destroy(&file_priv->vm_xa);
976}
977
978int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
979 struct drm_file *file)
980{
981 struct drm_i915_private *i915 = to_i915(dev);
982 struct drm_i915_gem_vm_control *args = data;
983 struct drm_i915_file_private *file_priv = file->driver_priv;
984 struct i915_ppgtt *ppgtt;
985 u32 id;
986 int err;
987
988 if (!HAS_FULL_PPGTT(i915))
989 return -ENODEV;
990
991 if (args->flags)
992 return -EINVAL;
993
994 ppgtt = i915_ppgtt_create(&i915->gt);
995 if (IS_ERR(ppgtt))
996 return PTR_ERR(ppgtt);
997
998 ppgtt->vm.file = file_priv;
999
1000 if (args->extensions) {
1001 err = i915_user_extensions(u64_to_user_ptr(args->extensions),
1002 NULL, 0,
1003 ppgtt);
1004 if (err)
1005 goto err_put;
1006 }
1007
1008 err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
1009 xa_limit_32b, GFP_KERNEL);
1010 if (err)
1011 goto err_put;
1012
1013 GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1014 args->vm_id = id;
1015 return 0;
1016
1017err_put:
1018 i915_vm_put(&ppgtt->vm);
1019 return err;
1020}
1021
1022int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
1023 struct drm_file *file)
1024{
1025 struct drm_i915_file_private *file_priv = file->driver_priv;
1026 struct drm_i915_gem_vm_control *args = data;
1027 struct i915_address_space *vm;
1028
1029 if (args->flags)
1030 return -EINVAL;
1031
1032 if (args->extensions)
1033 return -EINVAL;
1034
1035 vm = xa_erase(&file_priv->vm_xa, args->vm_id);
1036 if (!vm)
1037 return -ENOENT;
1038
1039 i915_vm_put(vm);
1040 return 0;
1041}
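
/*
 * Illustrative userspace sketch (not part of this file): the VM create and
 * destroy ioctls above pair with I915_CONTEXT_PARAM_VM below, so a client can
 * create one ppGTT and share it between contexts. Assumes an open DRM fd and
 * an existing context id "ctx_id"; error handling elided.
 *
 *	struct drm_i915_gem_vm_control vm_ctl = {};
 *	struct drm_i915_gem_context_param p = {};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm_ctl);
 *
 *	p.ctx_id = ctx_id;
 *	p.param = I915_CONTEXT_PARAM_VM;
 *	p.value = vm_ctl.vm_id;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *
 *	// Dropping the id only removes it from vm_xa; the context keeps its reference.
 *	ioctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &vm_ctl);
 */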
1042
1043struct context_barrier_task {
1044 struct i915_active base;
1045 void (*task)(void *data);
1046 void *data;
1047};
1048
1049static void cb_retire(struct i915_active *base)
1050{
1051 struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
1052
1053 if (cb->task)
1054 cb->task(cb->data);
1055
1056 i915_active_fini(&cb->base);
1057 kfree(cb);
1058}
1059
1060I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
1061static int context_barrier_task(struct i915_gem_context *ctx,
1062 intel_engine_mask_t engines,
1063 bool (*skip)(struct intel_context *ce, void *data),
1064 int (*pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data),
1065 int (*emit)(struct i915_request *rq, void *data),
1066 void (*task)(void *data),
1067 void *data)
1068{
1069 struct context_barrier_task *cb;
1070 struct i915_gem_engines_iter it;
1071 struct i915_gem_engines *e;
1072 struct i915_gem_ww_ctx ww;
1073 struct intel_context *ce;
1074 int err = 0;
1075
1076 GEM_BUG_ON(!task);
1077
1078 cb = kmalloc(sizeof(*cb), GFP_KERNEL);
1079 if (!cb)
1080 return -ENOMEM;
1081
1082 i915_active_init(&cb->base, NULL, cb_retire, 0);
1083 err = i915_active_acquire(&cb->base);
1084 if (err) {
1085 kfree(cb);
1086 return err;
1087 }
1088
1089 e = __context_engines_await(ctx, NULL);
1090 if (!e) {
1091 i915_active_release(&cb->base);
1092 return -ENOENT;
1093 }
1094
1095 for_each_gem_engine(ce, e, it) {
1096 struct i915_request *rq;
1097
1098 if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
1099 ce->engine->mask)) {
1100 err = -ENXIO;
1101 break;
1102 }
1103
1104 if (!(ce->engine->mask & engines))
1105 continue;
1106
1107 if (skip && skip(ce, data))
1108 continue;
1109
1110 i915_gem_ww_ctx_init(&ww, true);
1111retry:
1112 err = intel_context_pin_ww(ce, &ww);
1113 if (err)
1114 goto err;
1115
1116 if (pin)
1117 err = pin(ce, &ww, data);
1118 if (err)
1119 goto err_unpin;
1120
1121 rq = i915_request_create(ce);
1122 if (IS_ERR(rq)) {
1123 err = PTR_ERR(rq);
1124 goto err_unpin;
1125 }
1126
1127 err = 0;
1128 if (emit)
1129 err = emit(rq, data);
1130 if (err == 0)
1131 err = i915_active_add_request(&cb->base, rq);
1132
1133 i915_request_add(rq);
1134err_unpin:
1135 intel_context_unpin(ce);
1136err:
1137 if (err == -EDEADLK) {
1138 err = i915_gem_ww_ctx_backoff(&ww);
1139 if (!err)
1140 goto retry;
1141 }
1142 i915_gem_ww_ctx_fini(&ww);
1143
1144 if (err)
1145 break;
1146 }
1147 i915_sw_fence_complete(&e->fence);
1148
1149 cb->task = err ? NULL : task; /* caller needs to unwind instead */
1150 cb->data = data;
1151
1152 i915_active_release(&cb->base);
1153
1154 return err;
1155}
1156
1157static int get_ppgtt(struct drm_i915_file_private *file_priv,
1158 struct i915_gem_context *ctx,
1159 struct drm_i915_gem_context_param *args)
1160{
1161 struct i915_address_space *vm;
1162 int err;
1163 u32 id;
1164
1165 if (!rcu_access_pointer(ctx->vm))
1166 return -ENODEV;
1167
1168 rcu_read_lock();
1169 vm = context_get_vm_rcu(ctx);
1170 rcu_read_unlock();
1171 if (!vm)
1172 return -ENODEV;
1173
1174 err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1175 if (err)
1176 goto err_put;
1177
1178 i915_vm_open(vm);
1179
1180 GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1181 args->value = id;
1182 args->size = 0;
1183
1184err_put:
1185 i915_vm_put(vm);
1186 return err;
1187}
1188
1189static void set_ppgtt_barrier(void *data)
1190{
1191 struct i915_address_space *old = data;
1192
1193 if (GRAPHICS_VER(old->i915) < 8)
1194 gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
1195
1196 i915_vm_close(old);
1197}
1198
1199static int pin_ppgtt_update(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data)
1200{
1201 struct i915_address_space *vm = ce->vm;
1202
1203 if (!HAS_LOGICAL_RING_CONTEXTS(vm->i915))
1204 /* ppGTT is not part of the legacy context image */
1205 return gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww);
1206
1207 return 0;
1208}
1209
1210static int emit_ppgtt_update(struct i915_request *rq, void *data)
1211{
1212 struct i915_address_space *vm = rq->context->vm;
1213 struct intel_engine_cs *engine = rq->engine;
1214 u32 base = engine->mmio_base;
1215 u32 *cs;
1216 int i;
1217
1218 if (i915_vm_is_4lvl(vm)) {
1219 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1220 const dma_addr_t pd_daddr = px_dma(ppgtt->pd);
1221
1222 cs = intel_ring_begin(rq, 6);
1223 if (IS_ERR(cs))
1224 return PTR_ERR(cs);
1225
1226 *cs++ = MI_LOAD_REGISTER_IMM(2);
1227
1228 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
1229 *cs++ = upper_32_bits(pd_daddr);
1230 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
1231 *cs++ = lower_32_bits(pd_daddr);
1232
1233 *cs++ = MI_NOOP;
1234 intel_ring_advance(rq, cs);
1235 } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
1236 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1237 int err;
1238
1239 /* Magic required to prevent forcewake errors! */
1240 err = engine->emit_flush(rq, EMIT_INVALIDATE);
1241 if (err)
1242 return err;
1243
1244 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
1245 if (IS_ERR(cs))
1246 return PTR_ERR(cs);
1247
1248 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
1249 for (i = GEN8_3LVL_PDPES; i--; ) {
1250 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1251
1252 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
1253 *cs++ = upper_32_bits(pd_daddr);
1254 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
1255 *cs++ = lower_32_bits(pd_daddr);
1256 }
1257 *cs++ = MI_NOOP;
1258 intel_ring_advance(rq, cs);
1259 }
1260
1261 return 0;
1262}
1263
1264static bool skip_ppgtt_update(struct intel_context *ce, void *data)
1265{
1266 if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
1267 return !ce->state;
1268 else
1269 return !atomic_read(&ce->pin_count);
1270}
1271
1272static int set_ppgtt(struct drm_i915_file_private *file_priv,
1273 struct i915_gem_context *ctx,
1274 struct drm_i915_gem_context_param *args)
1275{
1276 struct i915_address_space *vm, *old;
1277 int err;
1278
1279 if (args->size)
1280 return -EINVAL;
1281
1282 if (!rcu_access_pointer(ctx->vm))
1283 return -ENODEV;
1284
1285 if (upper_32_bits(args->value))
1286 return -ENOENT;
1287
1288 rcu_read_lock();
1289 vm = xa_load(&file_priv->vm_xa, args->value);
1290 if (vm && !kref_get_unless_zero(&vm->ref))
1291 vm = NULL;
1292 rcu_read_unlock();
1293 if (!vm)
1294 return -ENOENT;
1295
1296 err = mutex_lock_interruptible(&ctx->mutex);
1297 if (err)
1298 goto out;
1299
1300 if (i915_gem_context_is_closed(ctx)) {
1301 err = -ENOENT;
1302 goto unlock;
1303 }
1304
1305 if (vm == rcu_access_pointer(ctx->vm))
1306 goto unlock;
1307
1308 old = __set_ppgtt(ctx, vm);
1309
1310 /* Tear down the existing obj:vma cache; it will have to be rebuilt. */
1311 lut_close(ctx);
1312
1313 /*
1314 * We need to flush any requests using the current ppgtt before
1315 * we release it as the requests do not hold a reference themselves,
1316 * only indirectly through the context.
1317 */
1318 err = context_barrier_task(ctx, ALL_ENGINES,
1319 skip_ppgtt_update,
1320 pin_ppgtt_update,
1321 emit_ppgtt_update,
1322 set_ppgtt_barrier,
1323 old);
1324 if (err) {
1325 i915_vm_close(__set_ppgtt(ctx, old));
1326 i915_vm_close(old);
1327 lut_close(ctx); /* force a rebuild of the old obj:vma cache */
1328 }
1329
1330unlock:
1331 mutex_unlock(&ctx->mutex);
1332out:
1333 i915_vm_put(vm);
1334 return err;
1335}
1336
1337static int __apply_ringsize(struct intel_context *ce, void *sz)
1338{
1339 return intel_context_set_ring_size(ce, (unsigned long)sz);
1340}
1341
1342static int set_ringsize(struct i915_gem_context *ctx,
1343 struct drm_i915_gem_context_param *args)
1344{
1345 if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
1346 return -ENODEV;
1347
1348 if (args->size)
1349 return -EINVAL;
1350
1351 if (!IS_ALIGNED(args->value, I915_GTT_PAGE_SIZE))
1352 return -EINVAL;
1353
1354 if (args->value < I915_GTT_PAGE_SIZE)
1355 return -EINVAL;
1356
1357 if (args->value > 128 * I915_GTT_PAGE_SIZE)
1358 return -EINVAL;
1359
1360 return context_apply_all(ctx,
1361 __apply_ringsize,
1362 __intel_context_ring_size(args->value));
1363}
1364
1365static int __get_ringsize(struct intel_context *ce, void *arg)
1366{
1367 long sz;
1368
1369 sz = intel_context_get_ring_size(ce);
1370 GEM_BUG_ON(sz > INT_MAX);
1371
1372 return sz; /* stop on first engine */
1373}
1374
1375static int get_ringsize(struct i915_gem_context *ctx,
1376 struct drm_i915_gem_context_param *args)
1377{
1378 int sz;
1379
1380 if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
1381 return -ENODEV;
1382
1383 if (args->size)
1384 return -EINVAL;
1385
1386 sz = context_apply_all(ctx, __get_ringsize, NULL);
1387 if (sz < 0)
1388 return sz;
1389
1390 args->value = sz;
1391 return 0;
1392}
1393
1394int
1395i915_gem_user_to_context_sseu(struct intel_gt *gt,
1396 const struct drm_i915_gem_context_param_sseu *user,
1397 struct intel_sseu *context)
1398{
1399 const struct sseu_dev_info *device = >->info.sseu;
1400 struct drm_i915_private *i915 = gt->i915;
1401
1402 /* No zeros in any field. */
1403 if (!user->slice_mask || !user->subslice_mask ||
1404 !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1405 return -EINVAL;
1406
1407 /* Max >= min. */
1408 if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1409 return -EINVAL;
1410
1411 /*
1412 * Some future proofing on the types since the uAPI is wider than the
1413 * current internal implementation.
1414 */
1415 if (overflows_type(user->slice_mask, context->slice_mask) ||
1416 overflows_type(user->subslice_mask, context->subslice_mask) ||
1417 overflows_type(user->min_eus_per_subslice,
1418 context->min_eus_per_subslice) ||
1419 overflows_type(user->max_eus_per_subslice,
1420 context->max_eus_per_subslice))
1421 return -EINVAL;
1422
1423 /* Check validity against hardware. */
1424 if (user->slice_mask & ~device->slice_mask)
1425 return -EINVAL;
1426
1427 if (user->subslice_mask & ~device->subslice_mask[0])
1428 return -EINVAL;
1429
1430 if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1431 return -EINVAL;
1432
1433 context->slice_mask = user->slice_mask;
1434 context->subslice_mask = user->subslice_mask;
1435 context->min_eus_per_subslice = user->min_eus_per_subslice;
1436 context->max_eus_per_subslice = user->max_eus_per_subslice;
1437
1438 /* Part specific restrictions. */
1439 if (GRAPHICS_VER(i915) == 11) {
1440 unsigned int hw_s = hweight8(device->slice_mask);
1441 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
1442 unsigned int req_s = hweight8(context->slice_mask);
1443 unsigned int req_ss = hweight8(context->subslice_mask);
1444
1445 /*
1446 * Only full subslice enablement is possible if more than one
1447 * slice is turned on.
1448 */
1449 if (req_s > 1 && req_ss != hw_ss_per_s)
1450 return -EINVAL;
1451
1452 /*
1453 * If more than four (SScount bitfield limit) subslices are
1454 * requested then the number has to be even.
1455 */
1456 if (req_ss > 4 && (req_ss & 1))
1457 return -EINVAL;
1458
1459 /*
1460 * If only one slice is enabled and subslice count is below the
1461 * device full enablement, it must be at most half of all the
1462 * available subslices.
1463 */
1464 if (req_s == 1 && req_ss < hw_ss_per_s &&
1465 req_ss > (hw_ss_per_s / 2))
1466 return -EINVAL;
1467
1468 /* ABI restriction - VME use case only. */
1469
1470 /* All slices or one slice only. */
1471 if (req_s != 1 && req_s != hw_s)
1472 return -EINVAL;
1473
1474 /*
1475 * Half subslices or full enablement only when one slice is
1476 * enabled.
1477 */
1478 if (req_s == 1 &&
1479 (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1480 return -EINVAL;
1481
1482 /* No EU configuration changes. */
1483 if ((user->min_eus_per_subslice !=
1484 device->max_eus_per_subslice) ||
1485 (user->max_eus_per_subslice !=
1486 device->max_eus_per_subslice))
1487 return -EINVAL;
1488 }
1489
1490 return 0;
1491}
1492
1493static int set_sseu(struct i915_gem_context *ctx,
1494 struct drm_i915_gem_context_param *args)
1495{
1496 struct drm_i915_private *i915 = ctx->i915;
1497 struct drm_i915_gem_context_param_sseu user_sseu;
1498 struct intel_context *ce;
1499 struct intel_sseu sseu;
1500 unsigned long lookup;
1501 int ret;
1502
1503 if (args->size < sizeof(user_sseu))
1504 return -EINVAL;
1505
1506 if (GRAPHICS_VER(i915) != 11)
1507 return -ENODEV;
1508
1509 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1510 sizeof(user_sseu)))
1511 return -EFAULT;
1512
1513 if (user_sseu.rsvd)
1514 return -EINVAL;
1515
1516 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1517 return -EINVAL;
1518
1519 lookup = 0;
1520 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1521 lookup |= LOOKUP_USER_INDEX;
1522
1523 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1524 if (IS_ERR(ce))
1525 return PTR_ERR(ce);
1526
1527 /* Only the render engine supports RPCS configuration. */
1528 if (ce->engine->class != RENDER_CLASS) {
1529 ret = -ENODEV;
1530 goto out_ce;
1531 }
1532
1533 ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
1534 if (ret)
1535 goto out_ce;
1536
1537 ret = intel_context_reconfigure_sseu(ce, sseu);
1538 if (ret)
1539 goto out_ce;
1540
1541 args->size = sizeof(user_sseu);
1542
1543out_ce:
1544 intel_context_put(ce);
1545 return ret;
1546}
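
/*
 * Illustrative userspace sketch (not part of this file): driving set_sseu()
 * above via CONTEXT_SETPARAM. Assumes an open DRM fd and context id "ctx_id";
 * the masks are placeholders and must satisfy the checks in
 * i915_gem_user_to_context_sseu(). Error handling elided.
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = {
 *			.engine_class = I915_ENGINE_CLASS_RENDER,
 *			.engine_instance = 0,
 *		},
 *		.slice_mask = 0x1,
 *		.subslice_mask = 0xf,
 *		.min_eus_per_subslice = 8,
 *		.max_eus_per_subslice = 8,
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (uintptr_t)&sseu,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */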
1547
1548struct set_engines {
1549 struct i915_gem_context *ctx;
1550 struct i915_gem_engines *engines;
1551};
1552
1553static int
1554set_engines__load_balance(struct i915_user_extension __user *base, void *data)
1555{
1556 struct i915_context_engines_load_balance __user *ext =
1557 container_of_user(base, typeof(*ext), base);
1558 const struct set_engines *set = data;
1559 struct drm_i915_private *i915 = set->ctx->i915;
1560 struct intel_engine_cs *stack[16];
1561 struct intel_engine_cs **siblings;
1562 struct intel_context *ce;
1563 u16 num_siblings, idx;
1564 unsigned int n;
1565 int err;
1566
1567 if (!HAS_EXECLISTS(i915))
1568 return -ENODEV;
1569
1570 if (intel_uc_uses_guc_submission(&i915->gt.uc))
1571 return -ENODEV; /* not implemented yet */
1572
1573 if (get_user(idx, &ext->engine_index))
1574 return -EFAULT;
1575
1576 if (idx >= set->engines->num_engines) {
1577 drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
1578 idx, set->engines->num_engines);
1579 return -EINVAL;
1580 }
1581
1582 idx = array_index_nospec(idx, set->engines->num_engines);
1583 if (set->engines->engines[idx]) {
1584 drm_dbg(&i915->drm,
1585 "Invalid placement[%d], already occupied\n", idx);
1586 return -EEXIST;
1587 }
1588
1589 if (get_user(num_siblings, &ext->num_siblings))
1590 return -EFAULT;
1591
1592 err = check_user_mbz(&ext->flags);
1593 if (err)
1594 return err;
1595
1596 err = check_user_mbz(&ext->mbz64);
1597 if (err)
1598 return err;
1599
1600 siblings = stack;
1601 if (num_siblings > ARRAY_SIZE(stack)) {
1602 siblings = kmalloc_array(num_siblings,
1603 sizeof(*siblings),
1604 GFP_KERNEL);
1605 if (!siblings)
1606 return -ENOMEM;
1607 }
1608
1609 for (n = 0; n < num_siblings; n++) {
1610 struct i915_engine_class_instance ci;
1611
1612 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
1613 err = -EFAULT;
1614 goto out_siblings;
1615 }
1616
1617 siblings[n] = intel_engine_lookup_user(i915,
1618 ci.engine_class,
1619 ci.engine_instance);
1620 if (!siblings[n]) {
1621 drm_dbg(&i915->drm,
1622 "Invalid sibling[%d]: { class:%d, inst:%d }\n",
1623 n, ci.engine_class, ci.engine_instance);
1624 err = -EINVAL;
1625 goto out_siblings;
1626 }
1627 }
1628
1629 ce = intel_execlists_create_virtual(siblings, n);
1630 if (IS_ERR(ce)) {
1631 err = PTR_ERR(ce);
1632 goto out_siblings;
1633 }
1634
1635 intel_context_set_gem(ce, set->ctx);
1636
1637 if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
1638 intel_context_put(ce);
1639 err = -EEXIST;
1640 goto out_siblings;
1641 }
1642
1643out_siblings:
1644 if (siblings != stack)
1645 kfree(siblings);
1646
1647 return err;
1648}
1649
1650static int
1651set_engines__bond(struct i915_user_extension __user *base, void *data)
1652{
1653 struct i915_context_engines_bond __user *ext =
1654 container_of_user(base, typeof(*ext), base);
1655 const struct set_engines *set = data;
1656 struct drm_i915_private *i915 = set->ctx->i915;
1657 struct i915_engine_class_instance ci;
1658 struct intel_engine_cs *virtual;
1659 struct intel_engine_cs *master;
1660 u16 idx, num_bonds;
1661 int err, n;
1662
1663 if (get_user(idx, &ext->virtual_index))
1664 return -EFAULT;
1665
1666 if (idx >= set->engines->num_engines) {
1667 drm_dbg(&i915->drm,
1668 "Invalid index for virtual engine: %d >= %d\n",
1669 idx, set->engines->num_engines);
1670 return -EINVAL;
1671 }
1672
1673 idx = array_index_nospec(idx, set->engines->num_engines);
1674 if (!set->engines->engines[idx]) {
1675 drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
1676 return -EINVAL;
1677 }
1678 virtual = set->engines->engines[idx]->engine;
1679
1680 err = check_user_mbz(&ext->flags);
1681 if (err)
1682 return err;
1683
1684 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
1685 err = check_user_mbz(&ext->mbz64[n]);
1686 if (err)
1687 return err;
1688 }
1689
1690 if (copy_from_user(&ci, &ext->master, sizeof(ci)))
1691 return -EFAULT;
1692
1693 master = intel_engine_lookup_user(i915,
1694 ci.engine_class, ci.engine_instance);
1695 if (!master) {
1696 drm_dbg(&i915->drm,
1697 "Unrecognised master engine: { class:%u, instance:%u }\n",
1698 ci.engine_class, ci.engine_instance);
1699 return -EINVAL;
1700 }
1701
1702 if (get_user(num_bonds, &ext->num_bonds))
1703 return -EFAULT;
1704
1705 for (n = 0; n < num_bonds; n++) {
1706 struct intel_engine_cs *bond;
1707
1708 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
1709 return -EFAULT;
1710
1711 bond = intel_engine_lookup_user(i915,
1712 ci.engine_class,
1713 ci.engine_instance);
1714 if (!bond) {
1715 drm_dbg(&i915->drm,
1716 "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
1717 n, ci.engine_class, ci.engine_instance);
1718 return -EINVAL;
1719 }
1720
1721 /*
1722 * A non-virtual engine has no siblings to choose between; and
1723 * a submit fence will always be directed to the one engine.
1724 */
1725 if (intel_engine_is_virtual(virtual)) {
1726 err = intel_virtual_engine_attach_bond(virtual,
1727 master,
1728 bond);
1729 if (err)
1730 return err;
1731 }
1732 }
1733
1734 return 0;
1735}
1736
1737static const i915_user_extension_fn set_engines__extensions[] = {
1738 [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
1739 [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
1740};
1741
1742static int
1743set_engines(struct i915_gem_context *ctx,
1744 const struct drm_i915_gem_context_param *args)
1745{
1746 struct drm_i915_private *i915 = ctx->i915;
1747 struct i915_context_param_engines __user *user =
1748 u64_to_user_ptr(args->value);
1749 struct set_engines set = { .ctx = ctx };
1750 unsigned int num_engines, n;
1751 u64 extensions;
1752 int err;
1753
1754 if (!args->size) { /* switch back to legacy user_ring_map */
1755 if (!i915_gem_context_user_engines(ctx))
1756 return 0;
1757
1758 set.engines = default_engines(ctx);
1759 if (IS_ERR(set.engines))
1760 return PTR_ERR(set.engines);
1761
1762 goto replace;
1763 }
1764
1765 BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
1766 if (args->size < sizeof(*user) ||
1767 !IS_ALIGNED(args->size, sizeof(*user->engines))) {
1768 drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
1769 args->size);
1770 return -EINVAL;
1771 }
1772
1773 /*
1774 * Note that I915_EXEC_RING_MASK limits execbuf to only using the
1775 * first 64 engines defined here.
1776 */
1777 num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
1778 set.engines = alloc_engines(num_engines);
1779 if (!set.engines)
1780 return -ENOMEM;
1781
1782 for (n = 0; n < num_engines; n++) {
1783 struct i915_engine_class_instance ci;
1784 struct intel_engine_cs *engine;
1785 struct intel_context *ce;
1786
1787 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
1788 __free_engines(set.engines, n);
1789 return -EFAULT;
1790 }
1791
1792 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
1793 ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
1794 set.engines->engines[n] = NULL;
1795 continue;
1796 }
1797
1798 engine = intel_engine_lookup_user(ctx->i915,
1799 ci.engine_class,
1800 ci.engine_instance);
1801 if (!engine) {
1802 drm_dbg(&i915->drm,
1803 "Invalid engine[%d]: { class:%d, instance:%d }\n",
1804 n, ci.engine_class, ci.engine_instance);
1805 __free_engines(set.engines, n);
1806 return -ENOENT;
1807 }
1808
1809 ce = intel_context_create(engine);
1810 if (IS_ERR(ce)) {
1811 __free_engines(set.engines, n);
1812 return PTR_ERR(ce);
1813 }
1814
1815 intel_context_set_gem(ce, ctx);
1816
1817 set.engines->engines[n] = ce;
1818 }
1819 set.engines->num_engines = num_engines;
1820
1821 err = -EFAULT;
1822 if (!get_user(extensions, &user->extensions))
1823 err = i915_user_extensions(u64_to_user_ptr(extensions),
1824 set_engines__extensions,
1825 ARRAY_SIZE(set_engines__extensions),
1826 &set);
1827 if (err) {
1828 free_engines(set.engines);
1829 return err;
1830 }
1831
1832replace:
1833 mutex_lock(&ctx->engines_mutex);
1834 if (i915_gem_context_is_closed(ctx)) {
1835 mutex_unlock(&ctx->engines_mutex);
1836 free_engines(set.engines);
1837 return -ENOENT;
1838 }
1839 if (args->size)
1840 i915_gem_context_set_user_engines(ctx);
1841 else
1842 i915_gem_context_clear_user_engines(ctx);
1843 set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1);
1844 mutex_unlock(&ctx->engines_mutex);
1845
1846 /* Keep track of old engine sets for kill_context() */
1847 engines_idle_release(ctx, set.engines);
1848
1849 return 0;
1850}
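
/*
 * Illustrative userspace sketch (not part of this file): replacing the legacy
 * ring map with an explicit two-engine map, as parsed by set_engines() above.
 * Assumes an open DRM fd and context id "ctx_id", and uses the
 * I915_DEFINE_CONTEXT_PARAM_ENGINES() helper from the uAPI header;
 * load-balance/bond extensions would be chained through .extensions.
 * Error handling elided.
 *
 *	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
 *		.extensions = 0,
 *		.engines = {
 *			{ .engine_class = I915_ENGINE_CLASS_RENDER, .engine_instance = 0 },
 *			{ .engine_class = I915_ENGINE_CLASS_COPY, .engine_instance = 0 },
 *		},
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_ENGINES,
 *		.size = sizeof(engines),
 *		.value = (uintptr_t)&engines,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */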
1851
1852static int
1853get_engines(struct i915_gem_context *ctx,
1854 struct drm_i915_gem_context_param *args)
1855{
1856 struct i915_context_param_engines __user *user;
1857 struct i915_gem_engines *e;
1858 size_t n, count, size;
1859 bool user_engines;
1860 int err = 0;
1861
1862 e = __context_engines_await(ctx, &user_engines);
1863 if (!e)
1864 return -ENOENT;
1865
1866 if (!user_engines) {
1867 i915_sw_fence_complete(&e->fence);
1868 args->size = 0;
1869 return 0;
1870 }
1871
1872 count = e->num_engines;
1873
1874 /* Be paranoid in case we have an impedance mismatch */
1875 if (!check_struct_size(user, engines, count, &size)) {
1876 err = -EINVAL;
1877 goto err_free;
1878 }
1879 if (overflows_type(size, args->size)) {
1880 err = -EINVAL;
1881 goto err_free;
1882 }
1883
1884 if (!args->size) {
1885 args->size = size;
1886 goto err_free;
1887 }
1888
1889 if (args->size < size) {
1890 err = -EINVAL;
1891 goto err_free;
1892 }
1893
1894 user = u64_to_user_ptr(args->value);
1895 if (put_user(0, &user->extensions)) {
1896 err = -EFAULT;
1897 goto err_free;
1898 }
1899
1900 for (n = 0; n < count; n++) {
1901 struct i915_engine_class_instance ci = {
1902 .engine_class = I915_ENGINE_CLASS_INVALID,
1903 .engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
1904 };
1905
1906 if (e->engines[n]) {
1907 ci.engine_class = e->engines[n]->engine->uabi_class;
1908 ci.engine_instance = e->engines[n]->engine->uabi_instance;
1909 }
1910
1911 if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
1912 err = -EFAULT;
1913 goto err_free;
1914 }
1915 }
1916
1917 args->size = size;
1918
1919err_free:
1920 i915_sw_fence_complete(&e->fence);
1921 return err;
1922}
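
/*
 * Illustrative userspace sketch (not part of this file): reading the map back
 * with the size-probe protocol implemented above. A zero-sized query returns
 * the required size (zero if the legacy map is in use); a second call fills
 * the array. Assumes an open DRM fd and context id "ctx_id"; error handling
 * elided.
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_ENGINES,
 *		.size = 0,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);	// p.size = bytes needed
 *	p.value = (uintptr_t)malloc(p.size);
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);	// engines copied to p.value
 */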
1923
1924static int
1925set_persistence(struct i915_gem_context *ctx,
1926 const struct drm_i915_gem_context_param *args)
1927{
1928 if (args->size)
1929 return -EINVAL;
1930
1931 return __context_set_persistence(ctx, args->value);
1932}
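
/*
 * Illustrative userspace sketch (not part of this file): opting a context out
 * of persistence so that its remaining requests are cancelled by
 * kill_context() when the context is closed. Assumes an open DRM fd and
 * context id "ctx_id"; error handling elided.
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_PERSISTENCE,
 *		.value = 0,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */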
1933
1934static int __apply_priority(struct intel_context *ce, void *arg)
1935{
1936 struct i915_gem_context *ctx = arg;
1937
1938 if (!intel_engine_has_timeslices(ce->engine))
1939 return 0;
1940
1941 if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
1942 intel_context_set_use_semaphores(ce);
1943 else
1944 intel_context_clear_use_semaphores(ce);
1945
1946 return 0;
1947}
1948
1949static int set_priority(struct i915_gem_context *ctx,
1950 const struct drm_i915_gem_context_param *args)
1951{
1952 s64 priority = args->value;
1953
1954 if (args->size)
1955 return -EINVAL;
1956
1957 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
1958 return -ENODEV;
1959
1960 if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
1961 priority < I915_CONTEXT_MIN_USER_PRIORITY)
1962 return -EINVAL;
1963
1964 if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
1965 !capable(CAP_SYS_NICE))
1966 return -EPERM;
1967
1968 ctx->sched.priority = priority;
1969 context_apply_all(ctx, __apply_priority, ctx);
1970
1971 return 0;
1972}
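
/*
 * Illustrative userspace sketch (not part of this file): lowering a context's
 * priority. Raising it above the default requires CAP_SYS_NICE, as enforced
 * by set_priority() above. Assumes an open DRM fd and context id "ctx_id";
 * error handling elided.
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_PRIORITY,
 *		.value = -512,	// within [I915_CONTEXT_MIN_USER_PRIORITY, I915_CONTEXT_MAX_USER_PRIORITY]
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */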
1973
1974static int ctx_setparam(struct drm_i915_file_private *fpriv,
1975 struct i915_gem_context *ctx,
1976 struct drm_i915_gem_context_param *args)
1977{
1978 int ret = 0;
1979
1980 switch (args->param) {
1981 case I915_CONTEXT_PARAM_NO_ZEROMAP:
1982 if (args->size)
1983 ret = -EINVAL;
1984 else if (args->value)
1985 set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1986 else
1987 clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
1988 break;
1989
1990 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
1991 if (args->size)
1992 ret = -EINVAL;
1993 else if (args->value)
1994 i915_gem_context_set_no_error_capture(ctx);
1995 else
1996 i915_gem_context_clear_no_error_capture(ctx);
1997 break;
1998
1999 case I915_CONTEXT_PARAM_BANNABLE:
2000 if (args->size)
2001 ret = -EINVAL;
2002 else if (!capable(CAP_SYS_ADMIN) && !args->value)
2003 ret = -EPERM;
2004 else if (args->value)
2005 i915_gem_context_set_bannable(ctx);
2006 else
2007 i915_gem_context_clear_bannable(ctx);
2008 break;
2009
2010 case I915_CONTEXT_PARAM_RECOVERABLE:
2011 if (args->size)
2012 ret = -EINVAL;
2013 else if (args->value)
2014 i915_gem_context_set_recoverable(ctx);
2015 else
2016 i915_gem_context_clear_recoverable(ctx);
2017 break;
2018
2019 case I915_CONTEXT_PARAM_PRIORITY:
2020 ret = set_priority(ctx, args);
2021 break;
2022
2023 case I915_CONTEXT_PARAM_SSEU:
2024 ret = set_sseu(ctx, args);
2025 break;
2026
2027 case I915_CONTEXT_PARAM_VM:
2028 ret = set_ppgtt(fpriv, ctx, args);
2029 break;
2030
2031 case I915_CONTEXT_PARAM_ENGINES:
2032 ret = set_engines(ctx, args);
2033 break;
2034
2035 case I915_CONTEXT_PARAM_PERSISTENCE:
2036 ret = set_persistence(ctx, args);
2037 break;
2038
2039 case I915_CONTEXT_PARAM_RINGSIZE:
2040 ret = set_ringsize(ctx, args);
2041 break;
2042
2043 case I915_CONTEXT_PARAM_BAN_PERIOD:
2044 default:
2045 ret = -EINVAL;
2046 break;
2047 }
2048
2049 return ret;
2050}
2051
2052struct create_ext {
2053 struct i915_gem_context *ctx;
2054 struct drm_i915_file_private *fpriv;
2055};
2056
2057static int create_setparam(struct i915_user_extension __user *ext, void *data)
2058{
2059 struct drm_i915_gem_context_create_ext_setparam local;
2060 const struct create_ext *arg = data;
2061
2062 if (copy_from_user(&local, ext, sizeof(local)))
2063 return -EFAULT;
2064
2065 if (local.param.ctx_id)
2066 return -EINVAL;
2067
2068 return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
2069}
2070
2071static int copy_ring_size(struct intel_context *dst,
2072 struct intel_context *src)
2073{
2074 long sz;
2075
2076 sz = intel_context_get_ring_size(src);
2077 if (sz < 0)
2078 return sz;
2079
2080 return intel_context_set_ring_size(dst, sz);
2081}
2082
2083static int clone_engines(struct i915_gem_context *dst,
2084 struct i915_gem_context *src)
2085{
2086 struct i915_gem_engines *clone, *e;
2087 bool user_engines;
2088 unsigned long n;
2089
2090 e = __context_engines_await(src, &user_engines);
2091 if (!e)
2092 return -ENOENT;
2093
2094 clone = alloc_engines(e->num_engines);
2095 if (!clone)
2096 goto err_unlock;
2097
2098 for (n = 0; n < e->num_engines; n++) {
2099 struct intel_engine_cs *engine;
2100
2101 if (!e->engines[n]) {
2102 clone->engines[n] = NULL;
2103 continue;
2104 }
2105 engine = e->engines[n]->engine;
2106
2107 /*
2108 * Virtual engines are singletons; they can only exist
2109 * inside a single context, because they embed their
2110 * HW context... As each virtual context implies a single
2111 * timeline (each engine can only dequeue a single request
2112 * at any time), it would be surprising for two contexts
2113 * to use the same engine. So let's create a copy of
2114 * the virtual engine instead.
2115 */
2116 if (intel_engine_is_virtual(engine))
2117 clone->engines[n] =
2118 intel_execlists_clone_virtual(engine);
2119 else
2120 clone->engines[n] = intel_context_create(engine);
2121 if (IS_ERR_OR_NULL(clone->engines[n])) {
2122 __free_engines(clone, n);
2123 goto err_unlock;
2124 }
2125
2126 intel_context_set_gem(clone->engines[n], dst);
2127
2128 /* Copy across the preferred ringsize */
2129 if (copy_ring_size(clone->engines[n], e->engines[n])) {
2130 __free_engines(clone, n + 1);
2131 goto err_unlock;
2132 }
2133 }
2134 clone->num_engines = n;
2135 i915_sw_fence_complete(&e->fence);
2136
2137 /* Serialised by constructor */
2138 engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1));
2139 if (user_engines)
2140 i915_gem_context_set_user_engines(dst);
2141 else
2142 i915_gem_context_clear_user_engines(dst);
2143 return 0;
2144
2145err_unlock:
2146 i915_sw_fence_complete(&e->fence);
2147 return -ENOMEM;
2148}
2149
2150static int clone_flags(struct i915_gem_context *dst,
2151 struct i915_gem_context *src)
2152{
2153 dst->user_flags = src->user_flags;
2154 return 0;
2155}
2156
2157static int clone_schedattr(struct i915_gem_context *dst,
2158 struct i915_gem_context *src)
2159{
2160 dst->sched = src->sched;
2161 return 0;
2162}
2163
2164static int clone_sseu(struct i915_gem_context *dst,
2165 struct i915_gem_context *src)
2166{
2167 struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
2168 struct i915_gem_engines *clone;
2169 unsigned long n;
2170 int err;
2171
2172	/* no locking required; sole access under constructor */
2173 clone = __context_engines_static(dst);
2174 if (e->num_engines != clone->num_engines) {
2175 err = -EINVAL;
2176 goto unlock;
2177 }
2178
2179 for (n = 0; n < e->num_engines; n++) {
2180 struct intel_context *ce = e->engines[n];
2181
2182 if (clone->engines[n]->engine->class != ce->engine->class) {
2183 /* Must have compatible engine maps! */
2184 err = -EINVAL;
2185 goto unlock;
2186 }
2187
2188 /* serialises with set_sseu */
2189 err = intel_context_lock_pinned(ce);
2190 if (err)
2191 goto unlock;
2192
2193 clone->engines[n]->sseu = ce->sseu;
2194 intel_context_unlock_pinned(ce);
2195 }
2196
2197 err = 0;
2198unlock:
2199 i915_gem_context_unlock_engines(src);
2200 return err;
2201}
2202
2203static int clone_timeline(struct i915_gem_context *dst,
2204 struct i915_gem_context *src)
2205{
2206 if (src->timeline)
2207 __assign_timeline(dst, src->timeline);
2208
2209 return 0;
2210}
2211
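/*
 * I915_CONTEXT_CLONE_VM: share the source context's ppgtt with the
 * destination. A temporary reference is taken under RCU and the vm is
 * installed in dst under dst->mutex.
 */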
2212static int clone_vm(struct i915_gem_context *dst,
2213 struct i915_gem_context *src)
2214{
2215 struct i915_address_space *vm;
2216 int err = 0;
2217
2218 if (!rcu_access_pointer(src->vm))
2219 return 0;
2220
2221 rcu_read_lock();
2222 vm = context_get_vm_rcu(src);
2223 rcu_read_unlock();
2224
2225 if (!mutex_lock_interruptible(&dst->mutex)) {
2226 __assign_ppgtt(dst, vm);
2227 mutex_unlock(&dst->mutex);
2228 } else {
2229 err = -EINTR;
2230 }
2231
2232 i915_vm_put(vm);
2233 return err;
2234}
2235
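/*
 * I915_CONTEXT_CREATE_EXT_CLONE: copy selected state from an existing
 * context into the one being created. Each I915_CONTEXT_CLONE_* flag maps,
 * via ilog2(), onto an entry in the fn[] table below; the BUILD_BUG_ON
 * keeps the table in sync with I915_CONTEXT_CLONE_UNKNOWN.
 */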
2236static int create_clone(struct i915_user_extension __user *ext, void *data)
2237{
2238 static int (* const fn[])(struct i915_gem_context *dst,
2239 struct i915_gem_context *src) = {
2240#define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
2241 MAP(ENGINES, clone_engines),
2242 MAP(FLAGS, clone_flags),
2243 MAP(SCHEDATTR, clone_schedattr),
2244 MAP(SSEU, clone_sseu),
2245 MAP(TIMELINE, clone_timeline),
2246 MAP(VM, clone_vm),
2247#undef MAP
2248 };
2249 struct drm_i915_gem_context_create_ext_clone local;
2250 const struct create_ext *arg = data;
2251 struct i915_gem_context *dst = arg->ctx;
2252 struct i915_gem_context *src;
2253 int err, bit;
2254
2255 if (copy_from_user(&local, ext, sizeof(local)))
2256 return -EFAULT;
2257
2258 BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
2259 I915_CONTEXT_CLONE_UNKNOWN);
2260
2261 if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
2262 return -EINVAL;
2263
2264 if (local.rsvd)
2265 return -EINVAL;
2266
2267 rcu_read_lock();
2268 src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
2269 rcu_read_unlock();
2270 if (!src)
2271 return -ENOENT;
2272
2273 GEM_BUG_ON(src == dst);
2274
2275 for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
2276 if (!(local.flags & BIT(bit)))
2277 continue;
2278
2279 err = fn[bit](dst, src);
2280 if (err)
2281 return err;
2282 }
2283
2284 return 0;
2285}
2286
2287static const i915_user_extension_fn create_extensions[] = {
2288 [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
2289 [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
2290};
2291
2292static bool client_is_banned(struct drm_i915_file_private *file_priv)
2293{
2294 return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
2295}
2296
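/*
 * DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT: create a new logical context and,
 * optionally, run a chain of create-time extensions against it before the
 * id is published to userspace.
 *
 * Illustrative userspace sketch (hypothetical fd and values, error handling
 * omitted) raising the priority of the new context at creation; priorities
 * above the default require CAP_SYS_NICE:
 *
 *	struct drm_i915_gem_context_create_ext_setparam p = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_PRIORITY,
 *			.value = 512,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext arg = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (__u64)(uintptr_t)&p,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &arg);
 *
 * after which arg.ctx_id holds the id of the new context.
 */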
2297int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2298 struct drm_file *file)
2299{
2300 struct drm_i915_private *i915 = to_i915(dev);
2301 struct drm_i915_gem_context_create_ext *args = data;
2302 struct create_ext ext_data;
2303 int ret;
2304 u32 id;
2305
2306 if (!DRIVER_CAPS(i915)->has_logical_contexts)
2307 return -ENODEV;
2308
2309 if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
2310 return -EINVAL;
2311
2312 ret = intel_gt_terminally_wedged(&i915->gt);
2313 if (ret)
2314 return ret;
2315
2316 ext_data.fpriv = file->driver_priv;
2317 if (client_is_banned(ext_data.fpriv)) {
2318 drm_dbg(&i915->drm,
2319 "client %s[%d] banned from creating ctx\n",
2320 current->comm, task_pid_nr(current));
2321 return -EIO;
2322 }
2323
2324 ext_data.ctx = i915_gem_create_context(i915, args->flags);
2325 if (IS_ERR(ext_data.ctx))
2326 return PTR_ERR(ext_data.ctx);
2327
2328 if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
2329 ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
2330 create_extensions,
2331 ARRAY_SIZE(create_extensions),
2332 &ext_data);
2333 if (ret)
2334 goto err_ctx;
2335 }
2336
2337 ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id);
2338 if (ret < 0)
2339 goto err_ctx;
2340
2341 args->ctx_id = id;
2342 drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);
2343
2344 return 0;
2345
2346err_ctx:
2347 context_close(ext_data.ctx);
2348 return ret;
2349}
2350
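/*
 * DRM_IOCTL_I915_GEM_CONTEXT_DESTROY: drop userspace's handle on a context.
 * A zero id is rejected; otherwise the id is removed from the per-file
 * xarray and the reference is released via context_close().
 */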
2351int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2352 struct drm_file *file)
2353{
2354 struct drm_i915_gem_context_destroy *args = data;
2355 struct drm_i915_file_private *file_priv = file->driver_priv;
2356 struct i915_gem_context *ctx;
2357
2358 if (args->pad != 0)
2359 return -EINVAL;
2360
2361 if (!args->ctx_id)
2362 return -ENOENT;
2363
2364 ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
2365 if (!ctx)
2366 return -ENOENT;
2367
2368 context_close(ctx);
2369 return 0;
2370}
2371
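/*
 * Report the current SSEU configuration of the selected engine back to
 * userspace. A zero args->size is treated as a query and only the expected
 * struct size is returned.
 */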
2372static int get_sseu(struct i915_gem_context *ctx,
2373 struct drm_i915_gem_context_param *args)
2374{
2375 struct drm_i915_gem_context_param_sseu user_sseu;
2376 struct intel_context *ce;
2377 unsigned long lookup;
2378 int err;
2379
2380 if (args->size == 0)
2381 goto out;
2382 else if (args->size < sizeof(user_sseu))
2383 return -EINVAL;
2384
2385 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2386 sizeof(user_sseu)))
2387 return -EFAULT;
2388
2389 if (user_sseu.rsvd)
2390 return -EINVAL;
2391
2392 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2393 return -EINVAL;
2394
2395 lookup = 0;
2396 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2397 lookup |= LOOKUP_USER_INDEX;
2398
2399 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2400 if (IS_ERR(ce))
2401 return PTR_ERR(ce);
2402
2403 err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2404 if (err) {
2405 intel_context_put(ce);
2406 return err;
2407 }
2408
2409 user_sseu.slice_mask = ce->sseu.slice_mask;
2410 user_sseu.subslice_mask = ce->sseu.subslice_mask;
2411 user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2412 user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2413
2414 intel_context_unlock_pinned(ce);
2415 intel_context_put(ce);
2416
2417 if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2418 sizeof(user_sseu)))
2419 return -EFAULT;
2420
2421out:
2422 args->size = sizeof(user_sseu);
2423
2424 return 0;
2425}
2426
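/*
 * DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM: read back a single context
 * parameter. Scalar parameters are returned directly in args->value;
 * SSEU and the engine map are written through the user pointer supplied
 * in args->value.
 */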
2427int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2428 struct drm_file *file)
2429{
2430 struct drm_i915_file_private *file_priv = file->driver_priv;
2431 struct drm_i915_gem_context_param *args = data;
2432 struct i915_gem_context *ctx;
2433 int ret = 0;
2434
2435 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2436 if (!ctx)
2437 return -ENOENT;
2438
2439 switch (args->param) {
2440 case I915_CONTEXT_PARAM_NO_ZEROMAP:
2441 args->size = 0;
2442 args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
2443 break;
2444
2445 case I915_CONTEXT_PARAM_GTT_SIZE:
2446 args->size = 0;
2447 rcu_read_lock();
2448 if (rcu_access_pointer(ctx->vm))
2449 args->value = rcu_dereference(ctx->vm)->total;
2450 else
2451 args->value = to_i915(dev)->ggtt.vm.total;
2452 rcu_read_unlock();
2453 break;
2454
2455 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2456 args->size = 0;
2457 args->value = i915_gem_context_no_error_capture(ctx);
2458 break;
2459
2460 case I915_CONTEXT_PARAM_BANNABLE:
2461 args->size = 0;
2462 args->value = i915_gem_context_is_bannable(ctx);
2463 break;
2464
2465 case I915_CONTEXT_PARAM_RECOVERABLE:
2466 args->size = 0;
2467 args->value = i915_gem_context_is_recoverable(ctx);
2468 break;
2469
2470 case I915_CONTEXT_PARAM_PRIORITY:
2471 args->size = 0;
2472 args->value = ctx->sched.priority;
2473 break;
2474
2475 case I915_CONTEXT_PARAM_SSEU:
2476 ret = get_sseu(ctx, args);
2477 break;
2478
2479 case I915_CONTEXT_PARAM_VM:
2480 ret = get_ppgtt(file_priv, ctx, args);
2481 break;
2482
2483 case I915_CONTEXT_PARAM_ENGINES:
2484 ret = get_engines(ctx, args);
2485 break;
2486
2487 case I915_CONTEXT_PARAM_PERSISTENCE:
2488 args->size = 0;
2489 args->value = i915_gem_context_is_persistent(ctx);
2490 break;
2491
2492 case I915_CONTEXT_PARAM_RINGSIZE:
2493 ret = get_ringsize(ctx, args);
2494 break;
2495
2496 case I915_CONTEXT_PARAM_BAN_PERIOD:
2497 default:
2498 ret = -EINVAL;
2499 break;
2500 }
2501
2502 i915_gem_context_put(ctx);
2503 return ret;
2504}
2505
2506int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2507 struct drm_file *file)
2508{
2509 struct drm_i915_file_private *file_priv = file->driver_priv;
2510 struct drm_i915_gem_context_param *args = data;
2511 struct i915_gem_context *ctx;
2512 int ret;
2513
2514 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2515 if (!ctx)
2516 return -ENOENT;
2517
2518 ret = ctx_setparam(file_priv, ctx, args);
2519
2520 i915_gem_context_put(ctx);
2521 return ret;
2522}
2523
2524int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2525 void *data, struct drm_file *file)
2526{
2527 struct drm_i915_private *i915 = to_i915(dev);
2528 struct drm_i915_reset_stats *args = data;
2529 struct i915_gem_context *ctx;
2530 int ret;
2531
2532 if (args->flags || args->pad)
2533 return -EINVAL;
2534
2535 ret = -ENOENT;
2536 rcu_read_lock();
2537 ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
2538 if (!ctx)
2539 goto out;
2540
2541 /*
2542 * We opt for unserialised reads here. This may result in tearing
2543 * in the extremely unlikely event of a GPU hang on this context
2544 * as we are querying them. If we need that extra layer of protection,
2545 * we should wrap the hangstats with a seqlock.
2546 */
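	/*
	 * For reference, a possible (untested) shape for such a seqlock;
	 * ctx->hangstats_lock is hypothetical and does not exist today:
	 *
	 *	unsigned int seq;
	 *
	 *	do {
	 *		seq = read_seqbegin(&ctx->hangstats_lock);
	 *		args->batch_active = atomic_read(&ctx->guilty_count);
	 *		args->batch_pending = atomic_read(&ctx->active_count);
	 *	} while (read_seqretry(&ctx->hangstats_lock, seq));
	 *
	 * with the reset paths taking write_seqlock() around their updates.
	 */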
2547
2548 if (capable(CAP_SYS_ADMIN))
2549 args->reset_count = i915_reset_count(&i915->gpu_error);
2550 else
2551 args->reset_count = 0;
2552
2553 args->batch_active = atomic_read(&ctx->guilty_count);
2554 args->batch_pending = atomic_read(&ctx->active_count);
2555
2556 ret = 0;
2557out:
2558 rcu_read_unlock();
2559 return ret;
2560}
2561
2562/* GEM context-engines iterator: for_each_gem_engine() */
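/*
 * Typical iteration pattern (sketch; do_something() is a placeholder):
 *
 *	struct i915_gem_engines_iter it;
 *	struct intel_context *ce;
 *
 *	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
 *		do_something(ce);
 *	i915_gem_context_unlock_engines(ctx);
 */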
2563struct intel_context *
2564i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2565{
2566 const struct i915_gem_engines *e = it->engines;
2567	struct intel_context *ce;
2568
2569 if (unlikely(!e))
2570 return NULL;
2571
2572 do {
2573 if (it->idx >= e->num_engines)
2574 return NULL;
2575
2576		ce = e->engines[it->idx++];
2577	} while (!ce);
2578
2579	return ce;
2580}
2581
2582#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2583#include "selftests/mock_context.c"
2584#include "selftests/i915_gem_context.c"
2585#endif
2586
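/*
 * Slab cache for i915_lut_handle objects, wired into the i915_globals
 * infrastructure via the shrink/exit hooks below.
 */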
2587static void i915_global_gem_context_shrink(void)
2588{
2589 kmem_cache_shrink(global.slab_luts);
2590}
2591
2592static void i915_global_gem_context_exit(void)
2593{
2594 kmem_cache_destroy(global.slab_luts);
2595}
2596
2597static struct i915_global_gem_context global = { {
2598 .shrink = i915_global_gem_context_shrink,
2599 .exit = i915_global_gem_context_exit,
2600} };
2601
2602int __init i915_global_gem_context_init(void)
2603{
2604 global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2605 if (!global.slab_luts)
2606 return -ENOMEM;
2607
2608 i915_global_register(&global.base);
2609 return 0;
2610}