   1// SPDX-License-Identifier: MIT
   2/*
   3 * Copyright © 2019 Intel Corporation
   4 */
   5
   6#include <drm/drm_managed.h>
   7#include <drm/intel-gtt.h>
   8
   9#include "gem/i915_gem_internal.h"
  10#include "gem/i915_gem_lmem.h"
  11#include "pxp/intel_pxp.h"
  12
  13#include "i915_drv.h"
  14#include "i915_perf_oa_regs.h"
  15#include "i915_reg.h"
  16#include "intel_context.h"
  17#include "intel_engine_pm.h"
  18#include "intel_engine_regs.h"
  19#include "intel_ggtt_gmch.h"
  20#include "intel_gt.h"
  21#include "intel_gt_buffer_pool.h"
  22#include "intel_gt_clock_utils.h"
  23#include "intel_gt_debugfs.h"
  24#include "intel_gt_mcr.h"
  25#include "intel_gt_pm.h"
  26#include "intel_gt_regs.h"
  27#include "intel_gt_requests.h"
  28#include "intel_migrate.h"
  29#include "intel_mocs.h"
  30#include "intel_pci_config.h"
  31#include "intel_pm.h"
  32#include "intel_rc6.h"
  33#include "intel_renderstate.h"
  34#include "intel_rps.h"
  35#include "intel_sa_media.h"
  36#include "intel_gt_sysfs.h"
  37#include "intel_uncore.h"
  38#include "shmem_utils.h"
  39
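     /*
      * Early, software-only initialisation shared by the root GT and any
      * additional tile/media GTs: locks, lists, buffer pool, reset/request/
      * timeline tracking, TLB-invalidation serialisation, PM, WOPCM, uC and
      * RPS state.
      */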
  40void intel_gt_common_init_early(struct intel_gt *gt)
  41{
  42	spin_lock_init(gt->irq_lock);
  43
  44	INIT_LIST_HEAD(&gt->closed_vma);
  45	spin_lock_init(&gt->closed_lock);
  46
  47	init_llist_head(&gt->watchdog.list);
  48	INIT_WORK(&gt->watchdog.work, intel_gt_watchdog_work);
  49
  50	intel_gt_init_buffer_pool(gt);
  51	intel_gt_init_reset(gt);
  52	intel_gt_init_requests(gt);
  53	intel_gt_init_timelines(gt);
  54	mutex_init(&gt->tlb.invalidate_lock);
  55	seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
  56	intel_gt_pm_init_early(gt);
  57
  58	intel_wopcm_init_early(&gt->wopcm);
  59	intel_uc_init_early(&gt->uc);
  60	intel_rps_init_early(&gt->rps);
  61}
  62
  63/* Preliminary initialization of Tile 0 */
  64int intel_root_gt_init_early(struct drm_i915_private *i915)
  65{
  66	struct intel_gt *gt = to_gt(i915);
  67
  68	gt->i915 = i915;
  69	gt->uncore = &i915->uncore;
  70	gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock), GFP_KERNEL);
  71	if (!gt->irq_lock)
  72		return -ENOMEM;
  73
  74	intel_gt_common_init_early(gt);
  75
  76	return 0;
  77}
  78
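     /*
      * Probe device-local memory (LMEM) for this GT and register it as the
      * memory region matching the GT instance; the absence of LMEM (-ENODEV
      * from intel_gt_setup_lmem()) is not treated as an error.
      */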
  79static int intel_gt_probe_lmem(struct intel_gt *gt)
  80{
  81	struct drm_i915_private *i915 = gt->i915;
  82	unsigned int instance = gt->info.id;
  83	int id = INTEL_REGION_LMEM_0 + instance;
  84	struct intel_memory_region *mem;
  85	int err;
  86
  87	mem = intel_gt_setup_lmem(gt);
  88	if (IS_ERR(mem)) {
  89		err = PTR_ERR(mem);
  90		if (err == -ENODEV)
  91			return 0;
  92
  93		drm_err(&i915->drm,
  94			"Failed to setup region(%d) type=%d\n",
  95			err, INTEL_MEMORY_LOCAL);
  96		return err;
  97	}
  98
  99	mem->id = id;
 100	mem->instance = instance;
 101
 102	intel_memory_region_set_name(mem, "local%u", mem->instance);
 103
 104	GEM_BUG_ON(!HAS_REGION(i915, id));
 105	GEM_BUG_ON(i915->mm.regions[id]);
 106	i915->mm.regions[id] = mem;
 107
 108	return 0;
 109}
 110
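     /* Allocate this GT's GGTT structure as a drm-managed allocation. */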
 111int intel_gt_assign_ggtt(struct intel_gt *gt)
 112{
 113	gt->ggtt = drmm_kzalloc(&gt->i915->drm, sizeof(*gt->ggtt), GFP_KERNEL);
 114
 115	return gt->ggtt ? 0 : -ENOMEM;
 116}
 117
 118int intel_gt_init_mmio(struct intel_gt *gt)
 119{
 120	intel_gt_init_clock_frequency(gt);
 121
 122	intel_uc_init_mmio(&gt->uc);
 123	intel_sseu_info_init(gt);
 124	intel_gt_mcr_init(gt);
 125
 126	return intel_engines_init_mmio(gt);
 127}
 128
 129static void init_unused_ring(struct intel_gt *gt, u32 base)
 130{
 131	struct intel_uncore *uncore = gt->uncore;
 132
 133	intel_uncore_write(uncore, RING_CTL(base), 0);
 134	intel_uncore_write(uncore, RING_HEAD(base), 0);
 135	intel_uncore_write(uncore, RING_TAIL(base), 0);
 136	intel_uncore_write(uncore, RING_START(base), 0);
 137}
 138
 139static void init_unused_rings(struct intel_gt *gt)
 140{
 141	struct drm_i915_private *i915 = gt->i915;
 142
 143	if (IS_I830(i915)) {
 144		init_unused_ring(gt, PRB1_BASE);
 145		init_unused_ring(gt, SRB0_BASE);
 146		init_unused_ring(gt, SRB1_BASE);
 147		init_unused_ring(gt, SRB2_BASE);
 148		init_unused_ring(gt, SRB3_BASE);
 149	} else if (GRAPHICS_VER(i915) == 2) {
 150		init_unused_ring(gt, SRB0_BASE);
 151		init_unused_ring(gt, SRB1_BASE);
 152	} else if (GRAPHICS_VER(i915) == 3) {
 153		init_unused_ring(gt, PRB1_BASE);
 154		init_unused_ring(gt, PRB2_BASE);
 155	}
 156}
 157
 158int intel_gt_init_hw(struct intel_gt *gt)
 159{
 160	struct drm_i915_private *i915 = gt->i915;
 161	struct intel_uncore *uncore = gt->uncore;
 162	int ret;
 163
 164	gt->last_init_time = ktime_get();
 165
 166	/* Double layer security blanket, see i915_gem_init() */
 167	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 168
 169	if (HAS_EDRAM(i915) && GRAPHICS_VER(i915) < 9)
 170		intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
 171
 172	if (IS_HASWELL(i915))
 173		intel_uncore_write(uncore,
 174				   HSW_MI_PREDICATE_RESULT_2,
 175				   IS_HSW_GT3(i915) ?
 176				   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 177
 178	/* Apply the GT workarounds... */
 179	intel_gt_apply_workarounds(gt);
 180	/* ...and determine whether they are sticking. */
 181	intel_gt_verify_workarounds(gt, "init");
 182
 183	intel_gt_init_swizzling(gt);
 184
 185	/*
 186	 * At least 830 can leave some of the unused rings
 187	 * "active" (ie. head != tail) after resume which
  188	 * will prevent c3 entry. Make sure all unused rings
 189	 * are totally idle.
 190	 */
 191	init_unused_rings(gt);
 192
 193	ret = i915_ppgtt_init_hw(gt);
 194	if (ret) {
 195		drm_err(&i915->drm, "Enabling PPGTT failed (%d)\n", ret);
 196		goto out;
 197	}
 198
 199	/* We can't enable contexts until all firmware is loaded */
 200	ret = intel_uc_init_hw(&gt->uc);
 201	if (ret) {
 202		i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
 203		goto out;
 204	}
 205
 206	intel_mocs_init(gt);
 207
 208out:
 209	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
 210	return ret;
 211}
 212
 213static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
 214{
 215	intel_uncore_rmw(uncore, reg, 0, set);
 216}
 217
 218static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
 219{
 220	intel_uncore_rmw(uncore, reg, clr, 0);
 221}
 222
 223static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
 224{
 225	intel_uncore_rmw(uncore, reg, 0, 0);
 226}
 227
 228static void gen6_clear_engine_error_register(struct intel_engine_cs *engine)
 229{
 230	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
 231	GEN6_RING_FAULT_REG_POSTING_READ(engine);
 232}
 233
 234i915_reg_t intel_gt_perf_limit_reasons_reg(struct intel_gt *gt)
 235{
 236	/* GT0_PERF_LIMIT_REASONS is available only for Gen11+ */
 237	if (GRAPHICS_VER(gt->i915) < 11)
 238		return INVALID_MMIO_REG;
 239
 240	return gt->type == GT_MEDIA ?
 241		MTL_MEDIA_PERF_LIMIT_REASONS : GT0_PERF_LIMIT_REASONS;
 242}
 243
 244void
 245intel_gt_clear_error_registers(struct intel_gt *gt,
 246			       intel_engine_mask_t engine_mask)
 247{
 248	struct drm_i915_private *i915 = gt->i915;
 249	struct intel_uncore *uncore = gt->uncore;
 250	u32 eir;
 251
 252	if (GRAPHICS_VER(i915) != 2)
 253		clear_register(uncore, PGTBL_ER);
 254
 255	if (GRAPHICS_VER(i915) < 4)
 256		clear_register(uncore, IPEIR(RENDER_RING_BASE));
 257	else
 258		clear_register(uncore, IPEIR_I965);
 259
 260	clear_register(uncore, EIR);
 261	eir = intel_uncore_read(uncore, EIR);
 262	if (eir) {
 263		/*
  264		 * Some errors might have become stuck;
  265		 * mask them.
 266		 */
 267		drm_dbg(&gt->i915->drm, "EIR stuck: 0x%08x, masking\n", eir);
 268		rmw_set(uncore, EMR, eir);
 269		intel_uncore_write(uncore, GEN2_IIR,
 270				   I915_MASTER_ERROR_INTERRUPT);
 271	}
 272
 273	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
 274		intel_gt_mcr_multicast_rmw(gt, XEHP_RING_FAULT_REG,
 275					   RING_FAULT_VALID, 0);
 276		intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
 277	} else if (GRAPHICS_VER(i915) >= 12) {
 278		rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
 279		intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
 280	} else if (GRAPHICS_VER(i915) >= 8) {
 281		rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
 282		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
 283	} else if (GRAPHICS_VER(i915) >= 6) {
 284		struct intel_engine_cs *engine;
 285		enum intel_engine_id id;
 286
 287		for_each_engine_masked(engine, gt, engine_mask, id)
 288			gen6_clear_engine_error_register(engine);
 289	}
 290}
 291
 292static void gen6_check_faults(struct intel_gt *gt)
 293{
 294	struct intel_engine_cs *engine;
 295	enum intel_engine_id id;
 296	u32 fault;
 297
 298	for_each_engine(engine, gt, id) {
 299		fault = GEN6_RING_FAULT_REG_READ(engine);
 300		if (fault & RING_FAULT_VALID) {
 301			drm_dbg(&engine->i915->drm, "Unexpected fault\n"
 302				"\tAddr: 0x%08lx\n"
 303				"\tAddress space: %s\n"
 304				"\tSource ID: %d\n"
 305				"\tType: %d\n",
 306				fault & PAGE_MASK,
 307				fault & RING_FAULT_GTTSEL_MASK ?
 308				"GGTT" : "PPGTT",
 309				RING_FAULT_SRCID(fault),
 310				RING_FAULT_FAULT_TYPE(fault));
 311		}
 312	}
 313}
 314
 315static void xehp_check_faults(struct intel_gt *gt)
 316{
 317	u32 fault;
 318
 319	/*
 320	 * Although the fault register now lives in an MCR register range,
 321	 * the GAM registers are special and we only truly need to read
 322	 * the "primary" GAM instance rather than handling each instance
 323	 * individually.  intel_gt_mcr_read_any() will automatically steer
 324	 * toward the primary instance.
 325	 */
 326	fault = intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
 327	if (fault & RING_FAULT_VALID) {
 328		u32 fault_data0, fault_data1;
 329		u64 fault_addr;
 330
 331		fault_data0 = intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA0);
 332		fault_data1 = intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA1);
 333
 334		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
 335			     ((u64)fault_data0 << 12);
 336
 337		drm_dbg(&gt->i915->drm, "Unexpected fault\n"
 338			"\tAddr: 0x%08x_%08x\n"
 339			"\tAddress space: %s\n"
 340			"\tEngine ID: %d\n"
 341			"\tSource ID: %d\n"
 342			"\tType: %d\n",
 343			upper_32_bits(fault_addr), lower_32_bits(fault_addr),
 344			fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
 345			GEN8_RING_FAULT_ENGINE_ID(fault),
 346			RING_FAULT_SRCID(fault),
 347			RING_FAULT_FAULT_TYPE(fault));
 348	}
 349}
 350
 351static void gen8_check_faults(struct intel_gt *gt)
 352{
 353	struct intel_uncore *uncore = gt->uncore;
 354	i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
 355	u32 fault;
 356
 357	if (GRAPHICS_VER(gt->i915) >= 12) {
 358		fault_reg = GEN12_RING_FAULT_REG;
 359		fault_data0_reg = GEN12_FAULT_TLB_DATA0;
 360		fault_data1_reg = GEN12_FAULT_TLB_DATA1;
 361	} else {
 362		fault_reg = GEN8_RING_FAULT_REG;
 363		fault_data0_reg = GEN8_FAULT_TLB_DATA0;
 364		fault_data1_reg = GEN8_FAULT_TLB_DATA1;
 365	}
 366
 367	fault = intel_uncore_read(uncore, fault_reg);
 368	if (fault & RING_FAULT_VALID) {
 369		u32 fault_data0, fault_data1;
 370		u64 fault_addr;
 371
 372		fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
 373		fault_data1 = intel_uncore_read(uncore, fault_data1_reg);
 374
 375		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
 376			     ((u64)fault_data0 << 12);
 377
 378		drm_dbg(&uncore->i915->drm, "Unexpected fault\n"
 379			"\tAddr: 0x%08x_%08x\n"
 380			"\tAddress space: %s\n"
 381			"\tEngine ID: %d\n"
 382			"\tSource ID: %d\n"
 383			"\tType: %d\n",
 384			upper_32_bits(fault_addr), lower_32_bits(fault_addr),
 385			fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
 386			GEN8_RING_FAULT_ENGINE_ID(fault),
 387			RING_FAULT_SRCID(fault),
 388			RING_FAULT_FAULT_TYPE(fault));
 389	}
 390}
 391
 392void intel_gt_check_and_clear_faults(struct intel_gt *gt)
 393{
 394	struct drm_i915_private *i915 = gt->i915;
 395
 396	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
 397	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
 398		xehp_check_faults(gt);
 399	else if (GRAPHICS_VER(i915) >= 8)
 400		gen8_check_faults(gt);
 401	else if (GRAPHICS_VER(i915) >= 6)
 402		gen6_check_faults(gt);
 403	else
 404		return;
 405
 406	intel_gt_clear_error_registers(gt, ALL_ENGINES);
 407}
 408
 409void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
 410{
 411	struct intel_uncore *uncore = gt->uncore;
 412	intel_wakeref_t wakeref;
 413
 414	/*
 415	 * No actual flushing is required for the GTT write domain for reads
 416	 * from the GTT domain. Writes to it "immediately" go to main memory
 417	 * as far as we know, so there's no chipset flush. It also doesn't
 418	 * land in the GPU render cache.
 419	 *
 420	 * However, we do have to enforce the order so that all writes through
 421	 * the GTT land before any writes to the device, such as updates to
 422	 * the GATT itself.
 423	 *
 424	 * We also have to wait a bit for the writes to land from the GTT.
 425	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
 426	 * timing. This issue has only been observed when switching quickly
 427	 * between GTT writes and CPU reads from inside the kernel on recent hw,
 428	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
 429	 * system agents we cannot reproduce this behaviour, until Cannonlake
 430	 * that was!).
 431	 */
 432
 433	wmb();
 434
 435	if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
 436		return;
 437
 438	intel_gt_chipset_flush(gt);
 439
 440	with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
 441		unsigned long flags;
 442
 443		spin_lock_irqsave(&uncore->lock, flags);
 444		intel_uncore_posting_read_fw(uncore,
 445					     RING_HEAD(RENDER_RING_BASE));
 446		spin_unlock_irqrestore(&uncore->lock, flags);
 447	}
 448}
 449
 450void intel_gt_chipset_flush(struct intel_gt *gt)
 451{
 452	wmb();
 453	if (GRAPHICS_VER(gt->i915) < 6)
 454		intel_ggtt_gmch_flush();
 455}
 456
 457void intel_gt_driver_register(struct intel_gt *gt)
 458{
 459	intel_gsc_init(&gt->gsc, gt->i915);
 460
 461	intel_rps_driver_register(&gt->rps);
 462
 463	intel_gt_debugfs_register(gt);
 464	intel_gt_sysfs_register(gt);
 465}
 466
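     /*
      * Allocate and pin the per-GT scratch buffer, preferring LMEM, then
      * stolen memory, then internal pages as backing store.
      */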
 467static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
 468{
 469	struct drm_i915_private *i915 = gt->i915;
 470	struct drm_i915_gem_object *obj;
 471	struct i915_vma *vma;
 472	int ret;
 473
 474	obj = i915_gem_object_create_lmem(i915, size,
 475					  I915_BO_ALLOC_VOLATILE |
 476					  I915_BO_ALLOC_GPU_ONLY);
 477	if (IS_ERR(obj))
 478		obj = i915_gem_object_create_stolen(i915, size);
 479	if (IS_ERR(obj))
 480		obj = i915_gem_object_create_internal(i915, size);
 481	if (IS_ERR(obj)) {
 482		drm_err(&i915->drm, "Failed to allocate scratch page\n");
 483		return PTR_ERR(obj);
 484	}
 485
 486	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
 487	if (IS_ERR(vma)) {
 488		ret = PTR_ERR(vma);
 489		goto err_unref;
 490	}
 491
 492	ret = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
 493	if (ret)
 494		goto err_unref;
 495
 496	gt->scratch = i915_vma_make_unshrinkable(vma);
 497
 498	return 0;
 499
 500err_unref:
 501	i915_gem_object_put(obj);
 502	return ret;
 503}
 504
 505static void intel_gt_fini_scratch(struct intel_gt *gt)
 506{
 507	i915_vma_unpin_and_release(&gt->scratch, 0);
 508}
 509
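     /*
      * Address space used for gt->vm: a full PPGTT where the platform
      * supports more than aliasing PPGTT, otherwise a reference to the GGTT.
      */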
 510static struct i915_address_space *kernel_vm(struct intel_gt *gt)
 511{
 512	if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
 513		return &i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY)->vm;
 514	else
 515		return i915_vm_get(&gt->ggtt->vm);
 516}
 517
 518static int __engines_record_defaults(struct intel_gt *gt)
 519{
 520	struct i915_request *requests[I915_NUM_ENGINES] = {};
 521	struct intel_engine_cs *engine;
 522	enum intel_engine_id id;
 523	int err = 0;
 524
 525	/*
  526	 * As we reset the GPU during very early sanitisation, the current
  527	 * register state on the GPU should reflect its default values.
 528	 * We load a context onto the hw (with restore-inhibit), then switch
 529	 * over to a second context to save that default register state. We
 530	 * can then prime every new context with that state so they all start
 531	 * from the same default HW values.
 532	 */
 533
 534	for_each_engine(engine, gt, id) {
 535		struct intel_renderstate so;
 536		struct intel_context *ce;
 537		struct i915_request *rq;
 538
 539		/* We must be able to switch to something! */
 540		GEM_BUG_ON(!engine->kernel_context);
 541
 542		ce = intel_context_create(engine);
 543		if (IS_ERR(ce)) {
 544			err = PTR_ERR(ce);
 545			goto out;
 546		}
 547
 548		err = intel_renderstate_init(&so, ce);
 549		if (err)
 550			goto err;
 551
 552		rq = i915_request_create(ce);
 553		if (IS_ERR(rq)) {
 554			err = PTR_ERR(rq);
 555			goto err_fini;
 556		}
 557
 558		err = intel_engine_emit_ctx_wa(rq);
 559		if (err)
 560			goto err_rq;
 561
 562		err = intel_renderstate_emit(&so, rq);
 563		if (err)
 564			goto err_rq;
 565
 566err_rq:
 567		requests[id] = i915_request_get(rq);
 568		i915_request_add(rq);
 569err_fini:
 570		intel_renderstate_fini(&so, ce);
 571err:
 572		if (err) {
 573			intel_context_put(ce);
 574			goto out;
 575		}
 576	}
 577
 578	/* Flush the default context image to memory, and enable powersaving. */
 579	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
 580		err = -EIO;
 581		goto out;
 582	}
 583
 584	for (id = 0; id < ARRAY_SIZE(requests); id++) {
 585		struct i915_request *rq;
 586		struct file *state;
 587
 588		rq = requests[id];
 589		if (!rq)
 590			continue;
 591
 592		if (rq->fence.error) {
 593			err = -EIO;
 594			goto out;
 595		}
 596
 597		GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
 598		if (!rq->context->state)
 599			continue;
 600
 601		/* Keep a copy of the state's backing pages; free the obj */
 602		state = shmem_create_from_object(rq->context->state->obj);
 603		if (IS_ERR(state)) {
 604			err = PTR_ERR(state);
 605			goto out;
 606		}
 607		rq->engine->default_state = state;
 608	}
 609
 610out:
 611	/*
 612	 * If we have to abandon now, we expect the engines to be idle
 613	 * and ready to be torn-down. The quickest way we can accomplish
 614	 * this is by declaring ourselves wedged.
 615	 */
 616	if (err)
 617		intel_gt_set_wedged(gt);
 618
 619	for (id = 0; id < ARRAY_SIZE(requests); id++) {
 620		struct intel_context *ce;
 621		struct i915_request *rq;
 622
 623		rq = requests[id];
 624		if (!rq)
 625			continue;
 626
 627		ce = rq->context;
 628		i915_request_put(rq);
 629		intel_context_put(ce);
 630	}
 631	return err;
 632}
 633
 634static int __engines_verify_workarounds(struct intel_gt *gt)
 635{
 636	struct intel_engine_cs *engine;
 637	enum intel_engine_id id;
 638	int err = 0;
 639
 640	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
 641		return 0;
 642
 643	for_each_engine(engine, gt, id) {
 644		if (intel_engine_verify_workarounds(engine, "load"))
 645			err = -EIO;
 646	}
 647
 648	/* Flush and restore the kernel context for safety */
 649	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME)
 650		err = -EIO;
 651
 652	return err;
 653}
 654
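     /* Quiesce the GT on teardown by marking it wedged-on-fini and running the suspend paths. */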
 655static void __intel_gt_disable(struct intel_gt *gt)
 656{
 657	intel_gt_set_wedged_on_fini(gt);
 658
 659	intel_gt_suspend_prepare(gt);
 660	intel_gt_suspend_late(gt);
 661
 662	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
 663}
 664
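     /*
      * Retire outstanding requests until the GT is idle or the timeout
      * expires, then give the uC whatever time remains to go idle too.
      */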
 665int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
 666{
 667	long remaining_timeout;
 668
 669	/* If the device is asleep, we have no requests outstanding */
 670	if (!intel_gt_pm_is_awake(gt))
 671		return 0;
 672
 673	while ((timeout = intel_gt_retire_requests_timeout(gt, timeout,
 674							   &remaining_timeout)) > 0) {
 675		cond_resched();
 676		if (signal_pending(current))
 677			return -EINTR;
 678	}
 679
 680	if (timeout)
 681		return timeout;
 682
 683	if (remaining_timeout < 0)
 684		remaining_timeout = 0;
 685
 686	return intel_uc_wait_for_idle(&gt->uc, remaining_timeout);
 687}
 688
 689int intel_gt_init(struct intel_gt *gt)
 690{
 691	int err;
 692
 693	err = i915_inject_probe_error(gt->i915, -ENODEV);
 694	if (err)
 695		return err;
 696
 697	intel_gt_init_workarounds(gt);
 698
 699	/*
 700	 * This is just a security blanket to placate dragons.
 701	 * On some systems, we very sporadically observe that the first TLBs
 702	 * used by the CS may be stale, despite us poking the TLB reset. If
 703	 * we hold the forcewake during initialisation these problems
 704	 * just magically go away.
 705	 */
 706	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
 707
 708	err = intel_gt_init_scratch(gt,
 709				    GRAPHICS_VER(gt->i915) == 2 ? SZ_256K : SZ_4K);
 710	if (err)
 711		goto out_fw;
 712
 713	intel_gt_pm_init(gt);
 714
 715	gt->vm = kernel_vm(gt);
 716	if (!gt->vm) {
 717		err = -ENOMEM;
 718		goto err_pm;
 719	}
 720
 721	intel_set_mocs_index(gt);
 722
 723	err = intel_engines_init(gt);
 724	if (err)
 725		goto err_engines;
 726
 727	err = intel_uc_init(&gt->uc);
 728	if (err)
 729		goto err_engines;
 730
 731	err = intel_gt_resume(gt);
 732	if (err)
 733		goto err_uc_init;
 734
 735	err = intel_gt_init_hwconfig(gt);
 736	if (err)
 737		drm_err(&gt->i915->drm, "Failed to retrieve hwconfig table: %pe\n",
 738			ERR_PTR(err));
 739
 740	err = __engines_record_defaults(gt);
 741	if (err)
 742		goto err_gt;
 743
 744	err = __engines_verify_workarounds(gt);
 745	if (err)
 746		goto err_gt;
 747
 748	intel_uc_init_late(&gt->uc);
 749
 750	err = i915_inject_probe_error(gt->i915, -EIO);
 751	if (err)
 752		goto err_gt;
 753
 754	intel_migrate_init(&gt->migrate, gt);
 755
 756	intel_pxp_init(&gt->pxp);
 757
 758	goto out_fw;
 759err_gt:
 760	__intel_gt_disable(gt);
 761	intel_uc_fini_hw(&gt->uc);
 762err_uc_init:
 763	intel_uc_fini(&gt->uc);
 764err_engines:
 765	intel_engines_release(gt);
 766	i915_vm_put(fetch_and_zero(&gt->vm));
 767err_pm:
 768	intel_gt_pm_fini(gt);
 769	intel_gt_fini_scratch(gt);
 770out_fw:
 771	if (err)
 772		intel_gt_set_wedged_on_init(gt);
 773	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
 774	return err;
 775}
 776
 777void intel_gt_driver_remove(struct intel_gt *gt)
 778{
 779	__intel_gt_disable(gt);
 780
 781	intel_migrate_fini(&gt->migrate);
 782	intel_uc_driver_remove(&gt->uc);
 783
 784	intel_engines_release(gt);
 785
 786	intel_gt_flush_buffer_pool(gt);
 787}
 788
 789void intel_gt_driver_unregister(struct intel_gt *gt)
 790{
 791	intel_wakeref_t wakeref;
 792
 793	intel_gt_sysfs_unregister(gt);
 794	intel_rps_driver_unregister(&gt->rps);
 795	intel_gsc_fini(&gt->gsc);
 796
 797	intel_pxp_fini(&gt->pxp);
 798
 799	/*
 800	 * Upon unregistering the device to prevent any new users, cancel
 801	 * all in-flight requests so that we can quickly unbind the active
 802	 * resources.
 803	 */
 804	intel_gt_set_wedged_on_fini(gt);
 805
 806	/* Scrub all HW state upon release */
 807	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
 808		__intel_gt_reset(gt, ALL_ENGINES);
 809}
 810
 811void intel_gt_driver_release(struct intel_gt *gt)
 812{
 813	struct i915_address_space *vm;
 814
 815	vm = fetch_and_zero(&gt->vm);
 816	if (vm) /* FIXME being called twice on error paths :( */
 817		i915_vm_put(vm);
 818
 819	intel_wa_list_free(&gt->wa_list);
 820	intel_gt_pm_fini(gt);
 821	intel_gt_fini_scratch(gt);
 822	intel_gt_fini_buffer_pool(gt);
 823	intel_gt_fini_hwconfig(gt);
 824}
 825
 826void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
 827{
 828	struct intel_gt *gt;
 829	unsigned int id;
 830
 831	/* We need to wait for inflight RCU frees to release their grip */
 832	rcu_barrier();
 833
 834	for_each_gt(gt, i915, id) {
 835		intel_uc_driver_late_release(&gt->uc);
 836		intel_gt_fini_requests(gt);
 837		intel_gt_fini_reset(gt);
 838		intel_gt_fini_timelines(gt);
 839		mutex_destroy(&gt->tlb.invalidate_lock);
 840		intel_engines_free(gt);
 841	}
 842}
 843
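     /*
      * Per-tile setup: non-root GTs get their own uncore and irq_lock (the
      * root GT reuses those from intel_root_gt_init_early()), then the
      * tile's MMIO range is mapped at @phys_addr.
      */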
 844static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
 845{
 846	int ret;
 847
 848	if (!gt_is_root(gt)) {
 849		struct intel_uncore *uncore;
 850		spinlock_t *irq_lock;
 851
 852		uncore = drmm_kzalloc(&gt->i915->drm, sizeof(*uncore), GFP_KERNEL);
 853		if (!uncore)
 854			return -ENOMEM;
 855
 856		irq_lock = drmm_kzalloc(&gt->i915->drm, sizeof(*irq_lock), GFP_KERNEL);
 857		if (!irq_lock)
 858			return -ENOMEM;
 859
 860		gt->uncore = uncore;
 861		gt->irq_lock = irq_lock;
 862
 863		intel_gt_common_init_early(gt);
 864	}
 865
 866	intel_uncore_init_early(gt->uncore, gt);
 867
 868	ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
 869	if (ret)
 870		return ret;
 871
 872	gt->phys_addr = phys_addr;
 873
 874	return 0;
 875}
 876
 877int intel_gt_probe_all(struct drm_i915_private *i915)
 878{
 879	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
 880	struct intel_gt *gt = &i915->gt0;
 881	const struct intel_gt_definition *gtdef;
 882	phys_addr_t phys_addr;
 883	unsigned int mmio_bar;
 884	unsigned int i;
 885	int ret;
 886
 887	mmio_bar = intel_mmio_bar(GRAPHICS_VER(i915));
 888	phys_addr = pci_resource_start(pdev, mmio_bar);
 889
 890	/*
 891	 * We always have at least one primary GT on any device
  892	 * and it has already been initialized early during probe
 893	 * in i915_driver_probe()
 894	 */
 895	gt->i915 = i915;
 896	gt->name = "Primary GT";
 897	gt->info.engine_mask = RUNTIME_INFO(i915)->platform_engine_mask;
 898
 899	drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
 900	ret = intel_gt_tile_setup(gt, phys_addr);
 901	if (ret)
 902		return ret;
 903
 904	i915->gt[0] = gt;
 905
 906	if (!HAS_EXTRA_GT_LIST(i915))
 907		return 0;
 908
 909	for (i = 1, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1];
 910	     gtdef->name != NULL;
 911	     i++, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]) {
 912		gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL);
 913		if (!gt) {
 914			ret = -ENOMEM;
 915			goto err;
 916		}
 917
 918		gt->i915 = i915;
 919		gt->name = gtdef->name;
 920		gt->type = gtdef->type;
 921		gt->info.engine_mask = gtdef->engine_mask;
 922		gt->info.id = i;
 923
 924		drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
 925		if (GEM_WARN_ON(range_overflows_t(resource_size_t,
 926						  gtdef->mapping_base,
 927						  SZ_16M,
 928						  pci_resource_len(pdev, mmio_bar)))) {
 929			ret = -ENODEV;
 930			goto err;
 931		}
 932
 933		switch (gtdef->type) {
 934		case GT_TILE:
 935			ret = intel_gt_tile_setup(gt, phys_addr + gtdef->mapping_base);
 936			break;
 937
 938		case GT_MEDIA:
 939			ret = intel_sa_mediagt_setup(gt, phys_addr + gtdef->mapping_base,
 940						     gtdef->gsi_offset);
 941			break;
 942
 943		case GT_PRIMARY:
 944			/* Primary GT should not appear in extra GT list */
 945		default:
 946			MISSING_CASE(gtdef->type);
 947			ret = -ENODEV;
 948		}
 949
 950		if (ret)
 951			goto err;
 952
 953		i915->gt[i] = gt;
 954	}
 955
 956	return 0;
 957
 958err:
 959	i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
 960	intel_gt_release_all(i915);
 961
 962	return ret;
 963}
 964
 965int intel_gt_tiles_init(struct drm_i915_private *i915)
 966{
 967	struct intel_gt *gt;
 968	unsigned int id;
 969	int ret;
 970
 971	for_each_gt(gt, i915, id) {
 972		ret = intel_gt_probe_lmem(gt);
 973		if (ret)
 974			return ret;
 975	}
 976
 977	return 0;
 978}
 979
 980void intel_gt_release_all(struct drm_i915_private *i915)
 981{
 982	struct intel_gt *gt;
 983	unsigned int id;
 984
 985	for_each_gt(gt, i915, id)
 986		i915->gt[id] = NULL;
 987}
 988
 989void intel_gt_info_print(const struct intel_gt_info *info,
 990			 struct drm_printer *p)
 991{
 992	drm_printf(p, "available engines: %x\n", info->engine_mask);
 993
 994	intel_sseu_dump(&info->sseu, p);
 995}
 996
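     /*
      * TLB invalidation register (legacy or MCR flavour) and the bit to
      * write/poll for a given engine.
      */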
 997struct reg_and_bit {
 998	union {
 999		i915_reg_t reg;
1000		i915_mcr_reg_t mcr_reg;
1001	};
1002	u32 bit;
1003};
1004
1005static struct reg_and_bit
1006get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
1007		const i915_reg_t *regs, const unsigned int num)
1008{
1009	const unsigned int class = engine->class;
1010	struct reg_and_bit rb = { };
1011
1012	if (drm_WARN_ON_ONCE(&engine->i915->drm,
1013			     class >= num || !regs[class].reg))
1014		return rb;
1015
1016	rb.reg = regs[class];
1017	if (gen8 && class == VIDEO_DECODE_CLASS)
1018		rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
1019	else
1020		rb.bit = engine->instance;
1021
1022	rb.bit = BIT(rb.bit);
1023
1024	return rb;
1025}
1026
1027/*
 1028 * The HW architecture suggests a typical invalidation time of 40us,
1029 * with pessimistic cases up to 100us and a recommendation to
1030 * cap at 1ms. We go a bit higher just in case.
1031 */
1032#define TLB_INVAL_TIMEOUT_US 100
1033#define TLB_INVAL_TIMEOUT_MS 4
1034
1035/*
1036 * On Xe_HP the TLB invalidation registers are located at the same MMIO offsets
1037 * but are now considered MCR registers.  Since they exist within a GAM range,
1038 * the primary instance of the register rolls up the status from each unit.
1039 */
1040static int wait_for_invalidate(struct intel_gt *gt, struct reg_and_bit rb)
1041{
1042	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
1043		return intel_gt_mcr_wait_for_reg(gt, rb.mcr_reg, rb.bit, 0,
1044						 TLB_INVAL_TIMEOUT_US,
1045						 TLB_INVAL_TIMEOUT_MS);
1046	else
1047		return __intel_wait_for_register_fw(gt->uncore, rb.reg, rb.bit, 0,
1048						    TLB_INVAL_TIMEOUT_US,
1049						    TLB_INVAL_TIMEOUT_MS,
1050						    NULL);
1051}
1052
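     /*
      * Write the per-engine-class TLB invalidation register for every awake
      * engine and then poll each for completion; the register layout differs
      * between gen8, gen12 and Xe_HP (MCR) platforms.
      */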
1053static void mmio_invalidate_full(struct intel_gt *gt)
1054{
1055	static const i915_reg_t gen8_regs[] = {
1056		[RENDER_CLASS]			= GEN8_RTCR,
1057		[VIDEO_DECODE_CLASS]		= GEN8_M1TCR, /* , GEN8_M2TCR */
1058		[VIDEO_ENHANCEMENT_CLASS]	= GEN8_VTCR,
1059		[COPY_ENGINE_CLASS]		= GEN8_BTCR,
1060	};
1061	static const i915_reg_t gen12_regs[] = {
1062		[RENDER_CLASS]			= GEN12_GFX_TLB_INV_CR,
1063		[VIDEO_DECODE_CLASS]		= GEN12_VD_TLB_INV_CR,
1064		[VIDEO_ENHANCEMENT_CLASS]	= GEN12_VE_TLB_INV_CR,
1065		[COPY_ENGINE_CLASS]		= GEN12_BLT_TLB_INV_CR,
1066		[COMPUTE_CLASS]			= GEN12_COMPCTX_TLB_INV_CR,
1067	};
1068	static const i915_mcr_reg_t xehp_regs[] = {
1069		[RENDER_CLASS]			= XEHP_GFX_TLB_INV_CR,
1070		[VIDEO_DECODE_CLASS]		= XEHP_VD_TLB_INV_CR,
1071		[VIDEO_ENHANCEMENT_CLASS]	= XEHP_VE_TLB_INV_CR,
1072		[COPY_ENGINE_CLASS]		= XEHP_BLT_TLB_INV_CR,
1073		[COMPUTE_CLASS]			= XEHP_COMPCTX_TLB_INV_CR,
1074	};
1075	struct drm_i915_private *i915 = gt->i915;
1076	struct intel_uncore *uncore = gt->uncore;
1077	struct intel_engine_cs *engine;
1078	intel_engine_mask_t awake, tmp;
1079	enum intel_engine_id id;
1080	const i915_reg_t *regs;
1081	unsigned int num = 0;
1082
1083	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
1084		regs = NULL;
1085		num = ARRAY_SIZE(xehp_regs);
1086	} else if (GRAPHICS_VER(i915) == 12) {
1087		regs = gen12_regs;
1088		num = ARRAY_SIZE(gen12_regs);
1089	} else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
1090		regs = gen8_regs;
1091		num = ARRAY_SIZE(gen8_regs);
1092	} else if (GRAPHICS_VER(i915) < 8) {
1093		return;
1094	}
1095
1096	if (drm_WARN_ONCE(&i915->drm, !num,
1097			  "Platform does not implement TLB invalidation!"))
1098		return;
1099
1100	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
1101
1102	spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */
1103
1104	awake = 0;
1105	for_each_engine(engine, gt, id) {
1106		struct reg_and_bit rb;
1107
1108		if (!intel_engine_pm_is_awake(engine))
1109			continue;
1110
1111		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
1112			u32 val = BIT(engine->instance);
1113
1114			if (engine->class == VIDEO_DECODE_CLASS ||
1115			    engine->class == VIDEO_ENHANCEMENT_CLASS ||
1116			    engine->class == COMPUTE_CLASS)
1117				val = _MASKED_BIT_ENABLE(val);
1118			intel_gt_mcr_multicast_write_fw(gt,
1119							xehp_regs[engine->class],
1120							val);
1121		} else {
1122			rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
1123			if (!i915_mmio_reg_offset(rb.reg))
1124				continue;
1125
1126			if (GRAPHICS_VER(i915) == 12 && (engine->class == VIDEO_DECODE_CLASS ||
1127			    engine->class == VIDEO_ENHANCEMENT_CLASS ||
1128			    engine->class == COMPUTE_CLASS))
1129				rb.bit = _MASKED_BIT_ENABLE(rb.bit);
1130
1131			intel_uncore_write_fw(uncore, rb.reg, rb.bit);
1132		}
1133		awake |= engine->mask;
1134	}
1135
1136	GT_TRACE(gt, "invalidated engines %08x\n", awake);
1137
1138	/* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
1139	if (awake &&
1140	    (IS_TIGERLAKE(i915) ||
1141	     IS_DG1(i915) ||
1142	     IS_ROCKETLAKE(i915) ||
1143	     IS_ALDERLAKE_S(i915) ||
1144	     IS_ALDERLAKE_P(i915)))
1145		intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);
1146
1147	spin_unlock_irq(&uncore->lock);
1148
1149	for_each_engine_masked(engine, gt, awake, tmp) {
1150		struct reg_and_bit rb;
1151
1152		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
1153			rb.mcr_reg = xehp_regs[engine->class];
1154			rb.bit = BIT(engine->instance);
1155		} else {
1156			rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
1157		}
1158
1159		if (wait_for_invalidate(gt, rb))
1160			drm_err_ratelimited(&gt->i915->drm,
1161					    "%s TLB invalidation did not complete in %ums!\n",
1162					    engine->name, TLB_INVAL_TIMEOUT_MS);
1163	}
1164
1165	/*
1166	 * Use delayed put since a) we mostly expect a flurry of TLB
1167	 * invalidations so it is good to avoid paying the forcewake cost and
1168	 * b) it works around a bug in Icelake which cannot cope with too rapid
1169	 * transitions.
1170	 */
1171	intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
1172}
1173
1174static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
1175{
1176	u32 cur = intel_gt_tlb_seqno(gt);
1177
1178	/* Only skip if a *full* TLB invalidate barrier has passed */
1179	return (s32)(cur - ALIGN(seqno, 2)) > 0;
1180}
1181
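     /*
      * Perform a full TLB invalidation unless one covering @seqno has already
      * completed; concurrent callers are serialised via tlb.invalidate_lock
      * and the invalidation is only performed while the GT is awake.
      */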
1182void intel_gt_invalidate_tlb(struct intel_gt *gt, u32 seqno)
1183{
1184	intel_wakeref_t wakeref;
1185
1186	if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
1187		return;
1188
1189	if (intel_gt_is_wedged(gt))
1190		return;
1191
1192	if (tlb_seqno_passed(gt, seqno))
1193		return;
1194
1195	with_intel_gt_pm_if_awake(gt, wakeref) {
1196		mutex_lock(&gt->tlb.invalidate_lock);
1197		if (tlb_seqno_passed(gt, seqno))
1198			goto unlock;
1199
1200		mmio_invalidate_full(gt);
1201
1202		write_seqcount_invalidate(&gt->tlb.seqno);
1203unlock:
1204		mutex_unlock(&gt->tlb.invalidate_lock);
1205	}
1206}