/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/oom.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>

#include "display/intel_acpi.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc.h"
#include "display/intel_dp.h"
#include "display/intel_dpt.h"
#include "display/intel_fbdev.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pch_refclk.h"
#include "display/intel_pipe_crc.h"
#include "display/intel_pps.h"
#include "display/intel_sprite.h"
#include "display/intel_vga.h"
#include "display/skl_watermark.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_create.h"
#include "gem/i915_gem_dmabuf.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"

#include "pxp/intel_pxp_pm.h"

#include "i915_file_private.h"
#include "i915_debugfs.h"
#include "i915_driver.h"
#include "i915_drm_client.h"
#include "i915_drv.h"
#include "i915_getparam.h"
#include "i915_hwmon.h"
#include "i915_ioc32.h"
#include "i915_ioctl.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "i915_perf.h"
#include "i915_query.h"
#include "i915_suspend.h"
#include "i915_switcheroo.h"
#include "i915_sysfs.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_dram.h"
#include "intel_gvt.h"
#include "intel_memory_region.h"
#include "intel_pci_config.h"
#include "intel_pcode.h"
#include "intel_pm.h"
#include "intel_region_ttm.h"
#include "vlv_suspend.h"

/* Intel Rapid Start Technology ACPI device name */
static const char irst_name[] = "INT3392";

static const struct drm_driver i915_drm_driver;

static void i915_release_bridge_dev(struct drm_device *dev,
				    void *bridge)
{
	pci_dev_put(bridge);
}

static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
	int domain = pci_domain_nr(to_pci_dev(dev_priv->drm.dev)->bus);

	dev_priv->bridge_dev =
		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		drm_err(&dev_priv->drm, "bridge device not found\n");
		return -EIO;
	}

	return drmm_add_action_or_reset(&dev_priv->drm, i915_release_bridge_dev,
					dev_priv->bridge_dev);
}

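/*
 * Note on the managed action above: drmm_add_action_or_reset() ties
 * i915_release_bridge_dev() to the lifetime of the drm_device, so the
 * bridge reference taken by pci_get_domain_bus_and_slot() is dropped
 * automatically when the device is released. If registering the action
 * itself fails, the "or_reset" variant invokes the release function
 * immediately, so the reference cannot leak on the error path either.
 */
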
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
	int reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (GRAPHICS_VER(dev_priv) >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (GRAPHICS_VER(dev_priv) >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

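/*
 * Background (an assumption based on the register usage above): MCHBAR_I915
 * names a 32-bit base-address register in the bridge's config space, while
 * MCHBAR_I965 and later expose a 64-bit base, which is why the gen4+ paths
 * read and write an extra high dword at reg + 4 before combining the halves
 * into the 64-bit mchbar_addr.
 */
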
/* Setup MCHBAR if possible; record in mchbar_need_disable whether we must disable it again on teardown */
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev_priv))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->display.hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->display.hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n");

	return -ENOMEM;
}

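/*
 * Illustrative sketch (hypothetical names, not driver code): work queued on
 * the ordered dev_priv->wq executes strictly one item at a time, in
 * submission order, which is the guarantee the comment in
 * i915_workqueues_init() relies on:
 *
 *	INIT_WORK(&foo->work, foo_retire_fn);
 *	queue_work(dev_priv->wq, &foo->work);
 *
 * Also note that alloc_ordered_workqueue() returns NULL on failure, hence
 * the explicit NULL checks above rather than IS_ERR().
 */
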
static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->display.hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 *
 * Our policy for removing pre-production workarounds is to keep the
 * current gen workarounds as a guide to the bring-up of the next gen
 * (workarounds have a habit of persisting!). Anything older than that
 * should be removed along with the complications they introduce.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x6;
	pre |= IS_BROXTON(dev_priv) && INTEL_REVID(dev_priv) < 0xA;
	pre |= IS_KABYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
	pre |= IS_GEMINILAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x3;
	pre |= IS_ICELAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x7;

	if (pre) {
		drm_err(&dev_priv->drm, "This is a pre-production stepping. "
			  "It may not be fully functional.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}

static void sanitize_gpu(struct drm_i915_private *i915)
{
	if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) {
		struct intel_gt *gt;
		unsigned int i;

		for_each_gt(gt, i915, i)
			__intel_gt_reset(gt, ALL_ENGINES);
	}
}

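/*
 * sanitize_gpu() above resets every GT during load to scrub whatever state
 * the BIOS or a previous kernel left behind. It is deliberately skipped when
 * gpu_reset_clobbers_display is set: on those older platforms a full GPU
 * reset would also take down the display configuration inherited from boot.
 */
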
/**
 * i915_driver_early_probe - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	intel_device_info_runtime_init_early(dev_priv);

	intel_step_init(dev_priv);

	intel_uncore_mmio_debug_init_early(dev_priv);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->display.backlight.lock);

	mutex_init(&dev_priv->sb_lock);
	cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);

	mutex_init(&dev_priv->display.audio.mutex);
	mutex_init(&dev_priv->display.wm.wm_mutex);
	mutex_init(&dev_priv->display.pps.mutex);
	mutex_init(&dev_priv->display.hdcp.comp_mutex);
	spin_lock_init(&dev_priv->display.dkl.phy_lock);

	i915_memcpy_init_early(dev_priv);
	intel_runtime_pm_init_early(&dev_priv->runtime_pm);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		return ret;

	ret = vlv_suspend_init(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	ret = intel_region_ttm_device_init(dev_priv);
	if (ret)
		goto err_ttm;

	ret = intel_root_gt_init_early(dev_priv);
	if (ret < 0)
		goto err_rootgt;

	i915_drm_clients_init(&dev_priv->clients, dev_priv);

	i915_gem_init_early(dev_priv);

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_pm_setup(dev_priv);
	ret = intel_power_domains_init(dev_priv);
	if (ret < 0)
		goto err_gem;
	intel_irq_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	return 0;

err_gem:
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release_all(dev_priv);
	i915_drm_clients_fini(&dev_priv->clients);
err_rootgt:
	intel_region_ttm_device_fini(dev_priv);
err_ttm:
	vlv_suspend_cleanup(dev_priv);
err_workqueues:
	i915_workqueues_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_late_release - cleanup the setup done in
 *			       i915_driver_early_probe()
 * @dev_priv: device private
 */
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
	intel_irq_fini(dev_priv);
	intel_power_domains_cleanup(dev_priv);
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release_all(dev_priv);
	i915_drm_clients_fini(&dev_priv->clients);
	intel_region_ttm_device_fini(dev_priv);
	vlv_suspend_cleanup(dev_priv);
	i915_workqueues_cleanup(dev_priv);

	cpu_latency_qos_remove_request(&dev_priv->sb_qos);
	mutex_destroy(&dev_priv->sb_lock);

	i915_params_free(&dev_priv->params);
}

/**
 * i915_driver_mmio_probe - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	int ret, i;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	ret = i915_get_bridge_dev(dev_priv);
	if (ret < 0)
		return ret;

	for_each_gt(gt, dev_priv, i) {
		ret = intel_uncore_init_mmio(gt->uncore);
		if (ret)
			return ret;

		ret = drmm_add_action_or_reset(&dev_priv->drm,
					       intel_uncore_fini_mmio,
					       gt->uncore);
		if (ret)
			return ret;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);
	intel_device_info_runtime_init(dev_priv);

	for_each_gt(gt, dev_priv, i) {
		ret = intel_gt_init_mmio(gt);
		if (ret)
			goto err_uncore;
	}

	/* As early as possible, scrub existing GPU state before clobbering */
	sanitize_gpu(dev_priv);

	return 0;

err_uncore:
	intel_teardown_mchbar(dev_priv);

	return ret;
}

/**
 * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
 * @dev_priv: device private
 */
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
	intel_teardown_mchbar(dev_priv);
}

/**
 * i915_set_dma_info - set all relevant PCI dma info as configured for the
 * platform
 * @i915: valid i915 instance
 *
 * Set the dma max segment size, device and coherent masks.  The dma mask set
 * needs to occur before i915_ggtt_probe_hw.
 *
 * A couple of platforms have special needs.  Address them as well.
 *
 */
static int i915_set_dma_info(struct drm_i915_private *i915)
{
	unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size;
	int ret;

	GEM_BUG_ON(!mask_size);

	/*
	 * We don't have a max segment size, so set it to the max so sg's
	 * debugging layer doesn't complain
	 */
	dma_set_max_seg_size(i915->drm.dev, UINT_MAX);

	ret = dma_set_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
	if (ret)
		goto mask_err;

	/* overlay on gen2 is broken and can't address above 1G */
	if (GRAPHICS_VER(i915) == 2)
		mask_size = 30;

	/*
	 * 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_I965G(i915) || IS_I965GM(i915))
		mask_size = 32;

	ret = dma_set_coherent_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
	if (ret)
		goto mask_err;

	return 0;

mask_err:
	drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret);
	return ret;
}

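/*
 * Worked example for the mask widths chosen above, using the standard
 * DMA_BIT_MASK() definition from <linux/dma-mapping.h>:
 *
 *	DMA_BIT_MASK(30) == 0x3fffffffULL  -> addresses below 1 GiB (gen2)
 *	DMA_BIT_MASK(32) == 0xffffffffULL  -> addresses below 4 GiB (965G/GM)
 *
 * i.e. DMA_BIT_MASK(n) is (1ULL << n) - 1 for n < 64, which is how shrinking
 * mask_size restricts where coherent allocations may land.
 */
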
static int i915_pcode_init(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	int id, ret;

	for_each_gt(gt, i915, id) {
		ret = intel_pcode_init(gt->uncore);
		if (ret) {
			drm_err(&gt->i915->drm, "gt%d: intel_pcode_init failed %d\n", id, ret);
			return ret;
		}
	}

	return 0;
}

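/*
 * A hedged note on the loop above: intel_pcode_init() is understood to check
 * that the PCODE (power controller) firmware of each GT has finished
 * initializing before the driver starts issuing mailbox requests to it;
 * without this, later frequency and power requests could simply time out.
 */
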
/**
 * i915_driver_hw_probe - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct pci_dev *root_pdev;
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	if (HAS_PPGTT(dev_priv)) {
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_full_ppgtt(dev_priv)) {
			i915_report_error(dev_priv,
					  "incompatible vGPU found, support for isolated ppGTT required\n");
			return -ENXIO;
		}
	}

	if (HAS_EXECLISTS(dev_priv)) {
		/*
		 * Older GVT emulation depends upon intercepting CSB mmio,
		 * which we no longer use, preferring to use the HWSP cache
		 * instead.
		 */
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
			i915_report_error(dev_priv,
					  "old vGPU host found, support for HWSP emulation required\n");
			return -ENXIO;
		}
	}

	/* needs to be done before ggtt probe */
	intel_dram_edram_detect(dev_priv);

	ret = i915_set_dma_info(dev_priv);
	if (ret)
		return ret;

	i915_perf_init(dev_priv);

	ret = intel_gt_assign_ggtt(to_gt(dev_priv));
	if (ret)
		goto err_perf;

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		goto err_perf;

	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, dev_priv->drm.driver);
	if (ret)
		goto err_ggtt;

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		goto err_ggtt;

	ret = intel_memory_regions_hw_probe(dev_priv);
	if (ret)
		goto err_ggtt;

	ret = intel_gt_tiles_init(dev_priv);
	if (ret)
		goto err_mem_regions;

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm, "failed to enable GGTT\n");
		goto err_mem_regions;
	}

	pci_set_master(pdev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, and was defeatured. MSI interrupts seem to
	 * get lost on g4x as well, and interrupt delivery seems to stay
	 * properly dead afterwards. So we'll just disable them for all
	 * pre-gen5 chipsets.
	 *
	 * dp aux and gmbus irq on gen4 seems to be able to generate legacy
	 * interrupts even when in MSI mode. This results in spurious
	 * interrupt warnings if the legacy irq no. is shared with another
	 * device. The kernel then disables that interrupt source and so
	 * prevents the other device from working properly.
	 */
	if (GRAPHICS_VER(dev_priv) >= 5) {
		if (pci_enable_msi(pdev) < 0)
			drm_dbg(&dev_priv->drm, "can't enable MSI");
	}

	ret = intel_gvt_init(dev_priv);
	if (ret)
		goto err_msi;

	intel_opregion_setup(dev_priv);

	ret = i915_pcode_init(dev_priv);
	if (ret)
		goto err_msi;

	/*
	 * Fill the dram structure to get the system dram info. This will be
	 * used for memory latency calculation.
	 */
	intel_dram_detect(dev_priv);

	intel_bw_init_hw(dev_priv);

	/*
	 * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
	 * This should be totally removed when we handle the pci states properly
	 * on runtime PM and on s2idle cases.
	 */
	root_pdev = pcie_find_root_port(pdev);
	if (root_pdev)
		pci_d3cold_disable(root_pdev);

	return 0;

err_msi:
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
err_mem_regions:
	intel_memory_regions_driver_release(dev_priv);
err_ggtt:
	i915_ggtt_driver_release(dev_priv);
	i915_gem_drain_freed_objects(dev_priv);
	i915_ggtt_driver_late_release(dev_priv);
err_perf:
	i915_perf_fini(dev_priv);
	return ret;
}

/**
 * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
 * @dev_priv: device private
 */
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct pci_dev *root_pdev;

	i915_perf_fini(dev_priv);

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);

	root_pdev = pcie_find_root_port(pdev);
	if (root_pdev)
		pci_d3cold_enable(root_pdev);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;

	i915_gem_driver_register(dev_priv);
	i915_pmu_register(dev_priv);

	intel_vgpu_register(dev_priv);

	/* Reveal our presence to userspace */
	if (drm_dev_register(&dev_priv->drm, 0)) {
		drm_err(&dev_priv->drm,
			"Failed to register driver for userspace access!\n");
		return;
	}

	i915_debugfs_register(dev_priv);
	i915_setup_sysfs(dev_priv);

	/* Depends on sysfs having been initialized */
	i915_perf_register(dev_priv);

	for_each_gt(gt, dev_priv, i)
		intel_gt_driver_register(gt);

	i915_hwmon_register(dev_priv);

	intel_display_driver_register(dev_priv);

	intel_power_domains_enable(dev_priv);
	intel_runtime_pm_enable(&dev_priv->runtime_pm);

	intel_register_dsm_handler();

	if (i915_switcheroo_register(dev_priv))
		drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;

	i915_switcheroo_unregister(dev_priv);

	intel_unregister_dsm_handler();

	intel_runtime_pm_disable(&dev_priv->runtime_pm);
	intel_power_domains_disable(dev_priv);

	intel_display_driver_unregister(dev_priv);

	for_each_gt(gt, dev_priv, i)
		intel_gt_driver_unregister(gt);

	i915_hwmon_unregister(dev_priv);

	i915_perf_unregister(dev_priv);
	i915_pmu_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	drm_dev_unplug(&dev_priv->drm);

	i915_gem_driver_unregister(dev_priv);
}

void
i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p)
{
	drm_printf(p, "iommu: %s\n",
		   str_enabled_disabled(i915_vtd_active(i915)));
}

static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
	if (drm_debug_enabled(DRM_UT_DRIVER)) {
		struct drm_printer p = drm_debug_printer("i915 device info:");
		struct intel_gt *gt;
		unsigned int i;

		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
			   INTEL_DEVID(dev_priv),
			   INTEL_REVID(dev_priv),
			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
			   intel_subplatform(RUNTIME_INFO(dev_priv),
					     INTEL_INFO(dev_priv)->platform),
			   GRAPHICS_VER(dev_priv));

		intel_device_info_print(INTEL_INFO(dev_priv),
					RUNTIME_INFO(dev_priv), &p);
		i915_print_iommu_status(dev_priv, &p);
		for_each_gt(gt, dev_priv, i)
			intel_gt_info_print(&gt->info, &p);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		drm_info(&dev_priv->drm,
			 "DRM_I915_DEBUG_RUNTIME_PM enabled\n");
}

static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	struct intel_runtime_info *runtime;
	struct drm_i915_private *i915;

	i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver,
				  struct drm_i915_private, drm);
	if (IS_ERR(i915))
		return i915;

	pci_set_drvdata(pdev, i915);

	/* Device parameters start as a copy of module parameters. */
	i915_params_copy(&i915->params, &i915_modparams);

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(i915);
	memcpy(device_info, match_info, sizeof(*device_info));

	/* Initialize initial runtime info from static const data and pdev. */
	runtime = RUNTIME_INFO(i915);
	memcpy(runtime, &INTEL_INFO(i915)->__runtime, sizeof(*runtime));
	runtime->device_id = pdev->device;

	return i915;
}

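/*
 * Lifetime note for i915_driver_create() above: devm_drm_dev_alloc()
 * allocates struct drm_i915_private with its embedded drm_device as a
 * device-managed resource, which is why there is no matching kfree()
 * anywhere in this file; the memory goes away with the last drm_device
 * reference. A minimal sketch of the pattern (hypothetical names):
 *
 *	struct my_driver {
 *		struct drm_device drm;	// must be embedded
 *	};
 *	my = devm_drm_dev_alloc(&pdev->dev, &my_drm_driver,
 *				struct my_driver, drm);
 */
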
/**
 * i915_driver_probe - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver probe routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct drm_i915_private *i915;
	int ret;

	i915 = i915_driver_create(pdev, ent);
	if (IS_ERR(i915))
		return PTR_ERR(i915);

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_fini;

	ret = i915_driver_early_probe(i915);
	if (ret < 0)
		goto out_pci_disable;

	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	intel_vgpu_detect(i915);

	ret = intel_gt_probe_all(i915);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_mmio_probe(i915);
	if (ret < 0)
		goto out_tiles_cleanup;

	ret = i915_driver_hw_probe(i915);
	if (ret < 0)
		goto out_cleanup_mmio;

	ret = intel_modeset_init_noirq(i915);
	if (ret < 0)
		goto out_cleanup_hw;

	ret = intel_irq_install(i915);
	if (ret)
		goto out_cleanup_modeset;

	ret = intel_modeset_init_nogem(i915);
	if (ret)
		goto out_cleanup_irq;

	ret = i915_gem_init(i915);
	if (ret)
		goto out_cleanup_modeset2;

	ret = intel_modeset_init(i915);
	if (ret)
		goto out_cleanup_gem;

	i915_driver_register(i915);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	i915_welcome_messages(i915);

	i915->do_release = true;

	return 0;

out_cleanup_gem:
	i915_gem_suspend(i915);
	i915_gem_driver_remove(i915);
	i915_gem_driver_release(i915);
out_cleanup_modeset2:
	/* FIXME clean up the error path */
	intel_modeset_driver_remove(i915);
	intel_irq_uninstall(i915);
	intel_modeset_driver_remove_noirq(i915);
	goto out_cleanup_modeset;
out_cleanup_irq:
	intel_irq_uninstall(i915);
out_cleanup_modeset:
	intel_modeset_driver_remove_nogem(i915);
out_cleanup_hw:
	i915_driver_hw_remove(i915);
	intel_memory_regions_driver_release(i915);
	i915_ggtt_driver_release(i915);
	i915_gem_drain_freed_objects(i915);
	i915_ggtt_driver_late_release(i915);
out_cleanup_mmio:
	i915_driver_mmio_release(i915);
out_tiles_cleanup:
	intel_gt_release_all(i915);
out_runtime_pm_put:
	enable_rpm_wakeref_asserts(&i915->runtime_pm);
	i915_driver_late_release(i915);
out_pci_disable:
	pci_disable_device(pdev);
out_fini:
	i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
	return ret;
}

void i915_driver_remove(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	i915_driver_unregister(i915);

	/* Flush any external code that still may be under the RCU lock */
	synchronize_rcu();

	i915_gem_suspend(i915);

	intel_gvt_driver_remove(i915);

	intel_modeset_driver_remove(i915);

	intel_irq_uninstall(i915);

	intel_modeset_driver_remove_noirq(i915);

	i915_reset_error_state(i915);
	i915_gem_driver_remove(i915);

	intel_modeset_driver_remove_nogem(i915);

	i915_driver_hw_remove(i915);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	intel_wakeref_t wakeref;

	if (!dev_priv->do_release)
		return;

	wakeref = intel_runtime_pm_get(rpm);

	i915_gem_driver_release(dev_priv);

	intel_memory_regions_driver_release(dev_priv);
	i915_ggtt_driver_release(dev_priv);
	i915_gem_drain_freed_objects(dev_priv);
	i915_ggtt_driver_late_release(dev_priv);

	i915_driver_mmio_release(dev_priv);

	intel_runtime_pm_put(rpm, wakeref);

	intel_runtime_pm_driver_release(rpm);

	i915_driver_late_release(dev_priv);
}

static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	int ret;

	ret = i915_gem_open(i915, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
1068 * up any GEM state.
1069 */
1070static void i915_driver_lastclose(struct drm_device *dev)
1071{
1072	intel_fbdev_restore_mode(dev);
1073
1074	vga_switcheroo_process_delayed_switch();
1075}
1076
1077static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1078{
1079	struct drm_i915_file_private *file_priv = file->driver_priv;
1080
1081	i915_gem_context_close(file);
1082	i915_drm_client_put(file_priv->client);
1083
1084	kfree_rcu(file_priv, rcu);
1085
1086	/* Catch up with all the deferred frees from "this" client */
1087	i915_gem_flush_free_objects(to_i915(dev));
1088}
1089
1090static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
1091{
1092	struct intel_encoder *encoder;
1093
1094	if (!HAS_DISPLAY(dev_priv))
1095		return;
1096
1097	drm_modeset_lock_all(&dev_priv->drm);
1098	for_each_intel_encoder(&dev_priv->drm, encoder)
1099		if (encoder->suspend)
1100			encoder->suspend(encoder);
1101	drm_modeset_unlock_all(&dev_priv->drm);
1102}
1103
1104static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
1105{
1106	struct intel_encoder *encoder;
1107
1108	if (!HAS_DISPLAY(dev_priv))
1109		return;
1110
1111	drm_modeset_lock_all(&dev_priv->drm);
1112	for_each_intel_encoder(&dev_priv->drm, encoder)
1113		if (encoder->shutdown)
1114			encoder->shutdown(encoder);
1115	drm_modeset_unlock_all(&dev_priv->drm);
1116}
1117
1118void i915_driver_shutdown(struct drm_i915_private *i915)
1119{
1120	disable_rpm_wakeref_asserts(&i915->runtime_pm);
1121	intel_runtime_pm_disable(&i915->runtime_pm);
1122	intel_power_domains_disable(i915);
1123
1124	if (HAS_DISPLAY(i915)) {
1125		drm_kms_helper_poll_disable(&i915->drm);
1126
1127		drm_atomic_helper_shutdown(&i915->drm);
1128	}
1129
1130	intel_dp_mst_suspend(i915);
1131
1132	intel_runtime_pm_disable_interrupts(i915);
1133	intel_hpd_cancel_work(i915);
1134
1135	intel_suspend_encoders(i915);
1136	intel_shutdown_encoders(i915);
1137
1138	intel_dmc_ucode_suspend(i915);
1139
1140	i915_gem_suspend(i915);
1141
1142	/*
1143	 * The only requirement is to reboot with display DC states disabled,
1144	 * for now leaving all display power wells in the INIT power domain
1145	 * enabled.
1146	 *
1147	 * TODO:
1148	 * - unify the pci_driver::shutdown sequence here with the
1149	 *   pci_driver.driver.pm.poweroff,poweroff_late sequence.
1150	 * - unify the driver remove and system/runtime suspend sequences with
1151	 *   the above unified shutdown/poweroff sequence.
1152	 */
1153	intel_power_domains_driver_remove(i915);
1154	enable_rpm_wakeref_asserts(&i915->runtime_pm);
1155
1156	intel_runtime_pm_driver_release(&i915->runtime_pm);
1157}
1158
1159static bool suspend_to_idle(struct drm_i915_private *dev_priv)
1160{
1161#if IS_ENABLED(CONFIG_ACPI_SLEEP)
1162	if (acpi_target_system_state() < ACPI_STATE_S3)
1163		return true;
1164#endif
1165	return false;
1166}
1167
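/*
 * suspend_to_idle() above distinguishes suspend-to-idle (an ACPI target
 * state shallower than S3) from a real S3 suspend. Several paths key off
 * this: i915_drm_suspend() notifies the opregion with PCI_D1 instead of
 * PCI_D3cold, and get_suspend_mode() below picks I915_DRM_SUSPEND_IDLE so
 * the late suspend path leaves more of the device powered.
 */
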
static int i915_drm_prepare(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	/*
	 * NB intel_display_suspend() may issue new requests after we've
	 * ostensibly marked the GPU as ready-to-sleep here. We need to
	 * split out that work and pull it forward so that after point,
	 * the GPU is not woken again.
	 */
	return i915_gem_backup_suspend(i915);
}

static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	pci_power_t opregion_target_state;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_power_domains_disable(dev_priv);
	if (HAS_DISPLAY(dev_priv))
		drm_kms_helper_poll_disable(dev);

	pci_save_state(pdev);

	intel_display_suspend(dev);

	intel_dp_mst_suspend(dev_priv);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev_priv);

	/* Must be called before GGTT is suspended. */
	intel_dpt_suspend(dev_priv);
	i915_ggtt_suspend(to_gt(dev_priv)->ggtt);

	i915_save_display(dev_priv);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_suspend(dev_priv, opregion_target_state);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_dmc_ucode_suspend(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	i915_gem_drain_freed_objects(dev_priv);

	return 0;
}

static enum i915_drm_suspend_mode
get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
{
	if (hibernate)
		return I915_DRM_SUSPEND_HIBERNATE;

	if (suspend_to_idle(dev_priv))
		return I915_DRM_SUSPEND_IDLE;

	return I915_DRM_SUSPEND_MEM;
}

static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	struct intel_gt *gt;
	int ret, i;

	disable_rpm_wakeref_asserts(rpm);

	i915_gem_suspend_late(dev_priv);

	for_each_gt(gt, dev_priv, i)
		intel_uncore_suspend(gt->uncore);

	intel_power_domains_suspend(dev_priv,
				    get_suspend_mode(dev_priv, hibernation));

	intel_display_power_suspend_late(dev_priv);

	ret = vlv_suspend_complete(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
		intel_power_domains_resume(dev_priv);

		goto out;
	}

	pci_disable_device(pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && GRAPHICS_VER(dev_priv) < 6))
		pci_set_power_state(pdev, PCI_D3hot);

out:
	enable_rpm_wakeref_asserts(rpm);
	if (!dev_priv->uncore.user_forcewake_count)
		intel_runtime_pm_driver_release(rpm);

	return ret;
}

int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
				   pm_message_t state)
{
	int error;

	if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
			     state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(&i915->drm);
	if (error)
		return error;

	return i915_drm_suspend_late(&i915->drm, false);
}

static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	ret = i915_pcode_init(dev_priv);
	if (ret)
		return ret;

	sanitize_gpu(dev_priv);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret)
		drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");

	i915_ggtt_resume(to_gt(dev_priv)->ggtt);
	/* Must be called after GGTT is resumed. */
	intel_dpt_resume(dev_priv);

	intel_dmc_ucode_resume(dev_priv);

	i915_restore_display(dev_priv);
	intel_pps_unlock_regs_wa(dev_priv);

	intel_init_pch_refclk(dev_priv);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * drm_mode_config_reset() needs AUX interrupts.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	if (HAS_DISPLAY(dev_priv))
		drm_mode_config_reset(dev);

	i915_gem_resume(dev_priv);

	intel_modeset_init_hw(dev_priv);
	intel_init_clock_gating(dev_priv);
	intel_hpd_init(dev_priv);

	/* MST sideband requires HPD interrupts enabled */
	intel_dp_mst_resume(dev_priv);
	intel_display_resume(dev);

	intel_hpd_poll_disable(dev_priv);
	if (HAS_DISPLAY(dev_priv))
		drm_kms_helper_poll_enable(dev);

	intel_opregion_resume(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	intel_power_domains_enable(dev_priv);

	intel_gvt_resume(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return 0;
}

static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct intel_gt *gt;
	int ret, i;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
	ret = pci_set_power_state(pdev, PCI_D0);
	if (ret) {
		drm_err(&dev_priv->drm,
			"failed to set PCI D0 power state (%d)\n", ret);
		return ret;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(pdev))
		return -EIO;

	pci_set_master(pdev);

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		drm_err(&dev_priv->drm,
			"Resume prepare failed: %d, continuing anyway\n", ret);

	for_each_gt(gt, dev_priv, i) {
		intel_uncore_resume_early(gt->uncore);
		intel_gt_check_and_clear_faults(gt);
	}

	intel_display_power_resume_early(dev_priv);

	intel_power_domains_resume(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

int i915_driver_resume_switcheroo(struct drm_i915_private *i915)
{
	int ret;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(&i915->drm);
	if (ret)
		return ret;

	return i915_drm_resume(&i915->drm);
}

static int i915_pm_prepare(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_prepare(&i915->drm);
}

static int i915_pm_suspend(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	i915_ggtt_mark_pte_lost(i915, false);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(&i915->drm);
}

static int i915_pm_suspend_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(&i915->drm, false);
}

static int i915_pm_poweroff_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(&i915->drm, true);
}

static int i915_pm_resume_early(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(&i915->drm);
}

static int i915_pm_resume(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/*
	 * If IRST is enabled, or if we can't detect whether it's enabled,
	 * then we must assume we lost the GGTT page table entries, since
	 * they are not retained if IRST decided to enter S4.
	 */
	if (!IS_ENABLED(CONFIG_ACPI) || acpi_dev_present(irst_name, NULL, -1))
		i915_ggtt_mark_pte_lost(i915, true);

	return i915_drm_resume(&i915->drm);
}

/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend(&i915->drm);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze(i915);
	if (ret)
		return ret;

	return 0;
}

static int i915_pm_freeze_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend_late(&i915->drm, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze_late(i915);
	if (ret)
		return ret;

	return 0;
}

/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_thaw(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/* restore: called after loading the hibernation image. */
static int i915_pm_restore_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_restore(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	i915_ggtt_mark_pte_lost(i915, true);
	return i915_pm_resume(kdev);
}

static int intel_runtime_suspend(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	struct intel_gt *gt;
	int ret, i;

	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	drm_dbg(&dev_priv->drm, "Suspending device\n");

	disable_rpm_wakeref_asserts(rpm);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_runtime_suspend(dev_priv);

	for_each_gt(gt, dev_priv, i)
		intel_gt_runtime_suspend(gt);

	intel_runtime_pm_disable_interrupts(dev_priv);

	for_each_gt(gt, dev_priv, i)
		intel_uncore_suspend(gt->uncore);

	intel_display_power_suspend(dev_priv);

	ret = vlv_suspend_complete(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm,
			"Runtime suspend failed, disabling it (%d)\n", ret);
		intel_uncore_runtime_resume(&dev_priv->uncore);

		intel_runtime_pm_enable_interrupts(dev_priv);

		for_each_gt(gt, dev_priv, i)
			intel_gt_runtime_resume(gt);

		enable_rpm_wakeref_asserts(rpm);

		return ret;
	}

	enable_rpm_wakeref_asserts(rpm);
	intel_runtime_pm_driver_release(rpm);

	if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
		drm_err(&dev_priv->drm,
			"Unclaimed access detected prior to suspending\n");

	rpm->suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_BROADWELL(dev_priv)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
	} else {
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D1);
	}

	assert_forcewakes_inactive(&dev_priv->uncore);

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_poll_enable(dev_priv);

	drm_dbg(&dev_priv->drm, "Device suspended\n");
	return 0;
}

static int intel_runtime_resume(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	struct intel_gt *gt;
	int ret, i;

	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	drm_dbg(&dev_priv->drm, "Resuming device\n");

	drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
	disable_rpm_wakeref_asserts(rpm);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);
	rpm->suspended = false;
	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
		drm_dbg(&dev_priv->drm,
			"Unclaimed access during suspend, bios?\n");

	intel_display_power_resume(dev_priv);

	ret = vlv_resume_prepare(dev_priv, true);

	for_each_gt(gt, dev_priv, i)
		intel_uncore_runtime_resume(gt->uncore);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * No point of rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	for_each_gt(gt, dev_priv, i)
		intel_gt_runtime_resume(gt);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		intel_hpd_init(dev_priv);
		intel_hpd_poll_disable(dev_priv);
	}

	skl_watermark_ipc_update(dev_priv);

	enable_rpm_wakeref_asserts(rpm);

	if (ret)
		drm_err(&dev_priv->drm,
			"Runtime resume failed, disabling it (%d)\n", ret);
	else
		drm_dbg(&dev_priv->drm, "Device resumed\n");

	return ret;
}

const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.prepare = i915_pm_prepare,
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_freeze,
	.freeze_late = i915_pm_freeze_late,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_restore_early,
	.restore = i915_pm_restore,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};

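/*
 * How these ops get used (a sketch, assuming the conventional PCI driver
 * binding done elsewhere in the driver, from the PCI probe code):
 *
 *	static struct pci_driver i915_pci_driver = {
 *		.name = DRIVER_NAME,
 *		...
 *		.driver.pm = &i915_pm_ops,
 *	};
 *
 * The PM core then invokes the system-sleep hooks in the documented
 * prepare/suspend/suspend_late ... resume_early/resume order, and the
 * runtime hooks whenever the device's runtime-PM usage count drops to zero
 * or a wakeup is requested.
 */
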
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release_noglobal,
	.unlocked_ioctl = drm_ioctl,
	.mmap = i915_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = i915_ioc32_compat_ioctl,
	.llseek = noop_llseek,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = i915_drm_client_fdinfo,
#endif
};

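/*
 * The legacy GEM pin/unpin ioctls are intentionally unsupported; the stub
 * below stays wired into the ioctl table so that old userspace issuing
 * them receives a deliberate -ENODEV rather than an unhandled request.
 */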
static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

static const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE_EXT, i915_gem_create_ext_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};
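
/*
 * A quick key to the access flags used in the table above: DRM_AUTH
 * requires an authenticated DRM client, DRM_MASTER restricts the ioctl to
 * the current DRM master (typically the display server), DRM_ROOT_ONLY adds
 * a CAP_SYS_ADMIN check, and DRM_RENDER_ALLOW additionally permits the
 * ioctl on render nodes (/dev/dri/renderD*), which is what lets
 * unprivileged render/compute clients reach the GEM paths.
 */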

/*
 * Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

static const struct drm_driver i915_drm_driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE,
	.release = i915_driver_release,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.postclose = i915_driver_postclose,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_dumb_mmap_offset,

	.ioctls = i915_ioctls,
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};