1/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2 */
3/*
4 *
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
18 * of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 *
28 */
29
30#include <linux/acpi.h>
31#include <linux/device.h>
32#include <linux/oom.h>
33#include <linux/module.h>
34#include <linux/pci.h>
35#include <linux/pm.h>
36#include <linux/pm_runtime.h>
37#include <linux/pnp.h>
38#include <linux/slab.h>
39#include <linux/vga_switcheroo.h>
40#include <linux/vt.h>
41
42#include <drm/drm_aperture.h>
43#include <drm/drm_atomic_helper.h>
44#include <drm/drm_ioctl.h>
45#include <drm/drm_managed.h>
46#include <drm/drm_probe_helper.h>
47
48#include "display/intel_acpi.h"
49#include "display/intel_bw.h"
50#include "display/intel_cdclk.h"
51#include "display/intel_dmc.h"
52#include "display/intel_display_types.h"
53#include "display/intel_dp.h"
54#include "display/intel_fbdev.h"
55#include "display/intel_hotplug.h"
56#include "display/intel_overlay.h"
57#include "display/intel_pipe_crc.h"
58#include "display/intel_pps.h"
59#include "display/intel_sprite.h"
60#include "display/intel_vga.h"
61
62#include "gem/i915_gem_context.h"
63#include "gem/i915_gem_ioctls.h"
64#include "gem/i915_gem_mman.h"
65#include "gem/i915_gem_pm.h"
66#include "gt/intel_gt.h"
67#include "gt/intel_gt_pm.h"
68#include "gt/intel_rc6.h"
69
70#include "i915_debugfs.h"
71#include "i915_drv.h"
72#include "i915_ioc32.h"
73#include "i915_irq.h"
74#include "i915_memcpy.h"
75#include "i915_perf.h"
76#include "i915_query.h"
77#include "i915_suspend.h"
78#include "i915_switcheroo.h"
79#include "i915_sysfs.h"
80#include "i915_trace.h"
81#include "i915_vgpu.h"
82#include "intel_dram.h"
83#include "intel_gvt.h"
84#include "intel_memory_region.h"
85#include "intel_pm.h"
86#include "intel_region_ttm.h"
87#include "intel_sideband.h"
88#include "vlv_suspend.h"
89
90static const struct drm_driver driver;
91
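/*
 * The "bridge" looked up below is the host bridge at devfn 00.0 in the
 * GPU's PCI domain; its config space carries chipset-level registers
 * (e.g. MCHBAR, DEVEN) that the MCHBAR setup code further down pokes at.
 */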
92static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
93{
94 int domain = pci_domain_nr(to_pci_dev(dev_priv->drm.dev)->bus);
95
96 dev_priv->bridge_dev =
97 pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
98 if (!dev_priv->bridge_dev) {
99 drm_err(&dev_priv->drm, "bridge device not found\n");
100 return -1;
101 }
102 return 0;
103}
104
105/* Allocate space for the MCH regs if needed, return nonzero on error */
106static int
107intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
108{
109 int reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
110 u32 temp_lo, temp_hi = 0;
111 u64 mchbar_addr;
112 int ret;
113
114 if (GRAPHICS_VER(dev_priv) >= 4)
115 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
116 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
117 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
118
119 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
120#ifdef CONFIG_PNP
121 if (mchbar_addr &&
122 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
123 return 0;
124#endif
125
126 /* Get some space for it */
127 dev_priv->mch_res.name = "i915 MCHBAR";
128 dev_priv->mch_res.flags = IORESOURCE_MEM;
129 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
130 &dev_priv->mch_res,
131 MCHBAR_SIZE, MCHBAR_SIZE,
132 PCIBIOS_MIN_MEM,
133 0, pcibios_align_resource,
134 dev_priv->bridge_dev);
135 if (ret) {
136 drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret);
137 dev_priv->mch_res.start = 0;
138 return ret;
139 }
140
141 if (GRAPHICS_VER(dev_priv) >= 4)
142 pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
143 upper_32_bits(dev_priv->mch_res.start));
144
145 pci_write_config_dword(dev_priv->bridge_dev, reg,
146 lower_32_bits(dev_priv->mch_res.start));
147 return 0;
148}
149
150/* Setup MCHBAR if possible; note whether we need to disable it again on teardown */
151static void
152intel_setup_mchbar(struct drm_i915_private *dev_priv)
153{
154 int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
155 u32 temp;
156 bool enabled;
157
158 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
159 return;
160
161 dev_priv->mchbar_need_disable = false;
162
163 if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
164 pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
165 enabled = !!(temp & DEVEN_MCHBAR_EN);
166 } else {
167 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
168 enabled = temp & 1;
169 }
170
171	/* If it's already enabled, we don't have to do anything */
172 if (enabled)
173 return;
174
175 if (intel_alloc_mchbar_resource(dev_priv))
176 return;
177
178 dev_priv->mchbar_need_disable = true;
179
180 /* Space is allocated or reserved, so enable it. */
181 if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
182 pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
183 temp | DEVEN_MCHBAR_EN);
184 } else {
185 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
186 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
187 }
188}
189
190static void
191intel_teardown_mchbar(struct drm_i915_private *dev_priv)
192{
193 int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
194
195 if (dev_priv->mchbar_need_disable) {
196 if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
197 u32 deven_val;
198
199 pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
200 &deven_val);
201 deven_val &= ~DEVEN_MCHBAR_EN;
202 pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
203 deven_val);
204 } else {
205 u32 mchbar_val;
206
207 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
208 &mchbar_val);
209 mchbar_val &= ~1;
210 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
211 mchbar_val);
212 }
213 }
214
215 if (dev_priv->mch_res.start)
216 release_resource(&dev_priv->mch_res);
217}
218
219static int i915_workqueues_init(struct drm_i915_private *dev_priv)
220{
221 /*
222 * The i915 workqueue is primarily used for batched retirement of
223 * requests (and thus managing bo) once the task has been completed
224 * by the GPU. i915_retire_requests() is called directly when we
225 * need high-priority retirement, such as waiting for an explicit
226 * bo.
227 *
228 * It is also used for periodic low-priority events, such as
229 * idle-timers and recording error state.
230 *
231 * All tasks on the workqueue are expected to acquire the dev mutex
232 * so there is no point in running more than one instance of the
233 * workqueue at any time. Use an ordered one.
234 */
235 dev_priv->wq = alloc_ordered_workqueue("i915", 0);
236 if (dev_priv->wq == NULL)
237 goto out_err;
238
239 dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
240 if (dev_priv->hotplug.dp_wq == NULL)
241 goto out_free_wq;
242
243 return 0;
244
245out_free_wq:
246 destroy_workqueue(dev_priv->wq);
247out_err:
248 drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n");
249
250 return -ENOMEM;
251}
252
253static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
254{
255 destroy_workqueue(dev_priv->hotplug.dp_wq);
256 destroy_workqueue(dev_priv->wq);
257}
258
259/*
260 * We don't keep the workarounds for pre-production hardware, so we expect our
261 * driver to fail on these machines in one way or another. A little warning on
262 * dmesg may help both the user and the bug triagers.
263 *
264 * Our policy for removing pre-production workarounds is to keep the
265 * current gen workarounds as a guide to the bring-up of the next gen
266 * (workarounds have a habit of persisting!). Anything older than that
267 * should be removed along with the complications they introduce.
268 */
269static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
270{
271 bool pre = false;
272
273 pre |= IS_HSW_EARLY_SDV(dev_priv);
274 pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
275 pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
276 pre |= IS_KBL_GT_STEP(dev_priv, 0, STEP_A0);
277 pre |= IS_GLK_REVID(dev_priv, 0, GLK_REVID_A2);
278
279 if (pre) {
280 drm_err(&dev_priv->drm, "This is a pre-production stepping. "
281 "It may not be fully functional.\n");
282 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
283 }
284}
285
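/*
 * Scrub any GPU state left behind by the BIOS/bootloader. Skip the reset
 * on platforms where a full GPU reset would also clobber the display,
 * since the boot framebuffer may still be in use at this point.
 */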
286static void sanitize_gpu(struct drm_i915_private *i915)
287{
288 if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
289 __intel_gt_reset(&i915->gt, ALL_ENGINES);
290}
291
292/**
293 * i915_driver_early_probe - setup state not requiring device access
294 * @dev_priv: device private
295 *
296 * Initialize everything that is "SW-only" state, that is, state not
297 * requiring accessing the device or exposing the driver via kernel internal
298 * or userspace interfaces. Example steps belonging here: lock initialization,
299 * system memory allocation, setting up device specific attributes and
300 * function hooks not requiring accessing the device.
301 */
302static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
303{
304 int ret = 0;
305
306 if (i915_inject_probe_failure(dev_priv))
307 return -ENODEV;
308
309 intel_device_info_subplatform_init(dev_priv);
310 intel_step_init(dev_priv);
311
312 intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
313 intel_uncore_init_early(&dev_priv->uncore, dev_priv);
314
315 spin_lock_init(&dev_priv->irq_lock);
316 spin_lock_init(&dev_priv->gpu_error.lock);
317 mutex_init(&dev_priv->backlight_lock);
318
319 mutex_init(&dev_priv->sb_lock);
320 cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
321
322 mutex_init(&dev_priv->av_mutex);
323 mutex_init(&dev_priv->wm.wm_mutex);
324 mutex_init(&dev_priv->pps_mutex);
325 mutex_init(&dev_priv->hdcp_comp_mutex);
326
327 i915_memcpy_init_early(dev_priv);
328 intel_runtime_pm_init_early(&dev_priv->runtime_pm);
329
330 ret = i915_workqueues_init(dev_priv);
331 if (ret < 0)
332 return ret;
333
334 ret = vlv_suspend_init(dev_priv);
335 if (ret < 0)
336 goto err_workqueues;
337
338 ret = intel_region_ttm_device_init(dev_priv);
339 if (ret)
340 goto err_ttm;
341
342 intel_wopcm_init_early(&dev_priv->wopcm);
343
344 intel_gt_init_early(&dev_priv->gt, dev_priv);
345
346 i915_gem_init_early(dev_priv);
347
348 /* This must be called before any calls to HAS_PCH_* */
349 intel_detect_pch(dev_priv);
350
351 intel_pm_setup(dev_priv);
352 ret = intel_power_domains_init(dev_priv);
353 if (ret < 0)
354 goto err_gem;
355 intel_irq_init(dev_priv);
356 intel_init_display_hooks(dev_priv);
357 intel_init_clock_gating_hooks(dev_priv);
358
359 intel_detect_preproduction_hw(dev_priv);
360
361 return 0;
362
363err_gem:
364 i915_gem_cleanup_early(dev_priv);
365 intel_gt_driver_late_release(&dev_priv->gt);
366 intel_region_ttm_device_fini(dev_priv);
367err_ttm:
368 vlv_suspend_cleanup(dev_priv);
369err_workqueues:
370 i915_workqueues_cleanup(dev_priv);
371 return ret;
372}
373
374/**
375 * i915_driver_late_release - cleanup the setup done in
376 * i915_driver_early_probe()
377 * @dev_priv: device private
378 */
379static void i915_driver_late_release(struct drm_i915_private *dev_priv)
380{
381 intel_irq_fini(dev_priv);
382 intel_power_domains_cleanup(dev_priv);
383 i915_gem_cleanup_early(dev_priv);
384 intel_gt_driver_late_release(&dev_priv->gt);
385 intel_region_ttm_device_fini(dev_priv);
386 vlv_suspend_cleanup(dev_priv);
387 i915_workqueues_cleanup(dev_priv);
388
389 cpu_latency_qos_remove_request(&dev_priv->sb_qos);
390 mutex_destroy(&dev_priv->sb_lock);
391
392 i915_params_free(&dev_priv->params);
393}
394
395/**
396 * i915_driver_mmio_probe - setup device MMIO
397 * @dev_priv: device private
398 *
399 * Setup minimal device state necessary for MMIO accesses later in the
400 * initialization sequence. The setup here should avoid any other device-wide
401 * side effects or exposing the driver via kernel internal or user space
402 * interfaces.
403 */
404static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
405{
406 int ret;
407
408 if (i915_inject_probe_failure(dev_priv))
409 return -ENODEV;
410
411 if (i915_get_bridge_dev(dev_priv))
412 return -EIO;
413
414 ret = intel_uncore_init_mmio(&dev_priv->uncore);
415 if (ret < 0)
416 goto err_bridge;
417
418 /* Try to make sure MCHBAR is enabled before poking at it */
419 intel_setup_mchbar(dev_priv);
420 intel_device_info_runtime_init(dev_priv);
421
422 ret = intel_gt_init_mmio(&dev_priv->gt);
423 if (ret)
424 goto err_uncore;
425
426 /* As early as possible, scrub existing GPU state before clobbering */
427 sanitize_gpu(dev_priv);
428
429 return 0;
430
431err_uncore:
432 intel_teardown_mchbar(dev_priv);
433 intel_uncore_fini_mmio(&dev_priv->uncore);
434err_bridge:
435 pci_dev_put(dev_priv->bridge_dev);
436
437 return ret;
438}
439
440/**
441 * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
442 * @dev_priv: device private
443 */
444static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
445{
446 intel_teardown_mchbar(dev_priv);
447 intel_uncore_fini_mmio(&dev_priv->uncore);
448 pci_dev_put(dev_priv->bridge_dev);
449}
450
451static void intel_sanitize_options(struct drm_i915_private *dev_priv)
452{
453 intel_gvt_sanitize_options(dev_priv);
454}
455
456/**
457 * i915_set_dma_info - set all relevant PCI dma info as configured for the
458 * platform
459 * @i915: valid i915 instance
460 *
461 * Set the dma max segment size, device and coherent masks. The dma mask set
462 * needs to occur before i915_ggtt_probe_hw.
463 *
464 * A couple of platforms have special needs. Address them as well.
465 *
466 */
467static int i915_set_dma_info(struct drm_i915_private *i915)
468{
469 unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size;
470 int ret;
471
472 GEM_BUG_ON(!mask_size);
473
474 /*
475 * We don't have a max segment size, so set it to the max so sg's
476 * debugging layer doesn't complain
477 */
478 dma_set_max_seg_size(i915->drm.dev, UINT_MAX);
479
480 ret = dma_set_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
481 if (ret)
482 goto mask_err;
483
484 /* overlay on gen2 is broken and can't address above 1G */
485 if (GRAPHICS_VER(i915) == 2)
486 mask_size = 30;
487
488 /*
489 * 965GM sometimes incorrectly writes to hardware status page (HWS)
490 * using 32bit addressing, overwriting memory if HWS is located
491 * above 4GB.
492 *
493 * The documentation also mentions an issue with undefined
494 * behaviour if any general state is accessed within a page above 4GB,
495 * which also needs to be handled carefully.
496 */
497 if (IS_I965G(i915) || IS_I965GM(i915))
498 mask_size = 32;
499
500 ret = dma_set_coherent_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
501 if (ret)
502 goto mask_err;
503
504 return 0;
505
506mask_err:
507 drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret);
508 return ret;
509}
510
511/**
512 * i915_driver_hw_probe - setup state requiring device access
513 * @dev_priv: device private
514 *
515 * Setup state that requires accessing the device, but doesn't require
516 * exposing the driver via kernel internal or userspace interfaces.
517 */
518static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
519{
520 struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
521 int ret;
522
523 if (i915_inject_probe_failure(dev_priv))
524 return -ENODEV;
525
526 if (HAS_PPGTT(dev_priv)) {
527 if (intel_vgpu_active(dev_priv) &&
528 !intel_vgpu_has_full_ppgtt(dev_priv)) {
529 i915_report_error(dev_priv,
530 "incompatible vGPU found, support for isolated ppGTT required\n");
531 return -ENXIO;
532 }
533 }
534
535 if (HAS_EXECLISTS(dev_priv)) {
536 /*
537 * Older GVT emulation depends upon intercepting CSB mmio,
538 * which we no longer use, preferring to use the HWSP cache
539 * instead.
540 */
541 if (intel_vgpu_active(dev_priv) &&
542 !intel_vgpu_has_hwsp_emulation(dev_priv)) {
543 i915_report_error(dev_priv,
544 "old vGPU host found, support for HWSP emulation required\n");
545 return -ENXIO;
546 }
547 }
548
549 intel_sanitize_options(dev_priv);
550
551 /* needs to be done before ggtt probe */
552 intel_dram_edram_detect(dev_priv);
553
554 ret = i915_set_dma_info(dev_priv);
555 if (ret)
556 return ret;
557
558 i915_perf_init(dev_priv);
559
560 ret = i915_ggtt_probe_hw(dev_priv);
561 if (ret)
562 goto err_perf;
563
564 ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb");
565 if (ret)
566 goto err_ggtt;
567
568 ret = i915_ggtt_init_hw(dev_priv);
569 if (ret)
570 goto err_ggtt;
571
572 ret = intel_memory_regions_hw_probe(dev_priv);
573 if (ret)
574 goto err_ggtt;
575
576 intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt);
577
578 ret = intel_gt_probe_lmem(&dev_priv->gt);
579 if (ret)
580 goto err_mem_regions;
581
582 ret = i915_ggtt_enable_hw(dev_priv);
583 if (ret) {
584 drm_err(&dev_priv->drm, "failed to enable GGTT\n");
585 goto err_mem_regions;
586 }
587
588 pci_set_master(pdev);
589
590 intel_gt_init_workarounds(dev_priv);
591
592 /* On the 945G/GM, the chipset reports the MSI capability on the
593 * integrated graphics even though the support isn't actually there
594 * according to the published specs. It doesn't appear to function
595 * correctly in testing on 945G.
596 * This may be a side effect of MSI having been made available for PEG
597 * and the registers being closely associated.
598 *
599 * According to chipset errata, on the 965GM, MSI interrupts may
600 * be lost or delayed, and MSI support was defeatured. MSI interrupts seem to
601 * get lost on g4x as well, and interrupt delivery seems to stay
602 * properly dead afterwards. So we'll just disable them for all
603 * pre-gen5 chipsets.
604 *
605 * dp aux and gmbus irq on gen4 seem to be able to generate legacy
606 * interrupts even when in MSI mode. This results in spurious
607 * interrupt warnings if the legacy irq no. is shared with another
608 * device. The kernel then disables that interrupt source and so
609 * prevents the other device from working properly.
610 */
611 if (GRAPHICS_VER(dev_priv) >= 5) {
612 if (pci_enable_msi(pdev) < 0)
613			drm_dbg(&dev_priv->drm, "can't enable MSI\n");
614 }
615
616 ret = intel_gvt_init(dev_priv);
617 if (ret)
618 goto err_msi;
619
620 intel_opregion_setup(dev_priv);
621
622 intel_pcode_init(dev_priv);
623
624 /*
625 * Fill the dram structure to get the system dram info. This will be
626 * used for memory latency calculation.
627 */
628 intel_dram_detect(dev_priv);
629
630 intel_bw_init_hw(dev_priv);
631
632 return 0;
633
634err_msi:
635 if (pdev->msi_enabled)
636 pci_disable_msi(pdev);
637err_mem_regions:
638 intel_memory_regions_driver_release(dev_priv);
639err_ggtt:
640 i915_ggtt_driver_release(dev_priv);
641 i915_gem_drain_freed_objects(dev_priv);
642 i915_ggtt_driver_late_release(dev_priv);
643err_perf:
644 i915_perf_fini(dev_priv);
645 return ret;
646}
647
648/**
649 * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
650 * @dev_priv: device private
651 */
652static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
653{
654 struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
655
656 i915_perf_fini(dev_priv);
657
658 if (pdev->msi_enabled)
659 pci_disable_msi(pdev);
660}
661
662/**
663 * i915_driver_register - register the driver with the rest of the system
664 * @dev_priv: device private
665 *
666 * Perform any steps necessary to make the driver available via kernel
667 * internal or userspace interfaces.
668 */
669static void i915_driver_register(struct drm_i915_private *dev_priv)
670{
671 struct drm_device *dev = &dev_priv->drm;
672
673 i915_gem_driver_register(dev_priv);
674 i915_pmu_register(dev_priv);
675
676 intel_vgpu_register(dev_priv);
677
678 /* Reveal our presence to userspace */
679 if (drm_dev_register(dev, 0)) {
680 drm_err(&dev_priv->drm,
681 "Failed to register driver for userspace access!\n");
682 return;
683 }
684
685 i915_debugfs_register(dev_priv);
686 i915_setup_sysfs(dev_priv);
687
688 /* Depends on sysfs having been initialized */
689 i915_perf_register(dev_priv);
690
691 intel_gt_driver_register(&dev_priv->gt);
692
693 intel_display_driver_register(dev_priv);
694
695 intel_power_domains_enable(dev_priv);
696 intel_runtime_pm_enable(&dev_priv->runtime_pm);
697
698 intel_register_dsm_handler();
699
700 if (i915_switcheroo_register(dev_priv))
701 drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
702}
703
704/**
705 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
706 * @dev_priv: device private
707 */
708static void i915_driver_unregister(struct drm_i915_private *dev_priv)
709{
710 i915_switcheroo_unregister(dev_priv);
711
712 intel_unregister_dsm_handler();
713
714 intel_runtime_pm_disable(&dev_priv->runtime_pm);
715 intel_power_domains_disable(dev_priv);
716
717 intel_display_driver_unregister(dev_priv);
718
719 intel_gt_driver_unregister(&dev_priv->gt);
720
721 i915_perf_unregister(dev_priv);
722 i915_pmu_unregister(dev_priv);
723
724 i915_teardown_sysfs(dev_priv);
725 drm_dev_unplug(&dev_priv->drm);
726
727 i915_gem_driver_unregister(dev_priv);
728}
729
730static void i915_welcome_messages(struct drm_i915_private *dev_priv)
731{
732 if (drm_debug_enabled(DRM_UT_DRIVER)) {
733 struct drm_printer p = drm_debug_printer("i915 device info:");
734
735 drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
736 INTEL_DEVID(dev_priv),
737 INTEL_REVID(dev_priv),
738 intel_platform_name(INTEL_INFO(dev_priv)->platform),
739 intel_subplatform(RUNTIME_INFO(dev_priv),
740 INTEL_INFO(dev_priv)->platform),
741 GRAPHICS_VER(dev_priv));
742
743 intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
744 intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
745 intel_gt_info_print(&dev_priv->gt.info, &p);
746 }
747
748 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
749 drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n");
750 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
751 drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n");
752 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
753 drm_info(&dev_priv->drm,
754 "DRM_I915_DEBUG_RUNTIME_PM enabled\n");
755}
756
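/*
 * Allocate the drm_i915_private (with its embedded drm_device) through the
 * managed DRM allocator, then seed it with a copy of the module parameters
 * and the write-once device info matched from the PCI ID table.
 */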
757static struct drm_i915_private *
758i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
759{
760 const struct intel_device_info *match_info =
761 (struct intel_device_info *)ent->driver_data;
762 struct intel_device_info *device_info;
763 struct drm_i915_private *i915;
764
765 i915 = devm_drm_dev_alloc(&pdev->dev, &driver,
766 struct drm_i915_private, drm);
767 if (IS_ERR(i915))
768 return i915;
769
770 pci_set_drvdata(pdev, i915);
771
772 /* Device parameters start as a copy of module parameters. */
773 i915_params_copy(&i915->params, &i915_modparams);
774
775 /* Setup the write-once "constant" device info */
776 device_info = mkwrite_device_info(i915);
777 memcpy(device_info, match_info, sizeof(*device_info));
778 RUNTIME_INFO(i915)->device_id = pdev->device;
779
780 return i915;
781}
782
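/*
 * Probe proceeds through the phases implemented above: early (SW-only
 * state), MMIO, HW, then modeset/irq/GEM init, and finally registration
 * with userspace; the error labels below unwind those phases in reverse.
 */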
783/**
784 * i915_driver_probe - setup chip and create an initial config
785 * @pdev: PCI device
786 * @ent: matching PCI ID entry
787 *
788 * The driver probe routine has to do several things:
789 * - drive output discovery via intel_modeset_init()
790 * - initialize the memory manager
791 * - allocate initial config memory
792 * - setup the DRM framebuffer with the allocated memory
793 */
794int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
795{
796 const struct intel_device_info *match_info =
797 (struct intel_device_info *)ent->driver_data;
798 struct drm_i915_private *i915;
799 int ret;
800
801 i915 = i915_driver_create(pdev, ent);
802 if (IS_ERR(i915))
803 return PTR_ERR(i915);
804
805 /* Disable nuclear pageflip by default on pre-ILK */
806 if (!i915->params.nuclear_pageflip && match_info->graphics_ver < 5)
807 i915->drm.driver_features &= ~DRIVER_ATOMIC;
808
809 /*
810 * Check if we support fake LMEM -- for now we only unleash this for
811 * the live selftests (test-and-exit).
812 */
813#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
814 if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
815 if (GRAPHICS_VER(i915) >= 9 && i915_selftest.live < 0 &&
816 i915->params.fake_lmem_start) {
817 mkwrite_device_info(i915)->memory_regions =
818 REGION_SMEM | REGION_LMEM | REGION_STOLEN_SMEM;
819 GEM_BUG_ON(!HAS_LMEM(i915));
820 }
821 }
822#endif
823
824 ret = pci_enable_device(pdev);
825 if (ret)
826 goto out_fini;
827
828 ret = i915_driver_early_probe(i915);
829 if (ret < 0)
830 goto out_pci_disable;
831
832 disable_rpm_wakeref_asserts(&i915->runtime_pm);
833
834 intel_vgpu_detect(i915);
835
836 ret = i915_driver_mmio_probe(i915);
837 if (ret < 0)
838 goto out_runtime_pm_put;
839
840 ret = i915_driver_hw_probe(i915);
841 if (ret < 0)
842 goto out_cleanup_mmio;
843
844 ret = intel_modeset_init_noirq(i915);
845 if (ret < 0)
846 goto out_cleanup_hw;
847
848 ret = intel_irq_install(i915);
849 if (ret)
850 goto out_cleanup_modeset;
851
852 ret = intel_modeset_init_nogem(i915);
853 if (ret)
854 goto out_cleanup_irq;
855
856 ret = i915_gem_init(i915);
857 if (ret)
858 goto out_cleanup_modeset2;
859
860 ret = intel_modeset_init(i915);
861 if (ret)
862 goto out_cleanup_gem;
863
864 i915_driver_register(i915);
865
866 enable_rpm_wakeref_asserts(&i915->runtime_pm);
867
868 i915_welcome_messages(i915);
869
870 i915->do_release = true;
871
872 return 0;
873
874out_cleanup_gem:
875 i915_gem_suspend(i915);
876 i915_gem_driver_remove(i915);
877 i915_gem_driver_release(i915);
878out_cleanup_modeset2:
879 /* FIXME clean up the error path */
880 intel_modeset_driver_remove(i915);
881 intel_irq_uninstall(i915);
882 intel_modeset_driver_remove_noirq(i915);
883 goto out_cleanup_modeset;
884out_cleanup_irq:
885 intel_irq_uninstall(i915);
886out_cleanup_modeset:
887 intel_modeset_driver_remove_nogem(i915);
888out_cleanup_hw:
889 i915_driver_hw_remove(i915);
890 intel_memory_regions_driver_release(i915);
891 i915_ggtt_driver_release(i915);
892 i915_gem_drain_freed_objects(i915);
893 i915_ggtt_driver_late_release(i915);
894out_cleanup_mmio:
895 i915_driver_mmio_release(i915);
896out_runtime_pm_put:
897 enable_rpm_wakeref_asserts(&i915->runtime_pm);
898 i915_driver_late_release(i915);
899out_pci_disable:
900 pci_disable_device(pdev);
901out_fini:
902 i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
903 return ret;
904}
905
906void i915_driver_remove(struct drm_i915_private *i915)
907{
908 disable_rpm_wakeref_asserts(&i915->runtime_pm);
909
910 i915_driver_unregister(i915);
911
912 /* Flush any external code that still may be under the RCU lock */
913 synchronize_rcu();
914
915 i915_gem_suspend(i915);
916
917 intel_gvt_driver_remove(i915);
918
919 intel_modeset_driver_remove(i915);
920
921 intel_irq_uninstall(i915);
922
923 intel_modeset_driver_remove_noirq(i915);
924
925 i915_reset_error_state(i915);
926 i915_gem_driver_remove(i915);
927
928 intel_modeset_driver_remove_nogem(i915);
929
930 i915_driver_hw_remove(i915);
931
932 enable_rpm_wakeref_asserts(&i915->runtime_pm);
933}
934
935static void i915_driver_release(struct drm_device *dev)
936{
937 struct drm_i915_private *dev_priv = to_i915(dev);
938 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
939
940 if (!dev_priv->do_release)
941 return;
942
943 disable_rpm_wakeref_asserts(rpm);
944
945 i915_gem_driver_release(dev_priv);
946
947 intel_memory_regions_driver_release(dev_priv);
948 i915_ggtt_driver_release(dev_priv);
949 i915_gem_drain_freed_objects(dev_priv);
950 i915_ggtt_driver_late_release(dev_priv);
951
952 i915_driver_mmio_release(dev_priv);
953
954 enable_rpm_wakeref_asserts(rpm);
955 intel_runtime_pm_driver_release(rpm);
956
957 i915_driver_late_release(dev_priv);
958}
959
960static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
961{
962 struct drm_i915_private *i915 = to_i915(dev);
963 int ret;
964
965 ret = i915_gem_open(i915, file);
966 if (ret)
967 return ret;
968
969 return 0;
970}
971
972/**
973 * i915_driver_lastclose - clean up after all DRM clients have exited
974 * @dev: DRM device
975 *
976 * Take care of cleaning up after all DRM clients have exited. In the
977 * mode setting case, we want to restore the kernel's initial mode (just
978 * in case the last client left us in a bad state).
979 *
980 * Additionally, in the non-mode setting case, we'll tear down the GTT
981 * and DMA structures, since the kernel won't be using them, and clean
982 * up any GEM state.
983 */
984static void i915_driver_lastclose(struct drm_device *dev)
985{
986 struct drm_i915_private *i915 = to_i915(dev);
987
988 intel_fbdev_restore_mode(dev);
989
990 if (HAS_DISPLAY(i915))
991 vga_switcheroo_process_delayed_switch();
992}
993
994static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
995{
996 struct drm_i915_file_private *file_priv = file->driver_priv;
997
998 i915_gem_context_close(file);
999
1000 kfree_rcu(file_priv, rcu);
1001
1002 /* Catch up with all the deferred frees from "this" client */
1003 i915_gem_flush_free_objects(to_i915(dev));
1004}
1005
1006static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
1007{
1008 struct drm_device *dev = &dev_priv->drm;
1009 struct intel_encoder *encoder;
1010
1011 if (!HAS_DISPLAY(dev_priv))
1012 return;
1013
1014 drm_modeset_lock_all(dev);
1015 for_each_intel_encoder(dev, encoder)
1016 if (encoder->suspend)
1017 encoder->suspend(encoder);
1018 drm_modeset_unlock_all(dev);
1019}
1020
1021static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
1022{
1023 struct drm_device *dev = &dev_priv->drm;
1024 struct intel_encoder *encoder;
1025
1026 if (!HAS_DISPLAY(dev_priv))
1027 return;
1028
1029 drm_modeset_lock_all(dev);
1030 for_each_intel_encoder(dev, encoder)
1031 if (encoder->shutdown)
1032 encoder->shutdown(encoder);
1033 drm_modeset_unlock_all(dev);
1034}
1035
1036void i915_driver_shutdown(struct drm_i915_private *i915)
1037{
1038 disable_rpm_wakeref_asserts(&i915->runtime_pm);
1039 intel_runtime_pm_disable(&i915->runtime_pm);
1040 intel_power_domains_disable(i915);
1041
1042 i915_gem_suspend(i915);
1043
1044 if (HAS_DISPLAY(i915)) {
1045 drm_kms_helper_poll_disable(&i915->drm);
1046
1047 drm_atomic_helper_shutdown(&i915->drm);
1048 }
1049
1050 intel_dp_mst_suspend(i915);
1051
1052 intel_runtime_pm_disable_interrupts(i915);
1053 intel_hpd_cancel_work(i915);
1054
1055 intel_suspend_encoders(i915);
1056 intel_shutdown_encoders(i915);
1057
1058 intel_dmc_ucode_suspend(i915);
1059
1060 /*
1061 * The only requirement is to reboot with display DC states disabled,
1062 * for now leaving all display power wells in the INIT power domain
1063 * enabled.
1064 *
1065 * TODO:
1066 * - unify the pci_driver::shutdown sequence here with the
1067 * pci_driver.driver.pm.poweroff,poweroff_late sequence.
1068 * - unify the driver remove and system/runtime suspend sequences with
1069 * the above unified shutdown/poweroff sequence.
1070 */
1071 intel_power_domains_driver_remove(i915);
1072 enable_rpm_wakeref_asserts(&i915->runtime_pm);
1073
1074 intel_runtime_pm_driver_release(&i915->runtime_pm);
1075}
1076
1077static bool suspend_to_idle(struct drm_i915_private *dev_priv)
1078{
1079#if IS_ENABLED(CONFIG_ACPI_SLEEP)
1080 if (acpi_target_system_state() < ACPI_STATE_S3)
1081 return true;
1082#endif
1083 return false;
1084}
1085
1086static int i915_drm_prepare(struct drm_device *dev)
1087{
1088 struct drm_i915_private *i915 = to_i915(dev);
1089
1090 /*
1091 * NB intel_display_suspend() may issue new requests after we've
1092 * ostensibly marked the GPU as ready-to-sleep here. We need to
1093 * split out that work and pull it forward so that after this point,
1094 * the GPU is not woken again.
1095 */
1096 i915_gem_suspend(i915);
1097
1098 return 0;
1099}
1100
1101static int i915_drm_suspend(struct drm_device *dev)
1102{
1103 struct drm_i915_private *dev_priv = to_i915(dev);
1104 struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
1105 pci_power_t opregion_target_state;
1106
1107 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1108
1109	/* We do a lot of poking in a lot of registers; make sure they work
1110	 * properly. */
1111 intel_power_domains_disable(dev_priv);
1112 if (HAS_DISPLAY(dev_priv))
1113 drm_kms_helper_poll_disable(dev);
1114
1115 pci_save_state(pdev);
1116
1117 intel_display_suspend(dev);
1118
1119 intel_dp_mst_suspend(dev_priv);
1120
1121 intel_runtime_pm_disable_interrupts(dev_priv);
1122 intel_hpd_cancel_work(dev_priv);
1123
1124 intel_suspend_encoders(dev_priv);
1125
1126 intel_suspend_hw(dev_priv);
1127
1128 i915_ggtt_suspend(&dev_priv->ggtt);
1129
1130 i915_save_display(dev_priv);
1131
1132 opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
1133 intel_opregion_suspend(dev_priv, opregion_target_state);
1134
1135 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
1136
1137 dev_priv->suspend_count++;
1138
1139 intel_dmc_ucode_suspend(dev_priv);
1140
1141 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1142
1143 return 0;
1144}
1145
1146static enum i915_drm_suspend_mode
1147get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
1148{
1149 if (hibernate)
1150 return I915_DRM_SUSPEND_HIBERNATE;
1151
1152 if (suspend_to_idle(dev_priv))
1153 return I915_DRM_SUSPEND_IDLE;
1154
1155 return I915_DRM_SUSPEND_MEM;
1156}
1157
1158static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
1159{
1160 struct drm_i915_private *dev_priv = to_i915(dev);
1161 struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
1162 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
1163 int ret;
1164
1165 disable_rpm_wakeref_asserts(rpm);
1166
1167 i915_gem_suspend_late(dev_priv);
1168
1169 intel_uncore_suspend(&dev_priv->uncore);
1170
1171 intel_power_domains_suspend(dev_priv,
1172 get_suspend_mode(dev_priv, hibernation));
1173
1174 intel_display_power_suspend_late(dev_priv);
1175
1176 ret = vlv_suspend_complete(dev_priv);
1177 if (ret) {
1178 drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
1179 intel_power_domains_resume(dev_priv);
1180
1181 goto out;
1182 }
1183
1184 pci_disable_device(pdev);
1185 /*
1186 * During hibernation on some platforms the BIOS may try to access
1187 * the device even though it's already in D3 and hang the machine. So
1188 * leave the device in D0 on those platforms and hope the BIOS will
1189 * power down the device properly. The issue was seen on multiple old
1190 * GENs with different BIOS vendors, so having an explicit blacklist
1191 * is impractical; apply the workaround on everything pre GEN6. The
1192 * platforms where the issue was seen:
1193 * Lenovo Thinkpad X301, X61s, X60, T60, X41
1194 * Fujitsu FSC S7110
1195 * Acer Aspire 1830T
1196 */
1197 if (!(hibernation && GRAPHICS_VER(dev_priv) < 6))
1198 pci_set_power_state(pdev, PCI_D3hot);
1199
1200out:
1201 enable_rpm_wakeref_asserts(rpm);
1202 if (!dev_priv->uncore.user_forcewake_count)
1203 intel_runtime_pm_driver_release(rpm);
1204
1205 return ret;
1206}
1207
1208int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
1209{
1210 int error;
1211
1212 if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
1213 state.event != PM_EVENT_FREEZE))
1214 return -EINVAL;
1215
1216 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1217 return 0;
1218
1219 error = i915_drm_suspend(&i915->drm);
1220 if (error)
1221 return error;
1222
1223 return i915_drm_suspend_late(&i915->drm, false);
1224}
1225
1226static int i915_drm_resume(struct drm_device *dev)
1227{
1228 struct drm_i915_private *dev_priv = to_i915(dev);
1229 int ret;
1230
1231 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1232
1233 sanitize_gpu(dev_priv);
1234
1235 ret = i915_ggtt_enable_hw(dev_priv);
1236 if (ret)
1237 drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
1238
1239 i915_ggtt_resume(&dev_priv->ggtt);
1240
1241 intel_dmc_ucode_resume(dev_priv);
1242
1243 i915_restore_display(dev_priv);
1244 intel_pps_unlock_regs_wa(dev_priv);
1245
1246 intel_init_pch_refclk(dev_priv);
1247
1248 /*
1249 * Interrupts have to be enabled before any batches are run. If not the
1250 * GPU will hang. i915_gem_init_hw() will initiate batches to
1251 * update/restore the context.
1252 *
1253 * drm_mode_config_reset() needs AUX interrupts.
1254 *
1255 * Modeset enabling in intel_modeset_init_hw() also needs working
1256 * interrupts.
1257 */
1258 intel_runtime_pm_enable_interrupts(dev_priv);
1259
1260 if (HAS_DISPLAY(dev_priv))
1261 drm_mode_config_reset(dev);
1262
1263 i915_gem_resume(dev_priv);
1264
1265 intel_modeset_init_hw(dev_priv);
1266 intel_init_clock_gating(dev_priv);
1267 intel_hpd_init(dev_priv);
1268
1269 /* MST sideband requires HPD interrupts enabled */
1270 intel_dp_mst_resume(dev_priv);
1271 intel_display_resume(dev);
1272
1273 intel_hpd_poll_disable(dev_priv);
1274 if (HAS_DISPLAY(dev_priv))
1275 drm_kms_helper_poll_enable(dev);
1276
1277 intel_opregion_resume(dev_priv);
1278
1279 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
1280
1281 intel_power_domains_enable(dev_priv);
1282
1283 intel_gvt_resume(dev_priv);
1284
1285 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1286
1287 return 0;
1288}
1289
1290static int i915_drm_resume_early(struct drm_device *dev)
1291{
1292 struct drm_i915_private *dev_priv = to_i915(dev);
1293 struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
1294 int ret;
1295
1296 /*
1297 * We have a resume ordering issue with the snd-hda driver also
1298 * requiring our device to be powered up. Due to the lack of a
1299 * parent/child relationship we currently solve this with an early
1300 * resume hook.
1301 *
1302 * FIXME: This should be solved with a special hdmi sink device or
1303 * similar so that power domains can be employed.
1304 */
1305
1306 /*
1307 * Note that we need to set the power state explicitly, since we
1308 * powered off the device during freeze and the PCI core won't power
1309 * it back up for us during thaw. Powering off the device during
1310 * freeze is not a hard requirement though, and during the
1311 * suspend/resume phases the PCI core makes sure we get here with the
1312 * device powered on. So in case we change our freeze logic and keep
1313 * the device powered we can also remove the following set power state
1314 * call.
1315 */
1316 ret = pci_set_power_state(pdev, PCI_D0);
1317 if (ret) {
1318 drm_err(&dev_priv->drm,
1319 "failed to set PCI D0 power state (%d)\n", ret);
1320 return ret;
1321 }
1322
1323 /*
1324 * Note that pci_enable_device() first enables any parent bridge
1325 * device and only then sets the power state for this device. The
1326 * bridge enabling is a nop though, since bridge devices are resumed
1327 * first. The order of enabling power and enabling the device is
1328 * imposed by the PCI core as described above, so here we preserve the
1329 * same order for the freeze/thaw phases.
1330 *
1331 * TODO: eventually we should remove pci_disable_device() /
1332 * pci_enable_device() from suspend/resume. Due to how they
1333 * depend on the device enable refcount we can't anyway depend on them
1334 * disabling/enabling the device.
1335 */
1336 if (pci_enable_device(pdev))
1337 return -EIO;
1338
1339 pci_set_master(pdev);
1340
1341 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1342
1343 ret = vlv_resume_prepare(dev_priv, false);
1344 if (ret)
1345 drm_err(&dev_priv->drm,
1346 "Resume prepare failed: %d, continuing anyway\n", ret);
1347
1348 intel_uncore_resume_early(&dev_priv->uncore);
1349
1350 intel_gt_check_and_clear_faults(&dev_priv->gt);
1351
1352 intel_display_power_resume_early(dev_priv);
1353
1354 intel_power_domains_resume(dev_priv);
1355
1356 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1357
1358 return ret;
1359}
1360
1361int i915_resume_switcheroo(struct drm_i915_private *i915)
1362{
1363 int ret;
1364
1365 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1366 return 0;
1367
1368 ret = i915_drm_resume_early(&i915->drm);
1369 if (ret)
1370 return ret;
1371
1372 return i915_drm_resume(&i915->drm);
1373}
1374
1375static int i915_pm_prepare(struct device *kdev)
1376{
1377 struct drm_i915_private *i915 = kdev_to_i915(kdev);
1378
1379 if (!i915) {
1380 dev_err(kdev, "DRM not initialized, aborting suspend.\n");
1381 return -ENODEV;
1382 }
1383
1384 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1385 return 0;
1386
1387 return i915_drm_prepare(&i915->drm);
1388}
1389
1390static int i915_pm_suspend(struct device *kdev)
1391{
1392 struct drm_i915_private *i915 = kdev_to_i915(kdev);
1393
1394 if (!i915) {
1395 dev_err(kdev, "DRM not initialized, aborting suspend.\n");
1396 return -ENODEV;
1397 }
1398
1399 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1400 return 0;
1401
1402 return i915_drm_suspend(&i915->drm);
1403}
1404
1405static int i915_pm_suspend_late(struct device *kdev)
1406{
1407 struct drm_i915_private *i915 = kdev_to_i915(kdev);
1408
1409 /*
1410 * We have a suspend ordering issue with the snd-hda driver also
1411 * requiring our device to be powered up. Due to the lack of a
1412 * parent/child relationship we currently solve this with a late
1413 * suspend hook.
1414 *
1415 * FIXME: This should be solved with a special hdmi sink device or
1416 * similar so that power domains can be employed.
1417 */
1418 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1419 return 0;
1420
1421 return i915_drm_suspend_late(&i915->drm, false);
1422}
1423
1424static int i915_pm_poweroff_late(struct device *kdev)
1425{
1426 struct drm_i915_private *i915 = kdev_to_i915(kdev);
1427
1428 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1429 return 0;
1430
1431 return i915_drm_suspend_late(&i915->drm, true);
1432}
1433
1434static int i915_pm_resume_early(struct device *kdev)
1435{
1436 struct drm_i915_private *i915 = kdev_to_i915(kdev);
1437
1438 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1439 return 0;
1440
1441 return i915_drm_resume_early(&i915->drm);
1442}
1443
1444static int i915_pm_resume(struct device *kdev)
1445{
1446 struct drm_i915_private *i915 = kdev_to_i915(kdev);
1447
1448 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
1449 return 0;
1450
1451 return i915_drm_resume(&i915->drm);
1452}
1453
1454/* freeze: before creating the hibernation image */
1455static int i915_pm_freeze(struct device *kdev)
1456{
1457 struct drm_i915_private *i915 = kdev_to_i915(kdev);
1458 int ret;
1459
1460 if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
1461 ret = i915_drm_suspend(&i915->drm);
1462 if (ret)
1463 return ret;
1464 }
1465
1466 ret = i915_gem_freeze(i915);
1467 if (ret)
1468 return ret;
1469
1470 return 0;
1471}
1472
1473static int i915_pm_freeze_late(struct device *kdev)
1474{
1475 struct drm_i915_private *i915 = kdev_to_i915(kdev);
1476 int ret;
1477
1478 if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
1479 ret = i915_drm_suspend_late(&i915->drm, true);
1480 if (ret)
1481 return ret;
1482 }
1483
1484 ret = i915_gem_freeze_late(i915);
1485 if (ret)
1486 return ret;
1487
1488 return 0;
1489}
1490
1491/* thaw: called after creating the hibernation image, but before turning off. */
1492static int i915_pm_thaw_early(struct device *kdev)
1493{
1494 return i915_pm_resume_early(kdev);
1495}
1496
1497static int i915_pm_thaw(struct device *kdev)
1498{
1499 return i915_pm_resume(kdev);
1500}
1501
1502/* restore: called after loading the hibernation image. */
1503static int i915_pm_restore_early(struct device *kdev)
1504{
1505 return i915_pm_resume_early(kdev);
1506}
1507
1508static int i915_pm_restore(struct device *kdev)
1509{
1510 return i915_pm_resume(kdev);
1511}
1512
1513static int intel_runtime_suspend(struct device *kdev)
1514{
1515 struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
1516 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
1517 int ret;
1518
1519 if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
1520 return -ENODEV;
1521
1522 drm_dbg_kms(&dev_priv->drm, "Suspending device\n");
1523
1524 disable_rpm_wakeref_asserts(rpm);
1525
1526 /*
1527 * We are safe here against re-faults, since the fault handler takes
1528 * an RPM reference.
1529 */
1530 i915_gem_runtime_suspend(dev_priv);
1531
1532 intel_gt_runtime_suspend(&dev_priv->gt);
1533
1534 intel_runtime_pm_disable_interrupts(dev_priv);
1535
1536 intel_uncore_suspend(&dev_priv->uncore);
1537
1538 intel_display_power_suspend(dev_priv);
1539
1540 ret = vlv_suspend_complete(dev_priv);
1541 if (ret) {
1542 drm_err(&dev_priv->drm,
1543 "Runtime suspend failed, disabling it (%d)\n", ret);
1544 intel_uncore_runtime_resume(&dev_priv->uncore);
1545
1546 intel_runtime_pm_enable_interrupts(dev_priv);
1547
1548 intel_gt_runtime_resume(&dev_priv->gt);
1549
1550 enable_rpm_wakeref_asserts(rpm);
1551
1552 return ret;
1553 }
1554
1555 enable_rpm_wakeref_asserts(rpm);
1556 intel_runtime_pm_driver_release(rpm);
1557
1558 if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
1559 drm_err(&dev_priv->drm,
1560 "Unclaimed access detected prior to suspending\n");
1561
1562 rpm->suspended = true;
1563
1564 /*
1565 * FIXME: We really should find a document that references the arguments
1566 * used below!
1567 */
1568 if (IS_BROADWELL(dev_priv)) {
1569 /*
1570 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
1571 * being detected, and the call we do at intel_runtime_resume()
1572 * won't be able to restore them. Since PCI_D3hot matches the
1573 * actual specification and appears to be working, use it.
1574 */
1575 intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
1576 } else {
1577 /*
1578	 * Current versions of firmware which depend on this opregion
1579 * notification have repurposed the D1 definition to mean
1580 * "runtime suspended" vs. what you would normally expect (D3)
1581 * to distinguish it from notifications that might be sent via
1582 * the suspend path.
1583 */
1584 intel_opregion_notify_adapter(dev_priv, PCI_D1);
1585 }
1586
1587 assert_forcewakes_inactive(&dev_priv->uncore);
1588
1589 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
1590 intel_hpd_poll_enable(dev_priv);
1591
1592 drm_dbg_kms(&dev_priv->drm, "Device suspended\n");
1593 return 0;
1594}
1595
1596static int intel_runtime_resume(struct device *kdev)
1597{
1598 struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
1599 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
1600 int ret;
1601
1602 if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
1603 return -ENODEV;
1604
1605 drm_dbg_kms(&dev_priv->drm, "Resuming device\n");
1606
1607 drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
1608 disable_rpm_wakeref_asserts(rpm);
1609
1610 intel_opregion_notify_adapter(dev_priv, PCI_D0);
1611 rpm->suspended = false;
1612 if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
1613 drm_dbg(&dev_priv->drm,
1614 "Unclaimed access during suspend, bios?\n");
1615
1616 intel_display_power_resume(dev_priv);
1617
1618 ret = vlv_resume_prepare(dev_priv, true);
1619
1620 intel_uncore_runtime_resume(&dev_priv->uncore);
1621
1622 intel_runtime_pm_enable_interrupts(dev_priv);
1623
1624 /*
1625	 * No point in rolling back things in case of an error, as the best
1626 * we can do is to hope that things will still work (and disable RPM).
1627 */
1628 intel_gt_runtime_resume(&dev_priv->gt);
1629
1630 /*
1631 * On VLV/CHV display interrupts are part of the display
1632 * power well, so hpd is reinitialized from there. For
1633 * everyone else do it here.
1634 */
1635 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
1636 intel_hpd_init(dev_priv);
1637 intel_hpd_poll_disable(dev_priv);
1638 }
1639
1640 intel_enable_ipc(dev_priv);
1641
1642 enable_rpm_wakeref_asserts(rpm);
1643
1644 if (ret)
1645 drm_err(&dev_priv->drm,
1646 "Runtime resume failed, disabling it (%d)\n", ret);
1647 else
1648 drm_dbg_kms(&dev_priv->drm, "Device resumed\n");
1649
1650 return ret;
1651}
1652
1653const struct dev_pm_ops i915_pm_ops = {
1654 /*
1655 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
1656 * PMSG_RESUME]
1657 */
1658 .prepare = i915_pm_prepare,
1659 .suspend = i915_pm_suspend,
1660 .suspend_late = i915_pm_suspend_late,
1661 .resume_early = i915_pm_resume_early,
1662 .resume = i915_pm_resume,
1663
1664 /*
1665 * S4 event handlers
1666 * @freeze, @freeze_late : called (1) before creating the
1667 * hibernation image [PMSG_FREEZE] and
1668 * (2) after rebooting, before restoring
1669 * the image [PMSG_QUIESCE]
1670 * @thaw, @thaw_early : called (1) after creating the hibernation
1671 * image, before writing it [PMSG_THAW]
1672 * and (2) after failing to create or
1673 * restore the image [PMSG_RECOVER]
1674 * @poweroff, @poweroff_late: called after writing the hibernation
1675 * image, before rebooting [PMSG_HIBERNATE]
1676 * @restore, @restore_early : called after rebooting and restoring the
1677 * hibernation image [PMSG_RESTORE]
1678 */
1679 .freeze = i915_pm_freeze,
1680 .freeze_late = i915_pm_freeze_late,
1681 .thaw_early = i915_pm_thaw_early,
1682 .thaw = i915_pm_thaw,
1683 .poweroff = i915_pm_suspend,
1684 .poweroff_late = i915_pm_poweroff_late,
1685 .restore_early = i915_pm_restore_early,
1686 .restore = i915_pm_restore,
1687
1688 /* S0ix (via runtime suspend) event handlers */
1689 .runtime_suspend = intel_runtime_suspend,
1690 .runtime_resume = intel_runtime_resume,
1691};
1692
1693static const struct file_operations i915_driver_fops = {
1694 .owner = THIS_MODULE,
1695 .open = drm_open,
1696 .release = drm_release_noglobal,
1697 .unlocked_ioctl = drm_ioctl,
1698 .mmap = i915_gem_mmap,
1699 .poll = drm_poll,
1700 .read = drm_read,
1701 .compat_ioctl = i915_ioc32_compat_ioctl,
1702 .llseek = noop_llseek,
1703};
1704
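/* The legacy GEM pin/unpin ioctls are no longer supported; always reject. */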
1705static int
1706i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
1707 struct drm_file *file)
1708{
1709 return -ENODEV;
1710}
1711
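/*
 * Illustrative only (not part of this driver): userspace normally reaches
 * these entry points through libdrm, e.g. roughly:
 *
 *	int chipset_id = 0;
 *	struct drm_i915_getparam gp = { .param = I915_PARAM_CHIPSET_ID,
 *					.value = &chipset_id };
 *	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 *
 * which dispatches to i915_getparam_ioctl() via the table below.
 */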
1712static const struct drm_ioctl_desc i915_ioctls[] = {
1713 DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1714 DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
1715 DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
1716 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
1717 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
1718 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
1719 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
1720 DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1721 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
1722 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
1723 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1724 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
1725 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1726 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1727 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
1728 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
1729 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1730 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1731 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, drm_invalid_op, DRM_AUTH),
1732 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
1733 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
1734 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
1735 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
1736 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
1737 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
1738 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
1739 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1740 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1741 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
1742 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE_EXT, i915_gem_create_ext_ioctl, DRM_RENDER_ALLOW),
1743 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
1744 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
1745 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
1746 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW),
1747 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
1748 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
1749 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
1750 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
1751 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
1752 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
1753 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
1754 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
1755 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
1756 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
1757 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
1758 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
1759 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
1760 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
1761 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
1762 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
1763 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
1764 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
1765 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
1766 DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
1767 DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
1768 DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
1769 DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
1770 DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
1771 DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
1772};
1773
1774static const struct drm_driver driver = {
1775 /* Don't use MTRRs here; the Xserver or userspace app should
1776 * deal with them for Intel hardware.
1777 */
1778 .driver_features =
1779 DRIVER_GEM |
1780 DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ |
1781 DRIVER_SYNCOBJ_TIMELINE,
1782 .release = i915_driver_release,
1783 .open = i915_driver_open,
1784 .lastclose = i915_driver_lastclose,
1785 .postclose = i915_driver_postclose,
1786
1787 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1788 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1789 .gem_prime_import = i915_gem_prime_import,
1790
1791 .dumb_create = i915_gem_dumb_create,
1792 .dumb_map_offset = i915_gem_dumb_mmap_offset,
1793
1794 .ioctls = i915_ioctls,
1795 .num_ioctls = ARRAY_SIZE(i915_ioctls),
1796 .fops = &i915_driver_fops,
1797 .name = DRIVER_NAME,
1798 .desc = DRIVER_DESC,
1799 .date = DRIVER_DATE,
1800 .major = DRIVER_MAJOR,
1801 .minor = DRIVER_MINOR,
1802 .patchlevel = DRIVER_PATCHLEVEL,
1803};