1/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2 */
3/*
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include <drm/drmP.h>
32#include <drm/drm_crtc_helper.h>
33#include <drm/drm_fb_helper.h>
34#include <drm/drm_legacy.h>
35#include "intel_drv.h"
36#include <drm/i915_drm.h>
37#include "i915_drv.h"
38#include "i915_vgpu.h"
39#include "i915_trace.h"
40#include <linux/pci.h>
41#include <linux/console.h>
42#include <linux/vt.h>
43#include <linux/vgaarb.h>
44#include <linux/acpi.h>
45#include <linux/pnp.h>
46#include <linux/vga_switcheroo.h>
47#include <linux/slab.h>
48#include <acpi/video.h>
49#include <linux/pm.h>
50#include <linux/pm_runtime.h>
51#include <linux/oom.h>
52
53
54static int i915_getparam(struct drm_device *dev, void *data,
55 struct drm_file *file_priv)
56{
57 struct drm_i915_private *dev_priv = dev->dev_private;
58 drm_i915_getparam_t *param = data;
59 int value;
60
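	/*
	 * I915_GETPARAM ioctl: userspace (e.g. libdrm/Mesa) queries device and
	 * driver capabilities; the result is copied back through the user
	 * pointer in param->value below.
	 */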
61 switch (param->param) {
62 case I915_PARAM_IRQ_ACTIVE:
63 case I915_PARAM_ALLOW_BATCHBUFFER:
64 case I915_PARAM_LAST_DISPATCH:
65 /* Reject all old ums/dri params. */
66 return -ENODEV;
67 case I915_PARAM_CHIPSET_ID:
68 value = dev->pdev->device;
69 break;
70 case I915_PARAM_REVISION:
71 value = dev->pdev->revision;
72 break;
73 case I915_PARAM_HAS_GEM:
74 value = 1;
75 break;
76 case I915_PARAM_NUM_FENCES_AVAIL:
77 value = dev_priv->num_fence_regs;
78 break;
79 case I915_PARAM_HAS_OVERLAY:
80 value = dev_priv->overlay ? 1 : 0;
81 break;
82 case I915_PARAM_HAS_PAGEFLIPPING:
83 value = 1;
84 break;
85 case I915_PARAM_HAS_EXECBUF2:
86 /* depends on GEM */
87 value = 1;
88 break;
89 case I915_PARAM_HAS_BSD:
90 value = intel_ring_initialized(&dev_priv->ring[VCS]);
91 break;
92 case I915_PARAM_HAS_BLT:
93 value = intel_ring_initialized(&dev_priv->ring[BCS]);
94 break;
95 case I915_PARAM_HAS_VEBOX:
96 value = intel_ring_initialized(&dev_priv->ring[VECS]);
97 break;
98 case I915_PARAM_HAS_BSD2:
99 value = intel_ring_initialized(&dev_priv->ring[VCS2]);
100 break;
101 case I915_PARAM_HAS_RELAXED_FENCING:
102 value = 1;
103 break;
104 case I915_PARAM_HAS_COHERENT_RINGS:
105 value = 1;
106 break;
107 case I915_PARAM_HAS_EXEC_CONSTANTS:
108 value = INTEL_INFO(dev)->gen >= 4;
109 break;
110 case I915_PARAM_HAS_RELAXED_DELTA:
111 value = 1;
112 break;
113 case I915_PARAM_HAS_GEN7_SOL_RESET:
114 value = 1;
115 break;
116 case I915_PARAM_HAS_LLC:
117 value = HAS_LLC(dev);
118 break;
119 case I915_PARAM_HAS_WT:
120 value = HAS_WT(dev);
121 break;
122 case I915_PARAM_HAS_ALIASING_PPGTT:
123 value = USES_PPGTT(dev);
124 break;
125 case I915_PARAM_HAS_WAIT_TIMEOUT:
126 value = 1;
127 break;
128 case I915_PARAM_HAS_SEMAPHORES:
129 value = i915_semaphore_is_enabled(dev);
130 break;
131 case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
132 value = 1;
133 break;
134 case I915_PARAM_HAS_SECURE_BATCHES:
135 value = capable(CAP_SYS_ADMIN);
136 break;
137 case I915_PARAM_HAS_PINNED_BATCHES:
138 value = 1;
139 break;
140 case I915_PARAM_HAS_EXEC_NO_RELOC:
141 value = 1;
142 break;
143 case I915_PARAM_HAS_EXEC_HANDLE_LUT:
144 value = 1;
145 break;
146 case I915_PARAM_CMD_PARSER_VERSION:
147 value = i915_cmd_parser_get_version();
148 break;
149 case I915_PARAM_HAS_COHERENT_PHYS_GTT:
150 value = 1;
151 break;
152 case I915_PARAM_MMAP_VERSION:
153 value = 1;
154 break;
155 case I915_PARAM_SUBSLICE_TOTAL:
156 value = INTEL_INFO(dev)->subslice_total;
157 if (!value)
158 return -ENODEV;
159 break;
160 case I915_PARAM_EU_TOTAL:
161 value = INTEL_INFO(dev)->eu_total;
162 if (!value)
163 return -ENODEV;
164 break;
165 case I915_PARAM_HAS_GPU_RESET:
166 value = i915.enable_hangcheck &&
167 intel_has_gpu_reset(dev);
168 break;
169 case I915_PARAM_HAS_RESOURCE_STREAMER:
170 value = HAS_RESOURCE_STREAMER(dev);
171 break;
172 case I915_PARAM_HAS_EXEC_SOFTPIN:
173 value = 1;
174 break;
175 default:
176 DRM_DEBUG("Unknown parameter %d\n", param->param);
177 return -EINVAL;
178 }
179
180 if (copy_to_user(param->value, &value, sizeof(int))) {
181 DRM_ERROR("copy_to_user failed\n");
182 return -EFAULT;
183 }
184
185 return 0;
186}
187
188static int i915_get_bridge_dev(struct drm_device *dev)
189{
190 struct drm_i915_private *dev_priv = dev->dev_private;
191
192 dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
193 if (!dev_priv->bridge_dev) {
194 DRM_ERROR("bridge device not found\n");
195 return -1;
196 }
197 return 0;
198}
199
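/*
 * MCHBAR is the memory controller hub's register BAR, exposed through the
 * host bridge's PCI config space: offset 0x44 on gen2/3, 0x48 on gen4+,
 * and gated by DEVEN bit 28 on 915G/GM.
 */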
200#define MCHBAR_I915 0x44
201#define MCHBAR_I965 0x48
202#define MCHBAR_SIZE (4*4096)
203
204#define DEVEN_REG 0x54
205#define DEVEN_MCHBAR_EN (1 << 28)
206
207/* Allocate space for the MCH regs if needed, return nonzero on error */
208static int
209intel_alloc_mchbar_resource(struct drm_device *dev)
210{
211 struct drm_i915_private *dev_priv = dev->dev_private;
212 int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
213 u32 temp_lo, temp_hi = 0;
214 u64 mchbar_addr;
215 int ret;
216
217 if (INTEL_INFO(dev)->gen >= 4)
218 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
219 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
220 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
221
222 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
223#ifdef CONFIG_PNP
224 if (mchbar_addr &&
225 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
226 return 0;
227#endif
228
229 /* Get some space for it */
230 dev_priv->mch_res.name = "i915 MCHBAR";
231 dev_priv->mch_res.flags = IORESOURCE_MEM;
232 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
233 &dev_priv->mch_res,
234 MCHBAR_SIZE, MCHBAR_SIZE,
235 PCIBIOS_MIN_MEM,
236 0, pcibios_align_resource,
237 dev_priv->bridge_dev);
238 if (ret) {
239 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
240 dev_priv->mch_res.start = 0;
241 return ret;
242 }
243
244 if (INTEL_INFO(dev)->gen >= 4)
245 pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
246 upper_32_bits(dev_priv->mch_res.start));
247
248 pci_write_config_dword(dev_priv->bridge_dev, reg,
249 lower_32_bits(dev_priv->mch_res.start));
250 return 0;
251}
252
253/* Setup MCHBAR if possible; remember whether we need to disable it again on teardown */
254static void
255intel_setup_mchbar(struct drm_device *dev)
256{
257 struct drm_i915_private *dev_priv = dev->dev_private;
258 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
259 u32 temp;
260 bool enabled;
261
262 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
263 return;
264
265 dev_priv->mchbar_need_disable = false;
266
267 if (IS_I915G(dev) || IS_I915GM(dev)) {
268 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
269 enabled = !!(temp & DEVEN_MCHBAR_EN);
270 } else {
271 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
272 enabled = temp & 1;
273 }
274
275 /* If it's already enabled, don't have to do anything */
276 if (enabled)
277 return;
278
279 if (intel_alloc_mchbar_resource(dev))
280 return;
281
282 dev_priv->mchbar_need_disable = true;
283
284 /* Space is allocated or reserved, so enable it. */
285 if (IS_I915G(dev) || IS_I915GM(dev)) {
286 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
287 temp | DEVEN_MCHBAR_EN);
288 } else {
289 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
290 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
291 }
292}
293
294static void
295intel_teardown_mchbar(struct drm_device *dev)
296{
297 struct drm_i915_private *dev_priv = dev->dev_private;
298 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
299 u32 temp;
300
301 if (dev_priv->mchbar_need_disable) {
302 if (IS_I915G(dev) || IS_I915GM(dev)) {
303 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
304 temp &= ~DEVEN_MCHBAR_EN;
305 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
306 } else {
307 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
308 temp &= ~1;
309 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
310 }
311 }
312
313 if (dev_priv->mch_res.start)
314 release_resource(&dev_priv->mch_res);
315}
316
317/* true = enable decode, false = disable decode */
318static unsigned int i915_vga_set_decode(void *cookie, bool state)
319{
320 struct drm_device *dev = cookie;
321
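	/*
	 * Tell the display code whether legacy VGA decoding is enabled, then
	 * report back to vgaarb which VGA resources this device still decodes.
	 */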
322 intel_modeset_vga_set_state(dev, state);
323 if (state)
324 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
325 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
326 else
327 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
328}
329
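/*
 * vga_switcheroo callbacks for hybrid-graphics laptops: resume the device
 * when the mux switches to the integrated GPU, suspend it when the mux
 * switches away.
 */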
330static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
331{
332 struct drm_device *dev = pci_get_drvdata(pdev);
333 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
334
335 if (state == VGA_SWITCHEROO_ON) {
336 pr_info("switched on\n");
337 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
338 /* i915 resume handler doesn't set to D0 */
339 pci_set_power_state(dev->pdev, PCI_D0);
340 i915_resume_switcheroo(dev);
341 dev->switch_power_state = DRM_SWITCH_POWER_ON;
342 } else {
343 pr_info("switched off\n");
344 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
345 i915_suspend_switcheroo(dev, pmm);
346 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
347 }
348}
349
350static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
351{
352 struct drm_device *dev = pci_get_drvdata(pdev);
353
354 /*
355 * FIXME: open_count is protected by drm_global_mutex but that would lead to
356 * locking inversion with the driver load path. And the access here is
357 * completely racy anyway. So don't bother with locking for now.
358 */
359 return dev->open_count == 0;
360}
361
362static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
363 .set_gpu_state = i915_switcheroo_set_state,
364 .reprobe = NULL,
365 .can_switch = i915_switcheroo_can_switch,
366};
367
368static int i915_load_modeset_init(struct drm_device *dev)
369{
370 struct drm_i915_private *dev_priv = dev->dev_private;
371 int ret;
372
373 ret = intel_bios_init(dev_priv);
374 if (ret)
375 DRM_INFO("failed to find VBIOS tables\n");
376
377	/* If we have more than one VGA card, then we need to arbitrate access
378 * to the common VGA resources.
379 *
380 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
381 * then we do not take part in VGA arbitration and the
382 * vga_client_register() fails with -ENODEV.
383 */
384 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
385 if (ret && ret != -ENODEV)
386 goto out;
387
388 intel_register_dsm_handler();
389
390 ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
391 if (ret)
392 goto cleanup_vga_client;
393
394 intel_power_domains_init_hw(dev_priv, false);
395
396 intel_csr_ucode_init(dev_priv);
397
398 ret = intel_irq_install(dev_priv);
399 if (ret)
400 goto cleanup_csr;
401
402 intel_setup_gmbus(dev);
403
404 /* Important: The output setup functions called by modeset_init need
405 * working irqs for e.g. gmbus and dp aux transfers. */
406 intel_modeset_init(dev);
407
408 intel_guc_ucode_init(dev);
409
410 ret = i915_gem_init(dev);
411 if (ret)
412 goto cleanup_irq;
413
414 intel_modeset_gem_init(dev);
415
416 /* Always safe in the mode setting case. */
417 /* FIXME: do pre/post-mode set stuff in core KMS code */
418 dev->vblank_disable_allowed = true;
419 if (INTEL_INFO(dev)->num_pipes == 0)
420 return 0;
421
422 ret = intel_fbdev_init(dev);
423 if (ret)
424 goto cleanup_gem;
425
426 /* Only enable hotplug handling once the fbdev is fully set up. */
427 intel_hpd_init(dev_priv);
428
429 /*
430	 * Some ports require correctly set-up hpd registers for detection to
431	 * work properly (otherwise we get a ghost 'connected' connector status),
432	 * e.g. VGA on gm45. Hence we can only set up the initial fbdev config
433	 * after hpd irqs are fully enabled. Now we should scan for the initial
434	 * config only once hotplug handling is enabled, but due to screwed-up
435	 * locking around kms/fbdev init we can't protect the fbdev initial
436	 * config scanning against hotplug events. Hence do this first and
437	 * ignore the tiny window where we will lose hotplug notifications.
438 */
439 intel_fbdev_initial_config_async(dev);
440
441 drm_kms_helper_poll_init(dev);
442
443 return 0;
444
445cleanup_gem:
446 mutex_lock(&dev->struct_mutex);
447 i915_gem_cleanup_ringbuffer(dev);
448 i915_gem_context_fini(dev);
449 mutex_unlock(&dev->struct_mutex);
450cleanup_irq:
451 intel_guc_ucode_fini(dev);
452 drm_irq_uninstall(dev);
453 intel_teardown_gmbus(dev);
454cleanup_csr:
455 intel_csr_ucode_fini(dev_priv);
456 vga_switcheroo_unregister_client(dev->pdev);
457cleanup_vga_client:
458 vga_client_register(dev->pdev, NULL, NULL, NULL);
459out:
460 return ret;
461}
462
463#if IS_ENABLED(CONFIG_FB)
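/*
 * A firmware framebuffer (e.g. efifb or vesafb) may still be scanning out of
 * our aperture: describe the range we are about to claim and ask the fbdev
 * core to evict any conflicting driver before we take over the hardware.
 */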
464static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
465{
466 struct apertures_struct *ap;
467 struct pci_dev *pdev = dev_priv->dev->pdev;
468 bool primary;
469 int ret;
470
471 ap = alloc_apertures(1);
472 if (!ap)
473 return -ENOMEM;
474
475 ap->ranges[0].base = dev_priv->gtt.mappable_base;
476 ap->ranges[0].size = dev_priv->gtt.mappable_end;
477
478 primary =
479 pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
480
481 ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
482
483 kfree(ap);
484
485 return ret;
486}
487#else
488static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
489{
490 return 0;
491}
492#endif
493
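/*
 * vgacon keeps poking at VGA registers behind our back, so hand the console
 * over to dummycon before modeset. Without CONFIG_VGA_CONSOLE there is
 * nothing to kick out; without CONFIG_DUMMY_CONSOLE there is nothing to
 * replace it with, so we have to give up.
 */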
494#if !defined(CONFIG_VGA_CONSOLE)
495static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
496{
497 return 0;
498}
499#elif !defined(CONFIG_DUMMY_CONSOLE)
500static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
501{
502 return -ENODEV;
503}
504#else
505static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
506{
507 int ret = 0;
508
509 DRM_INFO("Replacing VGA console driver\n");
510
511 console_lock();
512 if (con_is_bound(&vga_con))
513 ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
514 if (ret == 0) {
515 ret = do_unregister_con_driver(&vga_con);
516
517 /* Ignore "already unregistered". */
518 if (ret == -ENODEV)
519 ret = 0;
520 }
521 console_unlock();
522
523 return ret;
524}
525#endif
526
527static void i915_dump_device_info(struct drm_i915_private *dev_priv)
528{
529 const struct intel_device_info *info = &dev_priv->info;
530
531#define PRINT_S(name) "%s"
532#define SEP_EMPTY
533#define PRINT_FLAG(name) info->name ? #name "," : ""
534#define SEP_COMMA ,
535 DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
536 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
537 info->gen,
538 dev_priv->dev->pdev->device,
539 dev_priv->dev->pdev->revision,
540 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
541#undef PRINT_S
542#undef SEP_EMPTY
543#undef PRINT_FLAG
544#undef SEP_COMMA
545}
546
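/*
 * The *_sseu_info_init() helpers below read the fuse registers to determine
 * the enabled slice/subslice/EU (SSEU) topology and which power-gating modes
 * this particular part supports.
 */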
547static void cherryview_sseu_info_init(struct drm_device *dev)
548{
549 struct drm_i915_private *dev_priv = dev->dev_private;
550 struct intel_device_info *info;
551 u32 fuse, eu_dis;
552
553 info = (struct intel_device_info *)&dev_priv->info;
554 fuse = I915_READ(CHV_FUSE_GT);
555
556 info->slice_total = 1;
557
558 if (!(fuse & CHV_FGT_DISABLE_SS0)) {
559 info->subslice_per_slice++;
560 eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
561 CHV_FGT_EU_DIS_SS0_R1_MASK);
562 info->eu_total += 8 - hweight32(eu_dis);
563 }
564
565 if (!(fuse & CHV_FGT_DISABLE_SS1)) {
566 info->subslice_per_slice++;
567 eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
568 CHV_FGT_EU_DIS_SS1_R1_MASK);
569 info->eu_total += 8 - hweight32(eu_dis);
570 }
571
572 info->subslice_total = info->subslice_per_slice;
573 /*
574	 * CHV is expected to always have a uniform distribution of EU
575 * across subslices.
576 */
577 info->eu_per_subslice = info->subslice_total ?
578 info->eu_total / info->subslice_total :
579 0;
580 /*
581 * CHV supports subslice power gating on devices with more than
582 * one subslice, and supports EU power gating on devices with
583 * more than one EU pair per subslice.
584 */
585 info->has_slice_pg = 0;
586 info->has_subslice_pg = (info->subslice_total > 1);
587 info->has_eu_pg = (info->eu_per_subslice > 2);
588}
589
590static void gen9_sseu_info_init(struct drm_device *dev)
591{
592 struct drm_i915_private *dev_priv = dev->dev_private;
593 struct intel_device_info *info;
594 int s_max = 3, ss_max = 4, eu_max = 8;
595 int s, ss;
596 u32 fuse2, s_enable, ss_disable, eu_disable;
597 u8 eu_mask = 0xff;
598
599 info = (struct intel_device_info *)&dev_priv->info;
600 fuse2 = I915_READ(GEN8_FUSE2);
601 s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
602 GEN8_F2_S_ENA_SHIFT;
603 ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
604 GEN9_F2_SS_DIS_SHIFT;
605
606 info->slice_total = hweight32(s_enable);
607 /*
608 * The subslice disable field is global, i.e. it applies
609 * to each of the enabled slices.
610 */
611 info->subslice_per_slice = ss_max - hweight32(ss_disable);
612 info->subslice_total = info->slice_total *
613 info->subslice_per_slice;
614
615 /*
616 * Iterate through enabled slices and subslices to
617 * count the total enabled EU.
618 */
619 for (s = 0; s < s_max; s++) {
620 if (!(s_enable & (0x1 << s)))
621 /* skip disabled slice */
622 continue;
623
624 eu_disable = I915_READ(GEN9_EU_DISABLE(s));
625 for (ss = 0; ss < ss_max; ss++) {
626 int eu_per_ss;
627
628 if (ss_disable & (0x1 << ss))
629 /* skip disabled subslice */
630 continue;
631
632 eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
633 eu_mask);
634
635 /*
636	 * Record which subslice(s) have 7 EUs. We
637	 * can tune the hash used to spread work among
638	 * subslices if they are unbalanced.
639 */
640 if (eu_per_ss == 7)
641 info->subslice_7eu[s] |= 1 << ss;
642
643 info->eu_total += eu_per_ss;
644 }
645 }
646
647 /*
648 * SKL is expected to always have a uniform distribution
649 * of EU across subslices with the exception that any one
650 * EU in any one subslice may be fused off for die
651 * recovery. BXT is expected to be perfectly uniform in EU
652 * distribution.
653 */
654 info->eu_per_subslice = info->subslice_total ?
655 DIV_ROUND_UP(info->eu_total,
656 info->subslice_total) : 0;
657 /*
658 * SKL supports slice power gating on devices with more than
659 * one slice, and supports EU power gating on devices with
660 * more than one EU pair per subslice. BXT supports subslice
661 * power gating on devices with more than one subslice, and
662 * supports EU power gating on devices with more than one EU
663 * pair per subslice.
664 */
665 info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
666 (info->slice_total > 1));
667 info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
668 info->has_eu_pg = (info->eu_per_subslice > 2);
669}
670
671static void broadwell_sseu_info_init(struct drm_device *dev)
672{
673 struct drm_i915_private *dev_priv = dev->dev_private;
674 struct intel_device_info *info;
675 const int s_max = 3, ss_max = 3, eu_max = 8;
676 int s, ss;
677 u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
678
679 fuse2 = I915_READ(GEN8_FUSE2);
680 s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
681 ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
682
683 eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
684 eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
685 ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
686 (32 - GEN8_EU_DIS0_S1_SHIFT));
687 eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
688 ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
689 (32 - GEN8_EU_DIS1_S2_SHIFT));
690
691
692 info = (struct intel_device_info *)&dev_priv->info;
693 info->slice_total = hweight32(s_enable);
694
695 /*
696 * The subslice disable field is global, i.e. it applies
697 * to each of the enabled slices.
698 */
699 info->subslice_per_slice = ss_max - hweight32(ss_disable);
700 info->subslice_total = info->slice_total * info->subslice_per_slice;
701
702 /*
703 * Iterate through enabled slices and subslices to
704 * count the total enabled EU.
705 */
706 for (s = 0; s < s_max; s++) {
707 if (!(s_enable & (0x1 << s)))
708 /* skip disabled slice */
709 continue;
710
711 for (ss = 0; ss < ss_max; ss++) {
712 u32 n_disabled;
713
714 if (ss_disable & (0x1 << ss))
715 /* skip disabled subslice */
716 continue;
717
718 n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
719
720 /*
721 * Record which subslices have 7 EUs.
722 */
723 if (eu_max - n_disabled == 7)
724 info->subslice_7eu[s] |= 1 << ss;
725
726 info->eu_total += eu_max - n_disabled;
727 }
728 }
729
730 /*
731 * BDW is expected to always have a uniform distribution of EU across
732 * subslices with the exception that any one EU in any one subslice may
733 * be fused off for die recovery.
734 */
735 info->eu_per_subslice = info->subslice_total ?
736 DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
737
738 /*
739 * BDW supports slice power gating on devices with more than
740 * one slice.
741 */
742 info->has_slice_pg = (info->slice_total > 1);
743 info->has_subslice_pg = 0;
744 info->has_eu_pg = 0;
745}
746
747/*
748 * Determine various intel_device_info fields at runtime.
749 *
750 * Use it when either:
751 * - it's judged too laborious to fill n static structures with the limit
752 * when a simple if statement does the job,
753 * - run-time checks (e.g. reading fuse/strap registers) are needed.
754 *
755 * This function needs to be called:
756 * - after the MMIO has been setup as we are reading registers,
757 * - after the PCH has been detected,
758 * - before the first usage of the fields it can tweak.
759 */
760static void intel_device_info_runtime_init(struct drm_device *dev)
761{
762 struct drm_i915_private *dev_priv = dev->dev_private;
763 struct intel_device_info *info;
764 enum pipe pipe;
765
766 info = (struct intel_device_info *)&dev_priv->info;
767
768 /*
769 * Skylake and Broxton currently don't expose the topmost plane as its
770 * use is exclusive with the legacy cursor and we only want to expose
771 * one of those, not both. Until we can safely expose the topmost plane
772 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
773 * we don't expose the topmost plane at all to prevent ABI breakage
774 * down the line.
775 */
776 if (IS_BROXTON(dev)) {
777 info->num_sprites[PIPE_A] = 2;
778 info->num_sprites[PIPE_B] = 2;
779 info->num_sprites[PIPE_C] = 1;
780 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
781 for_each_pipe(dev_priv, pipe)
782 info->num_sprites[pipe] = 2;
783 else
784 for_each_pipe(dev_priv, pipe)
785 info->num_sprites[pipe] = 1;
786
787 if (i915.disable_display) {
788 DRM_INFO("Display disabled (module parameter)\n");
789 info->num_pipes = 0;
790 } else if (info->num_pipes > 0 &&
791 (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
792 HAS_PCH_SPLIT(dev)) {
793 u32 fuse_strap = I915_READ(FUSE_STRAP);
794 u32 sfuse_strap = I915_READ(SFUSE_STRAP);
795
796 /*
797 * SFUSE_STRAP is supposed to have a bit signalling the display
798 * is fused off. Unfortunately it seems that, at least in
799 * certain cases, fused off display means that PCH display
800 * reads don't land anywhere. In that case, we read 0s.
801 *
802 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
803 * should be set when taking over after the firmware.
804 */
805 if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
806 sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
807 (dev_priv->pch_type == PCH_CPT &&
808 !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
809 DRM_INFO("Display fused off, disabling\n");
810 info->num_pipes = 0;
811 } else if (fuse_strap & IVB_PIPE_C_DISABLE) {
812 DRM_INFO("PipeC fused off\n");
813 info->num_pipes -= 1;
814 }
815 } else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) {
816 u32 dfsm = I915_READ(SKL_DFSM);
817 u8 disabled_mask = 0;
818 bool invalid;
819 int num_bits;
820
821 if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
822 disabled_mask |= BIT(PIPE_A);
823 if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
824 disabled_mask |= BIT(PIPE_B);
825 if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
826 disabled_mask |= BIT(PIPE_C);
827
828 num_bits = hweight8(disabled_mask);
829
830 switch (disabled_mask) {
831 case BIT(PIPE_A):
832 case BIT(PIPE_B):
833 case BIT(PIPE_A) | BIT(PIPE_B):
834 case BIT(PIPE_A) | BIT(PIPE_C):
835 invalid = true;
836 break;
837 default:
838 invalid = false;
839 }
840
841 if (num_bits > info->num_pipes || invalid)
842 DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
843 disabled_mask);
844 else
845 info->num_pipes -= num_bits;
846 }
847
848 /* Initialize slice/subslice/EU info */
849 if (IS_CHERRYVIEW(dev))
850 cherryview_sseu_info_init(dev);
851 else if (IS_BROADWELL(dev))
852 broadwell_sseu_info_init(dev);
853 else if (INTEL_INFO(dev)->gen >= 9)
854 gen9_sseu_info_init(dev);
855
856 DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
857 DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
858 DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
859 DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
860 DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
861 DRM_DEBUG_DRIVER("has slice power gating: %s\n",
862 info->has_slice_pg ? "y" : "n");
863 DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
864 info->has_subslice_pg ? "y" : "n");
865 DRM_DEBUG_DRIVER("has EU power gating: %s\n",
866 info->has_eu_pg ? "y" : "n");
867}
868
869static void intel_init_dpio(struct drm_i915_private *dev_priv)
870{
871 /*
872 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
873 * CHV x1 PHY (DP/HDMI D)
874 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
875 */
876 if (IS_CHERRYVIEW(dev_priv)) {
877 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
878 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
879 } else if (IS_VALLEYVIEW(dev_priv)) {
880 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
881 }
882}
883
884static int i915_workqueues_init(struct drm_i915_private *dev_priv)
885{
886 /*
887 * The i915 workqueue is primarily used for batched retirement of
888 * requests (and thus managing bo) once the task has been completed
889 * by the GPU. i915_gem_retire_requests() is called directly when we
890 * need high-priority retirement, such as waiting for an explicit
891 * bo.
892 *
893 * It is also used for periodic low-priority events, such as
894 * idle-timers and recording error state.
895 *
896 * All tasks on the workqueue are expected to acquire the dev mutex
897 * so there is no point in running more than one instance of the
898 * workqueue at any time. Use an ordered one.
899 */
900 dev_priv->wq = alloc_ordered_workqueue("i915", 0);
901 if (dev_priv->wq == NULL)
902 goto out_err;
903
904 dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
905 if (dev_priv->hotplug.dp_wq == NULL)
906 goto out_free_wq;
907
908 dev_priv->gpu_error.hangcheck_wq =
909 alloc_ordered_workqueue("i915-hangcheck", 0);
910 if (dev_priv->gpu_error.hangcheck_wq == NULL)
911 goto out_free_dp_wq;
912
913 return 0;
914
915out_free_dp_wq:
916 destroy_workqueue(dev_priv->hotplug.dp_wq);
917out_free_wq:
918 destroy_workqueue(dev_priv->wq);
919out_err:
920 DRM_ERROR("Failed to allocate workqueues.\n");
921
922 return -ENOMEM;
923}
924
925static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
926{
927 destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
928 destroy_workqueue(dev_priv->hotplug.dp_wq);
929 destroy_workqueue(dev_priv->wq);
930}
931
932static int i915_mmio_setup(struct drm_device *dev)
933{
934 struct drm_i915_private *dev_priv = to_i915(dev);
935 int mmio_bar;
936 int mmio_size;
937
938 mmio_bar = IS_GEN2(dev) ? 1 : 0;
939 /*
940 * Before gen4, the registers and the GTT are behind different BARs.
941 * However, from gen4 onwards, the registers and the GTT are shared
942	 * in the same BAR, so we restrict this ioremap to the register range
943	 * to avoid clobbering the GTT, which we map with ioremap_wc instead. Fortunately,
944 * the register BAR remains the same size for all the earlier
945 * generations up to Ironlake.
946 */
947 if (INTEL_INFO(dev)->gen < 5)
948 mmio_size = 512 * 1024;
949 else
950 mmio_size = 2 * 1024 * 1024;
951 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
952 if (dev_priv->regs == NULL) {
953 DRM_ERROR("failed to map registers\n");
954
955 return -EIO;
956 }
957
958 /* Try to make sure MCHBAR is enabled before poking at it */
959 intel_setup_mchbar(dev);
960
961 return 0;
962}
963
964static void i915_mmio_cleanup(struct drm_device *dev)
965{
966 struct drm_i915_private *dev_priv = to_i915(dev);
967
968 intel_teardown_mchbar(dev);
969 pci_iounmap(dev->pdev, dev_priv->regs);
970}
971
972/**
973 * i915_driver_load - setup chip and create an initial config
974 * @dev: DRM device
975 * @flags: startup flags
976 *
977 * The driver load routine has to do several things:
978 * - drive output discovery via intel_modeset_init()
979 * - initialize the memory manager
980 * - allocate initial config memory
981 * - setup the DRM framebuffer with the allocated memory
982 */
983int i915_driver_load(struct drm_device *dev, unsigned long flags)
984{
985 struct drm_i915_private *dev_priv;
986 struct intel_device_info *info, *device_info;
987 int ret = 0;
988 uint32_t aperture_size;
989
990 info = (struct intel_device_info *) flags;
991
992 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
993 if (dev_priv == NULL)
994 return -ENOMEM;
995
996 dev->dev_private = dev_priv;
997 dev_priv->dev = dev;
998
999 /* Setup the write-once "constant" device info */
1000 device_info = (struct intel_device_info *)&dev_priv->info;
1001 memcpy(device_info, info, sizeof(dev_priv->info));
1002 device_info->device_id = dev->pdev->device;
1003
1004 spin_lock_init(&dev_priv->irq_lock);
1005 spin_lock_init(&dev_priv->gpu_error.lock);
1006 mutex_init(&dev_priv->backlight_lock);
1007 spin_lock_init(&dev_priv->uncore.lock);
1008 spin_lock_init(&dev_priv->mm.object_stat_lock);
1009 spin_lock_init(&dev_priv->mmio_flip_lock);
1010 mutex_init(&dev_priv->sb_lock);
1011 mutex_init(&dev_priv->modeset_restore_lock);
1012 mutex_init(&dev_priv->av_mutex);
1013
1014 ret = i915_workqueues_init(dev_priv);
1015 if (ret < 0)
1016 goto out_free_priv;
1017
1018 intel_pm_setup(dev);
1019
1020 intel_runtime_pm_get(dev_priv);
1021
1022 intel_display_crc_init(dev);
1023
1024 i915_dump_device_info(dev_priv);
1025
1026 /* Not all pre-production machines fall into this category, only the
1027 * very first ones. Almost everything should work, except for maybe
1028 * suspend/resume. And we don't implement workarounds that affect only
1029 * pre-production machines. */
1030 if (IS_HSW_EARLY_SDV(dev))
1031 DRM_INFO("This is an early pre-production Haswell machine. "
1032 "It may not be fully functional.\n");
1033
1034 if (i915_get_bridge_dev(dev)) {
1035 ret = -EIO;
1036 goto out_runtime_pm_put;
1037 }
1038
1039 ret = i915_mmio_setup(dev);
1040 if (ret < 0)
1041 goto put_bridge;
1042
1043 /* This must be called before any calls to HAS_PCH_* */
1044 intel_detect_pch(dev);
1045
1046 intel_uncore_init(dev);
1047
1048 ret = i915_gem_gtt_init(dev);
1049 if (ret)
1050 goto out_uncore_fini;
1051
1052 /* WARNING: Apparently we must kick fbdev drivers before vgacon,
1053 * otherwise the vga fbdev driver falls over. */
1054 ret = i915_kick_out_firmware_fb(dev_priv);
1055 if (ret) {
1056 DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
1057 goto out_gtt;
1058 }
1059
1060 ret = i915_kick_out_vgacon(dev_priv);
1061 if (ret) {
1062 DRM_ERROR("failed to remove conflicting VGA console\n");
1063 goto out_gtt;
1064 }
1065
1066 pci_set_master(dev->pdev);
1067
1068 /* overlay on gen2 is broken and can't address above 1G */
1069 if (IS_GEN2(dev))
1070 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
1071
1072 /* 965GM sometimes incorrectly writes to hardware status page (HWS)
1073 * using 32bit addressing, overwriting memory if HWS is located
1074 * above 4GB.
1075 *
1076 * The documentation also mentions an issue with undefined
1077 * behaviour if any general state is accessed within a page above 4GB,
1078 * which also needs to be handled carefully.
1079 */
1080 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1081 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
1082
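	/*
	 * Map the CPU-mappable part of the GTT aperture write-combined so that
	 * objects can be accessed through it efficiently; arch_phys_wc_add()
	 * adds an MTRR as a fallback on machines without PAT.
	 */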
1083 aperture_size = dev_priv->gtt.mappable_end;
1084
1085 dev_priv->gtt.mappable =
1086 io_mapping_create_wc(dev_priv->gtt.mappable_base,
1087 aperture_size);
1088 if (dev_priv->gtt.mappable == NULL) {
1089 ret = -EIO;
1090 goto out_gtt;
1091 }
1092
1093 dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
1094 aperture_size);
1095
1096 intel_irq_init(dev_priv);
1097 intel_uncore_sanitize(dev);
1098
1099 intel_opregion_setup(dev);
1100
1101 i915_gem_load_init(dev);
1102 i915_gem_shrinker_init(dev_priv);
1103
1104 /* On the 945G/GM, the chipset reports the MSI capability on the
1105 * integrated graphics even though the support isn't actually there
1106 * according to the published specs. It doesn't appear to function
1107 * correctly in testing on 945G.
1108 * This may be a side effect of MSI having been made available for PEG
1109 * and the registers being closely associated.
1110 *
1111 * According to chipset errata, on the 965GM, MSI interrupts may
1112 * be lost or delayed, but we use them anyways to avoid
1113 * stuck interrupts on some machines.
1114 */
1115 if (!IS_I945G(dev) && !IS_I945GM(dev)) {
1116 if (pci_enable_msi(dev->pdev) < 0)
1117 DRM_DEBUG_DRIVER("can't enable MSI");
1118 }
1119
1120 intel_device_info_runtime_init(dev);
1121
1122 intel_init_dpio(dev_priv);
1123
1124 if (INTEL_INFO(dev)->num_pipes) {
1125 ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
1126 if (ret)
1127 goto out_gem_unload;
1128 }
1129
1130 intel_power_domains_init(dev_priv);
1131
1132 ret = i915_load_modeset_init(dev);
1133 if (ret < 0) {
1134 DRM_ERROR("failed to init modeset\n");
1135 goto out_power_well;
1136 }
1137
1138 /*
1139 * Notify a valid surface after modesetting,
1140 * when running inside a VM.
1141 */
1142 if (intel_vgpu_active(dev))
1143 I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
1144
1145 i915_setup_sysfs(dev);
1146
1147 if (INTEL_INFO(dev)->num_pipes) {
1148 /* Must be done after probing outputs */
1149 intel_opregion_init(dev);
1150 acpi_video_register();
1151 }
1152
1153 if (IS_GEN5(dev))
1154 intel_gpu_ips_init(dev_priv);
1155
1156 intel_runtime_pm_enable(dev_priv);
1157
1158 i915_audio_component_init(dev_priv);
1159
1160 intel_runtime_pm_put(dev_priv);
1161
1162 return 0;
1163
1164out_power_well:
1165 intel_power_domains_fini(dev_priv);
1166 drm_vblank_cleanup(dev);
1167out_gem_unload:
1168 i915_gem_shrinker_cleanup(dev_priv);
1169
1170 if (dev->pdev->msi_enabled)
1171 pci_disable_msi(dev->pdev);
1172
1173 intel_teardown_mchbar(dev);
1174 pm_qos_remove_request(&dev_priv->pm_qos);
1175 arch_phys_wc_del(dev_priv->gtt.mtrr);
1176 io_mapping_free(dev_priv->gtt.mappable);
1177out_gtt:
1178 i915_global_gtt_cleanup(dev);
1179out_uncore_fini:
1180 intel_uncore_fini(dev);
1181 i915_mmio_cleanup(dev);
1182put_bridge:
1183 pci_dev_put(dev_priv->bridge_dev);
1184 i915_gem_load_cleanup(dev);
1185out_runtime_pm_put:
1186 intel_runtime_pm_put(dev_priv);
1187 i915_workqueues_cleanup(dev_priv);
1188out_free_priv:
1189 kfree(dev_priv);
1190
1191 return ret;
1192}
1193
1194int i915_driver_unload(struct drm_device *dev)
1195{
1196 struct drm_i915_private *dev_priv = dev->dev_private;
1197 int ret;
1198
1199 intel_fbdev_fini(dev);
1200
1201 i915_audio_component_cleanup(dev_priv);
1202
1203 ret = i915_gem_suspend(dev);
1204 if (ret) {
1205 DRM_ERROR("failed to idle hardware: %d\n", ret);
1206 return ret;
1207 }
1208
1209 intel_power_domains_fini(dev_priv);
1210
1211 intel_gpu_ips_teardown();
1212
1213 i915_teardown_sysfs(dev);
1214
1215 i915_gem_shrinker_cleanup(dev_priv);
1216
1217 io_mapping_free(dev_priv->gtt.mappable);
1218 arch_phys_wc_del(dev_priv->gtt.mtrr);
1219
1220 acpi_video_unregister();
1221
1222 drm_vblank_cleanup(dev);
1223
1224 intel_modeset_cleanup(dev);
1225
1226 /*
1227 * free the memory space allocated for the child device
1228 * config parsed from VBT
1229 */
1230 if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
1231 kfree(dev_priv->vbt.child_dev);
1232 dev_priv->vbt.child_dev = NULL;
1233 dev_priv->vbt.child_dev_num = 0;
1234 }
1235 kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
1236 dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
1237 kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
1238 dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
1239
1240 vga_switcheroo_unregister_client(dev->pdev);
1241 vga_client_register(dev->pdev, NULL, NULL, NULL);
1242
1243 intel_csr_ucode_fini(dev_priv);
1244
1245 /* Free error state after interrupts are fully disabled. */
1246 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
1247 i915_destroy_error_state(dev);
1248
1249 if (dev->pdev->msi_enabled)
1250 pci_disable_msi(dev->pdev);
1251
1252 intel_opregion_fini(dev);
1253
1254 /* Flush any outstanding unpin_work. */
1255 flush_workqueue(dev_priv->wq);
1256
1257 intel_guc_ucode_fini(dev);
1258 mutex_lock(&dev->struct_mutex);
1259 i915_gem_cleanup_ringbuffer(dev);
1260 i915_gem_context_fini(dev);
1261 mutex_unlock(&dev->struct_mutex);
1262 intel_fbc_cleanup_cfb(dev_priv);
1263
1264 pm_qos_remove_request(&dev_priv->pm_qos);
1265
1266 i915_global_gtt_cleanup(dev);
1267
1268 intel_uncore_fini(dev);
1269 i915_mmio_cleanup(dev);
1270
1271 i915_gem_load_cleanup(dev);
1272 pci_dev_put(dev_priv->bridge_dev);
1273 i915_workqueues_cleanup(dev_priv);
1274 kfree(dev_priv);
1275
1276 return 0;
1277}
1278
1279int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1280{
1281 int ret;
1282
1283 ret = i915_gem_open(dev, file);
1284 if (ret)
1285 return ret;
1286
1287 return 0;
1288}
1289
1290/**
1291 * i915_driver_lastclose - clean up after all DRM clients have exited
1292 * @dev: DRM device
1293 *
1294 * Take care of cleaning up after all DRM clients have exited. In the
1295 * mode setting case, we want to restore the kernel's initial mode (just
1296 * in case the last client left us in a bad state).
1297 *
1298 * Additionally, in the non-mode setting case, we'll tear down the GTT
1299 * and DMA structures, since the kernel won't be using them, and clean
1300 * up any GEM state.
1301 */
1302void i915_driver_lastclose(struct drm_device *dev)
1303{
1304 intel_fbdev_restore_mode(dev);
1305 vga_switcheroo_process_delayed_switch();
1306}
1307
1308void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
1309{
1310 mutex_lock(&dev->struct_mutex);
1311 i915_gem_context_close(dev, file);
1312 i915_gem_release(dev, file);
1313 mutex_unlock(&dev->struct_mutex);
1314}
1315
1316void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1317{
1318 struct drm_i915_file_private *file_priv = file->driver_priv;
1319
1320 kfree(file_priv);
1321}
1322
1323static int
1324i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
1325 struct drm_file *file)
1326{
1327 return -ENODEV;
1328}
1329
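/*
 * Ioctl dispatch table. The legacy UMS/DRI1 entries are kept only so the
 * ioctl numbers stay stable; they now point at drm_noop or a rejection stub.
 * DRM_RENDER_ALLOW marks the ioctls that are usable on render nodes.
 */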
1330const struct drm_ioctl_desc i915_ioctls[] = {
1331 DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1332 DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
1333 DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
1334 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
1335 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
1336 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
1337 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
1338 DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1339 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
1340 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
1341 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1342 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
1343 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1344 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1345 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
1346 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
1347 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1348 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1349 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
1350 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
1351 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
1352 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
1353 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1354 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
1355 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
1356 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1357 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1358 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1359 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
1360 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
1361 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
1362 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
1363 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
1364 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
1365 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
1366 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
1367 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
1368 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
1369 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
1370 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
1371 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
1372 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
1373 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
1374 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
1375 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1376 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
1377 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
1378 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
1379 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW),
1380 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
1381 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
1382 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
1383};
1384
1385int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
1/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2 */
3/*
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include "drmP.h"
32#include "drm.h"
33#include "drm_crtc_helper.h"
34#include "drm_fb_helper.h"
35#include "intel_drv.h"
36#include "i915_drm.h"
37#include "i915_drv.h"
38#include "i915_trace.h"
39#include <linux/pci.h>
40#include <linux/vgaarb.h>
41#include <linux/acpi.h>
42#include <linux/pnp.h>
43#include <linux/vga_switcheroo.h>
44#include <linux/slab.h>
45#include <acpi/video.h>
46#include <asm/pat.h>
47
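/*
 * Legacy DRI1 helpers: LP_RING is the low-priority render ring (RCS), and the
 * BEGIN/OUT/ADVANCE macros wrap the ring-buffer emit API for the old DMA
 * ioctls below.
 */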
48#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
49
50#define BEGIN_LP_RING(n) \
51 intel_ring_begin(LP_RING(dev_priv), (n))
52
53#define OUT_RING(x) \
54 intel_ring_emit(LP_RING(dev_priv), x)
55
56#define ADVANCE_LP_RING() \
57 intel_ring_advance(LP_RING(dev_priv))
58
59/**
60 * Lock test for when it's just for synchronization of ring access.
61 *
62 * In that case, we don't need to do it when GEM is initialized as nobody else
63 * has access to the ring.
64 */
65#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
66 if (LP_RING(dev->dev_private)->obj == NULL) \
67 LOCK_TEST_WITH_RETURN(dev, file); \
68} while (0)
69
70static inline u32
71intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
72{
73 if (I915_NEED_GFX_HWS(dev_priv->dev))
74 return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
75 else
76 return intel_read_status_page(LP_RING(dev_priv), reg);
77}
78
79#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
80#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
81#define I915_BREADCRUMB_INDEX 0x21
82
83void i915_update_dri1_breadcrumb(struct drm_device *dev)
84{
85 drm_i915_private_t *dev_priv = dev->dev_private;
86 struct drm_i915_master_private *master_priv;
87
88 if (dev->primary->master) {
89 master_priv = dev->primary->master->driver_priv;
90 if (master_priv->sarea_priv)
91 master_priv->sarea_priv->last_dispatch =
92 READ_BREADCRUMB(dev_priv);
93 }
94}
95
96static void i915_write_hws_pga(struct drm_device *dev)
97{
98 drm_i915_private_t *dev_priv = dev->dev_private;
99 u32 addr;
100
101 addr = dev_priv->status_page_dmah->busaddr;
102 if (INTEL_INFO(dev)->gen >= 4)
103 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
104 I915_WRITE(HWS_PGA, addr);
105}
106
107/**
108 * Sets up the hardware status page for devices that need a physical address
109 * in the register.
110 */
111static int i915_init_phys_hws(struct drm_device *dev)
112{
113 drm_i915_private_t *dev_priv = dev->dev_private;
114
115 /* Program Hardware Status Page */
116 dev_priv->status_page_dmah =
117 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
118
119 if (!dev_priv->status_page_dmah) {
120 DRM_ERROR("Can not allocate hardware status page\n");
121 return -ENOMEM;
122 }
123
124 memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
125 0, PAGE_SIZE);
126
127 i915_write_hws_pga(dev);
128
129 DRM_DEBUG_DRIVER("Enabled hardware status page\n");
130 return 0;
131}
132
133/**
134 * Frees the hardware status page, whether it's a physical address or a virtual
135 * address set up by the X Server.
136 */
137static void i915_free_hws(struct drm_device *dev)
138{
139 drm_i915_private_t *dev_priv = dev->dev_private;
140 struct intel_ring_buffer *ring = LP_RING(dev_priv);
141
142 if (dev_priv->status_page_dmah) {
143 drm_pci_free(dev, dev_priv->status_page_dmah);
144 dev_priv->status_page_dmah = NULL;
145 }
146
147 if (ring->status_page.gfx_addr) {
148 ring->status_page.gfx_addr = 0;
149 iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
150 }
151
152 /* Need to rewrite hardware status page */
153 I915_WRITE(HWS_PGA, 0x1ffff000);
154}
155
156void i915_kernel_lost_context(struct drm_device * dev)
157{
158 drm_i915_private_t *dev_priv = dev->dev_private;
159 struct drm_i915_master_private *master_priv;
160 struct intel_ring_buffer *ring = LP_RING(dev_priv);
161
162 /*
163 * We should never lose context on the ring with modesetting
164 * as we don't expose it to userspace
165 */
166 if (drm_core_check_feature(dev, DRIVER_MODESET))
167 return;
168
169 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
170 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
171 ring->space = ring->head - (ring->tail + 8);
172 if (ring->space < 0)
173 ring->space += ring->size;
174
175 if (!dev->primary->master)
176 return;
177
178 master_priv = dev->primary->master->driver_priv;
179 if (ring->head == ring->tail && master_priv->sarea_priv)
180 master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
181}
182
183static int i915_dma_cleanup(struct drm_device * dev)
184{
185 drm_i915_private_t *dev_priv = dev->dev_private;
186 int i;
187
188 /* Make sure interrupts are disabled here because the uninstall ioctl
189 * may not have been called from userspace and after dev_private
190 * is freed, it's too late.
191 */
192 if (dev->irq_enabled)
193 drm_irq_uninstall(dev);
194
195 mutex_lock(&dev->struct_mutex);
196 for (i = 0; i < I915_NUM_RINGS; i++)
197 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
198 mutex_unlock(&dev->struct_mutex);
199
200 /* Clear the HWS virtual address at teardown */
201 if (I915_NEED_GFX_HWS(dev))
202 i915_free_hws(dev);
203
204 return 0;
205}
206
207static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
208{
209 drm_i915_private_t *dev_priv = dev->dev_private;
210 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
211 int ret;
212
213 master_priv->sarea = drm_getsarea(dev);
214 if (master_priv->sarea) {
215 master_priv->sarea_priv = (drm_i915_sarea_t *)
216 ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
217 } else {
218 DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
219 }
220
221 if (init->ring_size != 0) {
222 if (LP_RING(dev_priv)->obj != NULL) {
223 i915_dma_cleanup(dev);
224 DRM_ERROR("Client tried to initialize ringbuffer in "
225 "GEM mode\n");
226 return -EINVAL;
227 }
228
229 ret = intel_render_ring_init_dri(dev,
230 init->ring_start,
231 init->ring_size);
232 if (ret) {
233 i915_dma_cleanup(dev);
234 return ret;
235 }
236 }
237
238 dev_priv->cpp = init->cpp;
239 dev_priv->back_offset = init->back_offset;
240 dev_priv->front_offset = init->front_offset;
241 dev_priv->current_page = 0;
242 if (master_priv->sarea_priv)
243 master_priv->sarea_priv->pf_current_page = 0;
244
245 /* Allow hardware batchbuffers unless told otherwise.
246 */
247 dev_priv->dri1.allow_batchbuffer = 1;
248
249 return 0;
250}
251
252static int i915_dma_resume(struct drm_device * dev)
253{
254 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
255 struct intel_ring_buffer *ring = LP_RING(dev_priv);
256
257 DRM_DEBUG_DRIVER("%s\n", __func__);
258
259 if (ring->virtual_start == NULL) {
260 DRM_ERROR("can not ioremap virtual address for"
261 " ring buffer\n");
262 return -ENOMEM;
263 }
264
265 /* Program Hardware Status Page */
266 if (!ring->status_page.page_addr) {
267 DRM_ERROR("Can not find hardware status page\n");
268 return -EINVAL;
269 }
270 DRM_DEBUG_DRIVER("hw status page @ %p\n",
271 ring->status_page.page_addr);
272 if (ring->status_page.gfx_addr != 0)
273 intel_ring_setup_status_page(ring);
274 else
275 i915_write_hws_pga(dev);
276
277 DRM_DEBUG_DRIVER("Enabled hardware status page\n");
278
279 return 0;
280}
281
282static int i915_dma_init(struct drm_device *dev, void *data,
283 struct drm_file *file_priv)
284{
285 drm_i915_init_t *init = data;
286 int retcode = 0;
287
288 if (drm_core_check_feature(dev, DRIVER_MODESET))
289 return -ENODEV;
290
291 switch (init->func) {
292 case I915_INIT_DMA:
293 retcode = i915_initialize(dev, init);
294 break;
295 case I915_CLEANUP_DMA:
296 retcode = i915_dma_cleanup(dev);
297 break;
298 case I915_RESUME_DMA:
299 retcode = i915_dma_resume(dev);
300 break;
301 default:
302 retcode = -EINVAL;
303 break;
304 }
305
306 return retcode;
307}
308
309/* Implement basically the same security restrictions as hardware does
310 * for MI_BATCH_NON_SECURE. These can be made stricter at any time.
311 *
312 * Most of the calculations below involve calculating the size of a
313 * particular instruction. It's important to get the size right as
314 * that tells us where the next instruction to check is. Any illegal
315 * instruction detected will be given a size of zero, which is a
316 * signal to abort the rest of the buffer.
317 */
318static int validate_cmd(int cmd)
319{
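	/*
	 * Bits 31:29 select the command client: 0 = MI (memory interface),
	 * 2 = 2D/blitter, 3 = 3D/render; the remaining bits encode the opcode
	 * and, for variable-size commands, the length.
	 */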
320 switch (((cmd >> 29) & 0x7)) {
321 case 0x0:
322 switch ((cmd >> 23) & 0x3f) {
323 case 0x0:
324 return 1; /* MI_NOOP */
325 case 0x4:
326 return 1; /* MI_FLUSH */
327 default:
328 return 0; /* disallow everything else */
329 }
330 break;
331 case 0x1:
332 return 0; /* reserved */
333 case 0x2:
334 return (cmd & 0xff) + 2; /* 2d commands */
335 case 0x3:
336 if (((cmd >> 24) & 0x1f) <= 0x18)
337 return 1;
338
339 switch ((cmd >> 24) & 0x1f) {
340 case 0x1c:
341 return 1;
342 case 0x1d:
343 switch ((cmd >> 16) & 0xff) {
344 case 0x3:
345 return (cmd & 0x1f) + 2;
346 case 0x4:
347 return (cmd & 0xf) + 2;
348 default:
349 return (cmd & 0xffff) + 2;
350 }
351 case 0x1e:
352 if (cmd & (1 << 23))
353 return (cmd & 0xffff) + 1;
354 else
355 return 1;
356 case 0x1f:
357 if ((cmd & (1 << 23)) == 0) /* inline vertices */
358 return (cmd & 0x1ffff) + 2;
359 else if (cmd & (1 << 17)) /* indirect random */
360 if ((cmd & 0xffff) == 0)
361 return 0; /* unknown length, too hard */
362 else
363 return (((cmd & 0xffff) + 1) / 2) + 1;
364 else
365 return 2; /* indirect sequential */
366 default:
367 return 0;
368 }
369 default:
370 return 0;
371 }
372
373 return 0;
374}
375
376static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
377{
378 drm_i915_private_t *dev_priv = dev->dev_private;
379 int i, ret;
380
381 if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
382 return -EINVAL;
383
384 for (i = 0; i < dwords;) {
385 int sz = validate_cmd(buffer[i]);
386 if (sz == 0 || i + sz > dwords)
387 return -EINVAL;
388 i += sz;
389 }
390
391 ret = BEGIN_LP_RING((dwords+1)&~1);
392 if (ret)
393 return ret;
394
395 for (i = 0; i < dwords; i++)
396 OUT_RING(buffer[i]);
397 if (dwords & 1)
398 OUT_RING(0);
399
400 ADVANCE_LP_RING();
401
402 return 0;
403}
404
405int
406i915_emit_box(struct drm_device *dev,
407 struct drm_clip_rect *box,
408 int DR1, int DR4)
409{
410 struct drm_i915_private *dev_priv = dev->dev_private;
411 int ret;
412
413 if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
414 box->y2 <= 0 || box->x2 <= 0) {
415 DRM_ERROR("Bad box %d,%d..%d,%d\n",
416 box->x1, box->y1, box->x2, box->y2);
417 return -EINVAL;
418 }
419
420 if (INTEL_INFO(dev)->gen >= 4) {
421 ret = BEGIN_LP_RING(4);
422 if (ret)
423 return ret;
424
425 OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
426 OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
427 OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
428 OUT_RING(DR4);
429 } else {
430 ret = BEGIN_LP_RING(6);
431 if (ret)
432 return ret;
433
434 OUT_RING(GFX_OP_DRAWRECT_INFO);
435 OUT_RING(DR1);
436 OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
437 OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
438 OUT_RING(DR4);
439 OUT_RING(0);
440 }
441 ADVANCE_LP_RING();
442
443 return 0;
444}
445
446/* XXX: Emitting the counter should really be moved into the IRQ emit
447 * path. For now, do it in both places:
448 */
449
450static void i915_emit_breadcrumb(struct drm_device *dev)
451{
452 drm_i915_private_t *dev_priv = dev->dev_private;
453 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
454
455 dev_priv->counter++;
456 if (dev_priv->counter > 0x7FFFFFFFUL)
457 dev_priv->counter = 0;
458 if (master_priv->sarea_priv)
459 master_priv->sarea_priv->last_enqueue = dev_priv->counter;
460
461 if (BEGIN_LP_RING(4) == 0) {
462 OUT_RING(MI_STORE_DWORD_INDEX);
463 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
464 OUT_RING(dev_priv->counter);
465 OUT_RING(0);
466 ADVANCE_LP_RING();
467 }
468}
469
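/* Dispatch a DRI1 command buffer: replay the already-copied command dwords
 * once per clip rectangle (or once if there are none), emitting the matching
 * drawing rectangle first, and finish with a breadcrumb.
 */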
470static int i915_dispatch_cmdbuffer(struct drm_device * dev,
471 drm_i915_cmdbuffer_t *cmd,
472 struct drm_clip_rect *cliprects,
473 void *cmdbuf)
474{
475 int nbox = cmd->num_cliprects;
476 int i = 0, count, ret;
477
478 if (cmd->sz & 0x3) {
479 DRM_ERROR("alignment");
480 return -EINVAL;
481 }
482
483 i915_kernel_lost_context(dev);
484
485 count = nbox ? nbox : 1;
486
487 for (i = 0; i < count; i++) {
488 if (i < nbox) {
489 ret = i915_emit_box(dev, &cliprects[i],
490 cmd->DR1, cmd->DR4);
491 if (ret)
492 return ret;
493 }
494
495 ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
496 if (ret)
497 return ret;
498 }
499
500 i915_emit_breadcrumb(dev);
501 return 0;
502}
503
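/* Dispatch a DRI1 batch buffer by chaining to it with MI_BATCH_BUFFER_START
 * (or the older MI_BATCH_BUFFER form on 830/845G), once per clip rectangle,
 * then flush on G4X/gen5 and emit a breadcrumb.
 */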
504static int i915_dispatch_batchbuffer(struct drm_device * dev,
505 drm_i915_batchbuffer_t * batch,
506 struct drm_clip_rect *cliprects)
507{
508 struct drm_i915_private *dev_priv = dev->dev_private;
509 int nbox = batch->num_cliprects;
510 int i, count, ret;
511
512 if ((batch->start | batch->used) & 0x7) {
513 DRM_ERROR("alignment");
514 return -EINVAL;
515 }
516
517 i915_kernel_lost_context(dev);
518
519 count = nbox ? nbox : 1;
520 for (i = 0; i < count; i++) {
521 if (i < nbox) {
522 ret = i915_emit_box(dev, &cliprects[i],
523 batch->DR1, batch->DR4);
524 if (ret)
525 return ret;
526 }
527
528 if (!IS_I830(dev) && !IS_845G(dev)) {
529 ret = BEGIN_LP_RING(2);
530 if (ret)
531 return ret;
532
533 if (INTEL_INFO(dev)->gen >= 4) {
534 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
535 OUT_RING(batch->start);
536 } else {
537 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
538 OUT_RING(batch->start | MI_BATCH_NON_SECURE);
539 }
540 } else {
541 ret = BEGIN_LP_RING(4);
542 if (ret)
543 return ret;
544
545 OUT_RING(MI_BATCH_BUFFER);
546 OUT_RING(batch->start | MI_BATCH_NON_SECURE);
547 OUT_RING(batch->start + batch->used - 4);
548 OUT_RING(0);
549 }
550 ADVANCE_LP_RING();
551 }
552
553
554 if (IS_G4X(dev) || IS_GEN5(dev)) {
555 if (BEGIN_LP_RING(2) == 0) {
556 OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
557 OUT_RING(MI_NOOP);
558 ADVANCE_LP_RING();
559 }
560 }
561
562 i915_emit_breadcrumb(dev);
563 return 0;
564}
565
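/* Legacy page flip: flush, emit a DISPLAYBUFFER_INFO packet pointing at the
 * other buffer, wait for the plane A flip event and record a breadcrumb.
 */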
566static int i915_dispatch_flip(struct drm_device * dev)
567{
568 drm_i915_private_t *dev_priv = dev->dev_private;
569 struct drm_i915_master_private *master_priv =
570 dev->primary->master->driver_priv;
571 int ret;
572
573 if (!master_priv->sarea_priv)
574 return -EINVAL;
575
576 DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
577 __func__,
578 dev_priv->current_page,
579 master_priv->sarea_priv->pf_current_page);
580
581 i915_kernel_lost_context(dev);
582
583 ret = BEGIN_LP_RING(10);
584 if (ret)
585 return ret;
586
587 OUT_RING(MI_FLUSH | MI_READ_FLUSH);
588 OUT_RING(0);
589
590 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
591 OUT_RING(0);
592 if (dev_priv->current_page == 0) {
593 OUT_RING(dev_priv->back_offset);
594 dev_priv->current_page = 1;
595 } else {
596 OUT_RING(dev_priv->front_offset);
597 dev_priv->current_page = 0;
598 }
599 OUT_RING(0);
600
601 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
602 OUT_RING(0);
603
604 ADVANCE_LP_RING();
605
606 master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
607
608 if (BEGIN_LP_RING(4) == 0) {
609 OUT_RING(MI_STORE_DWORD_INDEX);
610 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
611 OUT_RING(dev_priv->counter);
612 OUT_RING(0);
613 ADVANCE_LP_RING();
614 }
615
616 master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
617 return 0;
618}
619
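/* Wait for the legacy render ring to drain completely. */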
620static int i915_quiescent(struct drm_device *dev)
621{
622 struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
623
624 i915_kernel_lost_context(dev);
625 return intel_wait_ring_idle(ring);
626}
627
628static int i915_flush_ioctl(struct drm_device *dev, void *data,
629 struct drm_file *file_priv)
630{
631 int ret;
632
633 if (drm_core_check_feature(dev, DRIVER_MODESET))
634 return -ENODEV;
635
636 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
637
638 mutex_lock(&dev->struct_mutex);
639 ret = i915_quiescent(dev);
640 mutex_unlock(&dev->struct_mutex);
641
642 return ret;
643}
644
645static int i915_batchbuffer(struct drm_device *dev, void *data,
646 struct drm_file *file_priv)
647{
648 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
649 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
650 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
651 master_priv->sarea_priv;
652 drm_i915_batchbuffer_t *batch = data;
653 int ret;
654 struct drm_clip_rect *cliprects = NULL;
655
656 if (drm_core_check_feature(dev, DRIVER_MODESET))
657 return -ENODEV;
658
659 if (!dev_priv->dri1.allow_batchbuffer) {
660 DRM_ERROR("Batchbuffer ioctl disabled\n");
661 return -EINVAL;
662 }
663
664 DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
665 batch->start, batch->used, batch->num_cliprects);
666
667 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
668
669 if (batch->num_cliprects < 0)
670 return -EINVAL;
671
672 if (batch->num_cliprects) {
673 cliprects = kcalloc(batch->num_cliprects,
674 sizeof(struct drm_clip_rect),
675 GFP_KERNEL);
676 if (cliprects == NULL)
677 return -ENOMEM;
678
679 ret = copy_from_user(cliprects, batch->cliprects,
680 batch->num_cliprects *
681 sizeof(struct drm_clip_rect));
682 if (ret != 0) {
683 ret = -EFAULT;
684 goto fail_free;
685 }
686 }
687
688 mutex_lock(&dev->struct_mutex);
689 ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
690 mutex_unlock(&dev->struct_mutex);
691
692 if (sarea_priv)
693 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
694
695fail_free:
696 kfree(cliprects);
697
698 return ret;
699}
700
701static int i915_cmdbuffer(struct drm_device *dev, void *data,
702 struct drm_file *file_priv)
703{
704 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
705 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
706 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
707 master_priv->sarea_priv;
708 drm_i915_cmdbuffer_t *cmdbuf = data;
709 struct drm_clip_rect *cliprects = NULL;
710 void *batch_data;
711 int ret;
712
713 DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
714 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
715
716 if (drm_core_check_feature(dev, DRIVER_MODESET))
717 return -ENODEV;
718
719 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
720
721 if (cmdbuf->num_cliprects < 0)
722 return -EINVAL;
723
724 batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
725 if (batch_data == NULL)
726 return -ENOMEM;
727
728 ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
729 if (ret != 0) {
730 ret = -EFAULT;
731 goto fail_batch_free;
732 }
733
734 if (cmdbuf->num_cliprects) {
735 cliprects = kcalloc(cmdbuf->num_cliprects,
736 sizeof(struct drm_clip_rect), GFP_KERNEL);
737 if (cliprects == NULL) {
738 ret = -ENOMEM;
739 goto fail_batch_free;
740 }
741
742 ret = copy_from_user(cliprects, cmdbuf->cliprects,
743 cmdbuf->num_cliprects *
744 sizeof(struct drm_clip_rect));
745 if (ret != 0) {
746 ret = -EFAULT;
747 goto fail_clip_free;
748 }
749 }
750
751 mutex_lock(&dev->struct_mutex);
752 ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
753 mutex_unlock(&dev->struct_mutex);
754 if (ret) {
755 DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
756 goto fail_clip_free;
757 }
758
759 if (sarea_priv)
760 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
761
762fail_clip_free:
763 kfree(cliprects);
764fail_batch_free:
765 kfree(batch_data);
766
767 return ret;
768}
769
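/* Emit a breadcrumb store followed by MI_USER_INTERRUPT and return the new
 * sequence number, which i915_wait_irq() later compares against.
 */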
770static int i915_emit_irq(struct drm_device * dev)
771{
772 drm_i915_private_t *dev_priv = dev->dev_private;
773 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
774
775 i915_kernel_lost_context(dev);
776
777 DRM_DEBUG_DRIVER("\n");
778
779 dev_priv->counter++;
780 if (dev_priv->counter > 0x7FFFFFFFUL)
781 dev_priv->counter = 1;
782 if (master_priv->sarea_priv)
783 master_priv->sarea_priv->last_enqueue = dev_priv->counter;
784
785 if (BEGIN_LP_RING(4) == 0) {
786 OUT_RING(MI_STORE_DWORD_INDEX);
787 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
788 OUT_RING(dev_priv->counter);
789 OUT_RING(MI_USER_INTERRUPT);
790 ADVANCE_LP_RING();
791 }
792
793 return dev_priv->counter;
794}
795
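/* Block (for up to three seconds) until the breadcrumb in the status page
 * reaches irq_nr, using the ring's user interrupt if it can be enabled and
 * falling back to polling otherwise.
 */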
796static int i915_wait_irq(struct drm_device * dev, int irq_nr)
797{
798 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
799 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
800 int ret = 0;
801 struct intel_ring_buffer *ring = LP_RING(dev_priv);
802
803 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
804 READ_BREADCRUMB(dev_priv));
805
806 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
807 if (master_priv->sarea_priv)
808 master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
809 return 0;
810 }
811
812 if (master_priv->sarea_priv)
813 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
814
815 if (ring->irq_get(ring)) {
816 DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
817 READ_BREADCRUMB(dev_priv) >= irq_nr);
818 ring->irq_put(ring);
819 } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
820 ret = -EBUSY;
821
822 if (ret == -EBUSY) {
823 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
824 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
825 }
826
827 return ret;
828}
829
830/* Needs the lock as it touches the ring.
831 */
832static int i915_irq_emit(struct drm_device *dev, void *data,
833 struct drm_file *file_priv)
834{
835 drm_i915_private_t *dev_priv = dev->dev_private;
836 drm_i915_irq_emit_t *emit = data;
837 int result;
838
839 if (drm_core_check_feature(dev, DRIVER_MODESET))
840 return -ENODEV;
841
842 if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
843 DRM_ERROR("called with no initialization\n");
844 return -EINVAL;
845 }
846
847 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
848
849 mutex_lock(&dev->struct_mutex);
850 result = i915_emit_irq(dev);
851 mutex_unlock(&dev->struct_mutex);
852
853 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
854 DRM_ERROR("copy_to_user\n");
855 return -EFAULT;
856 }
857
858 return 0;
859}
860
861/* Doesn't need the hardware lock.
862 */
863static int i915_irq_wait(struct drm_device *dev, void *data,
864 struct drm_file *file_priv)
865{
866 drm_i915_private_t *dev_priv = dev->dev_private;
867 drm_i915_irq_wait_t *irqwait = data;
868
869 if (drm_core_check_feature(dev, DRIVER_MODESET))
870 return -ENODEV;
871
872 if (!dev_priv) {
873 DRM_ERROR("called with no initialization\n");
874 return -EINVAL;
875 }
876
877 return i915_wait_irq(dev, irqwait->irq_seq);
878}
879
880static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
881 struct drm_file *file_priv)
882{
883 drm_i915_private_t *dev_priv = dev->dev_private;
884 drm_i915_vblank_pipe_t *pipe = data;
885
886 if (drm_core_check_feature(dev, DRIVER_MODESET))
887 return -ENODEV;
888
889 if (!dev_priv) {
890 DRM_ERROR("called with no initialization\n");
891 return -EINVAL;
892 }
893
894 pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
895
896 return 0;
897}
898
899/**
900 * Schedule buffer swap at given vertical blank.
901 */
902static int i915_vblank_swap(struct drm_device *dev, void *data,
903 struct drm_file *file_priv)
904{
905 /* The delayed swap mechanism was fundamentally racy, and has been
906 * removed. The model was that the client requested a delayed flip/swap
907 * from the kernel, then waited for vblank before continuing to perform
908 * rendering. The problem was that the kernel might wake the client
909 * up before it dispatched the vblank swap (since the lock has to be
910 * held while touching the ringbuffer), in which case the client would
911 * clear and start the next frame before the swap occurred, and
912 * flicker would occur in addition to likely missing the vblank.
913 *
914 * In the absence of this ioctl, userland falls back to a correct path
915 * of waiting for a vblank, then dispatching the swap on its own.
916 * Context switching to userland and back is plenty fast enough for
917 * meeting the requirements of vblank swapping.
918 */
919 return -EINVAL;
920}
921
922static int i915_flip_bufs(struct drm_device *dev, void *data,
923 struct drm_file *file_priv)
924{
925 int ret;
926
927 if (drm_core_check_feature(dev, DRIVER_MODESET))
928 return -ENODEV;
929
930 DRM_DEBUG_DRIVER("%s\n", __func__);
931
932 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
933
934 mutex_lock(&dev->struct_mutex);
935 ret = i915_dispatch_flip(dev);
936 mutex_unlock(&dev->struct_mutex);
937
938 return ret;
939}
940
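/* GETPARAM ioctl: report a single driver/hardware parameter to userspace.
 *
 * Illustrative userspace usage (libdrm, not part of this file):
 *
 *	int id;
 *	drm_i915_getparam_t gp = { .param = I915_PARAM_CHIPSET_ID, .value = &id };
 *	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */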
941static int i915_getparam(struct drm_device *dev, void *data,
942 struct drm_file *file_priv)
943{
944 drm_i915_private_t *dev_priv = dev->dev_private;
945 drm_i915_getparam_t *param = data;
946 int value;
947
948 if (!dev_priv) {
949 DRM_ERROR("called with no initialization\n");
950 return -EINVAL;
951 }
952
953 switch (param->param) {
954 case I915_PARAM_IRQ_ACTIVE:
955 value = dev->pdev->irq ? 1 : 0;
956 break;
957 case I915_PARAM_ALLOW_BATCHBUFFER:
958 value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
959 break;
960 case I915_PARAM_LAST_DISPATCH:
961 value = READ_BREADCRUMB(dev_priv);
962 break;
963 case I915_PARAM_CHIPSET_ID:
964 value = dev->pci_device;
965 break;
966 case I915_PARAM_HAS_GEM:
967 value = 1;
968 break;
969 case I915_PARAM_NUM_FENCES_AVAIL:
970 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
971 break;
972 case I915_PARAM_HAS_OVERLAY:
973 value = dev_priv->overlay ? 1 : 0;
974 break;
975 case I915_PARAM_HAS_PAGEFLIPPING:
976 value = 1;
977 break;
978 case I915_PARAM_HAS_EXECBUF2:
979 /* depends on GEM */
980 value = 1;
981 break;
982 case I915_PARAM_HAS_BSD:
983 value = intel_ring_initialized(&dev_priv->ring[VCS]);
984 break;
985 case I915_PARAM_HAS_BLT:
986 value = intel_ring_initialized(&dev_priv->ring[BCS]);
987 break;
988 case I915_PARAM_HAS_RELAXED_FENCING:
989 value = 1;
990 break;
991 case I915_PARAM_HAS_COHERENT_RINGS:
992 value = 1;
993 break;
994 case I915_PARAM_HAS_EXEC_CONSTANTS:
995 value = INTEL_INFO(dev)->gen >= 4;
996 break;
997 case I915_PARAM_HAS_RELAXED_DELTA:
998 value = 1;
999 break;
1000 case I915_PARAM_HAS_GEN7_SOL_RESET:
1001 value = 1;
1002 break;
1003 case I915_PARAM_HAS_LLC:
1004 value = HAS_LLC(dev);
1005 break;
1006 case I915_PARAM_HAS_ALIASING_PPGTT:
1007 value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
1008 break;
1009 default:
1010 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
1011 param->param);
1012 return -EINVAL;
1013 }
1014
1015 if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
1016 DRM_ERROR("DRM_COPY_TO_USER failed\n");
1017 return -EFAULT;
1018 }
1019
1020 return 0;
1021}
1022
1023static int i915_setparam(struct drm_device *dev, void *data,
1024 struct drm_file *file_priv)
1025{
1026 drm_i915_private_t *dev_priv = dev->dev_private;
1027 drm_i915_setparam_t *param = data;
1028
1029 if (!dev_priv) {
1030 DRM_ERROR("called with no initialization\n");
1031 return -EINVAL;
1032 }
1033
1034 switch (param->param) {
1035 case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
1036 break;
1037 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
1038 break;
1039 case I915_SETPARAM_ALLOW_BATCHBUFFER:
1040 dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
1041 break;
1042 case I915_SETPARAM_NUM_USED_FENCES:
1043 if (param->value > dev_priv->num_fence_regs ||
1044 param->value < 0)
1045 return -EINVAL;
1046 /* Userspace can use first N regs */
1047 dev_priv->fence_reg_start = param->value;
1048 break;
1049 default:
1050 DRM_DEBUG_DRIVER("unknown parameter %d\n",
1051 param->param);
1052 return -EINVAL;
1053 }
1054
1055 return 0;
1056}
1057
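/* HWS_ADDR ioctl: point the hardware status page at a GTT offset chosen by
 * userspace and map it for CPU access (UMS only).
 */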
1058static int i915_set_status_page(struct drm_device *dev, void *data,
1059 struct drm_file *file_priv)
1060{
1061 drm_i915_private_t *dev_priv = dev->dev_private;
1062 drm_i915_hws_addr_t *hws = data;
1063 struct intel_ring_buffer *ring = LP_RING(dev_priv);
1064
1065 if (drm_core_check_feature(dev, DRIVER_MODESET))
1066 return -ENODEV;
1067
1068 if (!I915_NEED_GFX_HWS(dev))
1069 return -EINVAL;
1070
1071 if (!dev_priv) {
1072 DRM_ERROR("called with no initialization\n");
1073 return -EINVAL;
1074 }
1075
1081 DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
1082
1083 ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
1084
1085 dev_priv->dri1.gfx_hws_cpu_addr = ioremap_wc(dev->agp->base + hws->addr,
1086 4096);
1087 if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
1088 i915_dma_cleanup(dev);
1089 ring->status_page.gfx_addr = 0;
1090		DRM_ERROR("cannot ioremap virtual address for"
1091 " G33 hw status page\n");
1092 return -ENOMEM;
1093 }
1094
1095 memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
1096 I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
1097
1098 DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
1099 ring->status_page.gfx_addr);
1100 DRM_DEBUG_DRIVER("load hws at %p\n",
1101 ring->status_page.page_addr);
1102 return 0;
1103}
1104
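/* Look up the host bridge (device 0000:00:00.0); we need it later to poke at
 * MCHBAR through its config space.
 */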
1105static int i915_get_bridge_dev(struct drm_device *dev)
1106{
1107 struct drm_i915_private *dev_priv = dev->dev_private;
1108
1109 dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
1110 if (!dev_priv->bridge_dev) {
1111 DRM_ERROR("bridge device not found\n");
1112 return -1;
1113 }
1114 return 0;
1115}
1116
1117#define MCHBAR_I915 0x44
1118#define MCHBAR_I965 0x48
1119#define MCHBAR_SIZE (4*4096)
1120
1121#define DEVEN_REG 0x54
1122#define DEVEN_MCHBAR_EN (1 << 28)
1123
1124/* Allocate space for the MCH regs if needed, return nonzero on error */
1125static int
1126intel_alloc_mchbar_resource(struct drm_device *dev)
1127{
1128 drm_i915_private_t *dev_priv = dev->dev_private;
1129 int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1130 u32 temp_lo, temp_hi = 0;
1131 u64 mchbar_addr;
1132 int ret;
1133
1134 if (INTEL_INFO(dev)->gen >= 4)
1135 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
1136 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
1137 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
1138
1139 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
1140#ifdef CONFIG_PNP
1141 if (mchbar_addr &&
1142 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
1143 return 0;
1144#endif
1145
1146 /* Get some space for it */
1147 dev_priv->mch_res.name = "i915 MCHBAR";
1148 dev_priv->mch_res.flags = IORESOURCE_MEM;
1149 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
1150 &dev_priv->mch_res,
1151 MCHBAR_SIZE, MCHBAR_SIZE,
1152 PCIBIOS_MIN_MEM,
1153 0, pcibios_align_resource,
1154 dev_priv->bridge_dev);
1155 if (ret) {
1156 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
1157 dev_priv->mch_res.start = 0;
1158 return ret;
1159 }
1160
1161 if (INTEL_INFO(dev)->gen >= 4)
1162 pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
1163 upper_32_bits(dev_priv->mch_res.start));
1164
1165 pci_write_config_dword(dev_priv->bridge_dev, reg,
1166 lower_32_bits(dev_priv->mch_res.start));
1167 return 0;
1168}
1169
1170/* Set up MCHBAR if possible; note in dev_priv whether it must be disabled again on teardown */
1171static void
1172intel_setup_mchbar(struct drm_device *dev)
1173{
1174 drm_i915_private_t *dev_priv = dev->dev_private;
1175 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1176 u32 temp;
1177 bool enabled;
1178
1179 dev_priv->mchbar_need_disable = false;
1180
1181 if (IS_I915G(dev) || IS_I915GM(dev)) {
1182 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
1183 enabled = !!(temp & DEVEN_MCHBAR_EN);
1184 } else {
1185 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
1186 enabled = temp & 1;
1187 }
1188
1189 /* If it's already enabled, don't have to do anything */
1190 if (enabled)
1191 return;
1192
1193 if (intel_alloc_mchbar_resource(dev))
1194 return;
1195
1196 dev_priv->mchbar_need_disable = true;
1197
1198 /* Space is allocated or reserved, so enable it. */
1199 if (IS_I915G(dev) || IS_I915GM(dev)) {
1200 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
1201 temp | DEVEN_MCHBAR_EN);
1202 } else {
1203 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
1204 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
1205 }
1206}
1207
1208static void
1209intel_teardown_mchbar(struct drm_device *dev)
1210{
1211 drm_i915_private_t *dev_priv = dev->dev_private;
1212 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1213 u32 temp;
1214
1215 if (dev_priv->mchbar_need_disable) {
1216 if (IS_I915G(dev) || IS_I915GM(dev)) {
1217 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
1218 temp &= ~DEVEN_MCHBAR_EN;
1219 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
1220 } else {
1221 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
1222 temp &= ~1;
1223 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
1224 }
1225 }
1226
1227 if (dev_priv->mch_res.start)
1228 release_resource(&dev_priv->mch_res);
1229}
1230
1231/* true = enable decode, false = disable decode */
1232static unsigned int i915_vga_set_decode(void *cookie, bool state)
1233{
1234 struct drm_device *dev = cookie;
1235
1236 intel_modeset_vga_set_state(dev, state);
1237 if (state)
1238 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1239 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1240 else
1241 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1242}
1243
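/* vga_switcheroo callback: resume the device when switched on, suspend it
 * when the other GPU takes over.
 */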
1244static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1245{
1246 struct drm_device *dev = pci_get_drvdata(pdev);
1247 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
1248 if (state == VGA_SWITCHEROO_ON) {
1249 pr_info("switched on\n");
1250 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1251 /* i915 resume handler doesn't set to D0 */
1252 pci_set_power_state(dev->pdev, PCI_D0);
1253 i915_resume(dev);
1254 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1255 } else {
1256 pr_err("switched off\n");
1257 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1258 i915_suspend(dev, pmm);
1259 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1260 }
1261}
1262
1263static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
1264{
1265 struct drm_device *dev = pci_get_drvdata(pdev);
1266 bool can_switch;
1267
1268 spin_lock(&dev->count_lock);
1269 can_switch = (dev->open_count == 0);
1270 spin_unlock(&dev->count_lock);
1271 return can_switch;
1272}
1273
1274static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
1275 .set_gpu_state = i915_switcheroo_set_state,
1276 .reprobe = NULL,
1277 .can_switch = i915_switcheroo_can_switch,
1278};
1279
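/* Bring up the KMS side of the driver: parse the VBT, register with the VGA
 * arbiter and vga_switcheroo, initialise stolen memory, GEM and the modeset
 * code, install the interrupt handler and start fbdev.
 */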
1280static int i915_load_modeset_init(struct drm_device *dev)
1281{
1282 struct drm_i915_private *dev_priv = dev->dev_private;
1283 int ret;
1284
1285 ret = intel_parse_bios(dev);
1286 if (ret)
1287 DRM_INFO("failed to find VBIOS tables\n");
1288
1289	/* If we have more than one VGA card, we need to arbitrate access
1290	 * to the common VGA resources.
1291	 *
1292	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
1293	 * then we do not take part in VGA arbitration and
1294	 * vga_client_register() fails with -ENODEV.
1295 */
1296 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
1297 if (ret && ret != -ENODEV)
1298 goto out;
1299
1300 intel_register_dsm_handler();
1301
1302 ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops);
1303 if (ret)
1304 goto cleanup_vga_client;
1305
1306 /* Initialise stolen first so that we may reserve preallocated
1307 * objects for the BIOS to KMS transition.
1308 */
1309 ret = i915_gem_init_stolen(dev);
1310 if (ret)
1311 goto cleanup_vga_switcheroo;
1312
1313 intel_modeset_init(dev);
1314
1315 ret = i915_gem_init(dev);
1316 if (ret)
1317 goto cleanup_gem_stolen;
1318
1319 intel_modeset_gem_init(dev);
1320
1321 ret = drm_irq_install(dev);
1322 if (ret)
1323 goto cleanup_gem;
1324
1325 /* Always safe in the mode setting case. */
1326 /* FIXME: do pre/post-mode set stuff in core KMS code */
1327 dev->vblank_disable_allowed = 1;
1328
1329 ret = intel_fbdev_init(dev);
1330 if (ret)
1331 goto cleanup_irq;
1332
1333 drm_kms_helper_poll_init(dev);
1334
1335 /* We're off and running w/KMS */
1336 dev_priv->mm.suspended = 0;
1337
1338 return 0;
1339
1340cleanup_irq:
1341 drm_irq_uninstall(dev);
1342cleanup_gem:
1343 mutex_lock(&dev->struct_mutex);
1344 i915_gem_cleanup_ringbuffer(dev);
1345 mutex_unlock(&dev->struct_mutex);
1346 i915_gem_cleanup_aliasing_ppgtt(dev);
1347cleanup_gem_stolen:
1348 i915_gem_cleanup_stolen(dev);
1349cleanup_vga_switcheroo:
1350 vga_switcheroo_unregister_client(dev->pdev);
1351cleanup_vga_client:
1352 vga_client_register(dev->pdev, NULL, NULL, NULL);
1353out:
1354 return ret;
1355}
1356
1357int i915_master_create(struct drm_device *dev, struct drm_master *master)
1358{
1359 struct drm_i915_master_private *master_priv;
1360
1361 master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
1362 if (!master_priv)
1363 return -ENOMEM;
1364
1365 master->driver_priv = master_priv;
1366 return 0;
1367}
1368
1369void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1370{
1371 struct drm_i915_master_private *master_priv = master->driver_priv;
1372
1373 if (!master_priv)
1374 return;
1375
1376 kfree(master_priv);
1377
1378 master->driver_priv = NULL;
1379}
1380
1381static void
1382i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
1383 unsigned long size)
1384{
1385 dev_priv->mm.gtt_mtrr = -1;
1386
1387#if defined(CONFIG_X86_PAT)
1388 if (cpu_has_pat)
1389 return;
1390#endif
1391
1392	/* Set up a WC MTRR for non-PAT systems. This is more common than
1393	 * one would think, because the kernel disables PAT on first
1394	 * generation Core chips: there, WC PAT gets overridden by a UC
1395	 * MTRR if one is present, and sometimes even when it is not.
1396 */
1397 dev_priv->mm.gtt_mtrr = mtrr_add(base, size, MTRR_TYPE_WRCOMB, 1);
1398 if (dev_priv->mm.gtt_mtrr < 0) {
1399 DRM_INFO("MTRR allocation failed. Graphics "
1400 "performance may suffer.\n");
1401 }
1402}
1403
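/* Kick out any firmware framebuffer (e.g. vesafb/efifb) that overlaps our
 * mappable aperture before we take over the hardware.
 */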
1404static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
1405{
1406 struct apertures_struct *ap;
1407 struct pci_dev *pdev = dev_priv->dev->pdev;
1408 bool primary;
1409
1410 ap = alloc_apertures(1);
1411 if (!ap)
1412 return;
1413
1414 ap->ranges[0].base = dev_priv->dev->agp->base;
1415 ap->ranges[0].size =
1416 dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
1417 primary =
1418 pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
1419
1420 remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
1421
1422 kfree(ap);
1423}
1424
1425/**
1426 * i915_driver_load - setup chip and create an initial config
1427 * @dev: DRM device
1428 * @flags: startup flags
1429 *
1430 * The driver load routine has to do several things:
1431 * - drive output discovery via intel_modeset_init()
1432 * - initialize the memory manager
1433 * - allocate initial config memory
1434 * - setup the DRM framebuffer with the allocated memory
1435 */
1436int i915_driver_load(struct drm_device *dev, unsigned long flags)
1437{
1438 struct drm_i915_private *dev_priv;
1439 struct intel_device_info *info;
1440 int ret = 0, mmio_bar;
1441 uint32_t aperture_size;
1442
1443 info = (struct intel_device_info *) flags;
1444
1445 /* Refuse to load on gen6+ without kms enabled. */
1446 if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
1447 return -ENODEV;
1448
1449
1450 /* i915 has 4 more counters */
1451 dev->counters += 4;
1452 dev->types[6] = _DRM_STAT_IRQ;
1453 dev->types[7] = _DRM_STAT_PRIMARY;
1454 dev->types[8] = _DRM_STAT_SECONDARY;
1455 dev->types[9] = _DRM_STAT_DMA;
1456
1457 dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
1458 if (dev_priv == NULL)
1459 return -ENOMEM;
1460
1461 dev->dev_private = (void *)dev_priv;
1462 dev_priv->dev = dev;
1463 dev_priv->info = info;
1464
1465 if (i915_get_bridge_dev(dev)) {
1466 ret = -EIO;
1467 goto free_priv;
1468 }
1469
1470 dev_priv->mm.gtt = intel_gtt_get();
1471 if (!dev_priv->mm.gtt) {
1472 DRM_ERROR("Failed to initialize GTT\n");
1473 ret = -ENODEV;
1474 goto put_bridge;
1475 }
1476
1477 i915_kick_out_firmware_fb(dev_priv);
1478
1479 pci_set_master(dev->pdev);
1480
1481 /* overlay on gen2 is broken and can't address above 1G */
1482 if (IS_GEN2(dev))
1483 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
1484
1485 /* 965GM sometimes incorrectly writes to hardware status page (HWS)
1486 * using 32bit addressing, overwriting memory if HWS is located
1487 * above 4GB.
1488 *
1489 * The documentation also mentions an issue with undefined
1490 * behaviour if any general state is accessed within a page above 4GB,
1491 * which also needs to be handled carefully.
1492 */
1493 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1494 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
1495
1496 mmio_bar = IS_GEN2(dev) ? 1 : 0;
1497 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
1498 if (!dev_priv->regs) {
1499 DRM_ERROR("failed to map registers\n");
1500 ret = -EIO;
1501 goto put_bridge;
1502 }
1503
1504 aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
1505
1506 dev_priv->mm.gtt_mapping =
1507 io_mapping_create_wc(dev->agp->base, aperture_size);
1508 if (dev_priv->mm.gtt_mapping == NULL) {
1509 ret = -EIO;
1510 goto out_rmmap;
1511 }
1512
1513 i915_mtrr_setup(dev_priv, dev->agp->base, aperture_size);
1514
1515 /* The i915 workqueue is primarily used for batched retirement of
1516 * requests (and thus managing bo) once the task has been completed
1517 * by the GPU. i915_gem_retire_requests() is called directly when we
1518 * need high-priority retirement, such as waiting for an explicit
1519 * bo.
1520 *
1521 * It is also used for periodic low-priority events, such as
1522 * idle-timers and recording error state.
1523 *
1524 * All tasks on the workqueue are expected to acquire the dev mutex
1525 * so there is no point in running more than one instance of the
1526 * workqueue at any time: max_active = 1 and NON_REENTRANT.
1527 */
1528 dev_priv->wq = alloc_workqueue("i915",
1529 WQ_UNBOUND | WQ_NON_REENTRANT,
1530 1);
1531 if (dev_priv->wq == NULL) {
1532 DRM_ERROR("Failed to create our workqueue.\n");
1533 ret = -ENOMEM;
1534 goto out_mtrrfree;
1535 }
1536
1537 intel_irq_init(dev);
1538
1539 /* Try to make sure MCHBAR is enabled before poking at it */
1540 intel_setup_mchbar(dev);
1541 intel_setup_gmbus(dev);
1542 intel_opregion_setup(dev);
1543
1544 /* Make sure the bios did its job and set up vital registers */
1545 intel_setup_bios(dev);
1546
1547 i915_gem_load(dev);
1548
1549 /* Init HWS */
1550 if (!I915_NEED_GFX_HWS(dev)) {
1551 ret = i915_init_phys_hws(dev);
1552 if (ret)
1553 goto out_gem_unload;
1554 }
1555
1556 /* On the 945G/GM, the chipset reports the MSI capability on the
1557 * integrated graphics even though the support isn't actually there
1558 * according to the published specs. It doesn't appear to function
1559 * correctly in testing on 945G.
1560 * This may be a side effect of MSI having been made available for PEG
1561 * and the registers being closely associated.
1562 *
1563 * According to chipset errata, on the 965GM, MSI interrupts may
1564	 * be lost or delayed, but we use them anyway to avoid
1565 * stuck interrupts on some machines.
1566 */
1567 if (!IS_I945G(dev) && !IS_I945GM(dev))
1568 pci_enable_msi(dev->pdev);
1569
1570 spin_lock_init(&dev_priv->gt_lock);
1571 spin_lock_init(&dev_priv->irq_lock);
1572 spin_lock_init(&dev_priv->error_lock);
1573 spin_lock_init(&dev_priv->rps_lock);
1574
1575 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
1576 dev_priv->num_pipe = 3;
1577 else if (IS_MOBILE(dev) || !IS_GEN2(dev))
1578 dev_priv->num_pipe = 2;
1579 else
1580 dev_priv->num_pipe = 1;
1581
1582 ret = drm_vblank_init(dev, dev_priv->num_pipe);
1583 if (ret)
1584 goto out_gem_unload;
1585
1586 /* Start out suspended */
1587 dev_priv->mm.suspended = 1;
1588
1589 intel_detect_pch(dev);
1590
1591 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1592 ret = i915_load_modeset_init(dev);
1593 if (ret < 0) {
1594 DRM_ERROR("failed to init modeset\n");
1595 goto out_gem_unload;
1596 }
1597 }
1598
1599 i915_setup_sysfs(dev);
1600
1601 /* Must be done after probing outputs */
1602 intel_opregion_init(dev);
1603 acpi_video_register();
1604
1605 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
1606 (unsigned long) dev);
1607
1608 if (IS_GEN5(dev))
1609 intel_gpu_ips_init(dev_priv);
1610
1611 return 0;
1612
1613out_gem_unload:
1614 if (dev_priv->mm.inactive_shrinker.shrink)
1615 unregister_shrinker(&dev_priv->mm.inactive_shrinker);
1616
1617 if (dev->pdev->msi_enabled)
1618 pci_disable_msi(dev->pdev);
1619
1620 intel_teardown_gmbus(dev);
1621 intel_teardown_mchbar(dev);
1622 destroy_workqueue(dev_priv->wq);
1623out_mtrrfree:
1624 if (dev_priv->mm.gtt_mtrr >= 0) {
1625 mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
1626 dev->agp->agp_info.aper_size * 1024 * 1024);
1627 dev_priv->mm.gtt_mtrr = -1;
1628 }
1629 io_mapping_free(dev_priv->mm.gtt_mapping);
1630out_rmmap:
1631 pci_iounmap(dev->pdev, dev_priv->regs);
1632put_bridge:
1633 pci_dev_put(dev_priv->bridge_dev);
1634free_priv:
1635 kfree(dev_priv);
1636 return ret;
1637}
1638
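/* Undo i915_driver_load() in roughly the reverse order: idle the GPU,
 * release the aperture mapping and MTRR, tear down KMS, interrupts, GEM
 * state and the workqueue, and drop the bridge device reference.
 */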
1639int i915_driver_unload(struct drm_device *dev)
1640{
1641 struct drm_i915_private *dev_priv = dev->dev_private;
1642 int ret;
1643
1644 intel_gpu_ips_teardown();
1645
1646 i915_teardown_sysfs(dev);
1647
1648 if (dev_priv->mm.inactive_shrinker.shrink)
1649 unregister_shrinker(&dev_priv->mm.inactive_shrinker);
1650
1651 mutex_lock(&dev->struct_mutex);
1652 ret = i915_gpu_idle(dev);
1653 if (ret)
1654 DRM_ERROR("failed to idle hardware: %d\n", ret);
1655 i915_gem_retire_requests(dev);
1656 mutex_unlock(&dev->struct_mutex);
1657
1658 /* Cancel the retire work handler, which should be idle now. */
1659 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
1660
1661 io_mapping_free(dev_priv->mm.gtt_mapping);
1662 if (dev_priv->mm.gtt_mtrr >= 0) {
1663 mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
1664 dev->agp->agp_info.aper_size * 1024 * 1024);
1665 dev_priv->mm.gtt_mtrr = -1;
1666 }
1667
1668 acpi_video_unregister();
1669
1670 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1671 intel_fbdev_fini(dev);
1672 intel_modeset_cleanup(dev);
1673
1674 /*
1675 * free the memory space allocated for the child device
1676 * config parsed from VBT
1677 */
1678 if (dev_priv->child_dev && dev_priv->child_dev_num) {
1679 kfree(dev_priv->child_dev);
1680 dev_priv->child_dev = NULL;
1681 dev_priv->child_dev_num = 0;
1682 }
1683
1684 vga_switcheroo_unregister_client(dev->pdev);
1685 vga_client_register(dev->pdev, NULL, NULL, NULL);
1686 }
1687
1688 /* Free error state after interrupts are fully disabled. */
1689 del_timer_sync(&dev_priv->hangcheck_timer);
1690 cancel_work_sync(&dev_priv->error_work);
1691 i915_destroy_error_state(dev);
1692
1693 if (dev->pdev->msi_enabled)
1694 pci_disable_msi(dev->pdev);
1695
1696 intel_opregion_fini(dev);
1697
1698 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1699 /* Flush any outstanding unpin_work. */
1700 flush_workqueue(dev_priv->wq);
1701
1702 mutex_lock(&dev->struct_mutex);
1703 i915_gem_free_all_phys_object(dev);
1704 i915_gem_cleanup_ringbuffer(dev);
1705 mutex_unlock(&dev->struct_mutex);
1706 i915_gem_cleanup_aliasing_ppgtt(dev);
1707 i915_gem_cleanup_stolen(dev);
1708 drm_mm_takedown(&dev_priv->mm.stolen);
1709
1710 intel_cleanup_overlay(dev);
1711
1712 if (!I915_NEED_GFX_HWS(dev))
1713 i915_free_hws(dev);
1714 }
1715
1716 if (dev_priv->regs != NULL)
1717 pci_iounmap(dev->pdev, dev_priv->regs);
1718
1719 intel_teardown_gmbus(dev);
1720 intel_teardown_mchbar(dev);
1721
1722 destroy_workqueue(dev_priv->wq);
1723
1724 pci_dev_put(dev_priv->bridge_dev);
1725 kfree(dev->dev_private);
1726
1727 return 0;
1728}
1729
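/* Allocate and initialise the per-file private state (the GEM request list
 * and its lock).
 */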
1730int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1731{
1732 struct drm_i915_file_private *file_priv;
1733
1734 DRM_DEBUG_DRIVER("\n");
1735 file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
1736 if (!file_priv)
1737 return -ENOMEM;
1738
1739 file->driver_priv = file_priv;
1740
1741 spin_lock_init(&file_priv->mm.lock);
1742 INIT_LIST_HEAD(&file_priv->mm.request_list);
1743
1744 return 0;
1745}
1746
1747/**
1748 * i915_driver_lastclose - clean up after all DRM clients have exited
1749 * @dev: DRM device
1750 *
1751 * Take care of cleaning up after all DRM clients have exited. In the
1752 * mode setting case, we want to restore the kernel's initial mode (just
1753 * in case the last client left us in a bad state).
1754 *
1755 * Additionally, in the non-mode setting case, we'll tear down the GTT
1756 * and DMA structures, since the kernel won't be using them, and clean
1757 * up any GEM state.
1758 */
1759void i915_driver_lastclose(struct drm_device * dev)
1760{
1761 drm_i915_private_t *dev_priv = dev->dev_private;
1762
1763 if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
1764 intel_fb_restore_mode(dev);
1765 vga_switcheroo_process_delayed_switch();
1766 return;
1767 }
1768
1769 i915_gem_lastclose(dev);
1770
1771 i915_dma_cleanup(dev);
1772}
1773
1774void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1775{
1776 i915_gem_release(dev, file_priv);
1777}
1778
1779void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1780{
1781 struct drm_i915_file_private *file_priv = file->driver_priv;
1782
1783 kfree(file_priv);
1784}
1785
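/* Ioctl dispatch table registered with the DRM core; most of the legacy
 * DRI1 entries below return -ENODEV when KMS is enabled.
 */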
1786struct drm_ioctl_desc i915_ioctls[] = {
1787 DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1788 DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
1789 DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
1790 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
1791 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
1792 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
1793 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
1794 DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1795 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
1796 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
1797 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1798 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
1799 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1800 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1801 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
1802 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
1803 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1804 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1805 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
1806 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
1807 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1808 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1809 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
1810 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
1811 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1812 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1813 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
1814 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
1815 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
1816 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
1817 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
1818 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
1819 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
1820 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
1821 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
1822 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
1823 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
1824 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
1825 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1826 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1827 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1828 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
1829};
1830
1831int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
1832
1833/*
1834 * This is really ugly: Because old userspace abused the linux agp interface to
1835 * manage the gtt, we need to claim that all intel devices are agp;
1836 * otherwise the drm core refuses to initialize the agp support code.
1837 */
1838int i915_driver_device_is_agp(struct drm_device * dev)
1839{
1840 return 1;
1841}