/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>

#include <linux/pm_qos.h>

#include <drm/ttm/ttm_device.h>

#include "display/intel_display_limits.h"
#include "display/intel_display_core.h"

#include "gem/i915_gem_context_types.h"
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"

#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
#include "gt/intel_region_lmem.h"
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"

#include "soc/intel_pch.h"

#include "i915_drm_client.h"
#include "i915_gem.h"
#include "i915_gpu_error.h"
#include "i915_params.h"
#include "i915_perf_types.h"
#include "i915_scheduler.h"
#include "i915_utils.h"
#include "intel_device_info.h"
#include "intel_memory_region.h"
#include "intel_runtime_pm.h"
#include "intel_step.h"
#include "intel_uncore.h"

struct drm_i915_clock_gating_funcs;
struct vlv_s0ix_state;
struct intel_pxp;

#define GEM_QUIRK_PIN_SWIZZLED_PAGES BIT(0)
72
/* Data Stolen Memory (DSM) aka "i915 stolen memory" */
struct i915_dsm {
	/*
	 * The start and end of DSM which we can optionally use to create GEM
	 * objects backed by stolen memory.
	 *
	 * Note that usable_size tells us exactly how much of this we are
	 * actually allowed to use, given that some portion of it is in fact
	 * reserved for use by hardware functions.
	 */
	struct resource stolen;

	/*
	 * Reserved portion of DSM.
	 */
	struct resource reserved;

	/*
	 * Total size minus reserved ranges.
	 *
	 * DSM is segmented in hardware with different portions off-limits to
	 * certain functions.
	 *
	 * The drm_mm is initialised to the total accessible range, as found
	 * from the PCI config. On Broadwell+, this is further restricted to
	 * avoid the first page! The upper end of DSM is reserved for hardware
	 * functions and similarly removed from the accessible range.
	 */
	resource_size_t usable_size;
};
103
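/*
 * Illustrative example for i915_dsm above (hypothetical numbers, not from
 * the original header): if the stolen resource spans 128 MiB and the top
 * 1 MiB of it is reserved for hardware functions, usable_size comes out
 * at 127 MiB; GEM objects backed by stolen memory must fit in that budget.
 */
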
struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF3[3];
	u16 saveGCDGMBUS;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};
118
struct i915_gem_mm {
	/*
	 * Shortcut for the stolen region. This points to either
	 * INTEL_REGION_STOLEN_SMEM for integrated platforms, or
	 * INTEL_REGION_STOLEN_LMEM for discrete, or NULL if the device doesn't
	 * support stolen.
	 */
	struct intel_memory_region *stolen_region;
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Protects the usage of the GTT stolen memory allocator. This is
	 * always the inner lock when overlapping with struct_mutex. */
	struct mutex stolen_lock;

	/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
	spinlock_t obj_lock;

	/**
	 * List of objects which are purgeable.
	 */
	struct list_head purge_list;

	/**
	 * List of objects which have allocated pages and are shrinkable.
	 */
	struct list_head shrink_list;

	/**
	 * List of objects which are pending destruction.
	 */
	struct llist_head free_list;
	struct work_struct free_work;
	/**
	 * Count of objects pending destruction. Used to skip needlessly
	 * waiting on an RCU barrier if no objects are waiting to be freed.
	 */
	atomic_t free_count;

	/**
	 * tmpfs instance used for shmem backed objects
	 */
	struct vfsmount *gemfs;

	struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];

	struct notifier_block oom_notifier;
	struct notifier_block vmap_notifier;
	struct shrinker *shrinker;

	/* shrinker accounting, also useful for userland debugging */
	u64 shrink_memory;
	u32 shrink_count;
};
172
struct i915_virtual_gpu {
	struct mutex lock; /* serialises sending of g2v_notify command pkts */
	bool active;
	u32 caps;
	u32 *initial_mmio;
	u8 *initial_cfg_space;
	struct list_head entry;
};

struct i915_selftest_stash {
	atomic_t counter;
	struct ida mock_region_instances;
};
186
struct drm_i915_private {
	struct drm_device drm;

	struct intel_display display;

	/* FIXME: Device release actions should all be moved to drmm_ */
	bool do_release;

	/* i915 device parameters */
	struct i915_params params;

	const struct intel_device_info *__info; /* Use INTEL_INFO() to access. */
	struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
	struct intel_driver_caps caps;

	struct i915_dsm dsm;

	struct intel_uncore uncore;
	struct intel_uncore_mmio_debug mmio_debug;

	struct i915_virtual_gpu vgpu;

	struct intel_gvt *gvt;

	struct {
		struct pci_dev *pdev;
		struct resource mch_res;
		bool mchbar_need_disable;
	} gmch;

	/*
	 * Chaining user engines happens in multiple stages, starting with a
	 * simple lock-less linked list created by intel_engine_add_user(),
	 * which later gets sorted and converted to an intermediate regular
	 * list, just to be converted once again to its final rb tree structure
	 * in intel_engines_driver_register().
	 *
	 * Make sure to use the right iterator helper, depending on whether
	 * the code in question runs before or after
	 * intel_engines_driver_register() -- for_each_uabi_engine() can only
	 * be used afterwards!
	 */
	union {
		struct llist_head uabi_engines_llist;
		struct list_head uabi_engines_list;
		struct rb_root uabi_engines;
	};
	unsigned int engine_uabi_class_count[I915_LAST_UABI_ENGINE_CLASS + 1];

	/* protects the irq masks */
	spinlock_t irq_lock;
	bool irqs_enabled;

	/* Sideband mailbox protection */
	struct mutex sb_lock;
	struct pm_qos_request sb_qos;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	u32 irq_mask;

	bool preserve_bios_swizzle;

	unsigned int fsb_freq, mem_freq, is_ddr3;

	unsigned int hpll_freq;
	unsigned int czclk_freq;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/**
	 * unordered_wq - internal workqueue for unordered work
	 *
	 * This workqueue should be used for all unordered work
	 * scheduling within i915, which used to be scheduled on the
	 * system_wq before moving to a driver instance due to the
	 * deprecation of flush_scheduled_work().
	 */
	struct workqueue_struct *unordered_wq;
271
	/* pm private clock gating functions */
	const struct drm_i915_clock_gating_funcs *clock_gating_funcs;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long gem_quirks;

	struct i915_gem_mm mm;

	struct intel_l3_parity l3_parity;

	/*
	 * edram size in MB.
	 * Cannot be determined by PCIID. You must always read a register.
	 */
	u32 edram_size_mb;

	struct i915_gpu_error gpu_error;

	u32 suspend_count;
	struct i915_suspend_saved_registers regfile;
	struct vlv_s0ix_state *vlv_s0ix_state;

	struct dram_info {
		bool wm_lv_0_adjust_needed;
		u8 num_channels;
		bool symmetric_memory;
		enum intel_dram_type {
			INTEL_DRAM_UNKNOWN,
			INTEL_DRAM_DDR3,
			INTEL_DRAM_DDR4,
			INTEL_DRAM_LPDDR3,
			INTEL_DRAM_LPDDR4,
			INTEL_DRAM_DDR5,
			INTEL_DRAM_LPDDR5,
			INTEL_DRAM_GDDR,
		} type;
		u8 num_qgv_points;
		u8 num_psf_gv_points;
	} dram_info;

	struct intel_runtime_pm runtime_pm;

	struct i915_perf perf;

	struct i915_hwmon *hwmon;

	struct intel_gt *gt[I915_MAX_GT];

	struct kobject *sysfs_gt;

	/* Quick lookup of media GT (current platforms only have one) */
	struct intel_gt *media_gt;

	struct {
		struct i915_gem_contexts {
			spinlock_t lock; /* locks list */
			struct list_head list;
		} contexts;

		/*
		 * We replace the local file with a global mapping as the
		 * backing storage for the mmap is on the device and not
		 * on the struct file, and we do not want to prolong the
		 * lifetime of the local fd. To minimise the number of
		 * anonymous inodes we create, we use a global singleton to
		 * share the global mapping.
		 */
		struct file *mmap_singleton;
	} gem;
344
	struct intel_pxp *pxp;

	struct i915_pmu pmu;

	/* The TTM device structure. */
	struct ttm_device bdev;

	I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)

	/*
	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
	 * will be rejected. Instead look for a better place.
	 */
};
359
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return container_of(dev, struct drm_i915_private, drm);
}

static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
{
	struct drm_device *drm = dev_get_drvdata(kdev);

	return drm ? to_i915(drm) : NULL;
}

static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
{
	struct drm_device *drm = pci_get_drvdata(pdev);

	return drm ? to_i915(drm) : NULL;
}

static inline struct intel_gt *to_gt(const struct drm_i915_private *i915)
{
	return i915->gt[0];
}
383
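/*
 * Illustrative usage sketch (an assumption, not part of the original
 * header): given a struct drm_device *dev, e.g. inside an ioctl handler,
 * the device private and its primary GT are reached via the helpers above.
 *
 *	struct drm_i915_private *i915 = to_i915(dev);
 *	struct intel_gt *gt = to_gt(i915);
 */
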
#define rb_to_uabi_engine(rb) \
	rb_entry_safe(rb, struct intel_engine_cs, uabi_node)

#define for_each_uabi_engine(engine__, i915__) \
	for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
	     (engine__); \
	     (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
391
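/*
 * Illustrative usage sketch (an assumption, not part of the original
 * header): iterate every user-visible engine once the rb tree has been
 * populated by intel_engines_driver_register().
 *
 *	struct intel_engine_cs *engine;
 *
 *	for_each_uabi_engine(engine, i915)
 *		drm_dbg(&i915->drm, "engine %s\n", engine->name);
 */
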
#define INTEL_INFO(i915) ((i915)->__info)
#define RUNTIME_INFO(i915) (&(i915)->__runtime)
#define DRIVER_CAPS(i915) (&(i915)->caps)

#define INTEL_DEVID(i915) (RUNTIME_INFO(i915)->device_id)

#define IP_VER(ver, rel) ((ver) << 8 | (rel))

#define GRAPHICS_VER(i915) (RUNTIME_INFO(i915)->graphics.ip.ver)
#define GRAPHICS_VER_FULL(i915) IP_VER(RUNTIME_INFO(i915)->graphics.ip.ver, \
				       RUNTIME_INFO(i915)->graphics.ip.rel)
#define IS_GRAPHICS_VER(i915, from, until) \
	(GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))
405
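/*
 * Worked example for IP_VER() (illustrative): the major version lands in
 * the high byte and the release in the low byte, so IP_VER(12, 70) is
 * 0x0c46. Full versions therefore compare as plain integers, e.g.
 * GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70) matches graphics IP 12.70+.
 */
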
#define MEDIA_VER(i915) (RUNTIME_INFO(i915)->media.ip.ver)
#define MEDIA_VER_FULL(i915) IP_VER(RUNTIME_INFO(i915)->media.ip.ver, \
				    RUNTIME_INFO(i915)->media.ip.rel)
#define IS_MEDIA_VER(i915, from, until) \
	(MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until))

#define INTEL_REVID(i915) (to_pci_dev((i915)->drm.dev)->revision)

#define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step)
#define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step)

#define IS_GRAPHICS_STEP(__i915, since, until) \
	(drm_WARN_ON(&(__i915)->drm, INTEL_GRAPHICS_STEP(__i915) == STEP_NONE), \
	 INTEL_GRAPHICS_STEP(__i915) >= (since) && INTEL_GRAPHICS_STEP(__i915) < (until))

#define IS_MEDIA_STEP(__i915, since, until) \
	(drm_WARN_ON(&(__i915)->drm, INTEL_MEDIA_STEP(__i915) == STEP_NONE), \
	 INTEL_MEDIA_STEP(__i915) >= (since) && INTEL_MEDIA_STEP(__i915) < (until))
424
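/*
 * Note on the stepping helpers above (editorial, hedged): the range is
 * half-open, [since, until). For example, a workaround bounded by
 * IS_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) applies to the A steppings
 * but not to B0 itself.
 */
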
static __always_inline unsigned int
__platform_mask_index(const struct intel_runtime_info *info,
		      enum intel_platform p)
{
	const unsigned int pbits =
		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

	/* Expand the platform_mask array if this fails. */
	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
		     pbits * ARRAY_SIZE(info->platform_mask));

	return p / pbits;
}

static __always_inline unsigned int
__platform_mask_bit(const struct intel_runtime_info *info,
		    enum intel_platform p)
{
	const unsigned int pbits =
		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

	return p % pbits + INTEL_SUBPLATFORM_BITS;
}

static inline u32
intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
{
	const unsigned int pi = __platform_mask_index(info, p);

	return info->platform_mask[pi] & INTEL_SUBPLATFORM_MASK;
}

static __always_inline bool
IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(info, p);
	const unsigned int pb = __platform_mask_bit(info, p);

	BUILD_BUG_ON(!__builtin_constant_p(p));

	return info->platform_mask[pi] & BIT(pb);
}

static __always_inline bool
IS_SUBPLATFORM(const struct drm_i915_private *i915,
	       enum intel_platform p, unsigned int s)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(info, p);
	const unsigned int pb = __platform_mask_bit(info, p);
	const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
	const u32 mask = info->platform_mask[pi];

	BUILD_BUG_ON(!__builtin_constant_p(p));
	BUILD_BUG_ON(!__builtin_constant_p(s));
	BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);

	/* Shift and test on the MSB position so the sign flag can be used. */
	return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
}
486
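/*
 * Worked example for IS_SUBPLATFORM() above (editorial): with 32-bit mask
 * words, msb is 31. Shifting the mask left by (msb - pb) parks the
 * platform bit in the MSB, and shifting by (msb - s) parks the
 * subplatform bit there as well; ANDing the two shifted copies and testing
 * BIT(msb) is thus true only when both bits were set in the original mask.
 */
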
#define IS_MOBILE(i915) (INTEL_INFO(i915)->is_mobile)
#define IS_DGFX(i915) (INTEL_INFO(i915)->is_dgfx)

#define IS_I830(i915) IS_PLATFORM(i915, INTEL_I830)
#define IS_I845G(i915) IS_PLATFORM(i915, INTEL_I845G)
#define IS_I85X(i915) IS_PLATFORM(i915, INTEL_I85X)
#define IS_I865G(i915) IS_PLATFORM(i915, INTEL_I865G)
#define IS_I915G(i915) IS_PLATFORM(i915, INTEL_I915G)
#define IS_I915GM(i915) IS_PLATFORM(i915, INTEL_I915GM)
#define IS_I945G(i915) IS_PLATFORM(i915, INTEL_I945G)
#define IS_I945GM(i915) IS_PLATFORM(i915, INTEL_I945GM)
#define IS_I965G(i915) IS_PLATFORM(i915, INTEL_I965G)
#define IS_I965GM(i915) IS_PLATFORM(i915, INTEL_I965GM)
#define IS_G45(i915) IS_PLATFORM(i915, INTEL_G45)
#define IS_GM45(i915) IS_PLATFORM(i915, INTEL_GM45)
#define IS_G4X(i915) (IS_G45(i915) || IS_GM45(i915))
#define IS_PINEVIEW(i915) IS_PLATFORM(i915, INTEL_PINEVIEW)
#define IS_G33(i915) IS_PLATFORM(i915, INTEL_G33)
#define IS_IRONLAKE(i915) IS_PLATFORM(i915, INTEL_IRONLAKE)
#define IS_IRONLAKE_M(i915) \
	(IS_PLATFORM(i915, INTEL_IRONLAKE) && IS_MOBILE(i915))
#define IS_SANDYBRIDGE(i915) IS_PLATFORM(i915, INTEL_SANDYBRIDGE)
#define IS_IVYBRIDGE(i915) IS_PLATFORM(i915, INTEL_IVYBRIDGE)
#define IS_VALLEYVIEW(i915) IS_PLATFORM(i915, INTEL_VALLEYVIEW)
#define IS_CHERRYVIEW(i915) IS_PLATFORM(i915, INTEL_CHERRYVIEW)
#define IS_HASWELL(i915) IS_PLATFORM(i915, INTEL_HASWELL)
#define IS_BROADWELL(i915) IS_PLATFORM(i915, INTEL_BROADWELL)
#define IS_SKYLAKE(i915) IS_PLATFORM(i915, INTEL_SKYLAKE)
#define IS_BROXTON(i915) IS_PLATFORM(i915, INTEL_BROXTON)
#define IS_KABYLAKE(i915) IS_PLATFORM(i915, INTEL_KABYLAKE)
#define IS_GEMINILAKE(i915) IS_PLATFORM(i915, INTEL_GEMINILAKE)
#define IS_COFFEELAKE(i915) IS_PLATFORM(i915, INTEL_COFFEELAKE)
#define IS_COMETLAKE(i915) IS_PLATFORM(i915, INTEL_COMETLAKE)
#define IS_ICELAKE(i915) IS_PLATFORM(i915, INTEL_ICELAKE)
#define IS_JASPERLAKE(i915) IS_PLATFORM(i915, INTEL_JASPERLAKE)
#define IS_ELKHARTLAKE(i915) IS_PLATFORM(i915, INTEL_ELKHARTLAKE)
#define IS_TIGERLAKE(i915) IS_PLATFORM(i915, INTEL_TIGERLAKE)
#define IS_ROCKETLAKE(i915) IS_PLATFORM(i915, INTEL_ROCKETLAKE)
#define IS_DG1(i915) IS_PLATFORM(i915, INTEL_DG1)
#define IS_ALDERLAKE_S(i915) IS_PLATFORM(i915, INTEL_ALDERLAKE_S)
#define IS_ALDERLAKE_P(i915) IS_PLATFORM(i915, INTEL_ALDERLAKE_P)
#define IS_DG2(i915) IS_PLATFORM(i915, INTEL_DG2)
#define IS_METEORLAKE(i915) IS_PLATFORM(i915, INTEL_METEORLAKE)
/*
 * Display code shared by i915 and Xe relies on macros like IS_LUNARLAKE,
 * so we need to define these even on platforms that the i915 base driver
 * doesn't support. Ensure the parameter is used in the definition to
 * avoid 'unused variable' warnings when compiling the shared display code
 * for i915.
 */
#define IS_LUNARLAKE(i915) (0 && i915)
#define IS_BATTLEMAGE(i915) (0 && i915)
#define IS_PANTHERLAKE(i915) (0 && i915)

#define IS_ARROWLAKE_H(i915) \
	IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_H)
#define IS_ARROWLAKE_U(i915) \
	IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_U)
#define IS_ARROWLAKE_S(i915) \
	IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_S)
#define IS_DG2_G10(i915) \
	IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G10)
#define IS_DG2_G11(i915) \
	IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G11)
#define IS_DG2_G12(i915) \
	IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G12)
#define IS_RAPTORLAKE_S(i915) \
	IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL)
#define IS_ALDERLAKE_P_N(i915) \
	IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N)
#define IS_RAPTORLAKE_P(i915) \
	IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPL)
#define IS_RAPTORLAKE_U(i915) \
	IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPLU)
#define IS_HASWELL_EARLY_SDV(i915) (IS_HASWELL(i915) && \
				    (INTEL_DEVID(i915) & 0xFF00) == 0x0C00)
#define IS_BROADWELL_ULT(i915) \
	IS_SUBPLATFORM(i915, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
#define IS_BROADWELL_ULX(i915) \
	IS_SUBPLATFORM(i915, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
#define IS_HASWELL_ULT(i915) \
	IS_SUBPLATFORM(i915, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
/* ULX machines are also considered ULT. */
#define IS_HASWELL_ULX(i915) \
	IS_SUBPLATFORM(i915, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
#define IS_SKYLAKE_ULT(i915) \
	IS_SUBPLATFORM(i915, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_SKYLAKE_ULX(i915) \
	IS_SUBPLATFORM(i915, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_KABYLAKE_ULT(i915) \
	IS_SUBPLATFORM(i915, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_KABYLAKE_ULX(i915) \
	IS_SUBPLATFORM(i915, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_COFFEELAKE_ULT(i915) \
	IS_SUBPLATFORM(i915, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
#define IS_COFFEELAKE_ULX(i915) \
	IS_SUBPLATFORM(i915, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
#define IS_COMETLAKE_ULT(i915) \
	IS_SUBPLATFORM(i915, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_COMETLAKE_ULX(i915) \
	IS_SUBPLATFORM(i915, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)

#define IS_ICL_WITH_PORT_F(i915) \
	IS_SUBPLATFORM(i915, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)

#define IS_TIGERLAKE_UY(i915) \
	IS_SUBPLATFORM(i915, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_UY)

#define IS_GEN9_LP(i915) (IS_BROXTON(i915) || IS_GEMINILAKE(i915))
#define IS_GEN9_BC(i915) (GRAPHICS_VER(i915) == 9 && !IS_GEN9_LP(i915))

#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)

#define __ENGINE_INSTANCES_MASK(mask, first, count) ({ \
	unsigned int first__ = (first); \
	unsigned int count__ = (count); \
	((mask) & GENMASK(first__ + count__ - 1, first__)) >> first__; \
})

#define ENGINE_INSTANCES_MASK(gt, first, count) \
	__ENGINE_INSTANCES_MASK((gt)->info.engine_mask, first, count)
609
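/*
 * Worked example (illustrative, not from the original header): if a GT's
 * engine_mask has VCS0 and VCS2 set, ENGINE_INSTANCES_MASK(gt, VCS0,
 * I915_MAX_VCS) extracts that window and shifts it down to bit 0,
 * yielding 0b101: video engine instances 0 and 2 are present.
 */
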
#define RCS_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, RCS0, I915_MAX_RCS)
#define BCS_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, BCS0, I915_MAX_BCS)
#define VDBOX_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
#define VEBOX_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
#define CCS_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, CCS0, I915_MAX_CCS)

#define HAS_MEDIA_RATIO_MODE(i915) (INTEL_INFO(i915)->has_media_ratio_mode)

/*
 * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution.
 * All later gens can run the final buffer from the ppgtt.
 */
#define CMDPARSER_USES_GGTT(i915) (GRAPHICS_VER(i915) == 7)

#define HAS_LLC(i915) (INTEL_INFO(i915)->has_llc)
#define HAS_SNOOP(i915) (INTEL_INFO(i915)->has_snoop)
#define HAS_EDRAM(i915) ((i915)->edram_size_mb)
#define HAS_SECURE_BATCHES(i915) (GRAPHICS_VER(i915) < 6)
#define HAS_WT(i915) HAS_EDRAM(i915)

#define HWS_NEEDS_PHYSICAL(i915) (INTEL_INFO(i915)->hws_needs_physical)

#define HAS_LOGICAL_RING_CONTEXTS(i915) \
	(INTEL_INFO(i915)->has_logical_ring_contexts)
#define HAS_LOGICAL_RING_ELSQ(i915) \
	(INTEL_INFO(i915)->has_logical_ring_elsq)

#define HAS_EXECLISTS(i915) HAS_LOGICAL_RING_CONTEXTS(i915)

#define INTEL_PPGTT(i915) (RUNTIME_INFO(i915)->ppgtt_type)
#define HAS_PPGTT(i915) \
	(INTEL_PPGTT(i915) != INTEL_PPGTT_NONE)
#define HAS_FULL_PPGTT(i915) \
	(INTEL_PPGTT(i915) >= INTEL_PPGTT_FULL)

#define HAS_PAGE_SIZES(i915, sizes) ({ \
	GEM_BUG_ON((sizes) == 0); \
	((sizes) & ~RUNTIME_INFO(i915)->page_sizes) == 0; \
})
654
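/*
 * Illustrative usage sketch for HAS_PAGE_SIZES() (an assumption, not part
 * of the original header): the check passes only if every requested size
 * is supported; use_both_64k_and_2m() below is hypothetical.
 *
 *	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_2M))
 *		use_both_64k_and_2m();
 */
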
#define NEEDS_RC6_CTX_CORRUPTION_WA(i915) \
	(IS_BROADWELL(i915) || GRAPHICS_VER(i915) == 9)

/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(i915) \
	(IS_SKYLAKE(i915) && (INTEL_INFO(i915)->gt == 3 || INTEL_INFO(i915)->gt == 4))

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(i915) (GRAPHICS_VER(i915) != 2 && \
				     !(IS_I915G(i915) || IS_I915GM(i915)))

#define HAS_RC6(i915) (INTEL_INFO(i915)->has_rc6)
#define HAS_RC6p(i915) (INTEL_INFO(i915)->has_rc6p)
#define HAS_RC6pp(i915) (false) /* HW was never validated */

#define HAS_RPS(i915) (INTEL_INFO(i915)->has_rps)

#define HAS_PXP(i915) \
	(IS_ENABLED(CONFIG_DRM_I915_PXP) && INTEL_INFO(i915)->has_pxp)

#define HAS_HECI_PXP(i915) \
	(INTEL_INFO(i915)->has_heci_pxp)

#define HAS_HECI_GSCFI(i915) \
	(INTEL_INFO(i915)->has_heci_gscfi)

#define HAS_HECI_GSC(i915) (HAS_HECI_PXP(i915) || HAS_HECI_GSCFI(i915))

#define HAS_RUNTIME_PM(i915) (INTEL_INFO(i915)->has_runtime_pm)
#define HAS_64BIT_RELOC(i915) (INTEL_INFO(i915)->has_64bit_reloc)

#define HAS_OA_BPC_REPORTING(i915) \
	(INTEL_INFO(i915)->has_oa_bpc_reporting)
#define HAS_OA_SLICE_CONTRIB_LIMITS(i915) \
	(INTEL_INFO(i915)->has_oa_slice_contrib_limits)
#define HAS_OAM(i915) \
	(INTEL_INFO(i915)->has_oam)

/*
 * Set this flag when the platform requires 64K GTT page sizes or larger
 * for device local memory access.
 */
#define HAS_64K_PAGES(i915) (INTEL_INFO(i915)->has_64k_pages)

#define HAS_REGION(i915, id) (INTEL_INFO(i915)->memory_regions & BIT(id))
#define HAS_LMEM(i915) HAS_REGION(i915, INTEL_REGION_LMEM_0)

#define HAS_EXTRA_GT_LIST(i915) (INTEL_INFO(i915)->extra_gt_list)

/*
 * Platform has the dedicated compression control state for each lmem surface
 * stored in lmem to support the 3D and media compression formats.
 */
#define HAS_FLAT_CCS(i915) (INTEL_INFO(i915)->has_flat_ccs)

#define HAS_GT_UC(i915) (INTEL_INFO(i915)->has_gt_uc)

#define HAS_POOLED_EU(i915) (RUNTIME_INFO(i915)->has_pooled_eu)

#define HAS_GLOBAL_MOCS_REGISTERS(i915) (INTEL_INFO(i915)->has_global_mocs)

#define HAS_GMD_ID(i915) (INTEL_INFO(i915)->has_gmd_id)

#define HAS_L3_CCS_READ(i915) (INTEL_INFO(i915)->has_l3_ccs_read)

/* DPF == dynamic parity feature */
#define HAS_L3_DPF(i915) (INTEL_INFO(i915)->has_l3_dpf)
#define NUM_L3_SLICES(i915) (IS_HASWELL(i915) && INTEL_INFO(i915)->gt == 3 ? \
			     2 : HAS_L3_DPF(i915))

#define HAS_GUC_DEPRIVILEGE(i915) \
	(INTEL_INFO(i915)->has_guc_deprivilege)

#define HAS_GUC_TLB_INVALIDATION(i915) (INTEL_INFO(i915)->has_guc_tlb_invalidation)

#define HAS_3D_PIPELINE(i915) (INTEL_INFO(i915)->has_3d_pipeline)

#define HAS_ONE_EU_PER_FUSE_BIT(i915) (INTEL_INFO(i915)->has_one_eu_per_fuse_bit)

#define HAS_LMEMBAR_SMEM_STOLEN(i915) (!HAS_LMEM(i915) && \
				       GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))

#endif
1/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
2 */
3/*
4 *
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
18 * of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 *
28 */
29
30#ifndef _I915_DRV_H_
31#define _I915_DRV_H_
32
33#include <uapi/drm/i915_drm.h>
34#include <uapi/drm/drm_fourcc.h>
35
36#include <linux/io-mapping.h>
37#include <linux/i2c.h>
38#include <linux/i2c-algo-bit.h>
39#include <linux/backlight.h>
40#include <linux/hash.h>
41#include <linux/intel-iommu.h>
42#include <linux/kref.h>
43#include <linux/mm_types.h>
44#include <linux/perf_event.h>
45#include <linux/pm_qos.h>
46#include <linux/dma-resv.h>
47#include <linux/shmem_fs.h>
48#include <linux/stackdepot.h>
49#include <linux/xarray.h>
50
51#include <drm/intel-gtt.h>
52#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
53#include <drm/drm_gem.h>
54#include <drm/drm_auth.h>
55#include <drm/drm_cache.h>
56#include <drm/drm_util.h>
57#include <drm/drm_dsc.h>
58#include <drm/drm_atomic.h>
59#include <drm/drm_connector.h>
60#include <drm/i915_mei_hdcp_interface.h>
61
62#include "i915_params.h"
63#include "i915_reg.h"
64#include "i915_utils.h"
65
66#include "display/intel_bios.h"
67#include "display/intel_display.h"
68#include "display/intel_display_power.h"
69#include "display/intel_dpll_mgr.h"
70#include "display/intel_dsb.h"
71#include "display/intel_frontbuffer.h"
72#include "display/intel_global_state.h"
73#include "display/intel_gmbus.h"
74#include "display/intel_opregion.h"
75
76#include "gem/i915_gem_context_types.h"
77#include "gem/i915_gem_shrinker.h"
78#include "gem/i915_gem_stolen.h"
79
80#include "gt/intel_lrc.h"
81#include "gt/intel_engine.h"
82#include "gt/intel_gt_types.h"
83#include "gt/intel_workarounds.h"
84#include "gt/uc/intel_uc.h"
85
86#include "intel_device_info.h"
87#include "intel_pch.h"
88#include "intel_runtime_pm.h"
89#include "intel_memory_region.h"
90#include "intel_uncore.h"
91#include "intel_wakeref.h"
92#include "intel_wopcm.h"
93
94#include "i915_gem.h"
95#include "i915_gem_gtt.h"
96#include "i915_gpu_error.h"
97#include "i915_perf_types.h"
98#include "i915_request.h"
99#include "i915_scheduler.h"
100#include "gt/intel_timeline.h"
101#include "i915_vma.h"
102#include "i915_irq.h"
103
104#include "intel_region_lmem.h"
105
106/* General customization:
107 */
108
109#define DRIVER_NAME "i915"
110#define DRIVER_DESC "Intel Graphics"
111#define DRIVER_DATE "20200715"
112#define DRIVER_TIMESTAMP 1594811881
113
114struct drm_i915_gem_object;
115
116/*
117 * The code assumes that the hpd_pins below have consecutive values and
118 * starting with HPD_PORT_A, the HPD pin associated with any port can be
119 * retrieved by adding the corresponding port (or phy) enum value to
120 * HPD_PORT_A in most cases. For example:
121 * HPD_PORT_C = HPD_PORT_A + PHY_C - PHY_A
122 */
123enum hpd_pin {
124 HPD_NONE = 0,
125 HPD_TV = HPD_NONE, /* TV is known to be unreliable */
126 HPD_CRT,
127 HPD_SDVO_B,
128 HPD_SDVO_C,
129 HPD_PORT_A,
130 HPD_PORT_B,
131 HPD_PORT_C,
132 HPD_PORT_D,
133 HPD_PORT_E,
134 HPD_PORT_F,
135 HPD_PORT_G,
136 HPD_PORT_H,
137 HPD_PORT_I,
138
139 HPD_NUM_PINS
140};
141
142#define for_each_hpd_pin(__pin) \
143 for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
144
145/* Threshold == 5 for long IRQs, 50 for short */
146#define HPD_STORM_DEFAULT_THRESHOLD 50
147
148struct i915_hotplug {
149 struct delayed_work hotplug_work;
150
151 const u32 *hpd, *pch_hpd;
152
153 struct {
154 unsigned long last_jiffies;
155 int count;
156 enum {
157 HPD_ENABLED = 0,
158 HPD_DISABLED = 1,
159 HPD_MARK_DISABLED = 2
160 } state;
161 } stats[HPD_NUM_PINS];
162 u32 event_bits;
163 u32 retry_bits;
164 struct delayed_work reenable_work;
165
166 u32 long_port_mask;
167 u32 short_port_mask;
168 struct work_struct dig_port_work;
169
170 struct work_struct poll_init_work;
171 bool poll_enabled;
172
173 unsigned int hpd_storm_threshold;
174 /* Whether or not to count short HPD IRQs in HPD storms */
175 u8 hpd_short_storm_enabled;
176
177 /*
178 * if we get a HPD irq from DP and a HPD irq from non-DP
179 * the non-DP HPD could block the workqueue on a mode config
180 * mutex getting, that userspace may have taken. However
181 * userspace is waiting on the DP workqueue to run which is
182 * blocked behind the non-DP one.
183 */
184 struct workqueue_struct *dp_wq;
185};
186
187#define I915_GEM_GPU_DOMAINS \
188 (I915_GEM_DOMAIN_RENDER | \
189 I915_GEM_DOMAIN_SAMPLER | \
190 I915_GEM_DOMAIN_COMMAND | \
191 I915_GEM_DOMAIN_INSTRUCTION | \
192 I915_GEM_DOMAIN_VERTEX)
193
194struct drm_i915_private;
195struct i915_mm_struct;
196struct i915_mmu_object;
197
198struct drm_i915_file_private {
199 struct drm_i915_private *dev_priv;
200
201 union {
202 struct drm_file *file;
203 struct rcu_head rcu;
204 };
205
206 struct {
207 spinlock_t lock;
208 struct list_head request_list;
209 } mm;
210
211 struct xarray context_xa;
212 struct xarray vm_xa;
213
214 unsigned int bsd_engine;
215
216/*
217 * Every context ban increments per client ban score. Also
218 * hangs in short succession increments ban score. If ban threshold
219 * is reached, client is considered banned and submitting more work
220 * will fail. This is a stop gap measure to limit the badly behaving
221 * clients access to gpu. Note that unbannable contexts never increment
222 * the client ban score.
223 */
224#define I915_CLIENT_SCORE_HANG_FAST 1
225#define I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
226#define I915_CLIENT_SCORE_CONTEXT_BAN 3
227#define I915_CLIENT_SCORE_BANNED 9
228 /** ban_score: Accumulated score of all ctx bans and fast hangs. */
229 atomic_t ban_score;
230 unsigned long hang_timestamp;
231};
232
233/* Interface history:
234 *
235 * 1.1: Original.
236 * 1.2: Add Power Management
237 * 1.3: Add vblank support
238 * 1.4: Fix cmdbuffer path, add heap destroy
239 * 1.5: Add vblank pipe configuration
240 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
241 * - Support vertical blank on secondary display pipe
242 */
243#define DRIVER_MAJOR 1
244#define DRIVER_MINOR 6
245#define DRIVER_PATCHLEVEL 0
246
247struct intel_overlay;
248struct intel_overlay_error_state;
249
250struct sdvo_device_mapping {
251 u8 initialized;
252 u8 dvo_port;
253 u8 slave_addr;
254 u8 dvo_wiring;
255 u8 i2c_pin;
256 u8 ddc_pin;
257};
258
259struct intel_connector;
260struct intel_encoder;
261struct intel_atomic_state;
262struct intel_cdclk_config;
263struct intel_cdclk_state;
264struct intel_cdclk_vals;
265struct intel_initial_plane_config;
266struct intel_crtc;
267struct intel_limit;
268struct dpll;
269
270struct drm_i915_display_funcs {
271 void (*get_cdclk)(struct drm_i915_private *dev_priv,
272 struct intel_cdclk_config *cdclk_config);
273 void (*set_cdclk)(struct drm_i915_private *dev_priv,
274 const struct intel_cdclk_config *cdclk_config,
275 enum pipe pipe);
276 int (*bw_calc_min_cdclk)(struct intel_atomic_state *state);
277 int (*get_fifo_size)(struct drm_i915_private *dev_priv,
278 enum i9xx_plane_id i9xx_plane);
279 int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state);
280 int (*compute_intermediate_wm)(struct intel_crtc_state *crtc_state);
281 void (*initial_watermarks)(struct intel_atomic_state *state,
282 struct intel_crtc *crtc);
283 void (*atomic_update_watermarks)(struct intel_atomic_state *state,
284 struct intel_crtc *crtc);
285 void (*optimize_watermarks)(struct intel_atomic_state *state,
286 struct intel_crtc *crtc);
287 int (*compute_global_watermarks)(struct intel_atomic_state *state);
288 void (*update_wm)(struct intel_crtc *crtc);
289 int (*modeset_calc_cdclk)(struct intel_cdclk_state *state);
290 u8 (*calc_voltage_level)(int cdclk);
291 /* Returns the active state of the crtc, and if the crtc is active,
292 * fills out the pipe-config with the hw state. */
293 bool (*get_pipe_config)(struct intel_crtc *,
294 struct intel_crtc_state *);
295 void (*get_initial_plane_config)(struct intel_crtc *,
296 struct intel_initial_plane_config *);
297 int (*crtc_compute_clock)(struct intel_crtc *crtc,
298 struct intel_crtc_state *crtc_state);
299 void (*crtc_enable)(struct intel_atomic_state *state,
300 struct intel_crtc *crtc);
301 void (*crtc_disable)(struct intel_atomic_state *state,
302 struct intel_crtc *crtc);
303 void (*commit_modeset_enables)(struct intel_atomic_state *state);
304 void (*commit_modeset_disables)(struct intel_atomic_state *state);
305 void (*audio_codec_enable)(struct intel_encoder *encoder,
306 const struct intel_crtc_state *crtc_state,
307 const struct drm_connector_state *conn_state);
308 void (*audio_codec_disable)(struct intel_encoder *encoder,
309 const struct intel_crtc_state *old_crtc_state,
310 const struct drm_connector_state *old_conn_state);
311 void (*fdi_link_train)(struct intel_crtc *crtc,
312 const struct intel_crtc_state *crtc_state);
313 void (*init_clock_gating)(struct drm_i915_private *dev_priv);
314 void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
315 /* clock updates for mode set */
316 /* cursor updates */
317 /* render clock increase/decrease */
318 /* display clock increase/decrease */
319 /* pll clock increase/decrease */
320
321 int (*color_check)(struct intel_crtc_state *crtc_state);
322 /*
323 * Program double buffered color management registers during
324 * vblank evasion. The registers should then latch during the
325 * next vblank start, alongside any other double buffered registers
326 * involved with the same commit.
327 */
328 void (*color_commit)(const struct intel_crtc_state *crtc_state);
329 /*
330 * Load LUTs (and other single buffered color management
331 * registers). Will (hopefully) be called during the vblank
332 * following the latching of any double buffered registers
333 * involved with the same commit.
334 */
335 void (*load_luts)(const struct intel_crtc_state *crtc_state);
336 void (*read_luts)(struct intel_crtc_state *crtc_state);
337};
338
339struct intel_csr {
340 struct work_struct work;
341 const char *fw_path;
342 u32 required_version;
343 u32 max_fw_size; /* bytes */
344 u32 *dmc_payload;
345 u32 dmc_fw_size; /* dwords */
346 u32 version;
347 u32 mmio_count;
348 i915_reg_t mmioaddr[20];
349 u32 mmiodata[20];
350 u32 dc_state;
351 u32 target_dc_state;
352 u32 allowed_dc_mask;
353 intel_wakeref_t wakeref;
354};
355
356enum i915_cache_level {
357 I915_CACHE_NONE = 0,
358 I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
359 I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specifc
360 caches, eg sampler/render caches, and the
361 large Last-Level-Cache. LLC is coherent with
362 the CPU, but L3 is only visible to the GPU. */
363 I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
364};
365
366#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
367
368struct intel_fbc {
369 /* This is always the inner lock when overlapping with struct_mutex and
370 * it's the outer lock when overlapping with stolen_lock. */
371 struct mutex lock;
372 unsigned threshold;
373 unsigned int possible_framebuffer_bits;
374 unsigned int busy_bits;
375 struct intel_crtc *crtc;
376
377 struct drm_mm_node compressed_fb;
378 struct drm_mm_node *compressed_llb;
379
380 bool false_color;
381
382 bool active;
383 bool activated;
384 bool flip_pending;
385
386 bool underrun_detected;
387 struct work_struct underrun_work;
388
389 /*
390 * Due to the atomic rules we can't access some structures without the
391 * appropriate locking, so we cache information here in order to avoid
392 * these problems.
393 */
394 struct intel_fbc_state_cache {
395 struct {
396 unsigned int mode_flags;
397 u32 hsw_bdw_pixel_rate;
398 } crtc;
399
400 struct {
401 unsigned int rotation;
402 int src_w;
403 int src_h;
404 bool visible;
405 /*
406 * Display surface base address adjustement for
407 * pageflips. Note that on gen4+ this only adjusts up
408 * to a tile, offsets within a tile are handled in
409 * the hw itself (with the TILEOFF register).
410 */
411 int adjusted_x;
412 int adjusted_y;
413
414 u16 pixel_blend_mode;
415 } plane;
416
417 struct {
418 const struct drm_format_info *format;
419 unsigned int stride;
420 u64 modifier;
421 } fb;
422
423 unsigned int fence_y_offset;
424 u16 gen9_wa_cfb_stride;
425 u16 interval;
426 s8 fence_id;
427 } state_cache;
428
429 /*
430 * This structure contains everything that's relevant to program the
431 * hardware registers. When we want to figure out if we need to disable
432 * and re-enable FBC for a new configuration we just check if there's
433 * something different in the struct. The genx_fbc_activate functions
434 * are supposed to read from it in order to program the registers.
435 */
436 struct intel_fbc_reg_params {
437 struct {
438 enum pipe pipe;
439 enum i9xx_plane_id i9xx_plane;
440 } crtc;
441
442 struct {
443 const struct drm_format_info *format;
444 unsigned int stride;
445 u64 modifier;
446 } fb;
447
448 int cfb_size;
449 unsigned int fence_y_offset;
450 u16 gen9_wa_cfb_stride;
451 u16 interval;
452 s8 fence_id;
453 bool plane_visible;
454 } params;
455
456 const char *no_fbc_reason;
457};
458
459/*
460 * HIGH_RR is the highest eDP panel refresh rate read from EDID
461 * LOW_RR is the lowest eDP panel refresh rate found from EDID
462 * parsing for same resolution.
463 */
464enum drrs_refresh_rate_type {
465 DRRS_HIGH_RR,
466 DRRS_LOW_RR,
467 DRRS_MAX_RR, /* RR count */
468};
469
470enum drrs_support_type {
471 DRRS_NOT_SUPPORTED = 0,
472 STATIC_DRRS_SUPPORT = 1,
473 SEAMLESS_DRRS_SUPPORT = 2
474};
475
476struct intel_dp;
477struct i915_drrs {
478 struct mutex mutex;
479 struct delayed_work work;
480 struct intel_dp *dp;
481 unsigned busy_frontbuffer_bits;
482 enum drrs_refresh_rate_type refresh_rate_type;
483 enum drrs_support_type type;
484};
485
486struct i915_psr {
487 struct mutex lock;
488
489#define I915_PSR_DEBUG_MODE_MASK 0x0f
490#define I915_PSR_DEBUG_DEFAULT 0x00
491#define I915_PSR_DEBUG_DISABLE 0x01
492#define I915_PSR_DEBUG_ENABLE 0x02
493#define I915_PSR_DEBUG_FORCE_PSR1 0x03
494#define I915_PSR_DEBUG_IRQ 0x10
495
496 u32 debug;
497 bool sink_support;
498 bool enabled;
499 struct intel_dp *dp;
500 enum pipe pipe;
501 enum transcoder transcoder;
502 bool active;
503 struct work_struct work;
504 unsigned busy_frontbuffer_bits;
505 bool sink_psr2_support;
506 bool link_standby;
507 bool colorimetry_support;
508 bool psr2_enabled;
509 u8 sink_sync_latency;
510 ktime_t last_entry_attempt;
511 ktime_t last_exit;
512 bool sink_not_reliable;
513 bool irq_aux_error;
514 u16 su_x_granularity;
515 bool dc3co_enabled;
516 u32 dc3co_exit_delay;
517 struct delayed_work dc3co_work;
518 bool force_mode_changed;
519 struct drm_dp_vsc_sdp vsc;
520};
521
522#define QUIRK_LVDS_SSC_DISABLE (1<<1)
523#define QUIRK_INVERT_BRIGHTNESS (1<<2)
524#define QUIRK_BACKLIGHT_PRESENT (1<<3)
525#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
526#define QUIRK_INCREASE_T12_DELAY (1<<6)
527#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
528
529struct intel_fbdev;
530struct intel_fbc_work;
531
532struct intel_gmbus {
533 struct i2c_adapter adapter;
534#define GMBUS_FORCE_BIT_RETRY (1U << 31)
535 u32 force_bit;
536 u32 reg0;
537 i915_reg_t gpio_reg;
538 struct i2c_algo_bit_data bit_algo;
539 struct drm_i915_private *dev_priv;
540};
541
542struct i915_suspend_saved_registers {
543 u32 saveDSPARB;
544 u32 saveFBC_CONTROL;
545 u32 saveCACHE_MODE_0;
546 u32 saveMI_ARB_STATE;
547 u32 saveSWF0[16];
548 u32 saveSWF1[16];
549 u32 saveSWF3[3];
550 u32 savePCH_PORT_HOTPLUG;
551 u16 saveGCDGMBUS;
552};
553
554struct vlv_s0ix_state;
555
556#define MAX_L3_SLICES 2
557struct intel_l3_parity {
558 u32 *remap_info[MAX_L3_SLICES];
559 struct work_struct error_work;
560 int which_slice;
561};
562
563struct i915_gem_mm {
564 /** Memory allocator for GTT stolen memory */
565 struct drm_mm stolen;
566 /** Protects the usage of the GTT stolen memory allocator. This is
567 * always the inner lock when overlapping with struct_mutex. */
568 struct mutex stolen_lock;
569
570 /* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
571 spinlock_t obj_lock;
572
573 /**
574 * List of objects which are purgeable.
575 */
576 struct list_head purge_list;
577
578 /**
579 * List of objects which have allocated pages and are shrinkable.
580 */
581 struct list_head shrink_list;
582
583 /**
584 * List of objects which are pending destruction.
585 */
586 struct llist_head free_list;
587 struct work_struct free_work;
588 /**
589 * Count of objects pending destructions. Used to skip needlessly
590 * waiting on an RCU barrier if no objects are waiting to be freed.
591 */
592 atomic_t free_count;
593
594 /**
595 * Small stash of WC pages
596 */
597 struct pagestash wc_stash;
598
599 /**
600 * tmpfs instance used for shmem backed objects
601 */
602 struct vfsmount *gemfs;
603
604 struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];
605
606 struct notifier_block oom_notifier;
607 struct notifier_block vmap_notifier;
608 struct shrinker shrinker;
609
610 /**
611 * Workqueue to fault in userptr pages, flushed by the execbuf
612 * when required but otherwise left to userspace to try again
613 * on EAGAIN.
614 */
615 struct workqueue_struct *userptr_wq;
616
617 /* shrinker accounting, also useful for userland debugging */
618 u64 shrink_memory;
619 u32 shrink_count;
620};
621
622#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
623
624unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915,
625 u64 context);
626
627static inline unsigned long
628i915_fence_timeout(const struct drm_i915_private *i915)
629{
630 return i915_fence_context_timeout(i915, U64_MAX);
631}
632
633/* Amount of SAGV/QGV points, BSpec precisely defines this */
634#define I915_NUM_QGV_POINTS 8
635
636struct ddi_vbt_port_info {
637 /* Non-NULL if port present. */
638 const struct child_device_config *child;
639
640 int max_tmds_clock;
641
642 /* This is an index in the HDMI/DVI DDI buffer translation table. */
643 u8 hdmi_level_shift;
644 u8 hdmi_level_shift_set:1;
645
646 u8 supports_dvi:1;
647 u8 supports_hdmi:1;
648 u8 supports_dp:1;
649 u8 supports_edp:1;
650 u8 supports_typec_usb:1;
651 u8 supports_tbt:1;
652
653 u8 alternate_aux_channel;
654 u8 alternate_ddc_pin;
655
656 u8 dp_boost_level;
657 u8 hdmi_boost_level;
658 int dp_max_link_rate; /* 0 for not limited by VBT */
659};
660
661enum psr_lines_to_wait {
662 PSR_0_LINES_TO_WAIT = 0,
663 PSR_1_LINE_TO_WAIT,
664 PSR_4_LINES_TO_WAIT,
665 PSR_8_LINES_TO_WAIT
666};
667
668struct intel_vbt_data {
669 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
670 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
671
672 /* Feature bits */
673 unsigned int int_tv_support:1;
674 unsigned int lvds_dither:1;
675 unsigned int int_crt_support:1;
676 unsigned int lvds_use_ssc:1;
677 unsigned int int_lvds_support:1;
678 unsigned int display_clock_mode:1;
679 unsigned int fdi_rx_polarity_inverted:1;
680 unsigned int panel_type:4;
681 int lvds_ssc_freq;
682 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
683 enum drm_panel_orientation orientation;
684
685 enum drrs_support_type drrs_type;
686
687 struct {
688 int rate;
689 int lanes;
690 int preemphasis;
691 int vswing;
692 bool low_vswing;
693 bool initialized;
694 int bpp;
695 struct edp_power_seq pps;
696 bool hobl;
697 } edp;
698
699 struct {
700 bool enable;
701 bool full_link;
702 bool require_aux_wakeup;
703 int idle_frames;
704 enum psr_lines_to_wait lines_to_wait;
705 int tp1_wakeup_time_us;
706 int tp2_tp3_wakeup_time_us;
707 int psr2_tp2_tp3_wakeup_time_us;
708 } psr;
709
710 struct {
711 u16 pwm_freq_hz;
712 bool present;
713 bool active_low_pwm;
714 u8 min_brightness; /* min_brightness/255 of max */
715 u8 controller; /* brightness controller number */
716 enum intel_backlight_type type;
717 } backlight;
718
719 /* MIPI DSI */
720 struct {
721 u16 panel_id;
722 struct mipi_config *config;
723 struct mipi_pps_data *pps;
724 u16 bl_ports;
725 u16 cabc_ports;
726 u8 seq_version;
727 u32 size;
728 u8 *data;
729 const u8 *sequence[MIPI_SEQ_MAX];
730 u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
731 enum drm_panel_orientation orientation;
732 } dsi;
733
734 int crt_ddc_pin;
735
736 struct list_head display_devices;
737
738 struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
739 struct sdvo_device_mapping sdvo_mappings[2];
740};
741
742enum intel_ddb_partitioning {
743 INTEL_DDB_PART_1_2,
744 INTEL_DDB_PART_5_6, /* IVB+ */
745};
746
747struct ilk_wm_values {
748 u32 wm_pipe[3];
749 u32 wm_lp[3];
750 u32 wm_lp_spr[3];
751 bool enable_fbc_wm;
752 enum intel_ddb_partitioning partitioning;
753};
754
755struct g4x_pipe_wm {
756 u16 plane[I915_MAX_PLANES];
757 u16 fbc;
758};
759
760struct g4x_sr_wm {
761 u16 plane;
762 u16 cursor;
763 u16 fbc;
764};
765
766struct vlv_wm_ddl_values {
767 u8 plane[I915_MAX_PLANES];
768};
769
770struct vlv_wm_values {
771 struct g4x_pipe_wm pipe[3];
772 struct g4x_sr_wm sr;
773 struct vlv_wm_ddl_values ddl[3];
774 u8 level;
775 bool cxsr;
776};
777
778struct g4x_wm_values {
779 struct g4x_pipe_wm pipe[2];
780 struct g4x_sr_wm sr;
781 struct g4x_sr_wm hpll;
782 bool cxsr;
783 bool hpll_en;
784 bool fbc_en;
785};
786
787struct skl_ddb_entry {
788 u16 start, end; /* in number of blocks, 'end' is exclusive */
789};
790
791static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry)
792{
793 return entry->end - entry->start;
794}
795
796static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
797 const struct skl_ddb_entry *e2)
798{
799 if (e1->start == e2->start && e1->end == e2->end)
800 return true;
801
802 return false;
803}
804
805struct i915_frontbuffer_tracking {
806 spinlock_t lock;
807
808 /*
809 * Tracking bits for delayed frontbuffer flushing du to gpu activity or
810 * scheduled flips.
811 */
812 unsigned busy_bits;
813 unsigned flip_bits;
814};
815
816struct i915_virtual_gpu {
817 struct mutex lock; /* serialises sending of g2v_notify command pkts */
818 bool active;
819 u32 caps;
820};
821
822struct intel_cdclk_config {
823 unsigned int cdclk, vco, ref, bypass;
824 u8 voltage_level;
825};
826
827struct i915_selftest_stash {
828 atomic_t counter;
829};
830
831struct drm_i915_private {
832 struct drm_device drm;
833
834 /* FIXME: Device release actions should all be moved to drmm_ */
835 bool do_release;
836
837 /* i915 device parameters */
838 struct i915_params params;
839
840 const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
841 struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
842 struct intel_driver_caps caps;
843
844 /**
845 * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
846 * end of stolen which we can optionally use to create GEM objects
847 * backed by stolen memory. Note that stolen_usable_size tells us
848 * exactly how much of this we are actually allowed to use, given that
849 * some portion of it is in fact reserved for use by hardware functions.
850 */
851 struct resource dsm;
852 /**
853 * Reseved portion of Data Stolen Memory
854 */
855 struct resource dsm_reserved;
856
857 /*
858 * Stolen memory is segmented in hardware with different portions
859 * offlimits to certain functions.
860 *
861 * The drm_mm is initialised to the total accessible range, as found
862 * from the PCI config. On Broadwell+, this is further restricted to
863 * avoid the first page! The upper end of stolen memory is reserved for
864 * hardware functions and similarly removed from the accessible range.
865 */
866 resource_size_t stolen_usable_size; /* Total size minus reserved ranges */
867
868 struct intel_uncore uncore;
869 struct intel_uncore_mmio_debug mmio_debug;
870
871 struct i915_virtual_gpu vgpu;
872
873 struct intel_gvt *gvt;
874
875 struct intel_wopcm wopcm;
876
877 struct intel_csr csr;
878
879 struct intel_gmbus gmbus[GMBUS_NUM_PINS];
880
881 /** gmbus_mutex protects against concurrent usage of the single hw gmbus
882 * controller on different i2c buses. */
883 struct mutex gmbus_mutex;
884
885 /**
886 * Base address of where the gmbus and gpio blocks are located (either
887 * on PCH or on SoC for platforms without PCH).
888 */
889 u32 gpio_mmio_base;
890
891 u32 hsw_psr_mmio_adjust;
892
893 /* MMIO base address for MIPI regs */
894 u32 mipi_mmio_base;
895
896 u32 pps_mmio_base;
897
898 wait_queue_head_t gmbus_wait_queue;
899
900 struct pci_dev *bridge_dev;
901
902 struct rb_root uabi_engines;
903
904 struct resource mch_res;
905
906 /* protects the irq masks */
907 spinlock_t irq_lock;
908
909 bool display_irqs_enabled;
910
911 /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
912 struct pm_qos_request pm_qos;
913
914 /* Sideband mailbox protection */
915 struct mutex sb_lock;
916 struct pm_qos_request sb_qos;
917
918 /** Cached value of IMR to avoid reads in updating the bitfield */
919 union {
920 u32 irq_mask;
921 u32 de_irq_mask[I915_MAX_PIPES];
922 };
923 u32 pipestat_irq_mask[I915_MAX_PIPES];
924
925 struct i915_hotplug hotplug;
926 struct intel_fbc fbc;
927 struct i915_drrs drrs;
928 struct intel_opregion opregion;
929 struct intel_vbt_data vbt;
930
931 bool preserve_bios_swizzle;
932
933 /* overlay */
934 struct intel_overlay *overlay;
935
936 /* backlight registers and fields in struct intel_panel */
937 struct mutex backlight_lock;
938
939 /* protects panel power sequencer state */
940 struct mutex pps_mutex;
941
942 unsigned int fsb_freq, mem_freq, is_ddr3;
943 unsigned int skl_preferred_vco_freq;
944 unsigned int max_cdclk_freq;
945
946 unsigned int max_dotclk_freq;
947 unsigned int hpll_freq;
948 unsigned int fdi_pll_freq;
949 unsigned int czclk_freq;
950
951 struct {
952 /* The current hardware cdclk configuration */
953 struct intel_cdclk_config hw;
954
955 /* cdclk, divider, and ratio table from bspec */
956 const struct intel_cdclk_vals *table;
957
958 struct intel_global_obj obj;
959 } cdclk;
960
961 struct {
962 /* The current hardware dbuf configuration */
963 u8 enabled_slices;
964
965 struct intel_global_obj obj;
966 } dbuf;
967
968 /**
969 * wq - Driver workqueue for GEM.
970 *
971 * NOTE: Work items scheduled here are not allowed to grab any modeset
972 * locks, for otherwise the flushing done in the pageflip code will
973 * result in deadlocks.
974 */
975 struct workqueue_struct *wq;
976
977 /* ordered wq for modesets */
978 struct workqueue_struct *modeset_wq;
979 /* unbound hipri wq for page flips/plane updates */
980 struct workqueue_struct *flip_wq;
981
982 /* Display functions */
983 struct drm_i915_display_funcs display;
984
985 /* PCH chipset type */
986 enum intel_pch pch_type;
987 unsigned short pch_id;
988
989 unsigned long quirks;
990
991 struct drm_atomic_state *modeset_restore_state;
992 struct drm_modeset_acquire_ctx reset_ctx;
993
994 struct i915_ggtt ggtt; /* VM representing the global address space */
995
996 struct i915_gem_mm mm;
997 DECLARE_HASHTABLE(mm_structs, 7);
998 spinlock_t mm_lock;
999
1000 /* Kernel Modesetting */
1001
1002 struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
1003 struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
1004
1005 /**
1006 * dpll and cdclk state is protected by connection_mutex
1007 * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll.
1008 * Must be global rather than per dpll, because on some platforms plls
1009 * share registers.
1010 */
1011 struct {
1012 struct mutex lock;
1013
1014 int num_shared_dpll;
1015 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
1016 const struct intel_dpll_mgr *mgr;
1017
1018 struct {
1019 int nssc;
1020 int ssc;
1021 } ref_clks;
1022 } dpll;
1023
1024 struct list_head global_obj_list;
1025
1026 /*
1027 * For reading active_pipes holding any crtc lock is
1028 * sufficient, for writing must hold all of them.
1029 */
1030 u8 active_pipes;
1031
1032 int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
1033
1034 struct i915_wa_list gt_wa_list;
1035
1036 struct i915_frontbuffer_tracking fb_tracking;
1037
1038 struct intel_atomic_helper {
1039 struct llist_head free_list;
1040 struct work_struct free_work;
1041 } atomic_helper;
1042
1043 bool mchbar_need_disable;
1044
1045 struct intel_l3_parity l3_parity;
1046
1047 /*
1048 * edram size in MB.
1049 * Cannot be determined by PCIID. You must always read a register.
1050 */
1051 u32 edram_size_mb;
1052
1053 struct i915_power_domains power_domains;
1054
1055 struct i915_psr psr;
1056
1057 struct i915_gpu_error gpu_error;
1058
1059 struct drm_i915_gem_object *vlv_pctx;
1060
1061 /* list of fbdev register on this device */
1062 struct intel_fbdev *fbdev;
1063 struct work_struct fbdev_suspend_work;
1064
1065 struct drm_property *broadcast_rgb_property;
1066 struct drm_property *force_audio_property;
1067
1068 /* hda/i915 audio component */
1069 struct i915_audio_component *audio_component;
1070 bool audio_component_registered;
1071 /**
1072 * av_mutex - mutex for audio/video sync
1073 *
1074 */
1075 struct mutex av_mutex;
1076 int audio_power_refcount;
1077 u32 audio_freq_cntrl;
1078
1079 u32 fdi_rx_config;
1080
1081 /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
1082 u32 chv_phy_control;
1083 /*
1084 * Shadows for CHV DPLL_MD regs to keep the state
1085 * checker somewhat working in the presence hardware
1086 * crappiness (can't read out DPLL_MD for pipes B & C).
1087 */
	u32 chv_dpll_md[I915_MAX_PIPES];
	u32 bxt_phy_grc;

	u32 suspend_count;
	bool power_domains_suspended;
	struct i915_suspend_saved_registers regfile;
	struct vlv_s0ix_state *vlv_s0ix_state;

	enum {
		I915_SAGV_UNKNOWN = 0,
		I915_SAGV_DISABLED,
		I915_SAGV_ENABLED,
		I915_SAGV_NOT_CONTROLLED
	} sagv_status;

	u32 sagv_block_time_us;

	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		u16 pri_latency[5];
		/* sprite */
		u16 spr_latency[5];
		/* cursor */
		u16 cur_latency[5];
		/*
		 * Raw watermark memory latency values
		 * for SKL for all 8 levels
		 * in 1us units.
		 */
		u16 skl_latency[8];

		/* current hardware state */
		union {
			struct ilk_wm_values hw;
			struct vlv_wm_values vlv;
			struct g4x_wm_values g4x;
		};

		u8 max_level;

		/*
		 * Should be held around atomic WM register writing; also
		 * protects intel_crtc->wm.active and
		 * crtc_state->wm.need_postvbl_update.
		 */
		struct mutex wm_mutex;

		/*
		 * Set during HW readout of watermarks/DDB. Some platforms
		 * need to know when we're still using BIOS-provided values
		 * (which we don't fully trust).
		 *
		 * FIXME get rid of this.
		 */
		bool distrust_bios_wm;
	} wm;

	struct dram_info {
		bool valid;
		bool is_16gb_dimm;
		u8 num_channels;
		u8 ranks;
		u32 bandwidth_kbps;
		bool symmetric_memory;
		enum intel_dram_type {
			INTEL_DRAM_UNKNOWN,
			INTEL_DRAM_DDR3,
			INTEL_DRAM_DDR4,
			INTEL_DRAM_LPDDR3,
			INTEL_DRAM_LPDDR4
		} type;
	} dram_info;

	struct intel_bw_info {
		/* for each QGV point */
		unsigned int deratedbw[I915_NUM_QGV_POINTS];
		u8 num_qgv_points;
		u8 num_planes;
	} max_bw[6];

	struct intel_global_obj bw_obj;

	struct intel_runtime_pm runtime_pm;

	struct i915_perf perf;

	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
	struct intel_gt gt;

	struct {
		struct i915_gem_contexts {
			spinlock_t lock; /* locks list */
			struct list_head list;

			struct llist_head free_list;
			struct work_struct free_work;
		} contexts;

		/*
		 * We replace the local file with a global mapping as the
		 * backing storage for the mmap is on the device and not
		 * on the struct file, and we do not want to prolong the
		 * lifetime of the local fd. To minimise the number of
		 * anonymous inodes we create, we use a global singleton to
		 * share the global mapping.
		 */
		struct file *mmap_singleton;
	} gem;

	u8 pch_ssc_use;

	/* For i915gm/i945gm vblank irq workaround */
	u8 vblank_enabled;

	/* perform PHY state sanity checks? */
	bool chv_phy_assert[2];

	bool ipc_enabled;

	/* Used to save the pipe-to-encoder mapping for audio */
	struct intel_encoder *av_enc_map[I915_MAX_PIPES];

	/* necessary resource sharing with HDMI LPE audio driver. */
	struct {
		struct platform_device *platdev;
		int irq;
	} lpe_audio;

	struct i915_pmu pmu;

	struct i915_hdcp_comp_master *hdcp_master;
	bool hdcp_comp_added;

	/* Mutex to protect the above hdcp component related values. */
	struct mutex hdcp_comp_mutex;

	I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)

	/*
	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
	 * will be rejected. Instead look for a better place.
	 */
};

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return container_of(dev, struct drm_i915_private, drm);
}

static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
{
	return dev_get_drvdata(kdev);
}

static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, dev_priv__, id__) \
	for ((id__) = 0; \
	     (id__) < I915_NUM_ENGINES; \
	     (id__)++) \
		for_each_if ((engine__) = (dev_priv__)->engine[(id__)])

/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
	for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
	     (tmp__) ? \
	     ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
	     0;)
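
/*
 * Illustrative sketch (not part of the driver): walking only the video
 * decode engines of a GT with for_each_engine_masked(); "tmp" is the
 * scratch variable the iterator requires, and the engine selection is a
 * hypothetical example.
 *
 *	struct intel_engine_cs *engine;
 *	intel_engine_mask_t tmp;
 *
 *	for_each_engine_masked(engine, gt, BIT(VCS0) | BIT(VCS1), tmp)
 *		pr_info("found %s\n", engine->name);
 */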

#define rb_to_uabi_engine(rb) \
	rb_entry_safe(rb, struct intel_engine_cs, uabi_node)

#define for_each_uabi_engine(engine__, i915__) \
	for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
	     (engine__); \
	     (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))

#define for_each_uabi_class_engine(engine__, class__, i915__) \
	for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \
	     (engine__) && (engine__)->uabi_class == (class__); \
	     (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
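
/*
 * Illustrative sketch: enumerating engines in the stable uabi order
 * exposed to userspace, optionally restricted to one class (the counters
 * are hypothetical locals):
 *
 *	struct intel_engine_cs *engine;
 *	unsigned int count = 0, vcs = 0;
 *
 *	for_each_uabi_engine(engine, i915)
 *		count++;
 *
 *	for_each_uabi_class_engine(engine, I915_ENGINE_CLASS_VIDEO, i915)
 *		vcs++;
 */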

#define I915_GTT_OFFSET_NONE ((u32)-1)

/*
 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
 * considered to be the frontbuffer for the given plane interface-wise. This
 * doesn't mean that the hw necessarily already scans it out, but that any
 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
 *
 * We have one bit per pipe and per scanout plane type.
 */
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
#define INTEL_FRONTBUFFER(pipe, plane_id) ({ \
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \
	BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \
	BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \
})
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
	BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
	GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
		INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
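
/*
 * For example (assuming PLANE_PRIMARY == 0 and PIPE_B == 1, as in the
 * current enums): INTEL_FRONTBUFFER(PIPE_A, PLANE_PRIMARY) is BIT(0),
 * the same plane on pipe B lands at BIT(8), and
 * INTEL_FRONTBUFFER_ALL_MASK(PIPE_B) evaluates to GENMASK(15, 8).
 */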

#define INTEL_INFO(dev_priv)	(&(dev_priv)->__info)
#define RUNTIME_INFO(dev_priv)	(&(dev_priv)->__runtime)
#define DRIVER_CAPS(dev_priv)	(&(dev_priv)->caps)

#define INTEL_GEN(dev_priv)	(INTEL_INFO(dev_priv)->gen)
#define INTEL_DEVID(dev_priv)	(RUNTIME_INFO(dev_priv)->device_id)

#define REVID_FOREVER		0xff
#define INTEL_REVID(dev_priv)	((dev_priv)->drm.pdev->revision)

#define INTEL_GEN_MASK(s, e) ( \
	BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
	BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
	GENMASK((e) - 1, (s) - 1))

/* Returns true if Gen is in inclusive range [Start, End] */
#define IS_GEN_RANGE(dev_priv, s, e) \
	(!!(INTEL_INFO(dev_priv)->gen_mask & INTEL_GEN_MASK((s), (e))))

#define IS_GEN(dev_priv, n) \
	(BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \
	 INTEL_INFO(dev_priv)->gen == (n))

#define HAS_DSB(dev_priv)	(INTEL_INFO(dev_priv)->display.has_dsb)

/*
 * Return true if revision is in range [since,until] inclusive.
 *
 * Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
 */
#define IS_REVID(p, since, until) \
	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))

static __always_inline unsigned int
__platform_mask_index(const struct intel_runtime_info *info,
		      enum intel_platform p)
{
	const unsigned int pbits =
		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

	/* Expand the platform_mask array if this fails. */
	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
		     pbits * ARRAY_SIZE(info->platform_mask));

	return p / pbits;
}

static __always_inline unsigned int
__platform_mask_bit(const struct intel_runtime_info *info,
		    enum intel_platform p)
{
	const unsigned int pbits =
		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

	return p % pbits + INTEL_SUBPLATFORM_BITS;
}

static inline u32
intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
{
	const unsigned int pi = __platform_mask_index(info, p);

	/* Mask off the platform bits, keeping only the subplatform bits. */
	return info->platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1);
}

static __always_inline bool
IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(info, p);
	const unsigned int pb = __platform_mask_bit(info, p);

	BUILD_BUG_ON(!__builtin_constant_p(p));

	return info->platform_mask[pi] & BIT(pb);
}

static __always_inline bool
IS_SUBPLATFORM(const struct drm_i915_private *i915,
	       enum intel_platform p, unsigned int s)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(info, p);
	const unsigned int pb = __platform_mask_bit(info, p);
	const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
	const u32 mask = info->platform_mask[pi];

	BUILD_BUG_ON(!__builtin_constant_p(p));
	BUILD_BUG_ON(!__builtin_constant_p(s));
	BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);

	/* Shift and test on the MSB position so sign flag can be used. */
	return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
}

#define IS_MOBILE(dev_priv)	(INTEL_INFO(dev_priv)->is_mobile)
#define IS_DGFX(dev_priv)	(INTEL_INFO(dev_priv)->is_dgfx)

#define IS_I830(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I830)
#define IS_I845G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I845G)
#define IS_I85X(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I85X)
#define IS_I865G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I865G)
#define IS_I915G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915G)
#define IS_I915GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915GM)
#define IS_I945G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945G)
#define IS_I945GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945GM)
#define IS_I965G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965G)
#define IS_I965GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965GM)
#define IS_G45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G45)
#define IS_GM45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GM45)
#define IS_G4X(dev_priv)	(IS_G45(dev_priv) || IS_GM45(dev_priv))
#define IS_PINEVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
#define IS_G33(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G33)
#define IS_IRONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
#define IS_IRONLAKE_M(dev_priv) \
	(IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
#define IS_IVYBRIDGE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
#define IS_IVB_GT1(dev_priv)	(IS_IVYBRIDGE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 1)
#define IS_VALLEYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
#define IS_CHERRYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
#define IS_HASWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_HASWELL)
#define IS_BROADWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROADWELL)
#define IS_SKYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
#define IS_BROXTON(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROXTON)
#define IS_KABYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
#define IS_GEMINILAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
#define IS_COFFEELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
#define IS_COMETLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COMETLAKE)
#define IS_CANNONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
#define IS_ICELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ICELAKE)
#define IS_ELKHARTLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
#define IS_TIGERLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
#define IS_ROCKETLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
#define IS_DG1(dev_priv)	IS_PLATFORM(dev_priv, INTEL_DG1)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
#define IS_BDW_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
#define IS_BDW_GT3(dev_priv)	(IS_BROADWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
#define IS_HSW_GT3(dev_priv)	(IS_HASWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_GT1(dev_priv)	(IS_HASWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 1)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
#define IS_SKL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_SKL_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_KBL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_KBL_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)
#define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_SKL_GT4(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 4)
#define IS_KBL_GT2(dev_priv)	(IS_KABYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)
#define IS_KBL_GT3(dev_priv)	(IS_KABYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_CFL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
#define IS_CFL_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
#define IS_CFL_GT2(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)
#define IS_CFL_GT3(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)

#define IS_CML_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_CML_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_CML_GT2(dev_priv)	(IS_COMETLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)

#define IS_CNL_WITH_PORT_F(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF)
#define IS_ICL_WITH_PORT_F(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)

#define SKL_REVID_A0		0x0
#define SKL_REVID_B0		0x1
#define SKL_REVID_C0		0x2
#define SKL_REVID_D0		0x3
#define SKL_REVID_E0		0x4
#define SKL_REVID_F0		0x5
#define SKL_REVID_G0		0x6
#define SKL_REVID_H0		0x7

#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
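
/*
 * Sketch of a stepping-bound workaround check (illustrative; the helper
 * is hypothetical). This matches only SKL steppings A0 through E0:
 *
 *	if (IS_SKL_REVID(dev_priv, SKL_REVID_A0, SKL_REVID_E0))
 *		apply_hypothetical_wa(dev_priv);
 */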

#define BXT_REVID_A0		0x0
#define BXT_REVID_A1		0x1
#define BXT_REVID_B0		0x3
#define BXT_REVID_B_LAST	0x8
#define BXT_REVID_C0		0x9

#define IS_BXT_REVID(dev_priv, since, until) \
	(IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))

#define KBL_REVID_A0		0x0
#define KBL_REVID_B0		0x1
#define KBL_REVID_C0		0x2
#define KBL_REVID_D0		0x3
#define KBL_REVID_E0		0x4

#define IS_KBL_REVID(dev_priv, since, until) \
	(IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))

#define GLK_REVID_A0		0x0
#define GLK_REVID_A1		0x1
#define GLK_REVID_A2		0x2
#define GLK_REVID_B0		0x3

#define IS_GLK_REVID(dev_priv, since, until) \
	(IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))

#define CNL_REVID_A0		0x0
#define CNL_REVID_B0		0x1
#define CNL_REVID_C0		0x2

#define IS_CNL_REVID(p, since, until) \
	(IS_CANNONLAKE(p) && IS_REVID(p, since, until))

#define ICL_REVID_A0		0x0
#define ICL_REVID_A2		0x1
#define ICL_REVID_B0		0x3
#define ICL_REVID_B2		0x4
#define ICL_REVID_C0		0x5

#define IS_ICL_REVID(p, since, until) \
	(IS_ICELAKE(p) && IS_REVID(p, since, until))

#define EHL_REVID_A0            0x0

#define IS_EHL_REVID(p, since, until) \
	(IS_ELKHARTLAKE(p) && IS_REVID(p, since, until))

#define TGL_REVID_A0		0x0
#define TGL_REVID_B0		0x1
#define TGL_REVID_C0		0x2

#define IS_TGL_REVID(p, since, until) \
	(IS_TIGERLAKE(p) && IS_REVID(p, since, until))

#define RKL_REVID_A0		0x0
#define RKL_REVID_B0		0x1
#define RKL_REVID_C0		0x4

#define IS_RKL_REVID(p, since, until) \
	(IS_ROCKETLAKE(p) && IS_REVID(p, since, until))

#define DG1_REVID_A0		0x0
#define DG1_REVID_B0		0x1

#define IS_DG1_REVID(p, since, until) \
	(IS_DG1(p) && IS_REVID(p, since, until))

#define IS_LP(dev_priv)	(INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv)	(IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv)	(IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))

#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)

#define ENGINE_INSTANCES_MASK(gt, first, count) ({		\
	unsigned int first__ = (first);				\
	unsigned int count__ = (count);				\
	((gt)->info.engine_mask &				\
	 GENMASK(first__ + count__ - 1, first__)) >> first__;	\
})
#define VDBOX_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
#define VEBOX_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
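
/*
 * Illustrative: on a part whose engine_mask has video decode engines
 * VCS0 and VCS1, VDBOX_MASK(gt) evaluates to 0b11; an asymmetric
 * VCS0+VCS2 fusing would instead yield 0b101.
 */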

/*
 * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution.
 * All later gens can run the final buffer from the ppgtt.
 */
#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN(dev_priv, 7)

#define HAS_LLC(dev_priv)	(INTEL_INFO(dev_priv)->has_llc)
#define HAS_SNOOP(dev_priv)	(INTEL_INFO(dev_priv)->has_snoop)
#define HAS_EDRAM(dev_priv)	((dev_priv)->edram_size_mb)
#define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6)
#define HAS_WT(dev_priv)	((IS_HASWELL(dev_priv) || \
				 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))

#define HWS_NEEDS_PHYSICAL(dev_priv)	(INTEL_INFO(dev_priv)->hws_needs_physical)

#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
		(INTEL_INFO(dev_priv)->has_logical_ring_contexts)
#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
		(INTEL_INFO(dev_priv)->has_logical_ring_elsq)
#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
		(INTEL_INFO(dev_priv)->has_logical_ring_preemption)

#define HAS_MASTER_UNIT_IRQ(dev_priv) (INTEL_INFO(dev_priv)->has_master_unit_irq)

#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)

#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
#define HAS_PPGTT(dev_priv) \
	(INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
#define HAS_FULL_PPGTT(dev_priv) \
	(INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)

#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
	GEM_BUG_ON((sizes) == 0); \
	((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
})

#define HAS_OVERLAY(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
		(INTEL_INFO(dev_priv)->display.overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv)	(IS_I830(dev_priv) || IS_I845G(dev_priv))

#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv)	\
	(IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9))

/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv)	\
	(IS_CANNONLAKE(dev_priv) ||			\
	 IS_SKL_GT3(dev_priv) ||			\
	 IS_SKL_GT4(dev_priv))

#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
					IS_GEMINILAKE(dev_priv) || \
					IS_KABYLAKE(dev_priv))

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN(dev_priv, 2) && \
					 !(IS_I915G(dev_priv) || \
					   IS_I915GM(dev_priv)))
#define SUPPORTS_TV(dev_priv)		(INTEL_INFO(dev_priv)->display.supports_tv)
#define I915_HAS_HOTPLUG(dev_priv)	(INTEL_INFO(dev_priv)->display.has_hotplug)

#define HAS_FW_BLC(dev_priv)	(INTEL_GEN(dev_priv) > 2)
#define HAS_FBC(dev_priv)	(INTEL_INFO(dev_priv)->display.has_fbc)
#define HAS_CUR_FBC(dev_priv)	(!HAS_GMCH(dev_priv) && INTEL_GEN(dev_priv) >= 7)

#define HAS_IPS(dev_priv)	(IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))

#define HAS_DP_MST(dev_priv)	(INTEL_INFO(dev_priv)->display.has_dp_mst)

#define HAS_DDI(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
#define HAS_PSR(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_psr)
#define HAS_PSR_HW_TRACKING(dev_priv) \
	(INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
#define HAS_TRANSCODER(dev_priv, trans)	 ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)

#define HAS_RC6(dev_priv)		 (INTEL_INFO(dev_priv)->has_rc6)
#define HAS_RC6p(dev_priv)		 (INTEL_INFO(dev_priv)->has_rc6p)
#define HAS_RC6pp(dev_priv)		 (false) /* HW was never validated */

#define HAS_RPS(dev_priv)	(INTEL_INFO(dev_priv)->has_rps)

#define HAS_CSR(dev_priv)	(INTEL_INFO(dev_priv)->display.has_csr)

#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)

#define HAS_IPC(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_ipc)

#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)

#define HAS_GT_UC(dev_priv)	(INTEL_INFO(dev_priv)->has_gt_uc)

#define HAS_POOLED_EU(dev_priv)	(INTEL_INFO(dev_priv)->has_pooled_eu)

#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv)	(INTEL_INFO(dev_priv)->has_global_mocs)

#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)

#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)

/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
				 2 : HAS_L3_DPF(dev_priv))

#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3

#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->pipe_mask))

#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)

/* Only valid when HAS_DISPLAY() is true */
#define INTEL_DISPLAY_ENABLED(dev_priv) \
	(drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)

static inline bool intel_vtd_active(void)
{
#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
{
	return INTEL_GEN(dev_priv) >= 6 && intel_vtd_active();
}

static inline bool
intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
{
	return IS_BROXTON(dev_priv) && intel_vtd_active();
}

/* i915_drv.c */
extern const struct dev_pm_ops i915_pm_ops;

int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
void i915_driver_remove(struct drm_i915_private *i915);

int i915_resume_switcheroo(struct drm_i915_private *i915);
int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);

int i915_getparam_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);

/* i915_gem.c */
int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
void i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);

struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915);

static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
	/*
	 * A single pass should suffice to release all the freed objects (along
	 * most call paths), but be a little more paranoid in that freeing
	 * the objects does take a small amount of time, during which the rcu
	 * callbacks could have added new objects into the freed list, and
	 * armed the work again.
	 */
	while (atomic_read(&i915->mm.free_count)) {
		flush_work(&i915->mm.free_work);
		rcu_barrier();
	}
}

static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
{
	/*
	 * Similar to objects above (see i915_gem_drain_freed_objects()), in
	 * general we have workers that are armed by RCU and then rearm
	 * themselves in their callbacks. To be paranoid, we need to
	 * drain the workqueue a second time after waiting for the RCU
	 * grace period so that we catch work queued via RCU from the first
	 * pass. As neither drain_workqueue() nor flush_workqueue() report
	 * a result, we assume that no more than 3 passes are required to
	 * catch all _recursive_ RCU delayed work.
	 */
	int pass = 3;
	do {
		flush_workqueue(i915->wq);
		rcu_barrier();
		i915_gem_drain_freed_objects(i915);
	} while (--pass);
	drain_workqueue(i915->wq);
}

struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags);

int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
			   unsigned long flags);
#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)

void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);

int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);

int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);

static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_count);
}

static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
					  const struct intel_engine_cs *engine)
{
	return atomic_read(&error->reset_engine_count[engine->uabi_class]);
}

int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
void i915_gem_driver_register(struct drm_i915_private *i915);
void i915_gem_driver_unregister(struct drm_i915_private *i915);
void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
void i915_gem_driver_release(struct drm_i915_private *dev_priv);
void i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);

int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level);

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags);

static inline struct i915_gem_context *
__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
{
	return xa_load(&file_priv->context_xa, id);
}

static inline struct i915_gem_context *
i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_context *ctx;

	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file_priv, id);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();

	return ctx;
}

/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
					  u64 min_size, u64 alignment,
					  unsigned long color,
					  u64 start, u64 end,
					  unsigned flags);
int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
					 struct drm_mm_node *node,
					 unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm);

/* i915_gem_internal.c */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
				phys_addr_t size);

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	return i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		i915_gem_object_is_tiled(obj);
}

u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
			unsigned int tiling, unsigned int stride);
u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
			     unsigned int tiling, unsigned int stride);

const char *i915_cache_level_str(struct drm_i915_private *i915, int type);

/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
			    struct i915_vma *batch,
			    u32 batch_offset,
			    u32 batch_length,
			    struct i915_vma *shadow,
			    bool trampoline);
#define I915_CMD_PARSER_TRAMPOLINE_SIZE 8

/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
{
	return (struct intel_device_info *)INTEL_INFO(dev_priv);
}

int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);

#define __I915_REG_OP(op__, dev_priv__, ...) \
	intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)

#define I915_READ(reg__)	 __I915_REG_OP(read, dev_priv, (reg__))
#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__))

#define POSTING_READ(reg__)	__I915_REG_OP(posting_read, dev_priv, (reg__))

/* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
 * controlled.
 *
 * Think twice, and think again, before using these.
 *
 * As an example, these accessors can possibly be used between:
 *
 * spin_lock_irq(&dev_priv->uncore.lock);
 * intel_uncore_forcewake_get__locked();
 *
 * and
 *
 * intel_uncore_forcewake_put__locked();
 * spin_unlock_irq(&dev_priv->uncore.lock);
 *
 * Note: some registers may not need forcewake held, so
 * intel_uncore_forcewake_{get,put} can be omitted, see
 * intel_uncore_forcewake_for_reg().
 *
 * Certain architectures will die if the same cacheline is concurrently accessed
 * by different clients (e.g. on Ivybridge). Access to registers should
 * therefore generally be serialised, by either the dev_priv->uncore.lock or
 * a more localised lock guarding all access to that bank of registers.
 */
#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))

/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
		     unsigned long addr, unsigned long pfn, unsigned long size,
		     struct io_mapping *iomap);
int remap_io_sg(struct vm_area_struct *vma,
		unsigned long addr, unsigned long size,
		struct scatterlist *sgl, resource_size_t iobase);

static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 10)
		return CNL_HWS_CSB_WRITE_INDEX;
	else
		return I915_HWS_CSB_WRITE_INDEX;
}

static inline enum i915_map_type
i915_coherent_map_type(struct drm_i915_private *i915)
{
	return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}

static inline u64 i915_cs_timestamp_ns_to_ticks(struct drm_i915_private *i915, u64 val)
{
	return DIV_ROUND_UP_ULL(val * RUNTIME_INFO(i915)->cs_timestamp_frequency_hz,
				1000000000);
}

static inline u64 i915_cs_timestamp_ticks_to_ns(struct drm_i915_private *i915, u64 val)
{
	return div_u64(val * 1000000000,
		       RUNTIME_INFO(i915)->cs_timestamp_frequency_hz);
}

#endif /* _I915_DRV_H_ */