v6.2
  1/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
  2 */
  3/*
  4 *
  5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
  6 * All Rights Reserved.
  7 *
  8 * Permission is hereby granted, free of charge, to any person obtaining a
  9 * copy of this software and associated documentation files (the
 10 * "Software"), to deal in the Software without restriction, including
 11 * without limitation the rights to use, copy, modify, merge, publish,
 12 * distribute, sub license, and/or sell copies of the Software, and to
 13 * permit persons to whom the Software is furnished to do so, subject to
 14 * the following conditions:
 15 *
 16 * The above copyright notice and this permission notice (including the
 17 * next paragraph) shall be included in all copies or substantial portions
 18 * of the Software.
 19 *
 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 27 *
 28 */
 29
 30#ifndef _I915_DRV_H_
 31#define _I915_DRV_H_
 32
 33#include <uapi/drm/i915_drm.h>
 34
 35#include <linux/pm_qos.h>
 36
 37#include <drm/ttm/ttm_device.h>
 38
 39#include "display/intel_display.h"
 40#include "display/intel_display_core.h"
 41
 42#include "gem/i915_gem_context_types.h"
 43#include "gem/i915_gem_shrinker.h"
 44#include "gem/i915_gem_stolen.h"
 45
 46#include "gt/intel_engine.h"
 47#include "gt/intel_gt_types.h"
 48#include "gt/intel_region_lmem.h"
 49#include "gt/intel_workarounds.h"
 50#include "gt/uc/intel_uc.h"
 51
 52#include "i915_drm_client.h"
 53#include "i915_gem.h"
 54#include "i915_gpu_error.h"
 55#include "i915_params.h"
 56#include "i915_perf_types.h"
 57#include "i915_scheduler.h"
 58#include "i915_utils.h"
 59#include "intel_device_info.h"
 60#include "intel_memory_region.h"
 61#include "intel_pch.h"
 62#include "intel_runtime_pm.h"
 63#include "intel_step.h"
 64#include "intel_uncore.h"
 65
 66struct drm_i915_clock_gating_funcs;
 67struct drm_i915_gem_object;
 68struct drm_i915_private;
 69struct intel_connector;
 70struct intel_dp;
 71struct intel_encoder;
 72struct intel_limit;
 73struct intel_overlay_error_state;
 74struct vlv_s0ix_state;
 75
 76#define I915_GEM_GPU_DOMAINS \
 77	(I915_GEM_DOMAIN_RENDER | \
 78	 I915_GEM_DOMAIN_SAMPLER | \
 79	 I915_GEM_DOMAIN_COMMAND | \
 80	 I915_GEM_DOMAIN_INSTRUCTION | \
 81	 I915_GEM_DOMAIN_VERTEX)
 82
 83#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
 84
 85#define GEM_QUIRK_PIN_SWIZZLED_PAGES	BIT(0)
 86
 87struct i915_suspend_saved_registers {
 88	u32 saveDSPARB;
 89	u32 saveSWF0[16];
 90	u32 saveSWF1[16];
 91	u32 saveSWF3[3];
 92	u16 saveGCDGMBUS;
 93};
 94
 95#define MAX_L3_SLICES 2
 96struct intel_l3_parity {
 97	u32 *remap_info[MAX_L3_SLICES];
 98	struct work_struct error_work;
 99	int which_slice;
100};
101
102struct i915_gem_mm {
103	/*
104	 * Shortcut for the stolen region. This points to either
105	 * INTEL_REGION_STOLEN_SMEM for integrated platforms, or
106	 * INTEL_REGION_STOLEN_LMEM for discrete, or NULL if the device doesn't
107	 * support stolen.
108	 */
109	struct intel_memory_region *stolen_region;
110	/** Memory allocator for GTT stolen memory */
111	struct drm_mm stolen;
112	/** Protects the usage of the GTT stolen memory allocator. This is
113	 * always the inner lock when overlapping with struct_mutex. */
114	struct mutex stolen_lock;
115
116	/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
117	spinlock_t obj_lock;
118
119	/**
120	 * List of objects which are purgeable.
121	 */
122	struct list_head purge_list;
123
124	/**
125	 * List of objects which have allocated pages and are shrinkable.
126	 */
127	struct list_head shrink_list;
128
129	/**
130	 * List of objects which are pending destruction.
131	 */
132	struct llist_head free_list;
133	struct work_struct free_work;
134	/**
135	 * Count of objects pending destruction. Used to skip needlessly
136	 * waiting on an RCU barrier if no objects are waiting to be freed.
137	 */
138	atomic_t free_count;
139
140	/**
141	 * tmpfs instance used for shmem backed objects
142	 */
143	struct vfsmount *gemfs;
144
145	struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];
146
147	struct notifier_block oom_notifier;
148	struct notifier_block vmap_notifier;
149	struct shrinker shrinker;
150
151#ifdef CONFIG_MMU_NOTIFIER
152	/**
153	 * notifier_lock for mmu notifiers, memory may not be allocated
154	 * while holding this lock.
155	 */
156	rwlock_t notifier_lock;
157#endif
158
159	/* shrinker accounting, also useful for userland debugging */
160	u64 shrink_memory;
161	u32 shrink_count;
162};
163
164#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
165
166unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915,
167					 u64 context);
168
169static inline unsigned long
170i915_fence_timeout(const struct drm_i915_private *i915)
171{
172	return i915_fence_context_timeout(i915, U64_MAX);
173}
174
175#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
176
177struct i915_virtual_gpu {
178	struct mutex lock; /* serialises sending of g2v_notify command pkts */
179	bool active;
180	u32 caps;
181	u32 *initial_mmio;
182	u8 *initial_cfg_space;
183	struct list_head entry;
184};
185
186struct i915_selftest_stash {
187	atomic_t counter;
188	struct ida mock_region_instances;
189};
190
191struct drm_i915_private {
192	struct drm_device drm;
193
194	struct intel_display display;
195
196	/* FIXME: Device release actions should all be moved to drmm_ */
197	bool do_release;
198
199	/* i915 device parameters */
200	struct i915_params params;
201
202	const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
203	struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
204	struct intel_driver_caps caps;
205
206	/**
207	 * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
208	 * end of stolen which we can optionally use to create GEM objects
209	 * backed by stolen memory. Note that stolen_usable_size tells us
210	 * exactly how much of this we are actually allowed to use, given that
211	 * some portion of it is in fact reserved for use by hardware functions.
212	 */
213	struct resource dsm;
214	/**
215	 * Reserved portion of Data Stolen Memory
216	 */
217	struct resource dsm_reserved;
218
219	/*
220	 * Stolen memory is segmented in hardware with different portions
221	 * offlimits to certain functions.
222	 *
223	 * The drm_mm is initialised to the total accessible range, as found
224	 * from the PCI config. On Broadwell+, this is further restricted to
225	 * avoid the first page! The upper end of stolen memory is reserved for
226	 * hardware functions and similarly removed from the accessible range.
227	 */
228	resource_size_t stolen_usable_size;	/* Total size minus reserved ranges */
229
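/*
 * Illustrative relationship between the three fields above (the sizes
 * here are hypothetical): if dsm spans [0, 64M) and the hardware keeps
 * the top 8M as dsm_reserved, stolen_usable_size comes out as roughly
 * 64M - 8M (less the first page on Broadwell+), and GEM stolen objects
 * are carved out of that remainder via the drm_mm allocator.
 */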
230	struct intel_uncore uncore;
231	struct intel_uncore_mmio_debug mmio_debug;
232
233	struct i915_virtual_gpu vgpu;
234
235	struct intel_gvt *gvt;
236
237	struct pci_dev *bridge_dev;
238
239	struct rb_root uabi_engines;
240	unsigned int engine_uabi_class_count[I915_LAST_UABI_ENGINE_CLASS + 1];
241
242	struct resource mch_res;
243
244	/* protects the irq masks */
245	spinlock_t irq_lock;
246
247	bool display_irqs_enabled;
248
249	/* Sideband mailbox protection */
250	struct mutex sb_lock;
251	struct pm_qos_request sb_qos;
252
253	/** Cached value of IMR to avoid reads in updating the bitfield */
254	union {
255		u32 irq_mask;
256		u32 de_irq_mask[I915_MAX_PIPES];
257	};
258	u32 pipestat_irq_mask[I915_MAX_PIPES];
259
260	bool preserve_bios_swizzle;
261
262	unsigned int fsb_freq, mem_freq, is_ddr3;
263	unsigned int skl_preferred_vco_freq;
264
265	unsigned int max_dotclk_freq;
266	unsigned int hpll_freq;
267	unsigned int czclk_freq;
268
269	/**
270	 * wq - Driver workqueue for GEM.
271	 *
272	 * NOTE: Work items scheduled here are not allowed to grab any modeset
273	 * locks, for otherwise the flushing done in the pageflip code will
274	 * result in deadlocks.
275	 */
276	struct workqueue_struct *wq;
277
278	/* pm private clock gating functions */
279	const struct drm_i915_clock_gating_funcs *clock_gating_funcs;
280
281	/* PCH chipset type */
282	enum intel_pch pch_type;
283	unsigned short pch_id;
284
285	unsigned long gem_quirks;
286
287	struct i915_gem_mm mm;
288
289	bool mchbar_need_disable;
290
291	struct intel_l3_parity l3_parity;
292
293	/*
294	 * edram size in MB.
295	 * Cannot be determined by PCIID. You must always read a register.
296	 */
297	u32 edram_size_mb;
298
299	struct i915_gpu_error gpu_error;
300
301	/*
302	 * Shadows for CHV DPLL_MD regs to keep the state
303	 * checker somewhat working in the presence of hardware
304	 * crappiness (can't read out DPLL_MD for pipes B & C).
305	 */
306	u32 chv_dpll_md[I915_MAX_PIPES];
307	u32 bxt_phy_grc;
308
309	u32 suspend_count;
310	struct i915_suspend_saved_registers regfile;
311	struct vlv_s0ix_state *vlv_s0ix_state;
312
313	struct dram_info {
314		bool wm_lv_0_adjust_needed;
315		u8 num_channels;
316		bool symmetric_memory;
317		enum intel_dram_type {
318			INTEL_DRAM_UNKNOWN,
319			INTEL_DRAM_DDR3,
320			INTEL_DRAM_DDR4,
321			INTEL_DRAM_LPDDR3,
322			INTEL_DRAM_LPDDR4,
323			INTEL_DRAM_DDR5,
324			INTEL_DRAM_LPDDR5,
325		} type;
326		u8 num_qgv_points;
327		u8 num_psf_gv_points;
328	} dram_info;
329
330	struct intel_runtime_pm runtime_pm;
331
332	struct i915_perf perf;
333
334	struct i915_hwmon *hwmon;
335
336	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
337	struct intel_gt gt0;
338
339	/*
340	 * i915->gt[0] == &i915->gt0
341	 */
342#define I915_MAX_GT 4
343	struct intel_gt *gt[I915_MAX_GT];
344
345	struct kobject *sysfs_gt;
346
347	/* Quick lookup of media GT (current platforms only have one) */
348	struct intel_gt *media_gt;
349
350	struct {
351		struct i915_gem_contexts {
352			spinlock_t lock; /* locks list */
353			struct list_head list;
354		} contexts;
355
356		/*
357		 * We replace the local file with a global mapping as the
358		 * backing storage for the mmap is on the device and not
359		 * on the struct file, and we do not want to prolong the
360		 * lifetime of the local fd. To minimise the number of
361		 * anonymous inodes we create, we use a global singleton to
362		 * share the global mapping.
363		 */
364		struct file *mmap_singleton;
365	} gem;
366
367	u8 pch_ssc_use;
368
369	/* For i915gm/i945gm vblank irq workaround */
370	u8 vblank_enabled;
371
372	bool irq_enabled;
373
374	/*
375	 * DG2: Mask of PHYs that were not calibrated by the firmware
376	 * and should not be used.
377	 */
378	u8 snps_phy_failed_calibration;
379
380	struct i915_pmu pmu;
381
382	struct i915_drm_clients clients;
383
384	/* The TTM device structure. */
385	struct ttm_device bdev;
386
387	I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)
388
389	/*
390	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
391	 * will be rejected. Instead look for a better place.
392	 */
393};
394
395static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
396{
397	return container_of(dev, struct drm_i915_private, drm);
398}
399
400static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
401{
402	return dev_get_drvdata(kdev);
403}
404
405static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
406{
407	return pci_get_drvdata(pdev);
408}
409
410static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
411{
412	return &i915->gt0;
413}
414
415/* Simple iterator over all initialised engines */
416#define for_each_engine(engine__, dev_priv__, id__) \
417	for ((id__) = 0; \
418	     (id__) < I915_NUM_ENGINES; \
419	     (id__)++) \
420		for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
421
422/* Iterator over subset of engines selected by mask */
423#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
424	for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
425	     (tmp__) ? \
426	     ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
427	     0;)
428
429#define rb_to_uabi_engine(rb) \
430	rb_entry_safe(rb, struct intel_engine_cs, uabi_node)
431
432#define for_each_uabi_engine(engine__, i915__) \
433	for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
434	     (engine__); \
435	     (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
436
437#define for_each_uabi_class_engine(engine__, class__, i915__) \
438	for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \
439	     (engine__) && (engine__)->uabi_class == (class__); \
440	     (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
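/*
 * Illustrative sketch of the uabi iterators above (the counting loop is
 * hypothetical, not from this header): e.g. counting the video engines
 * exposed to userspace:
 *
 *	struct intel_engine_cs *engine;
 *	unsigned int num_vcs = 0;
 *
 *	for_each_uabi_class_engine(engine, I915_ENGINE_CLASS_VIDEO, i915)
 *		num_vcs++;
 */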
441
442#define INTEL_INFO(dev_priv)	(&(dev_priv)->__info)
443#define RUNTIME_INFO(dev_priv)	(&(dev_priv)->__runtime)
444#define DRIVER_CAPS(dev_priv)	(&(dev_priv)->caps)
445
446#define INTEL_DEVID(dev_priv)	(RUNTIME_INFO(dev_priv)->device_id)
447
448#define IP_VER(ver, rel)		((ver) << 8 | (rel))
449
450#define GRAPHICS_VER(i915)		(RUNTIME_INFO(i915)->graphics.ip.ver)
451#define GRAPHICS_VER_FULL(i915)		IP_VER(RUNTIME_INFO(i915)->graphics.ip.ver, \
452					       RUNTIME_INFO(i915)->graphics.ip.rel)
453#define IS_GRAPHICS_VER(i915, from, until) \
454	(GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))
455
456#define MEDIA_VER(i915)			(RUNTIME_INFO(i915)->media.ip.ver)
457#define MEDIA_VER_FULL(i915)		IP_VER(RUNTIME_INFO(i915)->media.ip.ver, \
458					       RUNTIME_INFO(i915)->media.ip.rel)
459#define IS_MEDIA_VER(i915, from, until) \
460	(MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until))
461
462#define DISPLAY_VER(i915)	(RUNTIME_INFO(i915)->display.ip.ver)
463#define IS_DISPLAY_VER(i915, from, until) \
464	(DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until))
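/*
 * Illustrative usage (hypothetical callers): the *_VER() macros compare
 * major IP versions only, while *_VER_FULL() packs major and release via
 * IP_VER() so both parts compare as one integer:
 *
 *	if (IS_GRAPHICS_VER(i915, 8, 11))
 *		...;	// anything from gen8 through gen11
 *	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
 *		...;	// graphics IP 12.50 or newer
 */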
465
466#define INTEL_REVID(dev_priv)	(to_pci_dev((dev_priv)->drm.dev)->revision)
467
468#define HAS_DSB(dev_priv)	(INTEL_INFO(dev_priv)->display.has_dsb)
469
470#define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step)
471#define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step)
472#define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step)
473#define INTEL_BASEDIE_STEP(__i915) (RUNTIME_INFO(__i915)->step.basedie_step)
474
475#define IS_DISPLAY_STEP(__i915, since, until) \
476	(drm_WARN_ON(&(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \
477	 INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until))
478
479#define IS_GRAPHICS_STEP(__i915, since, until) \
480	(drm_WARN_ON(&(__i915)->drm, INTEL_GRAPHICS_STEP(__i915) == STEP_NONE), \
481	 INTEL_GRAPHICS_STEP(__i915) >= (since) && INTEL_GRAPHICS_STEP(__i915) < (until))
482
483#define IS_MEDIA_STEP(__i915, since, until) \
484	(drm_WARN_ON(&(__i915)->drm, INTEL_MEDIA_STEP(__i915) == STEP_NONE), \
485	 INTEL_MEDIA_STEP(__i915) >= (since) && INTEL_MEDIA_STEP(__i915) < (until))
486
487#define IS_BASEDIE_STEP(__i915, since, until) \
488	(drm_WARN_ON(&(__i915)->drm, INTEL_BASEDIE_STEP(__i915) == STEP_NONE), \
489	 INTEL_BASEDIE_STEP(__i915) >= (since) && INTEL_BASEDIE_STEP(__i915) < (until))
490
491static __always_inline unsigned int
492__platform_mask_index(const struct intel_runtime_info *info,
493		      enum intel_platform p)
494{
495	const unsigned int pbits =
496		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
497
498	/* Expand the platform_mask array if this fails. */
499	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
500		     pbits * ARRAY_SIZE(info->platform_mask));
501
502	return p / pbits;
503}
504
505static __always_inline unsigned int
506__platform_mask_bit(const struct intel_runtime_info *info,
507		    enum intel_platform p)
508{
509	const unsigned int pbits =
510		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
511
512	return p % pbits + INTEL_SUBPLATFORM_BITS;
513}
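/*
 * Worked example (assuming 32-bit platform_mask words and
 * INTEL_SUBPLATFORM_BITS == 3): pbits == 29, so platform enum value 40
 * lands in word 40 / 29 == 1 at bit 40 % 29 + 3 == 14, with the low
 * three bits of each word reserved for the device's subplatform id.
 */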
514
515static inline u32
516intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
517{
518	const unsigned int pi = __platform_mask_index(info, p);
519
520	return info->platform_mask[pi] & INTEL_SUBPLATFORM_MASK;
521}
522
523static __always_inline bool
524IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
525{
526	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
527	const unsigned int pi = __platform_mask_index(info, p);
528	const unsigned int pb = __platform_mask_bit(info, p);
529
530	BUILD_BUG_ON(!__builtin_constant_p(p));
531
532	return info->platform_mask[pi] & BIT(pb);
533}
534
535static __always_inline bool
536IS_SUBPLATFORM(const struct drm_i915_private *i915,
537	       enum intel_platform p, unsigned int s)
538{
539	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
540	const unsigned int pi = __platform_mask_index(info, p);
541	const unsigned int pb = __platform_mask_bit(info, p);
542	const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
543	const u32 mask = info->platform_mask[pi];
544
545	BUILD_BUG_ON(!__builtin_constant_p(p));
546	BUILD_BUG_ON(!__builtin_constant_p(s));
547	BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);
548
549	/* Shift and test on the MSB position so sign flag can be used. */
550	return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
551}
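/*
 * The shifted form above is equivalent to
 *	(mask & BIT(pb)) && (mask & BIT(s))
 * i.e. both the platform bit and the subplatform bit must be set; lining
 * both tests up on the MSB lets a single bit test decide the result.
 */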
552
553#define IS_MOBILE(dev_priv)	(INTEL_INFO(dev_priv)->is_mobile)
554#define IS_DGFX(dev_priv)   (INTEL_INFO(dev_priv)->is_dgfx)
555
556#define IS_I830(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I830)
557#define IS_I845G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I845G)
558#define IS_I85X(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I85X)
559#define IS_I865G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I865G)
560#define IS_I915G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915G)
561#define IS_I915GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915GM)
562#define IS_I945G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945G)
563#define IS_I945GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945GM)
564#define IS_I965G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965G)
565#define IS_I965GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965GM)
566#define IS_G45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G45)
567#define IS_GM45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GM45)
568#define IS_G4X(dev_priv)	(IS_G45(dev_priv) || IS_GM45(dev_priv))
569#define IS_PINEVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
570#define IS_G33(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G33)
571#define IS_IRONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
572#define IS_IRONLAKE_M(dev_priv) \
573	(IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
574#define IS_SANDYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SANDYBRIDGE)
575#define IS_IVYBRIDGE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
576#define IS_IVB_GT1(dev_priv)	(IS_IVYBRIDGE(dev_priv) && \
577				 INTEL_INFO(dev_priv)->gt == 1)
578#define IS_VALLEYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
579#define IS_CHERRYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
580#define IS_HASWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_HASWELL)
581#define IS_BROADWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROADWELL)
582#define IS_SKYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
583#define IS_BROXTON(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROXTON)
584#define IS_KABYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
585#define IS_GEMINILAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
586#define IS_COFFEELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
587#define IS_COMETLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COMETLAKE)
588#define IS_ICELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ICELAKE)
589#define IS_JSL_EHL(dev_priv)	(IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || \
590				IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
591#define IS_TIGERLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
592#define IS_ROCKETLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
593#define IS_DG1(dev_priv)        IS_PLATFORM(dev_priv, INTEL_DG1)
594#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_S)
595#define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_P)
596#define IS_XEHPSDV(dev_priv) IS_PLATFORM(dev_priv, INTEL_XEHPSDV)
597#define IS_DG2(dev_priv)	IS_PLATFORM(dev_priv, INTEL_DG2)
598#define IS_PONTEVECCHIO(dev_priv) IS_PLATFORM(dev_priv, INTEL_PONTEVECCHIO)
599#define IS_METEORLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_METEORLAKE)
600
601#define IS_METEORLAKE_M(dev_priv) \
602	IS_SUBPLATFORM(dev_priv, INTEL_METEORLAKE, INTEL_SUBPLATFORM_M)
603#define IS_METEORLAKE_P(dev_priv) \
604	IS_SUBPLATFORM(dev_priv, INTEL_METEORLAKE, INTEL_SUBPLATFORM_P)
605#define IS_DG2_G10(dev_priv) \
606	IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G10)
607#define IS_DG2_G11(dev_priv) \
608	IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G11)
609#define IS_DG2_G12(dev_priv) \
610	IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G12)
611#define IS_ADLS_RPLS(dev_priv) \
612	IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL)
613#define IS_ADLP_N(dev_priv) \
614	IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N)
615#define IS_ADLP_RPLP(dev_priv) \
616	IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPL)
617#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
618				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
619#define IS_BDW_ULT(dev_priv) \
620	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
621#define IS_BDW_ULX(dev_priv) \
622	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
623#define IS_BDW_GT3(dev_priv)	(IS_BROADWELL(dev_priv) && \
624				 INTEL_INFO(dev_priv)->gt == 3)
625#define IS_HSW_ULT(dev_priv) \
626	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
627#define IS_HSW_GT3(dev_priv)	(IS_HASWELL(dev_priv) && \
628				 INTEL_INFO(dev_priv)->gt == 3)
629#define IS_HSW_GT1(dev_priv)	(IS_HASWELL(dev_priv) && \
630				 INTEL_INFO(dev_priv)->gt == 1)
631/* ULX machines are also considered ULT. */
632#define IS_HSW_ULX(dev_priv) \
633	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
634#define IS_SKL_ULT(dev_priv) \
635	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
636#define IS_SKL_ULX(dev_priv) \
637	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
638#define IS_KBL_ULT(dev_priv) \
639	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
640#define IS_KBL_ULX(dev_priv) \
641	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
642#define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
643				 INTEL_INFO(dev_priv)->gt == 2)
644#define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
645				 INTEL_INFO(dev_priv)->gt == 3)
646#define IS_SKL_GT4(dev_priv)	(IS_SKYLAKE(dev_priv) && \
647				 INTEL_INFO(dev_priv)->gt == 4)
648#define IS_KBL_GT2(dev_priv)	(IS_KABYLAKE(dev_priv) && \
649				 INTEL_INFO(dev_priv)->gt == 2)
650#define IS_KBL_GT3(dev_priv)	(IS_KABYLAKE(dev_priv) && \
651				 INTEL_INFO(dev_priv)->gt == 3)
652#define IS_CFL_ULT(dev_priv) \
653	IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
654#define IS_CFL_ULX(dev_priv) \
655	IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
656#define IS_CFL_GT2(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
657				 INTEL_INFO(dev_priv)->gt == 2)
658#define IS_CFL_GT3(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
659				 INTEL_INFO(dev_priv)->gt == 3)
660
661#define IS_CML_ULT(dev_priv) \
662	IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
663#define IS_CML_ULX(dev_priv) \
664	IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)
665#define IS_CML_GT2(dev_priv)	(IS_COMETLAKE(dev_priv) && \
666				 INTEL_INFO(dev_priv)->gt == 2)
667
668#define IS_ICL_WITH_PORT_F(dev_priv) \
669	IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
670
671#define IS_TGL_UY(dev_priv) \
672	IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_UY)
673
674#define IS_SKL_GRAPHICS_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GRAPHICS_STEP(p, since, until))
675
676#define IS_KBL_GRAPHICS_STEP(dev_priv, since, until) \
677	(IS_KABYLAKE(dev_priv) && IS_GRAPHICS_STEP(dev_priv, since, until))
678#define IS_KBL_DISPLAY_STEP(dev_priv, since, until) \
679	(IS_KABYLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, since, until))
680
681#define IS_JSL_EHL_GRAPHICS_STEP(p, since, until) \
682	(IS_JSL_EHL(p) && IS_GRAPHICS_STEP(p, since, until))
683#define IS_JSL_EHL_DISPLAY_STEP(p, since, until) \
684	(IS_JSL_EHL(p) && IS_DISPLAY_STEP(p, since, until))
685
686#define IS_TGL_DISPLAY_STEP(__i915, since, until) \
687	(IS_TIGERLAKE(__i915) && \
688	 IS_DISPLAY_STEP(__i915, since, until))
689
690#define IS_TGL_UY_GRAPHICS_STEP(__i915, since, until) \
691	(IS_TGL_UY(__i915) && \
692	 IS_GRAPHICS_STEP(__i915, since, until))
693
694#define IS_TGL_GRAPHICS_STEP(__i915, since, until) \
695	(IS_TIGERLAKE(__i915) && !IS_TGL_UY(__i915) && \
696	 IS_GRAPHICS_STEP(__i915, since, until))
697
698#define IS_RKL_DISPLAY_STEP(p, since, until) \
699	(IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until))
700
701#define IS_DG1_GRAPHICS_STEP(p, since, until) \
702	(IS_DG1(p) && IS_GRAPHICS_STEP(p, since, until))
703#define IS_DG1_DISPLAY_STEP(p, since, until) \
704	(IS_DG1(p) && IS_DISPLAY_STEP(p, since, until))
705
706#define IS_ADLS_DISPLAY_STEP(__i915, since, until) \
707	(IS_ALDERLAKE_S(__i915) && \
708	 IS_DISPLAY_STEP(__i915, since, until))
709
710#define IS_ADLS_GRAPHICS_STEP(__i915, since, until) \
711	(IS_ALDERLAKE_S(__i915) && \
712	 IS_GRAPHICS_STEP(__i915, since, until))
713
714#define IS_ADLP_DISPLAY_STEP(__i915, since, until) \
715	(IS_ALDERLAKE_P(__i915) && \
716	 IS_DISPLAY_STEP(__i915, since, until))
717
718#define IS_ADLP_GRAPHICS_STEP(__i915, since, until) \
719	(IS_ALDERLAKE_P(__i915) && \
720	 IS_GRAPHICS_STEP(__i915, since, until))
721
722#define IS_XEHPSDV_GRAPHICS_STEP(__i915, since, until) \
723	(IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until))
724
725#define IS_MTL_GRAPHICS_STEP(__i915, variant, since, until) \
726	(IS_SUBPLATFORM(__i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_##variant) && \
727	 IS_GRAPHICS_STEP(__i915, since, until))
728
729/*
730 * DG2 hardware steppings are a bit unusual.  The hardware design was forked to
731 * create three variants (G10, G11, and G12) which each have distinct
732 * workaround sets.  The G11 and G12 forks of the DG2 design reset the GT
733 * stepping back to "A0" for their first iterations, even though they're more
734 * similar to a G10 B0 stepping and G10 C0 stepping respectively in terms of
735 * functionality and workarounds.  However the display stepping does not reset
736 * in the same manner --- a specific stepping like "B0" has a consistent
737 * meaning regardless of whether it belongs to a G10, G11, or G12 DG2.
738 *
739 * TLDR:  All GT workarounds and stepping-specific logic must be applied in
740 * relation to a specific subplatform (G10/G11/G12), whereas display workarounds
741 * and stepping-specific logic will be applied with a general DG2-wide stepping
742 * number.
743 */
744#define IS_DG2_GRAPHICS_STEP(__i915, variant, since, until) \
745	(IS_SUBPLATFORM(__i915, INTEL_DG2, INTEL_SUBPLATFORM_##variant) && \
746	 IS_GRAPHICS_STEP(__i915, since, until))
747
748#define IS_DG2_DISPLAY_STEP(__i915, since, until) \
749	(IS_DG2(__i915) && \
750	 IS_DISPLAY_STEP(__i915, since, until))
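/*
 * Illustrative sketch of the DG2 convention described above (the two
 * apply_*_wa() helpers are hypothetical): GT checks name the fork, the
 * display check does not:
 *
 *	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0))
 *		apply_gt_wa(i915);
 *	if (IS_DG2_DISPLAY_STEP(i915, STEP_B0, STEP_C0))
 *		apply_display_wa(i915);
 */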
751
752#define IS_PVC_BD_STEP(__i915, since, until) \
753	(IS_PONTEVECCHIO(__i915) && \
754	 IS_BASEDIE_STEP(__i915, since, until))
755
756#define IS_PVC_CT_STEP(__i915, since, until) \
757	(IS_PONTEVECCHIO(__i915) && \
758	 IS_GRAPHICS_STEP(__i915, since, until))
759
760#define IS_LP(dev_priv)		(INTEL_INFO(dev_priv)->is_lp)
761#define IS_GEN9_LP(dev_priv)	(GRAPHICS_VER(dev_priv) == 9 && IS_LP(dev_priv))
762#define IS_GEN9_BC(dev_priv)	(GRAPHICS_VER(dev_priv) == 9 && !IS_LP(dev_priv))
763
764#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
765#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)
766
767#define __ENGINE_INSTANCES_MASK(mask, first, count) ({			\
768	unsigned int first__ = (first);					\
769	unsigned int count__ = (count);					\
770	((mask) & GENMASK(first__ + count__ - 1, first__)) >> first__;	\
771})
772
773#define ENGINE_INSTANCES_MASK(gt, first, count) \
774	__ENGINE_INSTANCES_MASK((gt)->info.engine_mask, first, count)
775
776#define RCS_MASK(gt) \
777	ENGINE_INSTANCES_MASK(gt, RCS0, I915_MAX_RCS)
778#define BCS_MASK(gt) \
779	ENGINE_INSTANCES_MASK(gt, BCS0, I915_MAX_BCS)
780#define VDBOX_MASK(gt) \
781	ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
782#define VEBOX_MASK(gt) \
783	ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
784#define CCS_MASK(gt) \
785	ENGINE_INSTANCES_MASK(gt, CCS0, I915_MAX_CCS)
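/*
 * Worked example: with info.engine_mask == BIT(VCS0) | BIT(VCS2),
 * VDBOX_MASK(gt) selects the VCS instance range with GENMASK() and
 * shifts it down by VCS0, returning 0b101: video engines 0 and 2
 * present, instance 1 fused off.
 */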
786
787#define HAS_MEDIA_RATIO_MODE(dev_priv) (INTEL_INFO(dev_priv)->has_media_ratio_mode)
788
789/*
790 * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
791 * All later gens can run the final buffer from the ppgtt
792 */
793#define CMDPARSER_USES_GGTT(dev_priv) (GRAPHICS_VER(dev_priv) == 7)
794
795#define HAS_LLC(dev_priv)	(INTEL_INFO(dev_priv)->has_llc)
796#define HAS_4TILE(dev_priv)	(INTEL_INFO(dev_priv)->has_4tile)
797#define HAS_SNOOP(dev_priv)	(INTEL_INFO(dev_priv)->has_snoop)
798#define HAS_EDRAM(dev_priv)	((dev_priv)->edram_size_mb)
799#define HAS_SECURE_BATCHES(dev_priv) (GRAPHICS_VER(dev_priv) < 6)
800#define HAS_WT(dev_priv)	HAS_EDRAM(dev_priv)
801
802#define HWS_NEEDS_PHYSICAL(dev_priv)	(INTEL_INFO(dev_priv)->hws_needs_physical)
803
804#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
805		(INTEL_INFO(dev_priv)->has_logical_ring_contexts)
806#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
807		(INTEL_INFO(dev_priv)->has_logical_ring_elsq)
808
809#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
810
811#define INTEL_PPGTT(dev_priv) (RUNTIME_INFO(dev_priv)->ppgtt_type)
812#define HAS_PPGTT(dev_priv) \
813	(INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
814#define HAS_FULL_PPGTT(dev_priv) \
815	(INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)
816
817#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
818	GEM_BUG_ON((sizes) == 0); \
819	((sizes) & ~RUNTIME_INFO(dev_priv)->page_sizes) == 0; \
820})
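/*
 * Illustrative usage (hypothetical caller): the macro answers "are all
 * of the requested sizes supported?", so a combined query reads:
 *
 *	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K |
 *				 I915_GTT_PAGE_SIZE_2M))
 *		...;	// both 64K and 2M GTT pages are usable
 */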
821
822#define HAS_OVERLAY(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_overlay)
823#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
824		(INTEL_INFO(dev_priv)->display.overlay_needs_physical)
825
826/* Early gen2 have a totally busted CS tlb and require pinned batches. */
827#define HAS_BROKEN_CS_TLB(dev_priv)	(IS_I830(dev_priv) || IS_I845G(dev_priv))
828
829#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv)	\
830	(IS_BROADWELL(dev_priv) || GRAPHICS_VER(dev_priv) == 9)
831
832/* WaRsDisableCoarsePowerGating:skl,cnl */
833#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv)			\
834	(IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
835
836#define HAS_GMBUS_IRQ(dev_priv) (DISPLAY_VER(dev_priv) >= 4)
837#define HAS_GMBUS_BURST_READ(dev_priv) (DISPLAY_VER(dev_priv) >= 11 || \
838					IS_GEMINILAKE(dev_priv) || \
839					IS_KABYLAKE(dev_priv))
840
841/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
842 * rows, which changed the alignment requirements and fence programming.
843 */
844#define HAS_128_BYTE_Y_TILING(dev_priv) (GRAPHICS_VER(dev_priv) != 2 && \
845					 !(IS_I915G(dev_priv) || IS_I915GM(dev_priv)))
846#define SUPPORTS_TV(dev_priv)		(INTEL_INFO(dev_priv)->display.supports_tv)
847#define I915_HAS_HOTPLUG(dev_priv)	(INTEL_INFO(dev_priv)->display.has_hotplug)
848
849#define HAS_FW_BLC(dev_priv)	(DISPLAY_VER(dev_priv) > 2)
850#define HAS_FBC(dev_priv)	(RUNTIME_INFO(dev_priv)->fbc_mask != 0)
851#define HAS_CUR_FBC(dev_priv)	(!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) >= 7)
852
853#define HAS_IPS(dev_priv)	(IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
854
855#define HAS_DP_MST(dev_priv)	(INTEL_INFO(dev_priv)->display.has_dp_mst)
856#define HAS_DP20(dev_priv)	(IS_DG2(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
857
858#define HAS_DOUBLE_BUFFERED_M_N(dev_priv)	(DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
859
860#define HAS_CDCLK_CRAWL(dev_priv)	 (INTEL_INFO(dev_priv)->display.has_cdclk_crawl)
861#define HAS_CDCLK_SQUASH(dev_priv)	 (INTEL_INFO(dev_priv)->display.has_cdclk_squash)
862#define HAS_DDI(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_ddi)
863#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->display.has_fpga_dbg)
864#define HAS_PSR(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_psr)
865#define HAS_PSR_HW_TRACKING(dev_priv) \
866	(INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
867#define HAS_PSR2_SEL_FETCH(dev_priv)	 (DISPLAY_VER(dev_priv) >= 12)
868#define HAS_TRANSCODER(dev_priv, trans)	 ((RUNTIME_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
869
870#define HAS_RC6(dev_priv)		 (INTEL_INFO(dev_priv)->has_rc6)
871#define HAS_RC6p(dev_priv)		 (INTEL_INFO(dev_priv)->has_rc6p)
872#define HAS_RC6pp(dev_priv)		 (false) /* HW was never validated */
873
874#define HAS_RPS(dev_priv)	(INTEL_INFO(dev_priv)->has_rps)
875
876#define HAS_DMC(dev_priv)	(RUNTIME_INFO(dev_priv)->has_dmc)
877
878#define HAS_HECI_PXP(dev_priv) \
879	(INTEL_INFO(dev_priv)->has_heci_pxp)
880
881#define HAS_HECI_GSCFI(dev_priv) \
882	(INTEL_INFO(dev_priv)->has_heci_gscfi)
883
884#define HAS_HECI_GSC(dev_priv) (HAS_HECI_PXP(dev_priv) || HAS_HECI_GSCFI(dev_priv))
885
886#define HAS_MSO(i915)		(DISPLAY_VER(i915) >= 12)
887
888#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
889#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
890
891#define HAS_OA_BPC_REPORTING(dev_priv) \
892	(INTEL_INFO(dev_priv)->has_oa_bpc_reporting)
893#define HAS_OA_SLICE_CONTRIB_LIMITS(dev_priv) \
894	(INTEL_INFO(dev_priv)->has_oa_slice_contrib_limits)
895
896/*
897 * Set this flag when the platform requires 64K GTT page sizes or larger for
898 * device local memory access.
899 */
900#define HAS_64K_PAGES(dev_priv) (INTEL_INFO(dev_priv)->has_64k_pages)
901
902#define HAS_IPC(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_ipc)
903
904#define HAS_REGION(i915, i) (RUNTIME_INFO(i915)->memory_regions & (i))
905#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
906
907#define HAS_EXTRA_GT_LIST(dev_priv)   (INTEL_INFO(dev_priv)->extra_gt_list)
908
909/*
910 * Platform has the dedicated compression control state for each lmem surface
911 * stored in lmem to support the 3D and media compression formats.
912 */
913#define HAS_FLAT_CCS(dev_priv)   (INTEL_INFO(dev_priv)->has_flat_ccs)
914
915#define HAS_GT_UC(dev_priv)	(INTEL_INFO(dev_priv)->has_gt_uc)
916
917#define HAS_POOLED_EU(dev_priv)	(RUNTIME_INFO(dev_priv)->has_pooled_eu)
918
919#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv)	(INTEL_INFO(dev_priv)->has_global_mocs)
920
921#define HAS_PXP(dev_priv)  ((IS_ENABLED(CONFIG_DRM_I915_PXP) && \
922			    INTEL_INFO(dev_priv)->has_pxp) && \
923			    VDBOX_MASK(to_gt(dev_priv)))
924
925#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
926
927#define HAS_GMD_ID(i915)	(INTEL_INFO(i915)->has_gmd_id)
928
929#define HAS_LSPCON(dev_priv) (IS_DISPLAY_VER(dev_priv, 9, 10))
930
931#define HAS_L3_CCS_READ(i915) (INTEL_INFO(i915)->has_l3_ccs_read)
932
933/* DPF == dynamic parity feature */
934#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
935#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
936				 2 : HAS_L3_DPF(dev_priv))
937
938#define GT_FREQUENCY_MULTIPLIER 50
939#define GEN9_FREQ_SCALER 3
940
941#define INTEL_NUM_PIPES(dev_priv) (hweight8(RUNTIME_INFO(dev_priv)->pipe_mask))
942
943#define HAS_DISPLAY(dev_priv) (RUNTIME_INFO(dev_priv)->pipe_mask != 0)
944
945#define HAS_VRR(i915)	(DISPLAY_VER(i915) >= 11)
946
947#define HAS_ASYNC_FLIPS(i915)		(DISPLAY_VER(i915) >= 5)
948
949/* Only valid when HAS_DISPLAY() is true */
950#define INTEL_DISPLAY_ENABLED(dev_priv) \
951	(drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)),		\
952	 !(dev_priv)->params.disable_display &&				\
953	 !intel_opregion_headless_sku(dev_priv))
954
955#define HAS_GUC_DEPRIVILEGE(dev_priv) \
956	(INTEL_INFO(dev_priv)->has_guc_deprivilege)
957
958#define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \
959					      IS_ALDERLAKE_S(dev_priv))
960
961#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)
962
963#define HAS_3D_PIPELINE(i915)	(INTEL_INFO(i915)->has_3d_pipeline)
964
965#define HAS_ONE_EU_PER_FUSE_BIT(i915)	(INTEL_INFO(i915)->has_one_eu_per_fuse_bit)
966
967#define HAS_LMEMBAR_SMEM_STOLEN(i915) (!HAS_LMEM(i915) && \
968				       GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
969
970/* intel_device_info.c */
971static inline struct intel_device_info *
972mkwrite_device_info(struct drm_i915_private *dev_priv)
973{
974	return (struct intel_device_info *)INTEL_INFO(dev_priv);
975}
976
977#endif
v4.17
   1/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
   2 */
   3/*
   4 *
   5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
   6 * All Rights Reserved.
   7 *
   8 * Permission is hereby granted, free of charge, to any person obtaining a
   9 * copy of this software and associated documentation files (the
  10 * "Software"), to deal in the Software without restriction, including
  11 * without limitation the rights to use, copy, modify, merge, publish,
  12 * distribute, sub license, and/or sell copies of the Software, and to
  13 * permit persons to whom the Software is furnished to do so, subject to
  14 * the following conditions:
  15 *
  16 * The above copyright notice and this permission notice (including the
  17 * next paragraph) shall be included in all copies or substantial portions
  18 * of the Software.
  19 *
  20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
  23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
  24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  27 *
  28 */
  29
  30#ifndef _I915_DRV_H_
  31#define _I915_DRV_H_
  32
  33#include <uapi/drm/i915_drm.h>
  34#include <uapi/drm/drm_fourcc.h>
  35
  36#include <linux/io-mapping.h>
  37#include <linux/i2c.h>
  38#include <linux/i2c-algo-bit.h>
  39#include <linux/backlight.h>
  40#include <linux/hash.h>
  41#include <linux/intel-iommu.h>
  42#include <linux/kref.h>
  43#include <linux/perf_event.h>
  44#include <linux/pm_qos.h>
  45#include <linux/reservation.h>
  46#include <linux/shmem_fs.h>
  47
  48#include <drm/drmP.h>
  49#include <drm/intel-gtt.h>
  50#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
  51#include <drm/drm_gem.h>
  52#include <drm/drm_auth.h>
  53#include <drm/drm_cache.h>
  54
  55#include "i915_params.h"
  56#include "i915_reg.h"
  57#include "i915_utils.h"
  58
  59#include "intel_bios.h"
  60#include "intel_device_info.h"
  61#include "intel_display.h"
  62#include "intel_dpll_mgr.h"
  63#include "intel_lrc.h"
  64#include "intel_opregion.h"
  65#include "intel_ringbuffer.h"
  66#include "intel_uncore.h"
  67#include "intel_uc.h"
  68
  69#include "i915_gem.h"
  70#include "i915_gem_context.h"
  71#include "i915_gem_fence_reg.h"
  72#include "i915_gem_object.h"
  73#include "i915_gem_gtt.h"
  74#include "i915_gem_timeline.h"
  75
  76#include "i915_request.h"
  77#include "i915_vma.h"
  78
  79#include "intel_gvt.h"
  80
  81/* General customization:
  82 */
  83
  84#define DRIVER_NAME		"i915"
  85#define DRIVER_DESC		"Intel Graphics"
  86#define DRIVER_DATE		"20180308"
  87#define DRIVER_TIMESTAMP	1520513379
  88
  89/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  90 * WARN_ON()) for hw state sanity checks to check for unexpected conditions
  91 * which may not necessarily be a user visible problem.  This will either
  92 * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to
  93 * enable distros and users to tailor their preferred amount of i915 abrt
  94 * spam.
  95 */
  96#define I915_STATE_WARN(condition, format...) ({			\
  97	int __ret_warn_on = !!(condition);				\
  98	if (unlikely(__ret_warn_on))					\
  99		if (!WARN(i915_modparams.verbose_state_checks, format))	\
 100			DRM_ERROR(format);				\
 101	unlikely(__ret_warn_on);					\
 102})
 103
 104#define I915_STATE_WARN_ON(x)						\
 105	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
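/*
 * Illustrative usage (hypothetical check): both macros evaluate to the
 * condition's value, so they can gate an early return:
 *
 *	if (I915_STATE_WARN_ON(!crtc_state))
 *		return;
 */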
 106
 107#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
 108bool __i915_inject_load_failure(const char *func, int line);
 109#define i915_inject_load_failure() \
 110	__i915_inject_load_failure(__func__, __LINE__)
 111#else
 112#define i915_inject_load_failure() false
 113#endif
 114
 115typedef struct {
 116	uint32_t val;
 117} uint_fixed_16_16_t;
 118
 119#define FP_16_16_MAX ({ \
 120	uint_fixed_16_16_t fp; \
 121	fp.val = UINT_MAX; \
 122	fp; \
 123})
 124
 125static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
 126{
 127	if (val.val == 0)
 128		return true;
 129	return false;
 130}
 131
 132static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
 133{
 134	uint_fixed_16_16_t fp;
 135
 136	WARN_ON(val > U16_MAX);
 137
 138	fp.val = val << 16;
 139	return fp;
 140}
 141
 142static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
 143{
 144	return DIV_ROUND_UP(fp.val, 1 << 16);
 145}
 146
 147static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp)
 148{
 149	return fp.val >> 16;
 150}
 151
 152static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
 153						 uint_fixed_16_16_t min2)
 154{
 155	uint_fixed_16_16_t min;
 156
 157	min.val = min(min1.val, min2.val);
 158	return min;
 159}
 160
 161static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
 162						 uint_fixed_16_16_t max2)
 163{
 164	uint_fixed_16_16_t max;
 165
 166	max.val = max(max1.val, max2.val);
 167	return max;
 168}
 169
 170static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
 171{
 172	uint_fixed_16_16_t fp;
 173	WARN_ON(val > U32_MAX);
 174	fp.val = (uint32_t) val;
 175	return fp;
 176}
 177
 178static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val,
 179					    uint_fixed_16_16_t d)
 180{
 181	return DIV_ROUND_UP(val.val, d.val);
 182}
 183
 184static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
 185						uint_fixed_16_16_t mul)
 186{
 187	uint64_t intermediate_val;
 188
 189	intermediate_val = (uint64_t) val * mul.val;
 190	intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
 191	WARN_ON(intermediate_val > U32_MAX);
 192	return (uint32_t) intermediate_val;
 193}
 194
 195static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
 196					     uint_fixed_16_16_t mul)
 197{
 198	uint64_t intermediate_val;
 199
 200	intermediate_val = (uint64_t) val.val * mul.val;
 201	intermediate_val = intermediate_val >> 16;
 202	return clamp_u64_to_fixed16(intermediate_val);
 203}
 204
 205static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d)
 206{
 207	uint64_t interm_val;
 208
 209	interm_val = (uint64_t)val << 16;
 210	interm_val = DIV_ROUND_UP_ULL(interm_val, d);
 211	return clamp_u64_to_fixed16(interm_val);
 212}
 213
 214static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
 215						uint_fixed_16_16_t d)
 216{
 217	uint64_t interm_val;
 218
 219	interm_val = (uint64_t)val << 16;
 220	interm_val = DIV_ROUND_UP_ULL(interm_val, d.val);
 221	WARN_ON(interm_val > U32_MAX);
 222	return (uint32_t) interm_val;
 223}
 224
 225static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
 226						     uint_fixed_16_16_t mul)
 227{
 228	uint64_t intermediate_val;
 229
 230	intermediate_val = (uint64_t) val * mul.val;
 231	return clamp_u64_to_fixed16(intermediate_val);
 232}
 233
 234static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
 235					     uint_fixed_16_16_t add2)
 236{
 237	uint64_t interm_sum;
 238
 239	interm_sum = (uint64_t) add1.val + add2.val;
 240	return clamp_u64_to_fixed16(interm_sum);
 241}
 242
 243static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
 244						 uint32_t add2)
 245{
 246	uint64_t interm_sum;
 247	uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2);
 248
 249	interm_sum = (uint64_t) add1.val + interm_add2.val;
 250	return clamp_u64_to_fixed16(interm_sum);
 251}
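/*
 * Worked example: 2.5 is stored as val == 0x28000 (2.5 * 2^16), so
 * mul_fixed16(2.5, 2.0) computes (0x28000ULL * 0x20000) >> 16 ==
 * 0x50000, i.e. 5.0; clamp_u64_to_fixed16() then warns if the 64-bit
 * intermediate exceeded U32_MAX.
 */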
 252
 253enum hpd_pin {
 254	HPD_NONE = 0,
 255	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
 256	HPD_CRT,
 257	HPD_SDVO_B,
 258	HPD_SDVO_C,
 259	HPD_PORT_A,
 260	HPD_PORT_B,
 261	HPD_PORT_C,
 262	HPD_PORT_D,
 263	HPD_PORT_E,
 264	HPD_NUM_PINS
 265};
 266
 267#define for_each_hpd_pin(__pin) \
 268	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
 269
 270#define HPD_STORM_DEFAULT_THRESHOLD 5
 271
 272struct i915_hotplug {
 273	struct work_struct hotplug_work;
 274
 275	struct {
 276		unsigned long last_jiffies;
 277		int count;
 278		enum {
 279			HPD_ENABLED = 0,
 280			HPD_DISABLED = 1,
 281			HPD_MARK_DISABLED = 2
 282		} state;
 283	} stats[HPD_NUM_PINS];
 284	u32 event_bits;
 285	struct delayed_work reenable_work;
 286
 287	struct intel_digital_port *irq_port[I915_MAX_PORTS];
 288	u32 long_port_mask;
 289	u32 short_port_mask;
 290	struct work_struct dig_port_work;
 291
 292	struct work_struct poll_init_work;
 293	bool poll_enabled;
 294
 295	unsigned int hpd_storm_threshold;
 296
 297	/*
 298	 * if we get a HPD irq from DP and a HPD irq from non-DP
299	 * the non-DP HPD could block the workqueue on acquiring a mode
300	 * config mutex that userspace may have taken. However
 301	 * userspace is waiting on the DP workqueue to run which is
 302	 * blocked behind the non-DP one.
 303	 */
 304	struct workqueue_struct *dp_wq;
 305};
 306
 307#define I915_GEM_GPU_DOMAINS \
 308	(I915_GEM_DOMAIN_RENDER | \
 309	 I915_GEM_DOMAIN_SAMPLER | \
 310	 I915_GEM_DOMAIN_COMMAND | \
 311	 I915_GEM_DOMAIN_INSTRUCTION | \
 312	 I915_GEM_DOMAIN_VERTEX)
 313
 314struct drm_i915_private;
 315struct i915_mm_struct;
 316struct i915_mmu_object;
 317
 318struct drm_i915_file_private {
 319	struct drm_i915_private *dev_priv;
 320	struct drm_file *file;
 321
 322	struct {
 323		spinlock_t lock;
 324		struct list_head request_list;
 325/* 20ms is a fairly arbitrary limit (greater than the average frame time)
 326 * chosen to prevent the CPU getting more than a frame ahead of the GPU
 327 * (when using lax throttling for the frontbuffer). We also use it to
 328 * offer free GPU waitboosts for severely congested workloads.
 329 */
 330#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
 331	} mm;
 332	struct idr context_idr;
 333
 334	struct intel_rps_client {
 335		atomic_t boosts;
 336	} rps_client;
 337
 338	unsigned int bsd_engine;
 339
 340/* Client can have a maximum of 3 contexts banned before
341 * it is denied from creating new contexts. As one context
 342 * ban needs 4 consecutive hangs, and more if there is
 343 * progress in between, this is a last resort stop gap measure
344 * to limit a badly behaving client's access to the gpu.
 345 */
 346#define I915_MAX_CLIENT_CONTEXT_BANS 3
 347	atomic_t context_bans;
 348};
 349
 350/* Interface history:
 351 *
 352 * 1.1: Original.
 353 * 1.2: Add Power Management
 354 * 1.3: Add vblank support
 355 * 1.4: Fix cmdbuffer path, add heap destroy
 356 * 1.5: Add vblank pipe configuration
 357 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 358 *      - Support vertical blank on secondary display pipe
 359 */
 360#define DRIVER_MAJOR		1
 361#define DRIVER_MINOR		6
 362#define DRIVER_PATCHLEVEL	0
 363
 364struct intel_overlay;
 365struct intel_overlay_error_state;
 366
 367struct sdvo_device_mapping {
 368	u8 initialized;
 369	u8 dvo_port;
 370	u8 slave_addr;
 371	u8 dvo_wiring;
 372	u8 i2c_pin;
 373	u8 ddc_pin;
 374};
 375
 376struct intel_connector;
 377struct intel_encoder;
 378struct intel_atomic_state;
 379struct intel_crtc_state;
 380struct intel_initial_plane_config;
 381struct intel_crtc;
 382struct intel_limit;
 383struct dpll;
 384struct intel_cdclk_state;
 385
 386struct drm_i915_display_funcs {
 387	void (*get_cdclk)(struct drm_i915_private *dev_priv,
 388			  struct intel_cdclk_state *cdclk_state);
 389	void (*set_cdclk)(struct drm_i915_private *dev_priv,
 390			  const struct intel_cdclk_state *cdclk_state);
 391	int (*get_fifo_size)(struct drm_i915_private *dev_priv,
 392			     enum i9xx_plane_id i9xx_plane);
 393	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
 394	int (*compute_intermediate_wm)(struct drm_device *dev,
 395				       struct intel_crtc *intel_crtc,
 396				       struct intel_crtc_state *newstate);
 397	void (*initial_watermarks)(struct intel_atomic_state *state,
 398				   struct intel_crtc_state *cstate);
 399	void (*atomic_update_watermarks)(struct intel_atomic_state *state,
 400					 struct intel_crtc_state *cstate);
 401	void (*optimize_watermarks)(struct intel_atomic_state *state,
 402				    struct intel_crtc_state *cstate);
 403	int (*compute_global_watermarks)(struct drm_atomic_state *state);
 404	void (*update_wm)(struct intel_crtc *crtc);
 405	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
 406	/* Returns the active state of the crtc, and if the crtc is active,
 407	 * fills out the pipe-config with the hw state. */
 408	bool (*get_pipe_config)(struct intel_crtc *,
 409				struct intel_crtc_state *);
 410	void (*get_initial_plane_config)(struct intel_crtc *,
 411					 struct intel_initial_plane_config *);
 412	int (*crtc_compute_clock)(struct intel_crtc *crtc,
 413				  struct intel_crtc_state *crtc_state);
 414	void (*crtc_enable)(struct intel_crtc_state *pipe_config,
 415			    struct drm_atomic_state *old_state);
 416	void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
 417			     struct drm_atomic_state *old_state);
 418	void (*update_crtcs)(struct drm_atomic_state *state);
 419	void (*audio_codec_enable)(struct intel_encoder *encoder,
 420				   const struct intel_crtc_state *crtc_state,
 421				   const struct drm_connector_state *conn_state);
 422	void (*audio_codec_disable)(struct intel_encoder *encoder,
 423				    const struct intel_crtc_state *old_crtc_state,
 424				    const struct drm_connector_state *old_conn_state);
 425	void (*fdi_link_train)(struct intel_crtc *crtc,
 426			       const struct intel_crtc_state *crtc_state);
 427	void (*init_clock_gating)(struct drm_i915_private *dev_priv);
 428	void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
 429	/* clock updates for mode set */
 430	/* cursor updates */
 431	/* render clock increase/decrease */
 432	/* display clock increase/decrease */
 433	/* pll clock increase/decrease */
 434
 435	void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
 436	void (*load_luts)(struct drm_crtc_state *crtc_state);
 437};
 438
 439#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
 440#define CSR_VERSION_MAJOR(version)	((version) >> 16)
 441#define CSR_VERSION_MINOR(version)	((version) & 0xffff)
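
/*
 * Worked example (illustrative, not part of the original header): DMC
 * firmware version 1.23 packs as CSR_VERSION(1, 23) == 0x00010017, and
 * CSR_VERSION_MAJOR()/CSR_VERSION_MINOR() recover 1 and 23 from it.
 */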
 442
 443struct intel_csr {
 444	struct work_struct work;
 445	const char *fw_path;
 446	uint32_t *dmc_payload;
 447	uint32_t dmc_fw_size;
 448	uint32_t version;
 449	uint32_t mmio_count;
 450	i915_reg_t mmioaddr[8];
 451	uint32_t mmiodata[8];
 452	uint32_t dc_state;
 453	uint32_t allowed_dc_mask;
 454};
 455
 456struct intel_display_error_state;
 457
 458struct i915_gpu_state {
 459	struct kref ref;
 460	ktime_t time;
 461	ktime_t boottime;
 462	ktime_t uptime;
 463
 464	struct drm_i915_private *i915;
 465
 466	char error_msg[128];
 467	bool simulated;
 468	bool awake;
 469	bool wakelock;
 470	bool suspended;
 471	int iommu;
 472	u32 reset_count;
 473	u32 suspend_count;
 474	struct intel_device_info device_info;
 475	struct intel_driver_caps driver_caps;
 476	struct i915_params params;
 477
 478	struct i915_error_uc {
 479		struct intel_uc_fw guc_fw;
 480		struct intel_uc_fw huc_fw;
 481		struct drm_i915_error_object *guc_log;
 482	} uc;
 483
 484	/* Generic register state */
 485	u32 eir;
 486	u32 pgtbl_er;
 487	u32 ier;
 488	u32 gtier[4], ngtier;
 489	u32 ccid;
 490	u32 derrmr;
 491	u32 forcewake;
 492	u32 error; /* gen6+ */
 493	u32 err_int; /* gen7 */
 494	u32 fault_data0; /* gen8, gen9 */
 495	u32 fault_data1; /* gen8, gen9 */
 496	u32 done_reg;
 497	u32 gac_eco;
 498	u32 gam_ecochk;
 499	u32 gab_ctl;
 500	u32 gfx_mode;
 501
 502	u32 nfence;
 503	u64 fence[I915_MAX_NUM_FENCES];
 504	struct intel_overlay_error_state *overlay;
 505	struct intel_display_error_state *display;
 506
 507	struct drm_i915_error_engine {
 508		int engine_id;
 509		/* Software tracked state */
 510		bool idle;
 511		bool waiting;
 512		int num_waiters;
 513		unsigned long hangcheck_timestamp;
 514		bool hangcheck_stalled;
 515		enum intel_engine_hangcheck_action hangcheck_action;
 516		struct i915_address_space *vm;
 517		int num_requests;
 518		u32 reset_count;
 519
 520		/* position of active request inside the ring */
 521		u32 rq_head, rq_post, rq_tail;
 522
 523		/* our own tracking of ring head and tail */
 524		u32 cpu_ring_head;
 525		u32 cpu_ring_tail;
 526
 527		u32 last_seqno;
 528
 529		/* Register state */
 530		u32 start;
 531		u32 tail;
 532		u32 head;
 533		u32 ctl;
 534		u32 mode;
 535		u32 hws;
 536		u32 ipeir;
 537		u32 ipehr;
 538		u32 bbstate;
 539		u32 instpm;
 540		u32 instps;
 541		u32 seqno;
 542		u64 bbaddr;
 543		u64 acthd;
 544		u32 fault_reg;
 545		u64 faddr;
 546		u32 rc_psmi; /* sleep state */
 547		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
 548		struct intel_instdone instdone;
 549
 550		struct drm_i915_error_context {
 551			char comm[TASK_COMM_LEN];
 552			pid_t pid;
 553			u32 handle;
 554			u32 hw_id;
 555			int priority;
 556			int ban_score;
 557			int active;
 558			int guilty;
 559			bool bannable;
 560		} context;
 561
 562		struct drm_i915_error_object {
 563			u64 gtt_offset;
 564			u64 gtt_size;
 565			int page_count;
 566			int unused;
 567			u32 *pages[0];
 568		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
 569
 570		struct drm_i915_error_object **user_bo;
 571		long user_bo_count;
 572
 573		struct drm_i915_error_object *wa_ctx;
 574		struct drm_i915_error_object *default_state;
 575
 576		struct drm_i915_error_request {
 577			long jiffies;
 578			pid_t pid;
 579			u32 context;
 580			int priority;
 581			int ban_score;
 582			u32 seqno;
 583			u32 head;
 584			u32 tail;
 585		} *requests, execlist[EXECLIST_MAX_PORTS];
 586		unsigned int num_ports;
 587
 588		struct drm_i915_error_waiter {
 589			char comm[TASK_COMM_LEN];
 590			pid_t pid;
 591			u32 seqno;
 592		} *waiters;
 593
 594		struct {
 595			u32 gfx_mode;
 596			union {
 597				u64 pdp[4];
 598				u32 pp_dir_base;
 599			};
 600		} vm_info;
 601	} engine[I915_NUM_ENGINES];
 602
 603	struct drm_i915_error_buffer {
 604		u32 size;
 605		u32 name;
 606		u32 rseqno[I915_NUM_ENGINES], wseqno;
 607		u64 gtt_offset;
 608		u32 read_domains;
 609		u32 write_domain;
 610		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
 611		u32 tiling:2;
 612		u32 dirty:1;
 613		u32 purgeable:1;
 614		u32 userptr:1;
 615		s32 engine:4;
 616		u32 cache_level:3;
 617	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
 618	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
 619	struct i915_address_space *active_vm[I915_NUM_ENGINES];
 620};
 621
 622enum i915_cache_level {
 623	I915_CACHE_NONE = 0,
 624	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
 625	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
 626			      caches, e.g. sampler/render caches, and the
 627			      large Last-Level-Cache. LLC is coherent with
 628			      the CPU, but L3 is only visible to the GPU. */
 629	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
 630};
 631
 632#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
 633
 634enum fb_op_origin {
 635	ORIGIN_GTT,
 636	ORIGIN_CPU,
 637	ORIGIN_CS,
 638	ORIGIN_FLIP,
 639	ORIGIN_DIRTYFB,
 640};
 641
 642struct intel_fbc {
 643	/* This is always the inner lock when overlapping with struct_mutex and
 644	 * it's the outer lock when overlapping with stolen_lock. */
 645	struct mutex lock;
 646	unsigned threshold;
 647	unsigned int possible_framebuffer_bits;
 648	unsigned int busy_bits;
 649	unsigned int visible_pipes_mask;
 650	struct intel_crtc *crtc;
 651
 652	struct drm_mm_node compressed_fb;
 653	struct drm_mm_node *compressed_llb;
 654
 655	bool false_color;
 656
 657	bool enabled;
 658	bool active;
 659
 660	bool underrun_detected;
 661	struct work_struct underrun_work;
 662
 663	/*
 664	 * Due to the atomic rules we can't access some structures without the
 665	 * appropriate locking, so we cache information here in order to avoid
 666	 * these problems.
 667	 */
 668	struct intel_fbc_state_cache {
 669		struct i915_vma *vma;
 670		unsigned long flags;
 671
 672		struct {
 673			unsigned int mode_flags;
 674			uint32_t hsw_bdw_pixel_rate;
 675		} crtc;
 676
 677		struct {
 678			unsigned int rotation;
 679			int src_w;
 680			int src_h;
 681			bool visible;
 682			/*
 683			 * Display surface base address adjustment for
 684			 * pageflips. Note that on gen4+ this only adjusts up
 685			 * to a tile, offsets within a tile are handled in
 686			 * the hw itself (with the TILEOFF register).
 687			 */
 688			int adjusted_x;
 689			int adjusted_y;
 690
 691			int y;
 692		} plane;
 693
 694		struct {
 695			const struct drm_format_info *format;
 696			unsigned int stride;
 697		} fb;
 698	} state_cache;
 699
 700	/*
 701	 * This structure contains everything that's relevant to program the
 702	 * hardware registers. When we want to figure out if we need to disable
 703	 * and re-enable FBC for a new configuration we just check if there's
 704	 * something different in the struct. The genx_fbc_activate functions
 705	 * are supposed to read from it in order to program the registers.
 706	 */
 707	struct intel_fbc_reg_params {
 708		struct i915_vma *vma;
 709		unsigned long flags;
 710
 711		struct {
 712			enum pipe pipe;
 713			enum i9xx_plane_id i9xx_plane;
 714			unsigned int fence_y_offset;
 715		} crtc;
 716
 717		struct {
 718			const struct drm_format_info *format;
 719			unsigned int stride;
 720		} fb;
 721
 722		int cfb_size;
 723		unsigned int gen9_wa_cfb_stride;
 724	} params;
 725
 726	struct intel_fbc_work {
 727		bool scheduled;
 728		u64 scheduled_vblank;
 729		struct work_struct work;
 730	} work;
 731
 732	const char *no_fbc_reason;
 733};
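
/*
 * Illustrative sketch, not part of the original header: the "check if
 * there's something different in the struct" test described above can be a
 * plain memcmp of the cached parameters, assuming both structs were
 * zero-initialised so that padding bytes compare equal.
 */
static inline bool
intel_fbc_params_equal(const struct intel_fbc_reg_params *a,
		       const struct intel_fbc_reg_params *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;
}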
 734
 735/*
 736 * HIGH_RR is the highest eDP panel refresh rate read from EDID;
 737 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 738 * parsing for the same resolution.
 739 */
 740enum drrs_refresh_rate_type {
 741	DRRS_HIGH_RR,
 742	DRRS_LOW_RR,
 743	DRRS_MAX_RR, /* RR count */
 744};
 745
 746enum drrs_support_type {
 747	DRRS_NOT_SUPPORTED = 0,
 748	STATIC_DRRS_SUPPORT = 1,
 749	SEAMLESS_DRRS_SUPPORT = 2
 750};
 751
 752struct intel_dp;
 753struct i915_drrs {
 754	struct mutex mutex;
 755	struct delayed_work work;
 756	struct intel_dp *dp;
 757	unsigned busy_frontbuffer_bits;
 758	enum drrs_refresh_rate_type refresh_rate_type;
 759	enum drrs_support_type type;
 760};
 761
 762struct i915_psr {
 763	struct mutex lock;
 764	bool sink_support;
 765	struct intel_dp *enabled;
 766	bool active;
 767	struct delayed_work work;
 768	unsigned busy_frontbuffer_bits;
 769	bool psr2_support;
 770	bool aux_frame_sync;
 771	bool link_standby;
 772	bool y_cord_support;
 773	bool colorimetry_support;
 774	bool alpm;
 775
 776	void (*enable_source)(struct intel_dp *,
 777			      const struct intel_crtc_state *);
 778	void (*disable_source)(struct intel_dp *,
 779			       const struct intel_crtc_state *);
 780	void (*enable_sink)(struct intel_dp *);
 781	void (*activate)(struct intel_dp *);
 782	void (*setup_vsc)(struct intel_dp *, const struct intel_crtc_state *);
 783};
 784
 785enum intel_pch {
 786	PCH_NONE = 0,	/* No PCH present */
 787	PCH_IBX,	/* Ibexpeak PCH */
 788	PCH_CPT,	/* Cougarpoint/Pantherpoint PCH */
 789	PCH_LPT,	/* Lynxpoint/Wildcatpoint PCH */
 790	PCH_SPT,        /* Sunrisepoint PCH */
 791	PCH_KBP,        /* Kaby Lake PCH */
 792	PCH_CNP,        /* Cannon Lake PCH */
 793	PCH_ICP,	/* Ice Lake PCH */
 794	PCH_NOP,
 795};
 796
 797enum intel_sbi_destination {
 798	SBI_ICLK,
 799	SBI_MPHY,
 800};
 801
 802#define QUIRK_LVDS_SSC_DISABLE (1<<1)
 803#define QUIRK_INVERT_BRIGHTNESS (1<<2)
 804#define QUIRK_BACKLIGHT_PRESENT (1<<3)
 805#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
 806#define QUIRK_INCREASE_T12_DELAY (1<<6)
 807
 808struct intel_fbdev;
 809struct intel_fbc_work;
 810
 811struct intel_gmbus {
 812	struct i2c_adapter adapter;
 813#define GMBUS_FORCE_BIT_RETRY (1U << 31)
 814	u32 force_bit;
 815	u32 reg0;
 816	i915_reg_t gpio_reg;
 817	struct i2c_algo_bit_data bit_algo;
 818	struct drm_i915_private *dev_priv;
 819};
 820
 821struct i915_suspend_saved_registers {
 822	u32 saveDSPARB;
 823	u32 saveFBC_CONTROL;
 824	u32 saveCACHE_MODE_0;
 825	u32 saveMI_ARB_STATE;
 826	u32 saveSWF0[16];
 827	u32 saveSWF1[16];
 828	u32 saveSWF3[3];
 829	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
 830	u32 savePCH_PORT_HOTPLUG;
 831	u16 saveGCDGMBUS;
 832};
 833
 834struct vlv_s0ix_state {
 835	/* GAM */
 836	u32 wr_watermark;
 837	u32 gfx_prio_ctrl;
 838	u32 arb_mode;
 839	u32 gfx_pend_tlb0;
 840	u32 gfx_pend_tlb1;
 841	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
 842	u32 media_max_req_count;
 843	u32 gfx_max_req_count;
 844	u32 render_hwsp;
 845	u32 ecochk;
 846	u32 bsd_hwsp;
 847	u32 blt_hwsp;
 848	u32 tlb_rd_addr;
 849
 850	/* MBC */
 851	u32 g3dctl;
 852	u32 gsckgctl;
 853	u32 mbctl;
 854
 855	/* GCP */
 856	u32 ucgctl1;
 857	u32 ucgctl3;
 858	u32 rcgctl1;
 859	u32 rcgctl2;
 860	u32 rstctl;
 861	u32 misccpctl;
 862
 863	/* GPM */
 864	u32 gfxpause;
 865	u32 rpdeuhwtc;
 866	u32 rpdeuc;
 867	u32 ecobus;
 868	u32 pwrdwnupctl;
 869	u32 rp_down_timeout;
 870	u32 rp_deucsw;
 871	u32 rcubmabdtmr;
 872	u32 rcedata;
 873	u32 spare2gh;
 874
 875	/* Display 1 CZ domain */
 876	u32 gt_imr;
 877	u32 gt_ier;
 878	u32 pm_imr;
 879	u32 pm_ier;
 880	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
 881
 882	/* GT SA CZ domain */
 883	u32 tilectl;
 884	u32 gt_fifoctl;
 885	u32 gtlc_wake_ctrl;
 886	u32 gtlc_survive;
 887	u32 pmwgicz;
 888
 889	/* Display 2 CZ domain */
 890	u32 gu_ctl0;
 891	u32 gu_ctl1;
 892	u32 pcbr;
 893	u32 clock_gate_dis2;
 894};
 895
 896struct intel_rps_ei {
 897	ktime_t ktime;
 898	u32 render_c0;
 899	u32 media_c0;
 900};
 901
 902struct intel_rps {
 903	/*
 904	 * work, interrupts_enabled and pm_iir are protected by
 905	 * dev_priv->irq_lock
 906	 */
 907	struct work_struct work;
 908	bool interrupts_enabled;
 909	u32 pm_iir;
 910
 911	/* PM interrupt bits that should never be masked */
 912	u32 pm_intrmsk_mbz;
 913
 914	/* Frequencies are stored in potentially platform dependent multiples.
 915	 * In other words, *_freq needs to be multiplied by X to be interesting.
 916	 * Soft limits are those which are used for the dynamic reclocking done
 917	 * by the driver (raise frequencies under heavy loads, and lower for
 918	 * lighter loads). Hard limits are those imposed by the hardware.
 919	 *
 920	 * A distinction is made for overclocking, which is never enabled by
 921	 * default, and is considered to be above the hard limit if it's
 922	 * possible at all.
 923	 */
 924	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
 925	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
 926	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
 927	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
 928	u8 min_freq;		/* AKA RPn. Minimum frequency */
 929	u8 boost_freq;		/* Frequency to request when wait boosting */
 930	u8 idle_freq;		/* Frequency to request when we are idle */
 931	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
 932	u8 rp1_freq;		/* "less than" RP0 power/frequency */
 933	u8 rp0_freq;		/* Non-overclocked max frequency. */
 934	u16 gpll_ref_freq;	/* vlv/chv GPLL reference frequency */
 935
 936	u8 up_threshold; /* Current %busy required to upclock */
 937	u8 down_threshold; /* Current %busy required to downclock */
 938
 939	int last_adj;
 940	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
 941
 942	bool enabled;
 943	atomic_t num_waiters;
 944	atomic_t boosts;
 945
 946	/* manual wa residency calculations */
 947	struct intel_rps_ei ei;
 948};
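
/*
 * Illustrative sketch, not part of the original header: keeping a requested
 * frequency inside the soft limits described above. All values are in the
 * same platform-dependent multiple as the *_freq fields.
 */
static inline u8 intel_rps_clamp_freq(const struct intel_rps *rps, u8 freq)
{
	return clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit);
}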
 949
 950struct intel_rc6 {
 951	bool enabled;
 952	u64 prev_hw_residency[4];
 953	u64 cur_residency[4];
 954};
 955
 956struct intel_llc_pstate {
 957	bool enabled;
 958};
 959
 960struct intel_gen6_power_mgmt {
 961	struct intel_rps rps;
 962	struct intel_rc6 rc6;
 963	struct intel_llc_pstate llc_pstate;
 964};
 965
 966/* defined in intel_pm.c */
 967extern spinlock_t mchdev_lock;
 968
 969struct intel_ilk_power_mgmt {
 970	u8 cur_delay;
 971	u8 min_delay;
 972	u8 max_delay;
 973	u8 fmax;
 974	u8 fstart;
 975
 976	u64 last_count1;
 977	unsigned long last_time1;
 978	unsigned long chipset_power;
 979	u64 last_count2;
 980	u64 last_time2;
 981	unsigned long gfx_power;
 982	u8 corr;
 983
 984	int c_m;
 985	int r_t;
 986};
 987
 988struct drm_i915_private;
 989struct i915_power_well;
 990
 991struct i915_power_well_ops {
 992	/*
 993	 * Synchronize the well's hw state to match the current sw state, for
 994	 * example enable/disable it based on the current refcount. Called
 995	 * during driver init and resume time, possibly after first calling
 996	 * the enable/disable handlers.
 997	 */
 998	void (*sync_hw)(struct drm_i915_private *dev_priv,
 999			struct i915_power_well *power_well);
1000	/*
1001	 * Enable the well and resources that depend on it (for example
1002	 * interrupts located on the well). Called after the 0->1 refcount
1003	 * transition.
1004	 */
1005	void (*enable)(struct drm_i915_private *dev_priv,
1006		       struct i915_power_well *power_well);
1007	/*
1008	 * Disable the well and resources that depend on it. Called after
1009	 * the 1->0 refcount transition.
1010	 */
1011	void (*disable)(struct drm_i915_private *dev_priv,
1012			struct i915_power_well *power_well);
1013	/* Returns the hw enabled state. */
1014	bool (*is_enabled)(struct drm_i915_private *dev_priv,
1015			   struct i915_power_well *power_well);
1016};
1017
1018/* Power well structure for haswell */
1019struct i915_power_well {
1020	const char *name;
1021	bool always_on;
1022	/* power well enable/disable usage count */
1023	int count;
1024	/* cached hw enabled state */
1025	bool hw_enabled;
1026	u64 domains;
1027	/* unique identifier for this power well */
1028	enum i915_power_well_id id;
1029	/*
 1030	 * Arbitrary data associated with this power well. Platform and power
1031	 * well specific.
1032	 */
1033	union {
1034		struct {
1035			enum dpio_phy phy;
1036		} bxt;
1037		struct {
1038			/* Mask of pipes whose IRQ logic is backed by the pw */
1039			u8 irq_pipe_mask;
1040			/* The pw is backing the VGA functionality */
1041			bool has_vga:1;
1042			bool has_fuses:1;
1043		} hsw;
1044	};
1045	const struct i915_power_well_ops *ops;
1046};
1047
1048struct i915_power_domains {
1049	/*
1050	 * Power wells needed for initialization at driver init and suspend
1051	 * time are on. They are kept on until after the first modeset.
1052	 */
1053	bool init_power_on;
1054	bool initializing;
1055	int power_well_count;
1056
1057	struct mutex lock;
1058	int domain_use_count[POWER_DOMAIN_NUM];
1059	struct i915_power_well *power_wells;
1060};
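
/*
 * Illustrative sketch, not part of the original header: the refcounting
 * contract described by the ops above. enable() runs on the 0->1 count
 * transition and disable() on 1->0; callers are assumed to hold
 * power_domains->lock.
 */
static inline void
i915_power_well_get_sketch(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well)
{
	if (!power_well->count++) {
		power_well->ops->enable(dev_priv, power_well);
		power_well->hw_enabled = true;
	}
}

static inline void
i915_power_well_put_sketch(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well)
{
	if (!--power_well->count) {
		power_well->hw_enabled = false;
		power_well->ops->disable(dev_priv, power_well);
	}
}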
1061
1062#define MAX_L3_SLICES 2
1063struct intel_l3_parity {
1064	u32 *remap_info[MAX_L3_SLICES];
1065	struct work_struct error_work;
1066	int which_slice;
1067};
1068
1069struct i915_gem_mm {
1070	/** Memory allocator for GTT stolen memory */
1071	struct drm_mm stolen;
1072	/** Protects the usage of the GTT stolen memory allocator. This is
1073	 * always the inner lock when overlapping with struct_mutex. */
1074	struct mutex stolen_lock;
1075
1076	/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
1077	spinlock_t obj_lock;
1078
1079	/** List of all objects in gtt_space. Used to restore gtt
1080	 * mappings on resume */
1081	struct list_head bound_list;
1082	/**
1083	 * List of objects which are not bound to the GTT (thus
1084	 * are idle and not used by the GPU). These objects may or may
1085	 * not actually have any pages attached.
1086	 */
1087	struct list_head unbound_list;
1088
 1089	/** List of all objects in gtt_space, currently mmapped by userspace.
1090	 * All objects within this list must also be on bound_list.
1091	 */
1092	struct list_head userfault_list;
1093
1094	/**
1095	 * List of objects which are pending destruction.
1096	 */
1097	struct llist_head free_list;
1098	struct work_struct free_work;
1099	spinlock_t free_lock;
1100	/**
 1101	 * Count of objects pending destruction. Used to skip needlessly
1102	 * waiting on an RCU barrier if no objects are waiting to be freed.
1103	 */
1104	atomic_t free_count;
1105
1106	/**
1107	 * Small stash of WC pages
1108	 */
1109	struct pagevec wc_stash;
1110
1111	/**
1112	 * tmpfs instance used for shmem backed objects
1113	 */
1114	struct vfsmount *gemfs;
1115
1116	/** PPGTT used for aliasing the PPGTT with the GTT */
1117	struct i915_hw_ppgtt *aliasing_ppgtt;
1118
1119	struct notifier_block oom_notifier;
1120	struct notifier_block vmap_notifier;
1121	struct shrinker shrinker;
1122
1123	/** LRU list of objects with fence regs on them. */
1124	struct list_head fence_list;
1125
1126	/**
1127	 * Workqueue to fault in userptr pages, flushed by the execbuf
1128	 * when required but otherwise left to userspace to try again
1129	 * on EAGAIN.
1130	 */
1131	struct workqueue_struct *userptr_wq;
1132
1133	u64 unordered_timeline;
1134
 1135	/* the indicator for dispatching video commands on the two BSD rings */
1136	atomic_t bsd_engine_dispatch_index;
1137
1138	/** Bit 6 swizzling required for X tiling */
1139	uint32_t bit_6_swizzle_x;
1140	/** Bit 6 swizzling required for Y tiling */
1141	uint32_t bit_6_swizzle_y;
1142
1143	/* accounting, useful for userland debugging */
1144	spinlock_t object_stat_lock;
1145	u64 object_memory;
1146	u32 object_count;
1147};
1148
1149struct drm_i915_error_state_buf {
1150	struct drm_i915_private *i915;
1151	unsigned bytes;
1152	unsigned size;
1153	int err;
1154	u8 *buf;
1155	loff_t start;
1156	loff_t pos;
1157};
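
/*
 * Illustrative sketch, not part of the original header: appending to the
 * bounded error-state buffer above. The real formatting helpers live in
 * i915_gpu_error.c; this only shows the bytes/size/err bookkeeping.
 */
static inline void
i915_error_puts_sketch(struct drm_i915_error_state_buf *e, const char *str)
{
	size_t len = strlen(str);

	if (e->err)
		return;		/* a previous append already failed */

	if (e->bytes + len > e->size) {
		e->err = -ENOSPC;
		return;
	}

	memcpy(e->buf + e->bytes, str, len);
	e->bytes += len;
}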
1158
1159#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
1160
1161#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
1162#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
1163
1164#define I915_ENGINE_DEAD_TIMEOUT  (4 * HZ)  /* Seqno, head and subunits dead */
1165#define I915_SEQNO_DEAD_TIMEOUT   (12 * HZ) /* Seqno dead with active head */
1166
1167struct i915_gpu_error {
1168	/* For hangcheck timer */
1169#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
1170#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
1171
1172	struct delayed_work hangcheck_work;
1173
1174	/* For reset and error_state handling. */
1175	spinlock_t lock;
1176	/* Protected by the above dev->gpu_error.lock. */
1177	struct i915_gpu_state *first_error;
1178
1179	atomic_t pending_fb_pin;
1180
1181	unsigned long missed_irq_rings;
1182
1183	/**
1184	 * State variable controlling the reset flow and count
1185	 *
 1186	 * This is a counter which gets incremented when a reset is triggered.
 1187	 *
1188	 * Before the reset commences, the I915_RESET_BACKOFF bit is set
1189	 * meaning that any waiters holding onto the struct_mutex should
1190	 * relinquish the lock immediately in order for the reset to start.
1191	 *
 1192	 * If the reset is not completed successfully, the I915_WEDGED bit is
 1193	 * set, meaning that the hardware is terminally sour and there is no
1194	 * recovery. All waiters on the reset_queue will be woken when
1195	 * that happens.
1196	 *
 1197	 * This counter is used by the wait_seqno code to notice that a reset
 1198	 * event happened and that it needs to restart the entire ioctl (since
 1199	 * most likely the seqno it waited for won't ever signal anytime soon).
1200	 *
1201	 * This is important for lock-free wait paths, where no contended lock
1202	 * naturally enforces the correct ordering between the bail-out of the
1203	 * waiter and the gpu reset work code.
1204	 */
1205	unsigned long reset_count;
1206
1207	/**
1208	 * flags: Control various stages of the GPU reset
1209	 *
1210	 * #I915_RESET_BACKOFF - When we start a reset, we want to stop any
1211	 * other users acquiring the struct_mutex. To do this we set the
1212	 * #I915_RESET_BACKOFF bit in the error flags when we detect a reset
1213	 * and then check for that bit before acquiring the struct_mutex (in
1214	 * i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a
1215	 * secondary role in preventing two concurrent global reset attempts.
1216	 *
1217	 * #I915_RESET_HANDOFF - To perform the actual GPU reset, we need the
1218	 * struct_mutex. We try to acquire the struct_mutex in the reset worker,
1219	 * but it may be held by some long running waiter (that we cannot
1220	 * interrupt without causing trouble). Once we are ready to do the GPU
1221	 * reset, we set the I915_RESET_HANDOFF bit and wakeup any waiters. If
1222	 * they already hold the struct_mutex and want to participate they can
1223	 * inspect the bit and do the reset directly, otherwise the worker
1224	 * waits for the struct_mutex.
1225	 *
1226	 * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
1227	 * acquire the struct_mutex to reset an engine, we need an explicit
1228	 * flag to prevent two concurrent reset attempts in the same engine.
1229	 * As the number of engines continues to grow, allocate the flags from
1230	 * the most significant bits.
1231	 *
1232	 * #I915_WEDGED - If reset fails and we can no longer use the GPU,
1233	 * we set the #I915_WEDGED bit. Prior to command submission, e.g.
1234	 * i915_request_alloc(), this bit is checked and the sequence
1235	 * aborted (with -EIO reported to userspace) if set.
1236	 */
1237	unsigned long flags;
1238#define I915_RESET_BACKOFF	0
1239#define I915_RESET_HANDOFF	1
1240#define I915_RESET_MODESET	2
1241#define I915_WEDGED		(BITS_PER_LONG - 1)
1242#define I915_RESET_ENGINE	(I915_WEDGED - I915_NUM_ENGINES)
1243
1244	/** Number of times an engine has been reset */
1245	u32 reset_engine_count[I915_NUM_ENGINES];
1246
1247	/**
 1248	 * Waitqueue to signal when a hang is detected. Used for waiters
 1249	 * to release the struct_mutex so the reset can proceed.
1250	 */
1251	wait_queue_head_t wait_queue;
1252
1253	/**
1254	 * Waitqueue to signal when the reset has completed. Used by clients
1255	 * that wait for dev_priv->mm.wedged to settle.
1256	 */
1257	wait_queue_head_t reset_queue;
1258
1259	/* For missed irq/seqno simulation. */
1260	unsigned long test_irq_rings;
1261};
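
/*
 * Illustrative sketch, not part of the original header: how the reset flag
 * bits above are typically tested; the driver provides equivalent inline
 * helpers elsewhere.
 */
static inline bool i915_reset_backoff_sketch(struct i915_gpu_error *error)
{
	return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
}

static inline bool i915_terminally_wedged_sketch(struct i915_gpu_error *error)
{
	return unlikely(test_bit(I915_WEDGED, &error->flags));
}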
1262
1263enum modeset_restore {
1264	MODESET_ON_LID_OPEN,
1265	MODESET_DONE,
1266	MODESET_SUSPENDED,
1267};
1268
1269#define DP_AUX_A 0x40
1270#define DP_AUX_B 0x10
1271#define DP_AUX_C 0x20
1272#define DP_AUX_D 0x30
1273#define DP_AUX_F 0x60
1274
1275#define DDC_PIN_B  0x05
1276#define DDC_PIN_C  0x04
1277#define DDC_PIN_D  0x06
1278
1279struct ddi_vbt_port_info {
1280	int max_tmds_clock;
1281
1282	/*
1283	 * This is an index in the HDMI/DVI DDI buffer translation table.
1284	 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
1285	 * populate this field.
1286	 */
1287#define HDMI_LEVEL_SHIFT_UNKNOWN	0xff
1288	uint8_t hdmi_level_shift;
1289
1290	uint8_t supports_dvi:1;
1291	uint8_t supports_hdmi:1;
1292	uint8_t supports_dp:1;
1293	uint8_t supports_edp:1;
1294
1295	uint8_t alternate_aux_channel;
1296	uint8_t alternate_ddc_pin;
1297
1298	uint8_t dp_boost_level;
1299	uint8_t hdmi_boost_level;
1300	int dp_max_link_rate;		/* 0 for not limited by VBT */
1301};
1302
1303enum psr_lines_to_wait {
1304	PSR_0_LINES_TO_WAIT = 0,
1305	PSR_1_LINE_TO_WAIT,
1306	PSR_4_LINES_TO_WAIT,
1307	PSR_8_LINES_TO_WAIT
1308};
1309
1310struct intel_vbt_data {
1311	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
1312	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
1313
1314	/* Feature bits */
1315	unsigned int int_tv_support:1;
1316	unsigned int lvds_dither:1;
1317	unsigned int lvds_vbt:1;
1318	unsigned int int_crt_support:1;
1319	unsigned int lvds_use_ssc:1;
1320	unsigned int display_clock_mode:1;
1321	unsigned int fdi_rx_polarity_inverted:1;
1322	unsigned int panel_type:4;
1323	int lvds_ssc_freq;
1324	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
1325
1326	enum drrs_support_type drrs_type;
1327
1328	struct {
1329		int rate;
1330		int lanes;
1331		int preemphasis;
1332		int vswing;
1333		bool low_vswing;
1334		bool initialized;
1335		bool support;
1336		int bpp;
1337		struct edp_power_seq pps;
1338	} edp;
1339
1340	struct {
1341		bool full_link;
1342		bool require_aux_wakeup;
1343		int idle_frames;
1344		enum psr_lines_to_wait lines_to_wait;
1345		int tp1_wakeup_time;
1346		int tp2_tp3_wakeup_time;
1347	} psr;
1348
1349	struct {
1350		u16 pwm_freq_hz;
1351		bool present;
1352		bool active_low_pwm;
1353		u8 min_brightness;	/* min_brightness/255 of max */
1354		u8 controller;		/* brightness controller number */
1355		enum intel_backlight_type type;
1356	} backlight;
1357
1358	/* MIPI DSI */
1359	struct {
1360		u16 panel_id;
1361		struct mipi_config *config;
1362		struct mipi_pps_data *pps;
1363		u16 bl_ports;
1364		u16 cabc_ports;
1365		u8 seq_version;
1366		u32 size;
1367		u8 *data;
1368		const u8 *sequence[MIPI_SEQ_MAX];
1369		u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
1370	} dsi;
1371
1372	int crt_ddc_pin;
1373
1374	int child_dev_num;
1375	struct child_device_config *child_dev;
1376
1377	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
1378	struct sdvo_device_mapping sdvo_mappings[2];
1379};
1380
1381enum intel_ddb_partitioning {
1382	INTEL_DDB_PART_1_2,
1383	INTEL_DDB_PART_5_6, /* IVB+ */
1384};
1385
1386struct intel_wm_level {
1387	bool enable;
1388	uint32_t pri_val;
1389	uint32_t spr_val;
1390	uint32_t cur_val;
1391	uint32_t fbc_val;
1392};
1393
1394struct ilk_wm_values {
1395	uint32_t wm_pipe[3];
1396	uint32_t wm_lp[3];
1397	uint32_t wm_lp_spr[3];
1398	uint32_t wm_linetime[3];
1399	bool enable_fbc_wm;
1400	enum intel_ddb_partitioning partitioning;
1401};
1402
1403struct g4x_pipe_wm {
1404	uint16_t plane[I915_MAX_PLANES];
1405	uint16_t fbc;
1406};
1407
1408struct g4x_sr_wm {
1409	uint16_t plane;
1410	uint16_t cursor;
1411	uint16_t fbc;
1412};
1413
1414struct vlv_wm_ddl_values {
1415	uint8_t plane[I915_MAX_PLANES];
1416};
1417
1418struct vlv_wm_values {
1419	struct g4x_pipe_wm pipe[3];
1420	struct g4x_sr_wm sr;
1421	struct vlv_wm_ddl_values ddl[3];
1422	uint8_t level;
1423	bool cxsr;
1424};
1425
1426struct g4x_wm_values {
1427	struct g4x_pipe_wm pipe[2];
1428	struct g4x_sr_wm sr;
1429	struct g4x_sr_wm hpll;
1430	bool cxsr;
1431	bool hpll_en;
1432	bool fbc_en;
1433};
1434
1435struct skl_ddb_entry {
1436	uint16_t start, end;	/* in number of blocks, 'end' is exclusive */
1437};
1438
1439static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
1440{
1441	return entry->end - entry->start;
1442}
1443
1444static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
1445				       const struct skl_ddb_entry *e2)
1446{
1447	if (e1->start == e2->start && e1->end == e2->end)
1448		return true;
1449
1450	return false;
1451}
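
/*
 * Illustrative sketch, not part of the original header: two DDB entries
 * overlap when neither ends before the other starts ('end' is exclusive).
 */
static inline bool skl_ddb_entries_overlap_sketch(const struct skl_ddb_entry *a,
						  const struct skl_ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}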
1452
1453struct skl_ddb_allocation {
1454	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
1455	struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
1456};
1457
1458struct skl_wm_values {
1459	unsigned dirty_pipes;
1460	struct skl_ddb_allocation ddb;
1461};
1462
1463struct skl_wm_level {
1464	bool plane_en;
1465	uint16_t plane_res_b;
1466	uint8_t plane_res_l;
1467};
1468
1469/* Stores plane specific WM parameters */
1470struct skl_wm_params {
1471	bool x_tiled, y_tiled;
1472	bool rc_surface;
1473	uint32_t width;
1474	uint8_t cpp;
1475	uint32_t plane_pixel_rate;
1476	uint32_t y_min_scanlines;
1477	uint32_t plane_bytes_per_line;
1478	uint_fixed_16_16_t plane_blocks_per_line;
1479	uint_fixed_16_16_t y_tile_minimum;
1480	uint32_t linetime_us;
1481	uint32_t dbuf_block_size;
1482};
1483
1484/*
1485 * This struct helps tracking the state needed for runtime PM, which puts the
1486 * device in PCI D3 state. Notice that when this happens, nothing on the
1487 * graphics device works, even register access, so we don't get interrupts nor
1488 * anything else.
1489 *
1490 * Every piece of our code that needs to actually touch the hardware needs to
1491 * either call intel_runtime_pm_get or call intel_display_power_get with the
1492 * appropriate power domain.
1493 *
1494 * Our driver uses the autosuspend delay feature, which means we'll only really
1495 * suspend if we stay with zero refcount for a certain amount of time. The
1496 * default value is currently very conservative (see intel_runtime_pm_enable), but
1497 * it can be changed with the standard runtime PM files from sysfs.
1498 *
 1499 * The irqs_enabled variable becomes false exactly after we disable the IRQs and
 1500 * goes back to true exactly before we reenable the IRQs. We use this variable
1501 * to check if someone is trying to enable/disable IRQs while they're supposed
1502 * to be disabled. This shouldn't happen and we'll print some error messages in
1503 * case it happens.
1504 *
 1505 * For more, read Documentation/power/runtime_pm.txt.
1506 */
1507struct i915_runtime_pm {
1508	atomic_t wakeref_count;
1509	bool suspended;
1510	bool irqs_enabled;
1511};
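
/*
 * Illustrative sketch, not part of the original header: the wakeref_count
 * bookkeeping described above, with the actual PCI D3 transition delegated
 * to the runtime-PM core (autosuspend provides the delayed suspend).
 */
static inline void intel_runtime_pm_get_sketch(struct i915_runtime_pm *rpm,
					       struct device *kdev)
{
	pm_runtime_get_sync(kdev);	/* wakes the device if suspended */
	atomic_inc(&rpm->wakeref_count);
}

static inline void intel_runtime_pm_put_sketch(struct i915_runtime_pm *rpm,
					       struct device *kdev)
{
	atomic_dec(&rpm->wakeref_count);
	pm_runtime_mark_last_busy(kdev);
	pm_runtime_put_autosuspend(kdev);	/* may suspend after the delay */
}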
1512
1513enum intel_pipe_crc_source {
1514	INTEL_PIPE_CRC_SOURCE_NONE,
1515	INTEL_PIPE_CRC_SOURCE_PLANE1,
1516	INTEL_PIPE_CRC_SOURCE_PLANE2,
1517	INTEL_PIPE_CRC_SOURCE_PF,
1518	INTEL_PIPE_CRC_SOURCE_PIPE,
1519	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
1520	INTEL_PIPE_CRC_SOURCE_TV,
1521	INTEL_PIPE_CRC_SOURCE_DP_B,
1522	INTEL_PIPE_CRC_SOURCE_DP_C,
1523	INTEL_PIPE_CRC_SOURCE_DP_D,
1524	INTEL_PIPE_CRC_SOURCE_AUTO,
1525	INTEL_PIPE_CRC_SOURCE_MAX,
1526};
1527
1528struct intel_pipe_crc_entry {
1529	uint32_t frame;
1530	uint32_t crc[5];
1531};
1532
1533#define INTEL_PIPE_CRC_ENTRIES_NR	128
1534struct intel_pipe_crc {
1535	spinlock_t lock;
1536	bool opened;		/* exclusive access to the result file */
1537	struct intel_pipe_crc_entry *entries;
1538	enum intel_pipe_crc_source source;
1539	int head, tail;
1540	wait_queue_head_t wq;
1541	int skipped;
1542};
1543
1544struct i915_frontbuffer_tracking {
1545	spinlock_t lock;
1546
1547	/*
 1548	 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
1549	 * scheduled flips.
1550	 */
1551	unsigned busy_bits;
1552	unsigned flip_bits;
1553};
1554
1555struct i915_wa_reg {
1556	i915_reg_t addr;
1557	u32 value;
1558	/* bitmask representing WA bits */
1559	u32 mask;
1560};
1561
1562#define I915_MAX_WA_REGS 16
1563
1564struct i915_workarounds {
1565	struct i915_wa_reg reg[I915_MAX_WA_REGS];
1566	u32 count;
1567	u32 hw_whitelist_count[I915_NUM_ENGINES];
1568};
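
/*
 * Illustrative sketch, not part of the original header: recording a
 * workaround register write in the fixed-size table above. A failure here
 * means I915_MAX_WA_REGS needs to grow.
 */
static inline int i915_wa_add_sketch(struct i915_workarounds *wa,
				     i915_reg_t addr, u32 mask, u32 value)
{
	if (WARN_ON(wa->count >= I915_MAX_WA_REGS))
		return -ENOSPC;

	wa->reg[wa->count].addr = addr;
	wa->reg[wa->count].mask = mask;
	wa->reg[wa->count].value = value;
	wa->count++;

	return 0;
}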
1569
1570struct i915_virtual_gpu {
1571	bool active;
1572	u32 caps;
1573};
1574
1575/* used in computing the new watermarks state */
1576struct intel_wm_config {
1577	unsigned int num_pipes_active;
1578	bool sprites_enabled;
1579	bool sprites_scaled;
1580};
1581
1582struct i915_oa_format {
1583	u32 format;
1584	int size;
1585};
1586
1587struct i915_oa_reg {
1588	i915_reg_t addr;
1589	u32 value;
1590};
1591
1592struct i915_oa_config {
1593	char uuid[UUID_STRING_LEN + 1];
1594	int id;
1595
1596	const struct i915_oa_reg *mux_regs;
1597	u32 mux_regs_len;
1598	const struct i915_oa_reg *b_counter_regs;
1599	u32 b_counter_regs_len;
1600	const struct i915_oa_reg *flex_regs;
1601	u32 flex_regs_len;
1602
1603	struct attribute_group sysfs_metric;
1604	struct attribute *attrs[2];
1605	struct device_attribute sysfs_metric_id;
1606
1607	atomic_t ref_count;
1608};
1609
1610struct i915_perf_stream;
1611
1612/**
1613 * struct i915_perf_stream_ops - the OPs to support a specific stream type
1614 */
1615struct i915_perf_stream_ops {
1616	/**
1617	 * @enable: Enables the collection of HW samples, either in response to
1618	 * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened
1619	 * without `I915_PERF_FLAG_DISABLED`.
1620	 */
1621	void (*enable)(struct i915_perf_stream *stream);
1622
1623	/**
1624	 * @disable: Disables the collection of HW samples, either in response
1625	 * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying
1626	 * the stream.
1627	 */
1628	void (*disable)(struct i915_perf_stream *stream);
1629
1630	/**
1631	 * @poll_wait: Call poll_wait, passing a wait queue that will be woken
1632	 * once there is something ready to read() for the stream
1633	 */
1634	void (*poll_wait)(struct i915_perf_stream *stream,
1635			  struct file *file,
1636			  poll_table *wait);
1637
1638	/**
1639	 * @wait_unlocked: For handling a blocking read, wait until there is
 1640	 * something ready to read() for the stream. E.g. wait on the same
1641	 * wait queue that would be passed to poll_wait().
1642	 */
1643	int (*wait_unlocked)(struct i915_perf_stream *stream);
1644
1645	/**
1646	 * @read: Copy buffered metrics as records to userspace
 1647	 * **buf**: the userspace destination buffer
 1648	 * **count**: the number of bytes to copy, requested by userspace
 1649	 * **offset**: zero at the start of the read, updated as the read
 1650	 * proceeds; it represents how many bytes have been copied so far and
1651	 * the buffer offset for copying the next record.
1652	 *
1653	 * Copy as many buffered i915 perf samples and records for this stream
1654	 * to userspace as will fit in the given buffer.
1655	 *
 1656	 * Only write complete records; return -%ENOSPC if there isn't room
1657	 * for a complete record.
1658	 *
1659	 * Return any error condition that results in a short read such as
1660	 * -%ENOSPC or -%EFAULT, even though these may be squashed before
1661	 * returning to userspace.
1662	 */
1663	int (*read)(struct i915_perf_stream *stream,
1664		    char __user *buf,
1665		    size_t count,
1666		    size_t *offset);
1667
1668	/**
1669	 * @destroy: Cleanup any stream specific resources.
1670	 *
1671	 * The stream will always be disabled before this is called.
1672	 */
1673	void (*destroy)(struct i915_perf_stream *stream);
1674};
1675
1676/**
1677 * struct i915_perf_stream - state for a single open stream FD
1678 */
1679struct i915_perf_stream {
1680	/**
1681	 * @dev_priv: i915 drm device
1682	 */
1683	struct drm_i915_private *dev_priv;
1684
1685	/**
1686	 * @link: Links the stream into ``&drm_i915_private->streams``
1687	 */
1688	struct list_head link;
1689
1690	/**
1691	 * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
1692	 * properties given when opening a stream, representing the contents
1693	 * of a single sample as read() by userspace.
1694	 */
1695	u32 sample_flags;
1696
1697	/**
1698	 * @sample_size: Considering the configured contents of a sample
1699	 * combined with the required header size, this is the total size
1700	 * of a single sample record.
1701	 */
1702	int sample_size;
1703
1704	/**
1705	 * @ctx: %NULL if measuring system-wide across all contexts or a
1706	 * specific context that is being monitored.
1707	 */
1708	struct i915_gem_context *ctx;
1709
1710	/**
1711	 * @enabled: Whether the stream is currently enabled, considering
1712	 * whether the stream was opened in a disabled state and based
1713	 * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls.
1714	 */
1715	bool enabled;
1716
1717	/**
1718	 * @ops: The callbacks providing the implementation of this specific
1719	 * type of configured stream.
1720	 */
1721	const struct i915_perf_stream_ops *ops;
1722
1723	/**
1724	 * @oa_config: The OA configuration used by the stream.
1725	 */
1726	struct i915_oa_config *oa_config;
1727};
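
/*
 * Illustrative sketch, not part of the original header: the @read contract
 * documented in i915_perf_stream_ops above, seen from the caller's side;
 * *offset doubles as the running count of bytes copied, so a short read
 * due to -ENOSPC still reports the progress made.
 */
static inline ssize_t
i915_perf_stream_read_sketch(struct i915_perf_stream *stream,
			     char __user *buf, size_t count)
{
	size_t offset = 0;
	int ret = stream->ops->read(stream, buf, count, &offset);

	return offset ?: ret;
}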
1728
1729/**
1730 * struct i915_oa_ops - Gen specific implementation of an OA unit stream
1731 */
1732struct i915_oa_ops {
1733	/**
1734	 * @is_valid_b_counter_reg: Validates register's address for
1735	 * programming boolean counters for a particular platform.
1736	 */
1737	bool (*is_valid_b_counter_reg)(struct drm_i915_private *dev_priv,
1738				       u32 addr);
1739
1740	/**
1741	 * @is_valid_mux_reg: Validates register's address for programming mux
1742	 * for a particular platform.
1743	 */
1744	bool (*is_valid_mux_reg)(struct drm_i915_private *dev_priv, u32 addr);
1745
1746	/**
1747	 * @is_valid_flex_reg: Validates register's address for programming
1748	 * flex EU filtering for a particular platform.
1749	 */
1750	bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);
1751
1752	/**
1753	 * @init_oa_buffer: Resets the head and tail pointers of the
1754	 * circular buffer for periodic OA reports.
1755	 *
1756	 * Called when first opening a stream for OA metrics, but also may be
1757	 * called in response to an OA buffer overflow or other error
1758	 * condition.
1759	 *
1760	 * Note it may be necessary to clear the full OA buffer here as part of
 1761	 * maintaining the invariant that new reports must be written to
 1762	 * zeroed memory for us to be able to reliably detect if an expected
1763	 * report has not yet landed in memory.  (At least on Haswell the OA
1764	 * buffer tail pointer is not synchronized with reports being visible
1765	 * to the CPU)
1766	 */
1767	void (*init_oa_buffer)(struct drm_i915_private *dev_priv);
1768
1769	/**
1770	 * @enable_metric_set: Selects and applies any MUX configuration to set
1771	 * up the Boolean and Custom (B/C) counters that are part of the
1772	 * counter reports being sampled. May apply system constraints such as
1773	 * disabling EU clock gating as required.
1774	 */
1775	int (*enable_metric_set)(struct drm_i915_private *dev_priv,
1776				 const struct i915_oa_config *oa_config);
1777
1778	/**
1779	 * @disable_metric_set: Remove system constraints associated with using
1780	 * the OA unit.
1781	 */
1782	void (*disable_metric_set)(struct drm_i915_private *dev_priv);
1783
1784	/**
1785	 * @oa_enable: Enable periodic sampling
1786	 */
1787	void (*oa_enable)(struct drm_i915_private *dev_priv);
1788
1789	/**
1790	 * @oa_disable: Disable periodic sampling
1791	 */
1792	void (*oa_disable)(struct drm_i915_private *dev_priv);
1793
1794	/**
1795	 * @read: Copy data from the circular OA buffer into a given userspace
1796	 * buffer.
1797	 */
1798	int (*read)(struct i915_perf_stream *stream,
1799		    char __user *buf,
1800		    size_t count,
1801		    size_t *offset);
1802
1803	/**
1804	 * @oa_hw_tail_read: read the OA tail pointer register
1805	 *
1806	 * In particular this enables us to share all the fiddly code for
1807	 * handling the OA unit tail pointer race that affects multiple
1808	 * generations.
1809	 */
1810	u32 (*oa_hw_tail_read)(struct drm_i915_private *dev_priv);
1811};
1812
1813struct intel_cdclk_state {
1814	unsigned int cdclk, vco, ref, bypass;
1815	u8 voltage_level;
1816};
1817
1818struct drm_i915_private {
1819	struct drm_device drm;
1820
1821	struct kmem_cache *objects;
1822	struct kmem_cache *vmas;
1823	struct kmem_cache *luts;
1824	struct kmem_cache *requests;
1825	struct kmem_cache *dependencies;
1826	struct kmem_cache *priorities;
1827
1828	const struct intel_device_info info;
1829	struct intel_driver_caps caps;
1830
1831	/**
1832	 * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
1833	 * end of stolen which we can optionally use to create GEM objects
1834	 * backed by stolen memory. Note that stolen_usable_size tells us
1835	 * exactly how much of this we are actually allowed to use, given that
1836	 * some portion of it is in fact reserved for use by hardware functions.
1837	 */
1838	struct resource dsm;
1839	/**
 1840	 * Reserved portion of Data Stolen Memory
1841	 */
1842	struct resource dsm_reserved;
1843
1844	/*
1845	 * Stolen memory is segmented in hardware with different portions
 1846	 * off-limits to certain functions.
1847	 *
1848	 * The drm_mm is initialised to the total accessible range, as found
1849	 * from the PCI config. On Broadwell+, this is further restricted to
1850	 * avoid the first page! The upper end of stolen memory is reserved for
1851	 * hardware functions and similarly removed from the accessible range.
1852	 */
1853	resource_size_t stolen_usable_size;	/* Total size minus reserved ranges */
1854
1855	void __iomem *regs;
1856
1857	struct intel_uncore uncore;
1858
1859	struct i915_virtual_gpu vgpu;
1860
1861	struct intel_gvt *gvt;
1862
1863	struct intel_huc huc;
1864	struct intel_guc guc;
1865
1866	struct intel_csr csr;
1867
1868	struct intel_gmbus gmbus[GMBUS_NUM_PINS];
1869
1870	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
1871	 * controller on different i2c buses. */
1872	struct mutex gmbus_mutex;
1873
1874	/**
1875	 * Base address of the gmbus and gpio block.
1876	 */
1877	uint32_t gpio_mmio_base;
1878
1879	/* MMIO base address for MIPI regs */
1880	uint32_t mipi_mmio_base;
1881
1882	uint32_t psr_mmio_base;
1883
1884	uint32_t pps_mmio_base;
1885
1886	wait_queue_head_t gmbus_wait_queue;
1887
1888	struct pci_dev *bridge_dev;
1889	struct intel_engine_cs *engine[I915_NUM_ENGINES];
1890	/* Context used internally to idle the GPU and setup initial state */
1891	struct i915_gem_context *kernel_context;
1892	/* Context only to be used for injecting preemption commands */
1893	struct i915_gem_context *preempt_context;
1894	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
1895					    [MAX_ENGINE_INSTANCE + 1];
1896
1897	struct drm_dma_handle *status_page_dmah;
1898	struct resource mch_res;
1899
1900	/* protects the irq masks */
1901	spinlock_t irq_lock;
1902
1903	bool display_irqs_enabled;
1904
1905	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
1906	struct pm_qos_request pm_qos;
1907
1908	/* Sideband mailbox protection */
1909	struct mutex sb_lock;
1910
1911	/** Cached value of IMR to avoid reads in updating the bitfield */
1912	union {
1913		u32 irq_mask;
1914		u32 de_irq_mask[I915_MAX_PIPES];
1915	};
1916	u32 gt_irq_mask;
1917	u32 pm_imr;
1918	u32 pm_ier;
1919	u32 pm_rps_events;
1920	u32 pm_guc_events;
1921	u32 pipestat_irq_mask[I915_MAX_PIPES];
1922
1923	struct i915_hotplug hotplug;
1924	struct intel_fbc fbc;
1925	struct i915_drrs drrs;
1926	struct intel_opregion opregion;
1927	struct intel_vbt_data vbt;
1928
1929	bool preserve_bios_swizzle;
1930
1931	/* overlay */
1932	struct intel_overlay *overlay;
1933
1934	/* backlight registers and fields in struct intel_panel */
1935	struct mutex backlight_lock;
1936
1937	/* LVDS info */
1938	bool no_aux_handshake;
1939
1940	/* protects panel power sequencer state */
1941	struct mutex pps_mutex;
1942
1943	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
1944	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
1945
1946	unsigned int fsb_freq, mem_freq, is_ddr3;
1947	unsigned int skl_preferred_vco_freq;
1948	unsigned int max_cdclk_freq;
1949
1950	unsigned int max_dotclk_freq;
1951	unsigned int rawclk_freq;
1952	unsigned int hpll_freq;
1953	unsigned int fdi_pll_freq;
1954	unsigned int czclk_freq;
1955
1956	struct {
1957		/*
1958		 * The current logical cdclk state.
1959		 * See intel_atomic_state.cdclk.logical
1960		 *
 1961		 * For reading, holding any crtc lock is sufficient;
 1962		 * for writing, all of them must be held.
1963		 */
1964		struct intel_cdclk_state logical;
1965		/*
1966		 * The current actual cdclk state.
1967		 * See intel_atomic_state.cdclk.actual
1968		 */
1969		struct intel_cdclk_state actual;
1970		/* The current hardware cdclk state */
1971		struct intel_cdclk_state hw;
1972	} cdclk;
1973
1974	/**
1975	 * wq - Driver workqueue for GEM.
1976	 *
1977	 * NOTE: Work items scheduled here are not allowed to grab any modeset
1978	 * locks, for otherwise the flushing done in the pageflip code will
1979	 * result in deadlocks.
1980	 */
1981	struct workqueue_struct *wq;
1982
1983	/* ordered wq for modesets */
1984	struct workqueue_struct *modeset_wq;
1985
1986	/* Display functions */
1987	struct drm_i915_display_funcs display;
1988
1989	/* PCH chipset type */
1990	enum intel_pch pch_type;
1991	unsigned short pch_id;
1992
1993	unsigned long quirks;
1994
1995	enum modeset_restore modeset_restore;
1996	struct mutex modeset_restore_lock;
1997	struct drm_atomic_state *modeset_restore_state;
1998	struct drm_modeset_acquire_ctx reset_ctx;
1999
2000	struct list_head vm_list; /* Global list of all address spaces */
2001	struct i915_ggtt ggtt; /* VM representing the global address space */
2002
2003	struct i915_gem_mm mm;
2004	DECLARE_HASHTABLE(mm_structs, 7);
2005	struct mutex mm_lock;
2006
2007	struct intel_ppat ppat;
2008
2009	/* Kernel Modesetting */
2010
2011	struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
2012	struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
2013
2014#ifdef CONFIG_DEBUG_FS
2015	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
2016#endif
2017
2018	/* dpll and cdclk state is protected by connection_mutex */
2019	int num_shared_dpll;
2020	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
2021	const struct intel_dpll_mgr *dpll_mgr;
2022
2023	/*
2024	 * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
2025	 * Must be global rather than per dpll, because on some platforms
2026	 * plls share registers.
2027	 */
2028	struct mutex dpll_lock;
2029
2030	unsigned int active_crtcs;
2031	/* minimum acceptable cdclk for each pipe */
2032	int min_cdclk[I915_MAX_PIPES];
2033	/* minimum acceptable voltage level for each pipe */
2034	u8 min_voltage_level[I915_MAX_PIPES];
2035
2036	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
2037
2038	struct i915_workarounds workarounds;
2039
2040	struct i915_frontbuffer_tracking fb_tracking;
2041
2042	struct intel_atomic_helper {
2043		struct llist_head free_list;
2044		struct work_struct free_work;
2045	} atomic_helper;
2046
2047	u16 orig_clock;
2048
2049	bool mchbar_need_disable;
2050
2051	struct intel_l3_parity l3_parity;
2052
2053	/* Cannot be determined by PCIID. You must always read a register. */
2054	u32 edram_cap;
2055
2056	/*
2057	 * Protects RPS/RC6 register access and PCU communication.
2058	 * Must be taken after struct_mutex if nested. Note that
2059	 * this lock may be held for long periods of time when
2060	 * talking to hw - so only take it when talking to hw!
2061	 */
2062	struct mutex pcu_lock;
2063
2064	/* gen6+ GT PM state */
2065	struct intel_gen6_power_mgmt gt_pm;
2066
2067	/* ilk-only ips/rps state. Everything in here is protected by the global
2068	 * mchdev_lock in intel_pm.c */
2069	struct intel_ilk_power_mgmt ips;
2070
2071	struct i915_power_domains power_domains;
2072
2073	struct i915_psr psr;
2074
2075	struct i915_gpu_error gpu_error;
2076
2077	struct drm_i915_gem_object *vlv_pctx;
2078
 2079	/* fbdev registered on this device */
2080	struct intel_fbdev *fbdev;
2081	struct work_struct fbdev_suspend_work;
2082
2083	struct drm_property *broadcast_rgb_property;
2084	struct drm_property *force_audio_property;
2085
2086	/* hda/i915 audio component */
2087	struct i915_audio_component *audio_component;
2088	bool audio_component_registered;
 2089	/**
 2090	 * av_mutex - mutex for audio/video sync
 2091	 */
2093	struct mutex av_mutex;
2094
2095	struct {
2096		struct list_head list;
2097		struct llist_head free_list;
2098		struct work_struct free_work;
2099
2100		/* The hw wants to have a stable context identifier for the
2101		 * lifetime of the context (for OA, PASID, faults, etc).
2102		 * This is limited in execlists to 21 bits.
2103		 */
2104		struct ida hw_ida;
2105#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
2106#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
2107	} contexts;
2108
2109	u32 fdi_rx_config;
2110
2111	/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
2112	u32 chv_phy_control;
2113	/*
2114	 * Shadows for CHV DPLL_MD regs to keep the state
 2115	 * checker somewhat working in the presence of hardware
2116	 * crappiness (can't read out DPLL_MD for pipes B & C).
2117	 */
2118	u32 chv_dpll_md[I915_MAX_PIPES];
2119	u32 bxt_phy_grc;
2120
2121	u32 suspend_count;
2122	bool power_domains_suspended;
2123	struct i915_suspend_saved_registers regfile;
2124	struct vlv_s0ix_state vlv_s0ix_state;
2125
2126	enum {
2127		I915_SAGV_UNKNOWN = 0,
2128		I915_SAGV_DISABLED,
2129		I915_SAGV_ENABLED,
2130		I915_SAGV_NOT_CONTROLLED
2131	} sagv_status;
2132
2133	struct {
2134		/*
2135		 * Raw watermark latency values:
2136		 * in 0.1us units for WM0,
2137		 * in 0.5us units for WM1+.
2138		 */
2139		/* primary */
2140		uint16_t pri_latency[5];
2141		/* sprite */
2142		uint16_t spr_latency[5];
2143		/* cursor */
2144		uint16_t cur_latency[5];
2145		/*
2146		 * Raw watermark memory latency values
2147		 * for SKL for all 8 levels
2148		 * in 1us units.
2149		 */
2150		uint16_t skl_latency[8];
2151
2152		/* current hardware state */
2153		union {
2154			struct ilk_wm_values hw;
2155			struct skl_wm_values skl_hw;
2156			struct vlv_wm_values vlv;
2157			struct g4x_wm_values g4x;
2158		};
2159
2160		uint8_t max_level;
2161
2162		/*
2163		 * Should be held around atomic WM register writing; also
 2164		 * protects intel_crtc->wm.active and
2165		 * cstate->wm.need_postvbl_update.
2166		 */
2167		struct mutex wm_mutex;
2168
2169		/*
2170		 * Set during HW readout of watermarks/DDB.  Some platforms
2171		 * need to know when we're still using BIOS-provided values
2172		 * (which we don't fully trust).
2173		 */
2174		bool distrust_bios_wm;
2175	} wm;
2176
2177	struct i915_runtime_pm runtime_pm;
2178
2179	struct {
2180		bool initialized;
2181
2182		struct kobject *metrics_kobj;
2183		struct ctl_table_header *sysctl_header;
2184
2185		/*
2186		 * Lock associated with adding/modifying/removing OA configs
2187		 * in dev_priv->perf.metrics_idr.
2188		 */
2189		struct mutex metrics_lock;
2190
2191		/*
2192		 * List of dynamic configurations, you need to hold
2193		 * dev_priv->perf.metrics_lock to access it.
2194		 */
2195		struct idr metrics_idr;
2196
2197		/*
2198		 * Lock associated with anything below within this structure
2199		 * except exclusive_stream.
2200		 */
2201		struct mutex lock;
2202		struct list_head streams;
2203
2204		struct {
2205			/*
2206			 * The stream currently using the OA unit. If accessed
 2207			 * outside a syscall associated with its file
2208			 * descriptor, you need to hold
2209			 * dev_priv->drm.struct_mutex.
2210			 */
2211			struct i915_perf_stream *exclusive_stream;
2212
2213			u32 specific_ctx_id;
2214
2215			struct hrtimer poll_check_timer;
2216			wait_queue_head_t poll_wq;
2217			bool pollin;
2218
2219			/**
2220			 * For rate limiting any notifications of spurious
2221			 * invalid OA reports
2222			 */
2223			struct ratelimit_state spurious_report_rs;
2224
2225			bool periodic;
2226			int period_exponent;
2227
2228			struct i915_oa_config test_config;
2229
2230			struct {
2231				struct i915_vma *vma;
2232				u8 *vaddr;
2233				u32 last_ctx_id;
2234				int format;
2235				int format_size;
2236
2237				/**
2238				 * Locks reads and writes to all head/tail state
2239				 *
2240				 * Consider: the head and tail pointer state
2241				 * needs to be read consistently from a hrtimer
2242				 * callback (atomic context) and read() fop
2243				 * (user context) with tail pointer updates
2244				 * happening in atomic context and head updates
2245				 * in user context and the (unlikely)
2246				 * possibility of read() errors needing to
2247				 * reset all head/tail state.
2248				 *
2249				 * Note: Contention or performance aren't
2250				 * currently a significant concern here
2251				 * considering the relatively low frequency of
2252				 * hrtimer callbacks (5ms period) and that
2253				 * reads typically only happen in response to a
2254				 * hrtimer event and likely complete before the
2255				 * next callback.
2256				 *
2257				 * Note: This lock is not held *while* reading
2258				 * and copying data to userspace so the value
 2259				 * of head observed in hrtimer callbacks won't
2260				 * represent any partial consumption of data.
2261				 */
2262				spinlock_t ptr_lock;
2263
2264				/**
2265				 * One 'aging' tail pointer and one 'aged'
 2266				 * tail pointer ready to be used for reading.
2267				 *
2268				 * Initial values of 0xffffffff are invalid
2269				 * and imply that an update is required
2270				 * (and should be ignored by an attempted
2271				 * read)
2272				 */
2273				struct {
2274					u32 offset;
2275				} tails[2];
2276
2277				/**
2278				 * Index for the aged tail ready to read()
2279				 * data up to.
2280				 */
2281				unsigned int aged_tail_idx;
2282
2283				/**
2284				 * A monotonic timestamp for when the current
2285				 * aging tail pointer was read; used to
2286				 * determine when it is old enough to trust.
2287				 */
2288				u64 aging_timestamp;
2289
2290				/**
2291				 * Although we can always read back the head
2292				 * pointer register, we prefer to avoid
2293				 * trusting the HW state, just to avoid any
2294				 * risk that some hardware condition could
2295				 * somehow bump the head pointer unpredictably
2296				 * and cause us to forward the wrong OA buffer
2297				 * data to userspace.
2298				 */
2299				u32 head;
2300			} oa_buffer;
2301
2302			u32 gen7_latched_oastatus1;
2303			u32 ctx_oactxctrl_offset;
2304			u32 ctx_flexeu0_offset;
2305
2306			/**
2307			 * The RPT_ID/reason field for Gen8+ includes a bit
2308			 * to determine if the CTX ID in the report is valid
2309			 * but the specific bit differs between Gen 8 and 9
2310			 */
2311			u32 gen8_valid_ctx_bit;
2312
2313			struct i915_oa_ops ops;
2314			const struct i915_oa_format *oa_formats;
2315		} oa;
2316	} perf;
2317
2318	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
2319	struct {
2320		void (*resume)(struct drm_i915_private *);
2321		void (*cleanup_engine)(struct intel_engine_cs *engine);
2322
2323		struct list_head timelines;
2324		struct i915_gem_timeline global_timeline;
2325		u32 active_requests;
2326
2327		/**
2328		 * Is the GPU currently considered idle, or busy executing
2329		 * userspace requests? Whilst idle, we allow runtime power
2330		 * management to power down the hardware and display clocks.
2331		 * In order to reduce the effect on performance, there
2332		 * is a slight delay before we do so.
2333		 */
2334		bool awake;
2335
2336		/**
2337		 * The number of times we have woken up.
2338		 */
2339		unsigned int epoch;
2340#define I915_EPOCH_INVALID 0
2341
2342		/**
2343		 * We leave the user IRQ off as much as possible,
2344		 * but this means that requests will finish and never
2345		 * be retired once the system goes idle. Set a timer to
2346		 * fire periodically while the ring is running. When it
2347		 * fires, go retire requests.
2348		 */
2349		struct delayed_work retire_work;
2350
2351		/**
2352		 * When we detect an idle GPU, we want to turn on
2353		 * powersaving features. So once we see that there
2354		 * are no more requests outstanding and no more
2355		 * arrive within a small period of time, we fire
2356		 * off the idle_work.
2357		 */
2358		struct delayed_work idle_work;
2359
2360		ktime_t last_init_time;
2361	} gt;
2362
2363	/* perform PHY state sanity checks? */
2364	bool chv_phy_assert[2];
2365
2366	bool ipc_enabled;
2367
2368	/* Used to save the pipe-to-encoder mapping for audio */
2369	struct intel_encoder *av_enc_map[I915_MAX_PIPES];
2370
2371	/* necessary resource sharing with HDMI LPE audio driver. */
2372	struct {
2373		struct platform_device *platdev;
2374		int	irq;
2375	} lpe_audio;
2376
2377	struct i915_pmu pmu;
2378
2379	/*
2380	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
2381	 * will be rejected. Instead look for a better place.
2382	 */
2383};
2384
2385static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
2386{
2387	return container_of(dev, struct drm_i915_private, drm);
2388}
2389
2390static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
2391{
2392	return to_i915(dev_get_drvdata(kdev));
2393}
2394
2395static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
2396{
2397	return container_of(guc, struct drm_i915_private, guc);
2398}
2399
2400static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
2401{
2402	return container_of(huc, struct drm_i915_private, huc);
2403}
2404
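/*
 * Usage sketch (illustrative, not part of the driver): each helper above
 * uses container_of() to step from an embedded member back to the
 * enclosing drm_i915_private, so no back-pointer needs to be stored.
 * example_flush() is a hypothetical function name for this sketch.
 *
 *	static void example_flush(struct drm_device *dev)
 *	{
 *		struct drm_i915_private *dev_priv = to_i915(dev);
 *
 *		i915_gem_chipset_flush(dev_priv);
 *	}
 */
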
2405/* Simple iterator over all initialised engines */
2406#define for_each_engine(engine__, dev_priv__, id__) \
2407	for ((id__) = 0; \
2408	     (id__) < I915_NUM_ENGINES; \
2409	     (id__)++) \
2410		for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
2411
2412/* Iterator over subset of engines selected by mask */
2413#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
2414	for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask;	\
2415	     tmp__ ? (engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
2416
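/*
 * Iteration sketch (illustrative only): a caller walks every initialised
 * engine with an engine pointer and an enum intel_engine_id cursor;
 * log_engine() is a hypothetical helper.
 *
 *	struct intel_engine_cs *engine;
 *	enum intel_engine_id id;
 *
 *	for_each_engine(engine, dev_priv, id)
 *		log_engine(engine);
 *
 * for_each_engine_masked() follows the same shape but visits only the
 * engines whose bits are set in the supplied mask.
 */
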
2417enum hdmi_force_audio {
2418	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
2419	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
2420	HDMI_AUDIO_AUTO,		/* trust EDID */
2421	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
2422};
2423
2424#define I915_GTT_OFFSET_NONE ((u32)-1)
2425
2426/*
2427 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
2428 * considered to be the frontbuffer for the given plane interface-wise. This
2429 * doesn't mean that the hw necessarily already scans it out, but that any
2430 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
2431 *
2432 * We have one bit per pipe and per scanout plane type.
2433 */
2434#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
2435#define INTEL_FRONTBUFFER(pipe, plane_id) ({ \
2436	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \
2437	BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \
2438	BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \
2439})
2440#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
2441	BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
2442#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
2443	GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
2444		INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
2445
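/*
 * Worked example (follows directly from the definitions above): with 8
 * bits per pipe, INTEL_FRONTBUFFER(PIPE_B, PLANE_PRIMARY) is BIT(8),
 * INTEL_FRONTBUFFER_OVERLAY(PIPE_B) is BIT(15), and
 * INTEL_FRONTBUFFER_ALL_MASK(PIPE_B) is GENMASK(15, 8), i.e. 0xff00.
 */
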
2446/*
2447 * Optimised SGL iterator for GEM objects
2448 */
2449static __always_inline struct sgt_iter {
2450	struct scatterlist *sgp;
2451	union {
2452		unsigned long pfn;
2453		dma_addr_t dma;
2454	};
2455	unsigned int curr;
2456	unsigned int max;
2457} __sgt_iter(struct scatterlist *sgl, bool dma) {
2458	struct sgt_iter s = { .sgp = sgl };
2459
2460	if (s.sgp) {
2461		s.max = s.curr = s.sgp->offset;
2462		s.max += s.sgp->length;
2463		if (dma)
2464			s.dma = sg_dma_address(s.sgp);
2465		else
2466			s.pfn = page_to_pfn(sg_page(s.sgp));
2467	}
2468
2469	return s;
2470}
2471
2472static inline struct scatterlist *____sg_next(struct scatterlist *sg)
2473{
2474	++sg;
2475	if (unlikely(sg_is_chain(sg)))
2476		sg = sg_chain_ptr(sg);
2477	return sg;
2478}
2479
2480/**
2481 * __sg_next - return the next scatterlist entry in a list
2482 * @sg:		The current sg entry
2483 *
2484 * Description:
2485 *   If the entry is the last, return NULL; otherwise, step to the next
2486 *   element in the array (@sg + 1). If that's a chain pointer, follow it;
2487 *   otherwise just return the pointer to the current element.
2488 **/
2489static inline struct scatterlist *__sg_next(struct scatterlist *sg)
2490{
2491#ifdef CONFIG_DEBUG_SG
2492	BUG_ON(sg->sg_magic != SG_MAGIC);
2493#endif
2494	return sg_is_last(sg) ? NULL : ____sg_next(sg);
2495}
2496
2497/**
2498 * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
2499 * @__dmap:	DMA address (output)
2500 * @__iter:	'struct sgt_iter' (iterator state, internal)
2501 * @__sgt:	sg_table to iterate over (input)
2502 */
2503#define for_each_sgt_dma(__dmap, __iter, __sgt)				\
2504	for ((__iter) = __sgt_iter((__sgt)->sgl, true);			\
2505	     ((__dmap) = (__iter).dma + (__iter).curr);			\
2506	     (((__iter).curr += PAGE_SIZE) >= (__iter).max) ?		\
2507	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
2508
2509/**
2510 * for_each_sgt_page - iterate over the pages of the given sg_table
2511 * @__pp:	page pointer (output)
2512 * @__iter:	'struct sgt_iter' (iterator state, internal)
2513 * @__sgt:	sg_table to iterate over (input)
2514 */
2515#define for_each_sgt_page(__pp, __iter, __sgt)				\
2516	for ((__iter) = __sgt_iter((__sgt)->sgl, false);		\
2517	     ((__pp) = (__iter).pfn == 0 ? NULL :			\
2518	      pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
2519	     (((__iter).curr += PAGE_SIZE) >= (__iter).max) ?		\
2520	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
2521
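/*
 * Usage sketch (illustrative only): flushing every backing page of an
 * object's sg_table could be written as below; do_something_with() is a
 * hypothetical helper and 'pages' a struct sg_table pointer.
 *
 *	struct sgt_iter iter;
 *	struct page *page;
 *
 *	for_each_sgt_page(page, iter, pages)
 *		do_something_with(page);
 *
 * for_each_sgt_dma() has the same shape with a dma_addr_t cursor.
 */
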
2522static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
2523{
2524	unsigned int page_sizes;
2525
2526	page_sizes = 0;
2527	while (sg) {
2528		GEM_BUG_ON(sg->offset);
2529		GEM_BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE));
2530		page_sizes |= sg->length;
2531		sg = __sg_next(sg);
2532	}
2533
2534	return page_sizes;
2535}
2536
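/*
 * Worked example (follows from the function above): OR-ing the segment
 * lengths collapses the list into a bitmask of the page sizes present.
 * Two 64K segments and one 4K segment give 0x10000 | 0x10000 | 0x1000 =
 * 0x11000: the result records which sizes occur, not how many of each.
 */
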
2537static inline unsigned int i915_sg_segment_size(void)
2538{
2539	unsigned int size = swiotlb_max_segment();
2540
2541	if (size == 0)
2542		return SCATTERLIST_MAX_SEGMENT;
2543
2544	size = rounddown(size, PAGE_SIZE);
2545	/* swiotlb_max_segment() can return 1 byte when it means one page. */
2546	if (size < PAGE_SIZE)
2547		size = PAGE_SIZE;
2548
2549	return size;
2550}
2551
2552static inline const struct intel_device_info *
2553intel_info(const struct drm_i915_private *dev_priv)
2554{
2555	return &dev_priv->info;
2556}
2557
2558#define INTEL_INFO(dev_priv)	intel_info((dev_priv))
2559
2560#define INTEL_GEN(dev_priv)	((dev_priv)->info.gen)
2561#define INTEL_DEVID(dev_priv)	((dev_priv)->info.device_id)
2562
2563#define REVID_FOREVER		0xff
2564#define INTEL_REVID(dev_priv)	((dev_priv)->drm.pdev->revision)
2565
2566#define GEN_FOREVER (0)
2567
2568#define INTEL_GEN_MASK(s, e) ( \
2569	BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
2570	BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
2571	GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \
2572		(s) != GEN_FOREVER ? (s) - 1 : 0) \
2573)
2574
2575/*
2576 * Returns true if Gen is in inclusive range [Start, End].
2577 *
2578 * Use GEN_FOREVER for an unbounded start and/or end.
2579 */
2580#define IS_GEN(dev_priv, s, e) \
2581	(!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))
2582
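/*
 * Examples (assuming the definitions above): IS_GEN(dev_priv, 8, 9) is
 * true only on gen8 and gen9, while IS_GEN(dev_priv, 9, GEN_FOREVER)
 * covers gen9 and everything newer; either way the check compiles down
 * to a single mask test against info.gen_mask.
 */
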
2583/*
2584 * Return true if revision is in range [since,until] inclusive.
2585 *
2586 * Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
2587 */
2588#define IS_REVID(p, since, until) \
2589	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
2590
2591#define IS_PLATFORM(dev_priv, p) ((dev_priv)->info.platform_mask & BIT(p))
2592
2593#define IS_I830(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I830)
2594#define IS_I845G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I845G)
2595#define IS_I85X(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I85X)
2596#define IS_I865G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I865G)
2597#define IS_I915G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915G)
2598#define IS_I915GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915GM)
2599#define IS_I945G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945G)
2600#define IS_I945GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945GM)
2601#define IS_I965G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965G)
2602#define IS_I965GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965GM)
2603#define IS_G45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G45)
2604#define IS_GM45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GM45)
2605#define IS_G4X(dev_priv)	(IS_G45(dev_priv) || IS_GM45(dev_priv))
2606#define IS_PINEVIEW_G(dev_priv)	(INTEL_DEVID(dev_priv) == 0xa001)
2607#define IS_PINEVIEW_M(dev_priv)	(INTEL_DEVID(dev_priv) == 0xa011)
2608#define IS_PINEVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
2609#define IS_G33(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G33)
2610#define IS_IRONLAKE_M(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0046)
2611#define IS_IVYBRIDGE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
2612#define IS_IVB_GT1(dev_priv)	(IS_IVYBRIDGE(dev_priv) && \
2613				 (dev_priv)->info.gt == 1)
2614#define IS_VALLEYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
2615#define IS_CHERRYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
2616#define IS_HASWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_HASWELL)
2617#define IS_BROADWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROADWELL)
2618#define IS_SKYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
2619#define IS_BROXTON(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROXTON)
2620#define IS_KABYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
2621#define IS_GEMINILAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
2622#define IS_COFFEELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
2623#define IS_CANNONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
2624#define IS_ICELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ICELAKE)
2625#define IS_MOBILE(dev_priv)	((dev_priv)->info.is_mobile)
2626#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
2627				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
2628#define IS_BDW_ULT(dev_priv)	(IS_BROADWELL(dev_priv) && \
2629				 ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 ||	\
2630				 (INTEL_DEVID(dev_priv) & 0xf) == 0xb ||	\
2631				 (INTEL_DEVID(dev_priv) & 0xf) == 0xe))
2632/* ULX machines are also considered ULT. */
2633#define IS_BDW_ULX(dev_priv)	(IS_BROADWELL(dev_priv) && \
2634				 (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
2635#define IS_BDW_GT3(dev_priv)	(IS_BROADWELL(dev_priv) && \
2636				 (dev_priv)->info.gt == 3)
2637#define IS_HSW_ULT(dev_priv)	(IS_HASWELL(dev_priv) && \
2638				 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
2639#define IS_HSW_GT3(dev_priv)	(IS_HASWELL(dev_priv) && \
2640				 (dev_priv)->info.gt == 3)
2641/* ULX machines are also considered ULT. */
2642#define IS_HSW_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0A0E || \
2643				 INTEL_DEVID(dev_priv) == 0x0A1E)
2644#define IS_SKL_ULT(dev_priv)	(INTEL_DEVID(dev_priv) == 0x1906 || \
2645				 INTEL_DEVID(dev_priv) == 0x1913 || \
2646				 INTEL_DEVID(dev_priv) == 0x1916 || \
2647				 INTEL_DEVID(dev_priv) == 0x1921 || \
2648				 INTEL_DEVID(dev_priv) == 0x1926)
2649#define IS_SKL_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x190E || \
2650				 INTEL_DEVID(dev_priv) == 0x1915 || \
2651				 INTEL_DEVID(dev_priv) == 0x191E)
2652#define IS_KBL_ULT(dev_priv)	(INTEL_DEVID(dev_priv) == 0x5906 || \
2653				 INTEL_DEVID(dev_priv) == 0x5913 || \
2654				 INTEL_DEVID(dev_priv) == 0x5916 || \
2655				 INTEL_DEVID(dev_priv) == 0x5921 || \
2656				 INTEL_DEVID(dev_priv) == 0x5926)
2657#define IS_KBL_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x590E || \
2658				 INTEL_DEVID(dev_priv) == 0x5915 || \
2659				 INTEL_DEVID(dev_priv) == 0x591E)
2660#define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
2661				 (dev_priv)->info.gt == 2)
2662#define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
2663				 (dev_priv)->info.gt == 3)
2664#define IS_SKL_GT4(dev_priv)	(IS_SKYLAKE(dev_priv) && \
2665				 (dev_priv)->info.gt == 4)
2666#define IS_KBL_GT2(dev_priv)	(IS_KABYLAKE(dev_priv) && \
2667				 (dev_priv)->info.gt == 2)
2668#define IS_KBL_GT3(dev_priv)	(IS_KABYLAKE(dev_priv) && \
2669				 (dev_priv)->info.gt == 3)
2670#define IS_CFL_ULT(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
2671				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
2672#define IS_CFL_GT2(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
2673				 (dev_priv)->info.gt == 2)
2674#define IS_CFL_GT3(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
2675				 (dev_priv)->info.gt == 3)
2676#define IS_CNL_WITH_PORT_F(dev_priv)   (IS_CANNONLAKE(dev_priv) && \
2677					(INTEL_DEVID(dev_priv) & 0x0004) == 0x0004)
2678
2679#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
2680
2681#define SKL_REVID_A0		0x0
2682#define SKL_REVID_B0		0x1
2683#define SKL_REVID_C0		0x2
2684#define SKL_REVID_D0		0x3
2685#define SKL_REVID_E0		0x4
2686#define SKL_REVID_F0		0x5
2687#define SKL_REVID_G0		0x6
2688#define SKL_REVID_H0		0x7
2689
2690#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
2691
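/*
 * Usage sketch (illustrative only, mirroring how stepping-specific
 * workarounds are typically gated); apply_early_wa() is a hypothetical
 * helper name:
 *
 *	if (IS_SKL_REVID(dev_priv, SKL_REVID_A0, SKL_REVID_D0))
 *		apply_early_wa(dev_priv);
 */
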
2692#define BXT_REVID_A0		0x0
2693#define BXT_REVID_A1		0x1
2694#define BXT_REVID_B0		0x3
2695#define BXT_REVID_B_LAST	0x8
2696#define BXT_REVID_C0		0x9
2697
2698#define IS_BXT_REVID(dev_priv, since, until) \
2699	(IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
2700
2701#define KBL_REVID_A0		0x0
2702#define KBL_REVID_B0		0x1
2703#define KBL_REVID_C0		0x2
2704#define KBL_REVID_D0		0x3
2705#define KBL_REVID_E0		0x4
2706
2707#define IS_KBL_REVID(dev_priv, since, until) \
2708	(IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))
2709
2710#define GLK_REVID_A0		0x0
2711#define GLK_REVID_A1		0x1
2712
2713#define IS_GLK_REVID(dev_priv, since, until) \
2714	(IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))
2715
2716#define CNL_REVID_A0		0x0
2717#define CNL_REVID_B0		0x1
2718#define CNL_REVID_C0		0x2
2719
2720#define IS_CNL_REVID(p, since, until) \
2721	(IS_CANNONLAKE(p) && IS_REVID(p, since, until))
2722
2723/*
2724 * The genX designation typically refers to the render engine, so render
2725 * capability related checks should use IS_GEN, while display and other checks
2726 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
2727 * chips, etc.).
2728 */
2729#define IS_GEN2(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(1)))
2730#define IS_GEN3(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(2)))
2731#define IS_GEN4(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(3)))
2732#define IS_GEN5(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(4)))
2733#define IS_GEN6(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(5)))
2734#define IS_GEN7(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(6)))
2735#define IS_GEN8(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(7)))
2736#define IS_GEN9(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(8)))
2737#define IS_GEN10(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(9)))
2738#define IS_GEN11(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(10)))
2739
2740#define IS_LP(dev_priv)	(INTEL_INFO(dev_priv)->is_lp)
2741#define IS_GEN9_LP(dev_priv)	(IS_GEN9(dev_priv) && IS_LP(dev_priv))
2742#define IS_GEN9_BC(dev_priv)	(IS_GEN9(dev_priv) && !IS_LP(dev_priv))
2743
2744#define ENGINE_MASK(id)	BIT(id)
2745#define RENDER_RING	ENGINE_MASK(RCS)
2746#define BSD_RING	ENGINE_MASK(VCS)
2747#define BLT_RING	ENGINE_MASK(BCS)
2748#define VEBOX_RING	ENGINE_MASK(VECS)
2749#define BSD2_RING	ENGINE_MASK(VCS2)
2750#define BSD3_RING	ENGINE_MASK(VCS3)
2751#define BSD4_RING	ENGINE_MASK(VCS4)
2752#define VEBOX2_RING	ENGINE_MASK(VECS2)
2753#define ALL_ENGINES	(~0)
2754
2755#define HAS_ENGINE(dev_priv, id) \
2756	(!!((dev_priv)->info.ring_mask & ENGINE_MASK(id)))
2757
2758#define HAS_BSD(dev_priv)	HAS_ENGINE(dev_priv, VCS)
2759#define HAS_BSD2(dev_priv)	HAS_ENGINE(dev_priv, VCS2)
2760#define HAS_BLT(dev_priv)	HAS_ENGINE(dev_priv, BCS)
2761#define HAS_VEBOX(dev_priv)	HAS_ENGINE(dev_priv, VECS)
2762
2763#define HAS_LEGACY_SEMAPHORES(dev_priv) IS_GEN7(dev_priv)
2764
2765#define HAS_LLC(dev_priv)	((dev_priv)->info.has_llc)
2766#define HAS_SNOOP(dev_priv)	((dev_priv)->info.has_snoop)
2767#define HAS_EDRAM(dev_priv)	(!!((dev_priv)->edram_cap & EDRAM_ENABLED))
2768#define HAS_WT(dev_priv)	((IS_HASWELL(dev_priv) || \
2769				 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
2770
2771#define HWS_NEEDS_PHYSICAL(dev_priv)	((dev_priv)->info.hws_needs_physical)
2772
2773#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
2774		((dev_priv)->info.has_logical_ring_contexts)
2775#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
2776		((dev_priv)->info.has_logical_ring_elsq)
2777#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
2778		((dev_priv)->info.has_logical_ring_preemption)
2779
2780#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
2781
2782#define USES_PPGTT(dev_priv)		(i915_modparams.enable_ppgtt)
2783#define USES_FULL_PPGTT(dev_priv)	(i915_modparams.enable_ppgtt >= 2)
2784#define USES_FULL_48BIT_PPGTT(dev_priv)	(i915_modparams.enable_ppgtt == 3)
2785#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
2786	GEM_BUG_ON((sizes) == 0); \
2787	((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
2788})
2789
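/*
 * Example (assuming the macro above): HAS_PAGE_SIZES() answers whether
 * *all* of the requested sizes are supported, so passing several size
 * bits at once only returns true when every one of them is present in
 * info.page_sizes.
 */
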
2790#define HAS_OVERLAY(dev_priv)		 ((dev_priv)->info.has_overlay)
2791#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
2792		((dev_priv)->info.overlay_needs_physical)
2793
2794/* Early gen2 have a totally busted CS tlb and require pinned batches. */
2795#define HAS_BROKEN_CS_TLB(dev_priv)	(IS_I830(dev_priv) || IS_I845G(dev_priv))
2796
2797/* WaRsDisableCoarsePowerGating:skl,cnl */
2798#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
2799	(IS_CANNONLAKE(dev_priv) || \
2800	 IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
2801
2802/*
2803 * DP AUX and GMBUS IRQs on gen4 seem able to generate legacy interrupts
2804 * even when in MSI mode. This results in spurious interrupt warnings if the
2805 * legacy irq no. is shared with another device. The kernel then disables that
2806 * interrupt source and so prevents the other device from working properly.
2807 *
2808 * Since we don't enable MSI anymore on gen4, we can always use GMBUS/AUX
2809 * interrupts.
2810 */
2811#define HAS_AUX_IRQ(dev_priv)   true
2812#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
2813
2814/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
2815 * rows, which changed the alignment requirements and fence programming.
2816 */
2817#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
2818					 !(IS_I915G(dev_priv) || \
2819					 IS_I915GM(dev_priv)))
2820#define SUPPORTS_TV(dev_priv)		((dev_priv)->info.supports_tv)
2821#define I915_HAS_HOTPLUG(dev_priv)	((dev_priv)->info.has_hotplug)
2822
2823#define HAS_FW_BLC(dev_priv) 	(INTEL_GEN(dev_priv) > 2)
2824#define HAS_FBC(dev_priv)	((dev_priv)->info.has_fbc)
2825#define HAS_CUR_FBC(dev_priv)	(!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7)
2826
2827#define HAS_IPS(dev_priv)	(IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
2828
2829#define HAS_DP_MST(dev_priv)	((dev_priv)->info.has_dp_mst)
2830
2831#define HAS_DDI(dev_priv)		 ((dev_priv)->info.has_ddi)
2832#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
2833#define HAS_PSR(dev_priv)		 ((dev_priv)->info.has_psr)
2834
2835#define HAS_RC6(dev_priv)		 ((dev_priv)->info.has_rc6)
2836#define HAS_RC6p(dev_priv)		 ((dev_priv)->info.has_rc6p)
2837#define HAS_RC6pp(dev_priv)		 (false) /* HW was never validated */
2838
2839#define HAS_CSR(dev_priv)	((dev_priv)->info.has_csr)
2840
2841#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
2842#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
2843
2844#define HAS_IPC(dev_priv)		 ((dev_priv)->info.has_ipc)
2845
2846/*
2847 * For now, anything with a GuC requires uCode loading, and then supports
2848 * command submission once loaded. But these are logically independent
2849 * properties, so we have separate macros to test them.
2850 */
2851#define HAS_GUC(dev_priv)	((dev_priv)->info.has_guc)
2852#define HAS_GUC_CT(dev_priv)	((dev_priv)->info.has_guc_ct)
2853#define HAS_GUC_UCODE(dev_priv)	(HAS_GUC(dev_priv))
2854#define HAS_GUC_SCHED(dev_priv)	(HAS_GUC(dev_priv))
2855
2856/* For now, anything with a GuC also has an HuC */
2857#define HAS_HUC(dev_priv)	(HAS_GUC(dev_priv))
2858#define HAS_HUC_UCODE(dev_priv)	(HAS_GUC(dev_priv))
2859
2860/* Having a GuC is not the same as using a GuC */
2861#define USES_GUC(dev_priv)		intel_uc_is_using_guc()
2862#define USES_GUC_SUBMISSION(dev_priv)	intel_uc_is_using_guc_submission()
2863#define USES_HUC(dev_priv)		intel_uc_is_using_huc()
2864
2865#define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer)
2866
2867#define HAS_POOLED_EU(dev_priv)	((dev_priv)->info.has_pooled_eu)
2868
2869#define INTEL_PCH_DEVICE_ID_MASK		0xff80
2870#define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
2871#define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
2872#define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
2873#define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
2874#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00
2875#define INTEL_PCH_WPT_DEVICE_ID_TYPE		0x8c80
2876#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE		0x9c80
2877#define INTEL_PCH_SPT_DEVICE_ID_TYPE		0xA100
2878#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE		0x9D00
2879#define INTEL_PCH_KBP_DEVICE_ID_TYPE		0xA280
2880#define INTEL_PCH_CNP_DEVICE_ID_TYPE		0xA300
2881#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE		0x9D80
2882#define INTEL_PCH_ICP_DEVICE_ID_TYPE		0x3480
2883#define INTEL_PCH_P2X_DEVICE_ID_TYPE		0x7100
2884#define INTEL_PCH_P3X_DEVICE_ID_TYPE		0x7000
2885#define INTEL_PCH_QEMU_DEVICE_ID_TYPE		0x2900 /* qemu q35 has 2918 */
2886
2887#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
2888#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
2889#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
2890#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
2891#define HAS_PCH_CNP_LP(dev_priv) \
2892	(INTEL_PCH_ID(dev_priv) == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE)
2893#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
2894#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
2895#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
2896#define HAS_PCH_LPT_LP(dev_priv) \
2897	(INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
2898	 INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
2899#define HAS_PCH_LPT_H(dev_priv) \
2900	(INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
2901	 INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_DEVICE_ID_TYPE)
2902#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
2903#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
2904#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
2905#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
2906
2907#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
2908
2909#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
2910
2911/* DPF == dynamic parity feature */
2912#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
2913#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
2914				 2 : HAS_L3_DPF(dev_priv))
2915
2916#define GT_FREQUENCY_MULTIPLIER 50
2917#define GEN9_FREQ_SCALER 3
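/*
 * Conversion sketch (matching how these constants are used by the
 * frequency helpers elsewhere in the driver): hardware RPS values are in
 * units of 50 MHz, so MHz = val * GT_FREQUENCY_MULTIPLIER; on gen9 the
 * unit shrinks to 50/3 MHz and the product is divided by
 * GEN9_FREQ_SCALER.
 */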
2918
2919#include "i915_trace.h"
2920
2921static inline bool intel_vtd_active(void)
2922{
2923#ifdef CONFIG_INTEL_IOMMU
2924	if (intel_iommu_gfx_mapped)
2925		return true;
2926#endif
2927	return false;
2928}
2929
2930static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
2931{
2932	return INTEL_GEN(dev_priv) >= 6 && intel_vtd_active();
2933}
2934
2935static inline bool
2936intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
2937{
2938	return IS_BROXTON(dev_priv) && intel_vtd_active();
2939}
2940
2941int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
2942				int enable_ppgtt);
2943
2944/* i915_drv.c */
2945void __printf(3, 4)
2946__i915_printk(struct drm_i915_private *dev_priv, const char *level,
2947	      const char *fmt, ...);
2948
2949#define i915_report_error(dev_priv, fmt, ...)				   \
2950	__i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
2951
2952#ifdef CONFIG_COMPAT
2953extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
2954			      unsigned long arg);
2955#else
2956#define i915_compat_ioctl NULL
2957#endif
2958extern const struct dev_pm_ops i915_pm_ops;
2959
2960extern int i915_driver_load(struct pci_dev *pdev,
2961			    const struct pci_device_id *ent);
2962extern void i915_driver_unload(struct drm_device *dev);
2963extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
2964extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
2965
2966#define I915_RESET_QUIET BIT(0)
2967extern void i915_reset(struct drm_i915_private *i915, unsigned int flags);
2968extern int i915_reset_engine(struct intel_engine_cs *engine,
2969			     unsigned int flags);
2970
2971extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv);
2972extern int intel_reset_guc(struct drm_i915_private *dev_priv);
2973extern int intel_guc_reset_engine(struct intel_guc *guc,
2974				  struct intel_engine_cs *engine);
2975extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
2976extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
2977extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
2978extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
2979extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
2980extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
2981int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
2982
2983int intel_engines_init_mmio(struct drm_i915_private *dev_priv);
2984int intel_engines_init(struct drm_i915_private *dev_priv);
2985
2986/* intel_hotplug.c */
2987void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
2988			   u32 pin_mask, u32 long_mask);
2989void intel_hpd_init(struct drm_i915_private *dev_priv);
2990void intel_hpd_init_work(struct drm_i915_private *dev_priv);
2991void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
2992enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv,
2993				enum hpd_pin pin);
2994enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
2995				   enum port port);
2996bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
2997void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
2998
2999/* i915_irq.c */
3000static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
3001{
3002	unsigned long delay;
3003
3004	if (unlikely(!i915_modparams.enable_hangcheck))
3005		return;
3006
3007	/* Don't continually defer the hangcheck so that it is always run at
3008	 * least once after work has been scheduled on any ring. Otherwise,
3009	 * we will ignore a hung ring if a second ring is kept busy.
3010	 */
3011
3012	delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
3013	queue_delayed_work(system_long_wq,
3014			   &dev_priv->gpu_error.hangcheck_work, delay);
3015}
3016
3017__printf(3, 4)
3018void i915_handle_error(struct drm_i915_private *dev_priv,
3019		       u32 engine_mask,
3020		       const char *fmt, ...);
3021
3022extern void intel_irq_init(struct drm_i915_private *dev_priv);
3023extern void intel_irq_fini(struct drm_i915_private *dev_priv);
3024int intel_irq_install(struct drm_i915_private *dev_priv);
3025void intel_irq_uninstall(struct drm_i915_private *dev_priv);
3026
3027static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
3028{
3029	return dev_priv->gvt;
3030}
3031
3032static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
3033{
3034	return dev_priv->vgpu.active;
3035}
3036
3037u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
3038			      enum pipe pipe);
3039void
3040i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
3041		     u32 status_mask);
3042
3043void
3044i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
3045		      u32 status_mask);
3046
3047void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
3048void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
3049void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
3050				   uint32_t mask,
3051				   uint32_t bits);
3052void ilk_update_display_irq(struct drm_i915_private *dev_priv,
3053			    uint32_t interrupt_mask,
3054			    uint32_t enabled_irq_mask);
3055static inline void
3056ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
3057{
3058	ilk_update_display_irq(dev_priv, bits, bits);
3059}
3060static inline void
3061ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
3062{
3063	ilk_update_display_irq(dev_priv, bits, 0);
3064}
3065void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
3066			 enum pipe pipe,
3067			 uint32_t interrupt_mask,
3068			 uint32_t enabled_irq_mask);
3069static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
3070				       enum pipe pipe, uint32_t bits)
3071{
3072	bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
3073}
3074static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
3075					enum pipe pipe, uint32_t bits)
3076{
3077	bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
3078}
3079void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
3080				  uint32_t interrupt_mask,
3081				  uint32_t enabled_irq_mask);
3082static inline void
3083ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
3084{
3085	ibx_display_interrupt_update(dev_priv, bits, bits);
3086}
3087static inline void
3088ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
3089{
3090	ibx_display_interrupt_update(dev_priv, bits, 0);
3091}
3092
3093/* i915_gem.c */
3094int i915_gem_create_ioctl(struct drm_device *dev, void *data,
3095			  struct drm_file *file_priv);
3096int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
3097			 struct drm_file *file_priv);
3098int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
3099			  struct drm_file *file_priv);
3100int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
3101			struct drm_file *file_priv);
3102int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
3103			struct drm_file *file_priv);
3104int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
3105			      struct drm_file *file_priv);
3106int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
3107			     struct drm_file *file_priv);
3108int i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
3109			      struct drm_file *file_priv);
3110int i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
3111			       struct drm_file *file_priv);
3112int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3113			struct drm_file *file_priv);
3114int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3115			       struct drm_file *file);
3116int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3117			       struct drm_file *file);
3118int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3119			    struct drm_file *file_priv);
3120int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3121			   struct drm_file *file_priv);
3122int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
3123			      struct drm_file *file_priv);
3124int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
3125			      struct drm_file *file_priv);
3126int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
3127void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
3128int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
3129			   struct drm_file *file);
3130int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
3131				struct drm_file *file_priv);
3132int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
3133			struct drm_file *file_priv);
3134void i915_gem_sanitize(struct drm_i915_private *i915);
3135int i915_gem_load_init(struct drm_i915_private *dev_priv);
3136void i915_gem_load_cleanup(struct drm_i915_private *dev_priv);
3137void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
3138int i915_gem_freeze(struct drm_i915_private *dev_priv);
3139int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
3140
3141void *i915_gem_object_alloc(struct drm_i915_private *dev_priv);
3142void i915_gem_object_free(struct drm_i915_gem_object *obj);
3143void i915_gem_object_init(struct drm_i915_gem_object *obj,
3144			 const struct drm_i915_gem_object_ops *ops);
3145struct drm_i915_gem_object *
3146i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size);
3147struct drm_i915_gem_object *
3148i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
3149				 const void *data, size_t size);
3150void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
3151void i915_gem_free_object(struct drm_gem_object *obj);
3152
3153static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
3154{
3155	if (!atomic_read(&i915->mm.free_count))
3156		return;
3157
3158	/* A single pass should suffice to release all the freed objects (along
3159	 * most call paths), but be a little more paranoid in that freeing
3160	 * the objects does take a small amount of time, during which the RCU
3161	 * callbacks could have added new objects to the freed list and
3162	 * armed the work again.
3163	 */
3164	do {
3165		rcu_barrier();
3166	} while (flush_work(&i915->mm.free_work));
3167}
3168
3169static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
3170{
3171	/*
3172	 * Similar to the objects above (see i915_gem_drain_freed_objects()),
3173	 * in general we have workers that are armed by RCU and then rearm
3174	 * themselves in their callbacks. To be paranoid, we need to
3175	 * drain the workqueue a second time after waiting for the RCU
3176	 * grace period so that we catch work queued via RCU from the first
3177	 * pass. As neither drain_workqueue() nor flush_workqueue() report
3178	 * a result, we assume that no more than two passes are required
3179	 * to catch all recursive RCU-delayed work.
3180	 *
3181	 */
3182	int pass = 2;
3183	do {
3184		rcu_barrier();
3185		drain_workqueue(i915->wq);
3186	} while (--pass);
3187}
3188
3189struct i915_vma * __must_check
3190i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3191			 const struct i915_ggtt_view *view,
3192			 u64 size,
3193			 u64 alignment,
3194			 u64 flags);
3195
3196int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
3197void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
3198
3199void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
3200
3201static inline int __sg_page_count(const struct scatterlist *sg)
3202{
3203	return sg->length >> PAGE_SHIFT;
3204}
3205
3206struct scatterlist *
3207i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
3208		       unsigned int n, unsigned int *offset);
3209
3210struct page *
3211i915_gem_object_get_page(struct drm_i915_gem_object *obj,
3212			 unsigned int n);
3213
3214struct page *
3215i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
3216			       unsigned int n);
3217
3218dma_addr_t
3219i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
3220				unsigned long n);
3221
3222void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
3223				 struct sg_table *pages,
3224				 unsigned int sg_page_sizes);
3225int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
3226
3227static inline int __must_check
3228i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
3229{
3230	might_lock(&obj->mm.lock);
3231
3232	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
3233		return 0;
3234
3235	return __i915_gem_object_get_pages(obj);
3236}
3237
3238static inline bool
3239i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
3240{
3241	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
3242}
3243
3244static inline void
3245__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
3246{
3247	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
3248
3249	atomic_inc(&obj->mm.pages_pin_count);
3250}
3251
3252static inline bool
3253i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
3254{
3255	return atomic_read(&obj->mm.pages_pin_count);
3256}
3257
3258static inline void
3259__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
3260{
3261	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
3262	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
3263
3264	atomic_dec(&obj->mm.pages_pin_count);
3265}
3266
3267static inline void
3268i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
3269{
3270	__i915_gem_object_unpin_pages(obj);
3271}
3272
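/*
 * Pairing sketch (illustrative only): callers bracket access to the
 * backing store with a pin/unpin pair so the shrinker cannot reap the
 * pages in between; use_pages() is a hypothetical helper.
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *
 *	use_pages(obj);
 *
 *	i915_gem_object_unpin_pages(obj);
 */
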
3273enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
3274	I915_MM_NORMAL = 0,
3275	I915_MM_SHRINKER
3276};
3277
3278void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
3279				 enum i915_mm_subclass subclass);
3280void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
3281
3282enum i915_map_type {
3283	I915_MAP_WB = 0,
3284	I915_MAP_WC,
3285#define I915_MAP_OVERRIDE BIT(31)
3286	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
3287	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
3288};
3289
3290/**
3291 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
3292 * @obj: the object to map into kernel address space
3293 * @type: the type of mapping, used to select pgprot_t
3294 *
3295 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
3296 * pages and then returns a contiguous mapping of the backing storage into
3297 * the kernel address space. Based on the @type of mapping, the PTE will be
3298 * set to either WriteBack or WriteCombine (via pgprot_t).
3299 *
3300 * The caller is responsible for calling i915_gem_object_unpin_map() when the
3301 * mapping is no longer required.
3302 *
3303 * Returns the pointer through which to access the mapped object, or an
3304 * ERR_PTR() on error.
3305 */
3306void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
3307					   enum i915_map_type type);
3308
3309/**
3310 * i915_gem_object_unpin_map - releases an earlier mapping
3311 * @obj: the object to unmap
3312 *
3313 * After pinning the object and mapping its pages, once you are finished
3314 * with your access, call i915_gem_object_unpin_map() to release the pin
3315 * upon the mapping. Once the pin count reaches zero, that mapping may be
3316 * removed.
3317 */
3318static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
3319{
3320	i915_gem_object_unpin_pages(obj);
3321}
3322
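/*
 * Usage sketch (illustrative only): a typical CPU access pins a mapping,
 * uses it, then drops the pin; 'data' and 'len' are assumptions made for
 * this sketch.
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, len);
 *	i915_gem_object_unpin_map(obj);
 */
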
3323int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
3324				    unsigned int *needs_clflush);
3325int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
3326				     unsigned int *needs_clflush);
3327#define CLFLUSH_BEFORE	BIT(0)
3328#define CLFLUSH_AFTER	BIT(1)
3329#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)
3330
3331static inline void
3332i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
3333{
3334	i915_gem_object_unpin_pages(obj);
3335}
3336
3337int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
3338void i915_vma_move_to_active(struct i915_vma *vma,
3339			     struct i915_request *rq,
3340			     unsigned int flags);
3341int i915_gem_dumb_create(struct drm_file *file_priv,
3342			 struct drm_device *dev,
3343			 struct drm_mode_create_dumb *args);
3344int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
3345		      uint32_t handle, uint64_t *offset);
3346int i915_gem_mmap_gtt_version(void);
3347
3348void i915_gem_track_fb(struct drm_i915_gem_object *old,
3349		       struct drm_i915_gem_object *new,
3350		       unsigned frontbuffer_bits);
3351
3352int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
3353
3354struct i915_request *
3355i915_gem_find_active_request(struct intel_engine_cs *engine);
3356
3357static inline bool i915_reset_backoff(struct i915_gpu_error *error)
3358{
3359	return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
3360}
3361
3362static inline bool i915_reset_handoff(struct i915_gpu_error *error)
3363{
3364	return unlikely(test_bit(I915_RESET_HANDOFF, &error->flags));
3365}
3366
3367static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
3368{
3369	return unlikely(test_bit(I915_WEDGED, &error->flags));
3370}
3371
3372static inline bool i915_reset_backoff_or_wedged(struct i915_gpu_error *error)
3373{
3374	return i915_reset_backoff(error) | i915_terminally_wedged(error);
3375}
3376
3377static inline u32 i915_reset_count(struct i915_gpu_error *error)
3378{
3379	return READ_ONCE(error->reset_count);
3380}
3381
3382static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
3383					  struct intel_engine_cs *engine)
3384{
3385	return READ_ONCE(error->reset_engine_count[engine->id]);
3386}
3387
3388struct i915_request *
3389i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
3390int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
3391void i915_gem_reset(struct drm_i915_private *dev_priv);
3392void i915_gem_reset_finish_engine(struct intel_engine_cs *engine);
3393void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
3394void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
3395bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
3396void i915_gem_reset_engine(struct intel_engine_cs *engine,
3397			   struct i915_request *request);
3398
3399void i915_gem_init_mmio(struct drm_i915_private *i915);
3400int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
3401int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
3402void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
3403void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
3404int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
3405			   unsigned int flags);
3406int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
3407void i915_gem_resume(struct drm_i915_private *dev_priv);
3408int i915_gem_fault(struct vm_fault *vmf);
3409int i915_gem_object_wait(struct drm_i915_gem_object *obj,
3410			 unsigned int flags,
3411			 long timeout,
3412			 struct intel_rps_client *rps);
3413int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
3414				  unsigned int flags,
3415				  int priority);
3416#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
3417
3418int __must_check
3419i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
3420int __must_check
3421i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
3422int __must_check
3423i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
3424struct i915_vma * __must_check
3425i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3426				     u32 alignment,
3427				     const struct i915_ggtt_view *view,
3428				     unsigned int flags);
3429void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
3430int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
3431				int align);
3432int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
3433void i915_gem_release(struct drm_device *dev, struct drm_file *file);
3434
3435int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3436				    enum i915_cache_level cache_level);
3437
3438struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
3439				struct dma_buf *dma_buf);
3440
3441struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
3442				struct drm_gem_object *gem_obj, int flags);
3443
3444static inline struct i915_hw_ppgtt *
3445i915_vm_to_ppgtt(struct i915_address_space *vm)
3446{
3447	return container_of(vm, struct i915_hw_ppgtt, base);
3448}
3449
3450/* i915_gem_fence_reg.c */
3451struct drm_i915_fence_reg *
3452i915_reserve_fence(struct drm_i915_private *dev_priv);
3453void i915_unreserve_fence(struct drm_i915_fence_reg *fence);
3454
3455void i915_gem_revoke_fences(struct drm_i915_private *dev_priv);
3456void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
3457
3458void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
3459void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
3460				       struct sg_table *pages);
3461void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
3462					 struct sg_table *pages);
3463
3464static inline struct i915_gem_context *
3465__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
3466{
3467	return idr_find(&file_priv->context_idr, id);
3468}
3469
3470static inline struct i915_gem_context *
3471i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
3472{
3473	struct i915_gem_context *ctx;
3474
3475	rcu_read_lock();
3476	ctx = __i915_gem_context_lookup_rcu(file_priv, id);
3477	if (ctx && !kref_get_unless_zero(&ctx->ref))
3478		ctx = NULL;
3479	rcu_read_unlock();
3480
3481	return ctx;
3482}
3483
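/*
 * Note on the lookup above: the RCU read lock keeps the context memory
 * valid during the idr walk, while kref_get_unless_zero() rejects a
 * context whose final reference is concurrently being dropped. The
 * caller therefore gets either NULL or a context holding a reference
 * that must later be released (i915_gem_context_put() in this era of
 * the driver).
 */
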
3484static inline struct intel_timeline *
3485i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
3486				 struct intel_engine_cs *engine)
3487{
3488	struct i915_address_space *vm;
3489
3490	vm = ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
3491	return &vm->timeline.engine[engine->id];
3492}
3493
3494int i915_perf_open_ioctl(struct drm_device *dev, void *data,
3495			 struct drm_file *file);
3496int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
3497			       struct drm_file *file);
3498int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
3499				  struct drm_file *file);
3500void i915_oa_init_reg_state(struct intel_engine_cs *engine,
3501			    struct i915_gem_context *ctx,
3502			    uint32_t *reg_state);
3503
3504/* i915_gem_evict.c */
3505int __must_check i915_gem_evict_something(struct i915_address_space *vm,
3506					  u64 min_size, u64 alignment,
3507					  unsigned cache_level,
3508					  u64 start, u64 end,
3509					  unsigned flags);
3510int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
3511					 struct drm_mm_node *node,
3512					 unsigned int flags);
3513int i915_gem_evict_vm(struct i915_address_space *vm);
3514
3515void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv);
3516
3517/* belongs in i915_gem_gtt.h */
3518static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
3519{
3520	wmb();
3521	if (INTEL_GEN(dev_priv) < 6)
3522		intel_gtt_chipset_flush();
3523}
3524
3525/* i915_gem_stolen.c */
3526int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
3527				struct drm_mm_node *node, u64 size,
3528				unsigned alignment);
3529int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
3530					 struct drm_mm_node *node, u64 size,
3531					 unsigned alignment, u64 start,
3532					 u64 end);
3533void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
3534				 struct drm_mm_node *node);
3535int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
3536void i915_gem_cleanup_stolen(struct drm_device *dev);
3537struct drm_i915_gem_object *
3538i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
3539			      resource_size_t size);
3540struct drm_i915_gem_object *
3541i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
3542					       resource_size_t stolen_offset,
3543					       resource_size_t gtt_offset,
3544					       resource_size_t size);
3545
3546/* i915_gem_internal.c */
3547struct drm_i915_gem_object *
3548i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
3549				phys_addr_t size);
3550
3551/* i915_gem_shrinker.c */
3552unsigned long i915_gem_shrink(struct drm_i915_private *i915,
3553			      unsigned long target,
3554			      unsigned long *nr_scanned,
3555			      unsigned flags);
3556#define I915_SHRINK_PURGEABLE 0x1
3557#define I915_SHRINK_UNBOUND 0x2
3558#define I915_SHRINK_BOUND 0x4
3559#define I915_SHRINK_ACTIVE 0x8
3560#define I915_SHRINK_VMAPS 0x10
3561unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
3562void i915_gem_shrinker_register(struct drm_i915_private *i915);
3563void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
3564
3565
3566/* i915_gem_tiling.c */
3567static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
3568{
3569	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3570
3571	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
3572		i915_gem_object_is_tiled(obj);
3573}
3574
3575u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
3576			unsigned int tiling, unsigned int stride);
3577u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
3578			     unsigned int tiling, unsigned int stride);
3579
3580/* i915_debugfs.c */
3581#ifdef CONFIG_DEBUG_FS
3582int i915_debugfs_register(struct drm_i915_private *dev_priv);
3583int i915_debugfs_connector_add(struct drm_connector *connector);
3584void intel_display_crc_init(struct drm_i915_private *dev_priv);
3585#else
3586static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
3587static inline int i915_debugfs_connector_add(struct drm_connector *connector)
3588{ return 0; }
3589static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
3590#endif
3591
3592/* i915_gpu_error.c */
3593#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
3594
3595__printf(2, 3)
3596void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
3597int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
3598			    const struct i915_gpu_state *gpu);
3599int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
3600			      struct drm_i915_private *i915,
3601			      size_t count, loff_t pos);
3602static inline void i915_error_state_buf_release(
3603	struct drm_i915_error_state_buf *eb)
3604{
3605	kfree(eb->buf);
3606}
3607
3608struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
3609void i915_capture_error_state(struct drm_i915_private *dev_priv,
3610			      u32 engine_mask,
3611			      const char *error_msg);
3612
3613static inline struct i915_gpu_state *
3614i915_gpu_state_get(struct i915_gpu_state *gpu)
3615{
3616	kref_get(&gpu->ref);
3617	return gpu;
3618}
3619
3620void __i915_gpu_state_free(struct kref *kref);
3621static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
3622{
3623	if (gpu)
3624		kref_put(&gpu->ref, __i915_gpu_state_free);
3625}
3626
3627struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
3628void i915_reset_error_state(struct drm_i915_private *i915);
3629
3630#else
3631
3632static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
3633					    u32 engine_mask,
3634					    const char *error_msg)
3635{
3636}
3637
3638static inline struct i915_gpu_state *
3639i915_first_error_state(struct drm_i915_private *i915)
3640{
3641	return NULL;
3642}
3643
3644static inline void i915_reset_error_state(struct drm_i915_private *i915)
3645{
3646}
3647
3648#endif
3649
3650const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
3651
3652/* i915_cmd_parser.c */
3653int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
3654void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
3655void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
3656int intel_engine_cmd_parser(struct intel_engine_cs *engine,
3657			    struct drm_i915_gem_object *batch_obj,
3658			    struct drm_i915_gem_object *shadow_batch_obj,
3659			    u32 batch_start_offset,
3660			    u32 batch_len,
3661			    bool is_master);
3662
3663/* i915_perf.c */
3664extern void i915_perf_init(struct drm_i915_private *dev_priv);
3665extern void i915_perf_fini(struct drm_i915_private *dev_priv);
3666extern void i915_perf_register(struct drm_i915_private *dev_priv);
3667extern void i915_perf_unregister(struct drm_i915_private *dev_priv);
3668
3669/* i915_suspend.c */
3670extern int i915_save_state(struct drm_i915_private *dev_priv);
3671extern int i915_restore_state(struct drm_i915_private *dev_priv);
3672
3673/* i915_sysfs.c */
3674void i915_setup_sysfs(struct drm_i915_private *dev_priv);
3675void i915_teardown_sysfs(struct drm_i915_private *dev_priv);
3676
3677/* intel_lpe_audio.c */
3678int  intel_lpe_audio_init(struct drm_i915_private *dev_priv);
3679void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
3680void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
3681void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
3682			    enum pipe pipe, enum port port,
3683			    const void *eld, int ls_clock, bool dp_output);
3684
3685/* intel_i2c.c */
3686extern int intel_setup_gmbus(struct drm_i915_private *dev_priv);
3687extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv);
3688extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
3689				     unsigned int pin);
3690extern int intel_gmbus_output_aksv(struct i2c_adapter *adapter);
3691
3692extern struct i2c_adapter *
3693intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
3694extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
3695extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
3696static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
3697{
3698	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
3699}
3700extern void intel_i2c_reset(struct drm_i915_private *dev_priv);
3701
3702/* intel_bios.c */
3703void intel_bios_init(struct drm_i915_private *dev_priv);
3704void intel_bios_cleanup(struct drm_i915_private *dev_priv);
3705bool intel_bios_is_valid_vbt(const void *buf, size_t size);
3706bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
3707bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
3708bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
3709bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
3710bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
3711bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
3712bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
3713				     enum port port);
3714bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
3715				enum port port);
3716
3717/* intel_acpi.c */
3718#ifdef CONFIG_ACPI
3719extern void intel_register_dsm_handler(void);
3720extern void intel_unregister_dsm_handler(void);
3721#else
3722static inline void intel_register_dsm_handler(void) { return; }
3723static inline void intel_unregister_dsm_handler(void) { return; }
3724#endif /* CONFIG_ACPI */
3725
3726/* intel_device_info.c */
3727static inline struct intel_device_info *
3728mkwrite_device_info(struct drm_i915_private *dev_priv)
3729{
3730	return (struct intel_device_info *)&dev_priv->info;
3731}
3732
3733/* modesetting */
3734extern void intel_modeset_init_hw(struct drm_device *dev);
3735extern int intel_modeset_init(struct drm_device *dev);
3736extern void intel_modeset_cleanup(struct drm_device *dev);
3737extern int intel_connector_register(struct drm_connector *);
3738extern void intel_connector_unregister(struct drm_connector *);
3739extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
3740				       bool state);
3741extern void intel_display_resume(struct drm_device *dev);
3742extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
3743extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
3744extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
3745extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
3746extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
3747extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
3748				  bool enable);
3749
3750int i915_reg_read_ioctl(struct drm_device *dev, void *data,
3751			struct drm_file *file);
3752
3753/* overlay */
3754extern struct intel_overlay_error_state *
3755intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
3756extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
3757					    struct intel_overlay_error_state *error);
3758
3759extern struct intel_display_error_state *
3760intel_display_capture_error_state(struct drm_i915_private *dev_priv);
3761extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
3762					    struct intel_display_error_state *error);
3763
3764int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
3765int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
3766				    u32 val, int fast_timeout_us,
3767				    int slow_timeout_ms);
3768#define sandybridge_pcode_write(dev_priv, mbox, val)	\
3769	sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500, 0)
3770
3771int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
3772		      u32 reply_mask, u32 reply, int timeout_base_ms);
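/*
 * A minimal polling sketch (the mailbox and value names are hypothetical):
 * ask pcode for a state change and wait until the masked reply matches:
 *
 *	ret = skl_pcode_request(dev_priv, FOO_MBOX, FOO_REQUEST,
 *				FOO_REPLY_MASK, FOO_REPLY, 150);
 *	if (ret)
 *		DRM_ERROR("pcode request timed out\n");
 *
 * For one-shot writes, sandybridge_pcode_write() above uses a 500us fast
 * timeout and no slow timeout.
 */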
3773
3774/* intel_sideband.c */
3775u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
3776int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
3777u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
3778u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
3779void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
3780u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
3781void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3782u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
3783void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3784u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
3785void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3786u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
3787void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
3788u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
3789		   enum intel_sbi_destination destination);
3790void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
3791		     enum intel_sbi_destination destination);
3792u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
3793void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
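/*
 * A typical sideband read-modify-write, as a sketch (PUNIT_REG_FOO and the
 * bit names are hypothetical, and real callers must hold the appropriate
 * sideband locking around the sequence):
 *
 *	val = vlv_punit_read(dev_priv, PUNIT_REG_FOO);
 *	val &= ~FOO_MASK;
 *	val |= FOO_ENABLE;
 *	vlv_punit_write(dev_priv, PUNIT_REG_FOO, val);
 */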
3794
3795/* intel_dpio_phy.c */
3796void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
3797			     enum dpio_phy *phy, enum dpio_channel *ch);
3798void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
3799				  enum port port, u32 margin, u32 scale,
3800				  u32 enable, u32 deemphasis);
3801void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
3802void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
3803bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
3804			    enum dpio_phy phy);
3805bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
3806			      enum dpio_phy phy);
3807uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count);
3808void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
3809				     uint8_t lane_lat_optim_mask);
3810uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
3811
3812void chv_set_phy_signal_level(struct intel_encoder *encoder,
3813			      u32 deemph_reg_value, u32 margin_reg_value,
3814			      bool uniq_trans_scale);
3815void chv_data_lane_soft_reset(struct intel_encoder *encoder,
3816			      const struct intel_crtc_state *crtc_state,
3817			      bool reset);
3818void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
3819			    const struct intel_crtc_state *crtc_state);
3820void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
3821				const struct intel_crtc_state *crtc_state);
3822void chv_phy_release_cl2_override(struct intel_encoder *encoder);
3823void chv_phy_post_pll_disable(struct intel_encoder *encoder,
3824			      const struct intel_crtc_state *old_crtc_state);
3825
3826void vlv_set_phy_signal_level(struct intel_encoder *encoder,
3827			      u32 demph_reg_value, u32 preemph_reg_value,
3828			      u32 uniqtranscale_reg_value, u32 tx3_demph);
3829void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
3830			    const struct intel_crtc_state *crtc_state);
3831void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
3832				const struct intel_crtc_state *crtc_state);
3833void vlv_phy_reset_lanes(struct intel_encoder *encoder,
3834			 const struct intel_crtc_state *old_crtc_state);
3835
3836int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
3837int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
3838u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
3839			   const i915_reg_t reg);
3840
3841u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);
3842
3843static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
3844					 const i915_reg_t reg)
3845{
3846	return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
3847}
3848
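/*
 * Traced MMIO accessors. The trailing boolean selects whether the access is
 * reported via the i915 trace events; the _NOTRACE variants pass false to
 * keep hot paths out of the trace buffer.
 */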
3849#define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
3850#define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
3851
3852#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
3853#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
3854#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
3855#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
3856
3857#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
3858#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
3859#define I915_READ_NOTRACE(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
3860#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
3861
3862/* Be very careful with read/write 64-bit values. On 32-bit machines, they
3863 * will be implemented using 2 32-bit writes in an arbitrary order with
3864 * an arbitrary delay between them. This can cause the hardware to
3865 * act upon the intermediate value, possibly leading to corruption and
3866 * machine death. For this reason we do not support I915_WRITE64, or
3867 * dev_priv->uncore.funcs.mmio_writeq.
3868 *
3869 * When reading a 64-bit value as two 32-bit values, the delay may cause
3870 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
3871 * occasionally a 64-bit register does not actually support a full readq
3872 * and must be read using two 32-bit reads.
3873 *
3874 * You have been warned.
3875 */
3876#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
3877
3878#define I915_READ64_2x32(lower_reg, upper_reg) ({			\
3879	u32 upper, lower, old_upper, loop = 0;				\
3880	upper = I915_READ(upper_reg);					\
3881	do {								\
3882		old_upper = upper;					\
3883		lower = I915_READ(lower_reg);				\
3884		upper = I915_READ(upper_reg);				\
3885	} while (upper != old_upper && loop++ < 2);			\
3886	(u64)upper << 32 | lower; })
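/*
 * Usage sketch (register names hypothetical): a 64-bit counter exposed as a
 * lower/upper register pair can be read tear-free with:
 *
 *	u64 count = I915_READ64_2x32(FOO_COUNT_LOW, FOO_COUNT_HIGH);
 *
 * The loop above re-reads the upper half until it is stable, so a carry out
 * of the lower half between the two reads cannot produce a torn value.
 */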
3887
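/*
 * MMIO writes are posted; a posting read flushes them out of the write
 * buffers before we proceed. The _NOTRACE form is used so that the flush
 * itself does not clutter the trace.
 */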
3888#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
3889#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
3890
3891#define __raw_read(x, s) \
3892static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \
3893					     i915_reg_t reg) \
3894{ \
3895	return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
3896}
3897
3898#define __raw_write(x, s) \
3899static inline void __raw_i915_write##x(const struct drm_i915_private *dev_priv, \
3900				       i915_reg_t reg, uint##x##_t val) \
3901{ \
3902	write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
3903}
3904__raw_read(8, b)
3905__raw_read(16, w)
3906__raw_read(32, l)
3907__raw_read(64, q)
3908
3909__raw_write(8, b)
3910__raw_write(16, w)
3911__raw_write(32, l)
3912__raw_write(64, q)
3913
3914#undef __raw_read
3915#undef __raw_write
3916
3917/* These are untraced mmio-accessors that are only valid to be used inside
3918 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
3919 * controlled.
3920 *
3921 * Think twice, and think again, before using these.
3922 *
3923 * As an example, these accessors can possibly be used between:
3924 *
3925 * spin_lock_irq(&dev_priv->uncore.lock);
3926 * intel_uncore_forcewake_get__locked();
3927 *
3928 * and
3929 *
3930 * intel_uncore_forcewake_put__locked();
3931 * spin_unlock_irq(&dev_priv->uncore.lock);
3932 *
3933 *
3934 * Note: some registers may not need forcewake held, so
3935 * intel_uncore_forcewake_{get,put} can be omitted, see
3936 * intel_uncore_forcewake_for_reg().
3937 *
3938 * Certain architectures will die if the same cacheline is concurrently accessed
3939 * by different clients (e.g. on Ivybridge). Access to registers should
3940 * therefore generally be serialised, by either the dev_priv->uncore.lock or
3941 * a more localised lock guarding all access to that bank of registers.
3942 */
3943#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
3944#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
3945#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
3946#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
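/*
 * Putting the pattern above together, a minimal sketch (FOO_REG is
 * hypothetical):
 *
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
 *	val = I915_READ_FW(FOO_REG);
 *	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
 *	spin_unlock_irq(&dev_priv->uncore.lock);
 */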
3947
3948/* "Broadcast RGB" property */
3949#define INTEL_BROADCAST_RGB_AUTO 0
3950#define INTEL_BROADCAST_RGB_FULL 1
3951#define INTEL_BROADCAST_RGB_LIMITED 2
3952
3953static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
3954{
3955	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3956		return VLV_VGACNTRL;
3957	else if (INTEL_GEN(dev_priv) >= 5)
3958		return CPU_VGACNTRL;
3959	else
3960		return VGACNTRL;
3961}
3962
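/*
 * msecs_to_jiffies_timeout() adds one jiffy to the converted value so that a
 * caller sleeping for the result is guaranteed to wait at least 'm'
 * milliseconds: the jiffy in which the timeout is armed may already be
 * almost over.
 */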
3963static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
3964{
3965	unsigned long j = msecs_to_jiffies(m);
3966
3967	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
3968}
3969
3970static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
3971{
3972	/* nsecs_to_jiffies64() does not guard against overflow */
3973	if (NSEC_PER_SEC % HZ &&
3974	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
3975		return MAX_JIFFY_OFFSET;
3976
3977	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
3978}
3979
3980static inline unsigned long
3981timespec_to_jiffies_timeout(const struct timespec *value)
3982{
3983	unsigned long j = timespec_to_jiffies(value);
3984
3985	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
3986}
3987
3988/*
3989 * If you need to wait X milliseconds between events A and B, but event B
3990 * doesn't happen exactly after event A, record the timestamp (jiffies) of
3991 * when event A happened; then, just before event B, call this function with
3992 * that timestamp as the first argument and X as the second.
3993 */
3994static inline void
3995wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
3996{
3997	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
3998
3999	/*
4000	 * Don't re-read the value of "jiffies" every time since it may change
4001	 * behind our back and break the math.
4002	 */
4003	tmp_jiffies = jiffies;
4004	target_jiffies = timestamp_jiffies +
4005			 msecs_to_jiffies_timeout(to_wait_ms);
4006
4007	if (time_after(target_jiffies, tmp_jiffies)) {
4008		remaining_jiffies = target_jiffies - tmp_jiffies;
4009		while (remaining_jiffies)
4010			remaining_jiffies =
4011			    schedule_timeout_uninterruptible(remaining_jiffies);
4012	}
4013}
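/*
 * Usage sketch (names hypothetical):
 *
 *	unsigned long last_off = jiffies;	// event A: panel powered off
 *	...
 *	// just before event B: powering the panel back on
 *	wait_remaining_ms_from_jiffies(last_off, 500);
 */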
4014
4015static inline bool
4016__i915_request_irq_complete(const struct i915_request *rq)
4017{
4018	struct intel_engine_cs *engine = rq->engine;
4019	u32 seqno;
4020
4021	/* Note that the engine may have wrapped around the seqno, and
4022	 * so our request->global_seqno will be ahead of the hardware,
4023	 * even though it completed the request before wrapping. We catch
4024	 * this by kicking all the waiters before resetting the seqno
4025	 * in hardware, and also signal the fence.
4026	 */
4027	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
4028		return true;
4029
4030	/* The request was dequeued before we were awoken. We check after
4031	 * inspecting the hw to confirm that this was the same request
4032	 * that generated the HWS update. The memory barriers within
4033	 * the request execution are sufficient to ensure that a check
4034	 * after reading the value from hw matches this request.
4035	 */
4036	seqno = i915_request_global_seqno(rq);
4037	if (!seqno)
4038		return false;
4039
4040	/* Before we do the heavier coherent read of the seqno,
4041	 * check the value (hopefully) in the CPU cacheline.
4042	 */
4043	if (__i915_request_completed(rq, seqno))
4044		return true;
4045
4046	/* Ensure our read of the seqno is coherent so that we
4047	 * do not "miss an interrupt" (i.e. if this is the last
4048	 * request and the seqno write from the GPU is not visible
4049	 * by the time the interrupt fires, we will see that the
4050	 * request is incomplete and go back to sleep awaiting
4051	 * another interrupt that will never come.)
4052	 *
4053	 * Strictly, we only need to do this once after an interrupt,
4054	 * but it is easier and safer to do it every time the waiter
4055	 * is woken.
4056	 */
4057	if (engine->irq_seqno_barrier &&
4058	    test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) {
4059		struct intel_breadcrumbs *b = &engine->breadcrumbs;
4060
4061		/* The ordering of irq_posted versus applying the barrier
4062		 * is crucial. The clearing of the current irq_posted must
4063		 * be visible before we perform the barrier operation,
4064		 * such that if a subsequent interrupt arrives, irq_posted
4065		 * is reasserted and our task rewoken (which causes us to
4066		 * do another __i915_request_irq_complete() immediately
4067		 * and reapply the barrier). Conversely, if the clear
4068		 * occurs after the barrier, then an interrupt that arrived
4069		 * whilst we waited on the barrier would not trigger a
4070		 * barrier on the next pass, and the read may not see the
4071		 * seqno update.
4072		 */
4073		engine->irq_seqno_barrier(engine);
4074
4075		/* If we consume the irq, but we are no longer the bottom-half,
4076		 * the real bottom-half may not have serialised their own
4077		 * seqno check with the irq-barrier (i.e. may have inspected
4078		 * the seqno before we believe it coherent since they see
4079		 * irq_posted == false but we are still running).
4080		 */
4081		spin_lock_irq(&b->irq_lock);
4082		if (b->irq_wait && b->irq_wait->tsk != current)
4083			/* Note that if the bottom-half is changed as we
4084			 * are sending the wake-up, the new bottom-half will
4085			 * be woken by whoever made the change. We only have
4086			 * to worry about when we steal the irq-posted for
4087			 * ourselves.
4088			 */
4089			wake_up_process(b->irq_wait->tsk);
4090		spin_unlock_irq(&b->irq_lock);
4091
4092		if (__i915_request_completed(rq, seqno))
4093			return true;
4094	}
4095
4096	return false;
4097}
4098
4099void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
4100bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
4101
4102/* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
4103 * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
4104 * perform the operation. To check beforehand, pass in the parameters to
4105 * i915_can_memcpy_from_wc() - since we only care about the low 4 bits,
4106 * you only need to pass in the minor offsets; page-aligned pointers are
4107 * always valid.
4108 *
4109 * To check only for SSE4.1, in the foreknowledge that the future use
4110 * will be correctly aligned, use i915_has_memcpy_from_wc().
4111 */
4112#define i915_can_memcpy_from_wc(dst, src, len) \
4113	i915_memcpy_from_wc((void *)((unsigned long)(dst) | (unsigned long)(src) | (len)), NULL, 0)
4114
4115#define i915_has_memcpy_from_wc() \
4116	i915_memcpy_from_wc(NULL, NULL, 0)
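/*
 * A typical guarded call then looks like (sketch):
 *
 *	if (i915_can_memcpy_from_wc(dst, src, len))
 *		i915_memcpy_from_wc(dst, src, len);
 *	else
 *		memcpy(dst, src, len);
 */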
4117
4118/* i915_mm.c */
4119int remap_io_mapping(struct vm_area_struct *vma,
4120		     unsigned long addr, unsigned long pfn, unsigned long size,
4121		     struct io_mapping *iomap);
4122
4123static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
4124{
4125	if (INTEL_GEN(i915) >= 10)
4126		return CNL_HWS_CSB_WRITE_INDEX;
4127	else
4128		return I915_HWS_CSB_WRITE_INDEX;
4129}
4130
4131#endif