/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))

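/*
 * The "uncore" is everything on the GPU outside the render cores proper,
 * reached through the MMIO register file. Most GT registers live behind
 * power wells that software must explicitly keep awake ("forcewake") before
 * touching them; the code below manages those wake references per hardware
 * domain (render, blitter, media).
 */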
static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(!i915_mmio_reg_valid(d->reg_set));
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	mod_timer_pinned(&d->timer, jiffies + 1);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from same cacheline, but not from the set register */
	if (i915_mmio_reg_valid(d->reg_post))
		__raw_posting_read(d->i915, d->reg_post);
}

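/*
 * Acquiring a domain is a three-step handshake with the hardware: wait for
 * any previous request to drain (ack clear), write the wake request, then
 * wait for the hardware to acknowledge it (ack set).
 */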
static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
		fw_domain_wait_ack(d);
	}
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	/* No need to do for all, just do for first found */
	for_each_fw_domain(d, dev_priv, id) {
		fw_domain_posting_read(d);
		break;
	}
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

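/*
 * On gen6/gen7 the GT keeps a small FIFO of posted writes. Writing while
 * fewer than GT_FIFO_NUM_RESERVED_ENTRIES slots remain free risks losing
 * writes, so the gen6/hsw write paths first wait for space here.
 */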
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

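/*
 * Releases taken by the implicit (register-access) path are deferred: the
 * per-domain timer armed in fw_domain_arm_timer() fires roughly one jiffy
 * later and drops the reference here, so back-to-back accesses do not
 * bounce the power well up and down.
 */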
static void intel_uncore_fw_release_timer(unsigned long arg)
{
	struct intel_uncore_forcewake_domain *domain = (void *)arg;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(domain->i915);

	spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		domain->i915->uncore.funcs.force_wake_put(domain->i915,
							  1 << domain->id);

	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
}

void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domain_id id;
	enum forcewake_domains fw = 0, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, id) {
			if (del_timer_sync(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer((unsigned long)domain);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv, id) {
			if (timer_pending(&domain->timer))
				active_domains |= (1 << id);
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	for_each_fw_domain(domain, dev_priv, id)
		if (domain->wake_count)
			fw |= 1 << id;

	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void intel_uncore_ellc_detect(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
	     INTEL_INFO(dev)->gen >= 9) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}
}

static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 dbg;

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 cer;

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		return fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return vlv_check_for_unclaimed_mmio(dev_priv);

	return false;
}

static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev, restore_forcewake);
	i915_check_and_clear_faults(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count++)
			fw_domains &= ~(1 << id);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence, and the reference should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all domains
 * to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

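/*
 * An illustrative sketch (not code from this file): keeping the render
 * well awake across a register sequence instead of paying the forcewake
 * handshake on every single access.
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
 *	... a sequence of reads/writes that must not race RC6 ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
 */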
/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	for_each_fw_domain(domain, dev_priv, id)
		WARN_ON(domain->wake_count);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

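/*
 * The range tables below map MMIO offsets to the forcewake domain(s)
 * powering them on each platform; the generated accessors further down
 * consult them to decide which well(s) to wake before an access.
 */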
#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x38000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xF000, 0x10000))

#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0xB00, 0x2000)

#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x2700) || \
	 REG_RANGE((reg), 0x3000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8140, 0x8160) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE900) || \
	 REG_RANGE((reg), 0x24400, 0x24800))

#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8130, 0x8140) || \
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0x9400, 0x9800)

#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
	((reg) < 0x40000 && \
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	/* XXX. We limit the automatic arming of mmio debug traces on
	 * these platforms: there are just too many unclaimed accesses
	 * revealed by them, and CI/BAT suffers from the noise. Please
	 * fix and then re-enable the automatic traces.
	 */
	if (i915.mmio_debug < 2 &&
	    (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	if (WARN(check_for_unclaimed_mmio(dev_priv),
		 "Unclaimed register detected %s %s register 0x%x\n",
		 before ? "before" : "after",
		 read ? "reading" : "writing to",
		 i915_mmio_reg_offset(reg)))
		i915.mmio_debug--; /* Only report the first N failures */
}

static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915.mmio_debug))
		return;

	__unclaimed_reg_debug(dev_priv, reg, read, before);
}

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

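/*
 * From gen6 onwards every MMIO read is stamped out from the template
 * below: the header takes uncore.lock and runs the unclaimed-access
 * check, the body wakes whichever domain the offset falls into, and the
 * footer re-checks, unlocks and emits a tracepoint.
 */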
#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, true, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

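/*
 * Implicit forcewake for the register accessors: domains that are already
 * awake are skipped, while the rest get a reference plus an armed release
 * timer, keeping the well up briefly after the access completes.
 */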
static inline void __force_wake_get(struct drm_i915_private *dev_priv,
				    enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (WARN_ON(!fw_domains))
		return;

	/* Ideally GCC would constant-fold and eliminate this loop */
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count) {
			fw_domains &= ~(1 << id);
			continue;
		}

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (NEEDS_FORCE_WAKE(offset)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine = 0; \
	GEN6_READ_HEADER(x); \
	if (!NEEDS_FORCE_WAKE(offset)) \
		fw_engine = 0; \
	else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine = 0; \
	GEN6_READ_HEADER(x); \
	if (!NEEDS_FORCE_WAKE(offset)) \
		fw_engine = 0; \
	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define SKL_NEEDS_FORCE_WAKE(reg) \
	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	if (!SKL_NEEDS_FORCE_WAKE(offset)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define VGPU_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	VGPU_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	VGPU_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)

#undef __vgpu_read
#undef VGPU_READ_FOOTER
#undef VGPU_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)
__gen2_write(64)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, false, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

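/*
 * "Shadowed" registers are tracked by the hardware itself across power
 * well transitions, so writes to them do not need a forcewake reference;
 * the gen8+ write paths skip the wake for anything in these tables.
 */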
static const i915_reg_t gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv,
			     i915_reg_t reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (i915_mmio_reg_equal(reg, gen8_shadowed_regs[i]))
			return true;

	return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine = 0; \
	GEN6_WRITE_HEADER; \
	if (!NEEDS_FORCE_WAKE(offset) || \
	    is_gen8_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

static const i915_reg_t gen9_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	FORCEWAKE_BLITTER_GEN9,
	FORCEWAKE_RENDER_GEN9,
	FORCEWAKE_MEDIA_GEN9,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	/* TODO: Other registers are not yet used */
};

static bool is_gen9_shadowed(struct drm_i915_private *dev_priv,
			     i915_reg_t reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
		if (i915_mmio_reg_equal(reg, gen9_shadowed_regs[i]))
			return true;

	return false;
}

#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
		bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	if (!SKL_NEEDS_FORCE_WAKE(offset) || \
	    is_gen9_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define VGPU_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  i915_reg_t reg, u##x val, bool trace) { \
	VGPU_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	VGPU_WRITE_FOOTER; \
}

__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
__vgpu_write(64)

#undef __vgpu_write
#undef VGPU_WRITE_FOOTER
#undef VGPU_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)

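/*
 * Per-domain bring-up: record the set/ack register pair and the bit
 * patterns used to wake, release and reset the well (plain values on
 * gen6, masked writes everywhere else), then park the domain in its
 * reset state.
 */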
static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;

	d->i915 = dev_priv;
	d->id = domain_id;

	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}

static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
		return;

	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		if (IS_HASWELL(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. At this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		mutex_lock(&dev->struct_mutex);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_check_vgpu(dev);

	intel_uncore_ellc_detect(dev);
	intel_uncore_fw_domains_init(dev);
	__intel_uncore_early_sanitize(dev, false);

	dev_priv->uncore.unclaimed_mmio_check = 1;

	switch (INTEL_INFO(dev)->gen) {
	default:
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);

		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	if (intel_vgpu_active(dev)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	i915_reg_t offset_ldw, offset_udw;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	  .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	  .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	i915_reg_t offset_ldw, offset_udw;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset_ldw = entry->offset_ldw;
	offset_udw = entry->offset_udw;
	size = entry->size;
	size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);

	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
		break;
	case 8:
		reg->val = I915_READ64(offset_ldw);
		break;
	case 4:
		reg->val = I915_READ(offset_ldw);
		break;
	case 2:
		reg->val = I915_READ16(offset_ldw);
		break;
	case 1:
		reg->val = I915_READ8(offset_ldw);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

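/*
 * Full-chip reset, oldest platforms first: pre-ILK parts toggle the GDRST
 * byte in PCI config space, ILK uses the MMIO ILK_GDSR register, and
 * gen6+ uses GEN6_GDRST (with gen8+ adding a per-engine ready handshake
 * beforehand).
 */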
static int i915_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_device *dev)
{
	/* assert reset for at least 20 usec */
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(dev), 500);
}

static int g4x_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_device *dev)
{
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(dev), 500);
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

static int wait_for_register(struct drm_i915_private *dev_priv,
			     i915_reg_t reg,
			     const u32 mask,
			     const u32 value,
			     const unsigned long timeout_ms)
{
	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
}

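/*
 * Gen8+ per-engine handshake: ask each engine to quiesce via
 * RING_RESET_CTL and wait for it to report ready-to-reset before issuing
 * the gen6-style full soft reset; on timeout the requests are withdrawn.
 */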
static int gen8_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int i;

	for_each_ring(engine, dev_priv, i) {
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
			   _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

		if (wait_for_register(dev_priv,
				      RING_RESET_CTL(engine->mmio_base),
				      RESET_CTL_READY_TO_RESET,
				      RESET_CTL_READY_TO_RESET,
				      700)) {
			DRM_ERROR("%s: reset request timeout\n", engine->name);
			goto not_ready;
		}
	}

	return gen6_do_reset(dev);

not_ready:
	for_each_ring(engine, dev_priv, i)
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
			   _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));

	return -EIO;
}

static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
{
	if (!i915.reset)
		return NULL;

	if (INTEL_INFO(dev)->gen >= 8)
		return gen8_do_reset;
	else if (INTEL_INFO(dev)->gen >= 6)
		return gen6_do_reset;
	else if (IS_GEN5(dev))
		return ironlake_do_reset;
	else if (IS_G4X(dev))
		return g4x_do_reset;
	else if (IS_G33(dev))
		return g33_do_reset;
	else if (INTEL_INFO(dev)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int intel_gpu_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int (*reset)(struct drm_device *);
	int ret;

	reset = intel_get_gpu_reset(dev);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = reset(dev);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_device *dev)
{
	return intel_get_gpu_reset(dev) != NULL;
}

bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	return check_for_unclaimed_mmio(dev_priv);
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
	if (unlikely(i915.mmio_debug ||
		     dev_priv->uncore.unclaimed_mmio_check <= 0))
		return false;

	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		i915.mmio_debug++;
		dev_priv->uncore.unclaimed_mmio_check--;
		return true;
	}

	return false;
}
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#include <drm/drm_managed.h>
25#include <linux/pm_runtime.h>
26
27#include "gt/intel_engine_regs.h"
28#include "gt/intel_gt_regs.h"
29
30#include "i915_drv.h"
31#include "i915_iosf_mbi.h"
32#include "i915_reg.h"
33#include "i915_trace.h"
34#include "i915_vgpu.h"
35#include "intel_pm.h"
36
37#define FORCEWAKE_ACK_TIMEOUT_MS 50
38#define GT_FIFO_TIMEOUT_MS 10
39
40#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
41
42static void
43fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
44{
45 uncore->fw_get_funcs->force_wake_get(uncore, fw_domains);
46}
47
48void
49intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915)
50{
51 spin_lock_init(&i915->mmio_debug.lock);
52 i915->mmio_debug.unclaimed_mmio_check = 1;
53
54 i915->uncore.debug = &i915->mmio_debug;
55}
56
57static void mmio_debug_suspend(struct intel_uncore *uncore)
58{
59 if (!uncore->debug)
60 return;
61
62 spin_lock(&uncore->debug->lock);
63
64 /* Save and disable mmio debugging for the user bypass */
65 if (!uncore->debug->suspend_count++) {
66 uncore->debug->saved_mmio_check = uncore->debug->unclaimed_mmio_check;
67 uncore->debug->unclaimed_mmio_check = 0;
68 }
69
70 spin_unlock(&uncore->debug->lock);
71}
72
73static bool check_for_unclaimed_mmio(struct intel_uncore *uncore);
74
75static void mmio_debug_resume(struct intel_uncore *uncore)
76{
77 if (!uncore->debug)
78 return;
79
80 spin_lock(&uncore->debug->lock);
81
82 if (!--uncore->debug->suspend_count)
83 uncore->debug->unclaimed_mmio_check = uncore->debug->saved_mmio_check;
84
85 if (check_for_unclaimed_mmio(uncore))
86 drm_info(&uncore->i915->drm,
87 "Invalid mmio detected during user access\n");
88
89 spin_unlock(&uncore->debug->lock);
90}
91
92static const char * const forcewake_domain_names[] = {
93 "render",
94 "gt",
95 "media",
96 "vdbox0",
97 "vdbox1",
98 "vdbox2",
99 "vdbox3",
100 "vdbox4",
101 "vdbox5",
102 "vdbox6",
103 "vdbox7",
104 "vebox0",
105 "vebox1",
106 "vebox2",
107 "vebox3",
108 "gsc",
109};
110
111const char *
112intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
113{
114 BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
115
116 if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
117 return forcewake_domain_names[id];
118
119 WARN_ON(id);
120
121 return "unknown";
122}
123
124#define fw_ack(d) readl((d)->reg_ack)
125#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
126#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
127
128static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	/*
	 * We don't really know if the powerwell for the forcewake domain we are
	 * trying to reset here does exist at this point (engines could be fused
	 * off in ICL+), so no waiting for acks
	 */
	/* WaRsClearFWBitsAtReset */
	if (GRAPHICS_VER(d->uncore->i915) >= 12)
		fw_clear(d, 0xefff);
	else
		fw_clear(d, 0xffff);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
	d->uncore->fw_domains_timer |= d->mask;
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline int
__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack,
	       const u32 value)
{
	return wait_for_atomic((fw_ack(d) & ack) == value,
			       FORCEWAKE_ACK_TIMEOUT_MS);
}

static inline int
wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack)
{
	return __wait_for_ack(d, ack, 0);
}

static inline int
wait_ack_set(const struct intel_uncore_forcewake_domain *d,
	     const u32 ack)
{
	return __wait_for_ack(d, ack, ack);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
		drm_err(&d->uncore->i915->drm,
			"%s: timed out waiting for forcewake ack to clear.\n",
			intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
	}
}

enum ack_type {
	ACK_CLEAR = 0,
	ACK_SET
};

static int
fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	unsigned int pass;
	bool ack_detected;

	/*
	 * There is a possibility of driver's wake request colliding
	 * with hardware's own wake requests and that can cause
	 * hardware to not deliver the driver's ack message.
	 *
	 * Use a fallback bit toggle to kick the gpu state machine
	 * in the hope that the original ack will be delivered along with
	 * the fallback ack.
	 *
	 * This workaround is described in HSDES #1604254524 and it's known as:
	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
	 * although the name is a bit misleading.
	 */

	pass = 1;
	do {
		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);

		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
		/* Give gt some time to relax before the polling frenzy */
		udelay(10 * pass);
		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);

		ack_detected = (fw_ack(d) & ack_bit) == value;

		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
	} while (!ack_detected && pass++ < 10);

	drm_dbg(&d->uncore->i915->drm,
		"%s had to use fallback to %s ack, 0x%x (passes %u)\n",
		intel_uncore_forcewake_domain_to_str(d->id),
		type == ACK_SET ? "set" : "clear",
		fw_ack(d),
		pass);

	return ack_detected ? 0 : -ETIMEDOUT;
}

static inline void
fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
		fw_domain_wait_ack_clear(d);
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	fw_set(d, FORCEWAKE_KERNEL);
}

static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
		drm_err(&d->uncore->i915->drm,
			"%s: timed out waiting for forcewake ack request.\n",
			intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
	}
}

static inline void
fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
		fw_domain_wait_ack_set(d);
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	fw_clear(d, FORCEWAKE_KERNEL);
}

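/*
 * Wake a set of domains in two passes: first post the wake request to every
 * domain (after waiting for any stale ack to clear), then wait for each ack.
 * Posting all the requests up front lets the power wells ramp up in parallel
 * rather than serializing a full request/ack round trip per domain.
 */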
static void
fw_domains_get_normal(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_get_with_fallback(struct intel_uncore *uncore,
			     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear_fallback(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set_fallback(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_put(d);

	uncore->fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct intel_uncore *uncore,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_reset(d);
}

static inline u32 gt_thread_status(struct intel_uncore *uncore)
{
	u32 val;

	val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;

	return val;
}

static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
{
	/*
	 * w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	drm_WARN_ONCE(&uncore->i915->drm,
		      wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
		      "GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get_normal(uncore, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(uncore);
}

static inline u32 fifo_free_entries(struct intel_uncore *uncore)
{
	u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{
	u32 n;

	/*
	 * On VLV, the FIFO is shared by both SW and HW, so we need to read
	 * the free entries every time.
	 */
	if (IS_VALLEYVIEW(uncore->i915))
		n = fifo_free_entries(uncore);
	else
		n = uncore->fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			drm_dbg(&uncore->i915->drm,
				"GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

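	/* Account for the FIFO entry our caller's upcoming write will consume */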
	uncore->fifo_count = n - 1;
}

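/*
 * Timer callback for the delayed forcewake release: if the domain has seen
 * use since the timer was armed (domain->active), let it run for another
 * tick; otherwise drop the wakeref the timer has been holding.
 */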
static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct intel_uncore *uncore = domain->uncore;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(uncore->rpm);

	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&uncore->lock, irqflags);

	uncore->fw_domains_timer &= ~domain->mask;

	GEM_BUG_ON(!domain->wake_count);
	if (--domain->wake_count == 0)
		fw_domains_put(uncore, domain->mask);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return HRTIMER_NORESTART;
}

/* Note callers must have acquired the PUNIT->PMIC bus before calling this. */
static unsigned int
intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	iosf_mbi_assert_punit_acquired();

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, uncore, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&uncore->lock, irqflags);

		for_each_fw_domain(domain, uncore, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&uncore->lock, irqflags);
		cond_resched();
	}

	drm_WARN_ON(&uncore->i915->drm, active_domains);

	fw = uncore->fw_domains_active;
	if (fw)
		fw_domains_put(uncore, fw);

	fw_domains_reset(uncore, uncore->fw_domains);
	assert_forcewakes_inactive(uncore);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return fw; /* track the lost user forcewake domains */
}

static bool
fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 dbg;

	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	/*
	 * Bugs in PCI programming (or failing hardware) can occasionally cause
	 * us to lose access to the MMIO BAR. When this happens, register
	 * reads will come back with 0xFFFFFFFF for every register and things
	 * go bad very quickly. Let's try to detect that special case and at
	 * least try to print a more informative message about what has
	 * happened.
	 *
	 * During normal operation the FPGA_DBG register has several unused
	 * bits that will always read back as 0's so we can use them as canaries
	 * to recognize when MMIO accesses are just busted.
	 */
	if (unlikely(dbg == ~0))
		drm_err(&uncore->i915->drm,
			"Lost access to MMIO BAR; all registers now read back as 0xFFFFFFFF!\n");

	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 cer;

	cer = __raw_uncore_read32(uncore, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
gen6_check_for_fifo_debug(struct intel_uncore *uncore)
{
	u32 fifodbg;

	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);

	if (unlikely(fifodbg)) {
		drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x%08x\n", fifodbg);
		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
	}

	return fifodbg;
}

static bool
check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret = false;

	lockdep_assert_held(&uncore->debug->lock);

	if (uncore->debug->suspend_count)
		return false;

	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
		ret |= fpga_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_dbg_unclaimed(uncore))
		ret |= vlv_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_fifo(uncore))
		ret |= gen6_check_for_fifo_debug(uncore);

	return ret;
}

static void forcewake_early_sanitize(struct intel_uncore *uncore,
				     unsigned int restore_forcewake)
{
	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(uncore->i915)) {
		__raw_uncore_write32(uncore, GTFIFOCTL,
				     __raw_uncore_read32(uncore, GTFIFOCTL) |
				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				     GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(uncore);
	if (restore_forcewake) {
		spin_lock_irq(&uncore->lock);
		fw_domains_get(uncore, restore_forcewake);

		if (intel_uncore_has_fifo(uncore))
			uncore->fifo_count = fifo_free_entries(uncore);
		spin_unlock_irq(&uncore->lock);
	}
	iosf_mbi_punit_release();
}

void intel_uncore_suspend(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();
}

void intel_uncore_resume_early(struct intel_uncore *uncore)
{
	unsigned int restore_forcewake;

	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");

	if (!intel_uncore_has_forcewake(uncore))
		return;

	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
	forcewake_early_sanitize(uncore, restore_forcewake);

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		fw_domains_get(uncore, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down particular
 * forcewake domains this function should be called at the beginning of the
 * sequence. And subsequently the reference should be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
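 *
 * A minimal usage sketch (assuming the caller already holds a runtime PM
 * wakeref and only needs the render well):
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
 *	... raw register sequence that must not lose the render well ...
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);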
 */
void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->fw_get_funcs)
		return;

	assert_rpm_wakelock_held(uncore->rpm);

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_get(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}

/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!uncore->user_forcewake_count++) {
		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
		mmio_debug_suspend(uncore);
	}
	spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!--uncore->user_forcewake_count) {
		mmio_debug_resume(uncore);
		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the uncore->lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->fw_get_funcs)
		return;

	__intel_uncore_forcewake_get(uncore, fw_domains);
}

static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains,
					 bool delayed)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		GEM_BUG_ON(!domain->wake_count);

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		if (delayed &&
		    !(domain->uncore->fw_domains_timer & domain->mask))
			fw_domain_arm_timer(domain);
		else
			fw_domains_put(uncore, domain->mask);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains, false);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}

void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains, true);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}

/**
 * intel_uncore_forcewake_flush - flush the delayed release
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to flush
 */
void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
				  enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	if (!uncore->fw_get_funcs)
		return;

	fw_domains &= uncore->fw_domains;
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		WRITE_ONCE(domain->active, false);
		if (hrtimer_cancel(&domain->timer))
			intel_uncore_fw_release_timer(&domain->timer);
	}
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the uncore->lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->fw_get_funcs)
		return;

	__intel_uncore_forcewake_put(uncore, fw_domains, false);
}

void assert_forcewakes_inactive(struct intel_uncore *uncore)
{
	if (!uncore->fw_get_funcs)
		return;

	drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
		 "Expected all fw_domains to be inactive, but %08x are still on\n",
		 uncore->fw_domains_active);
}

void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		return;

	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irq(&uncore->lock);

	assert_rpm_wakelock_held(uncore->rpm);

	fw_domains &= uncore->fw_domains;
	drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
		 "Expected %08x fw_domains to be active, but %08x are off\n",
		 fw_domains, fw_domains & ~uncore->fw_domains_active);

	/*
	 * Check that the caller has an explicit wakeref and we don't mistake
	 * it for the auto wakeref.
	 */
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		unsigned int actual = READ_ONCE(domain->wake_count);
		unsigned int expect = 1;

		if (uncore->fw_domains_timer & domain->mask)
			expect++; /* pending automatic release */

		if (drm_WARN(&uncore->i915->drm, actual < expect,
			     "Expected domain %d to be held awake by caller, count=%d\n",
			     domain->id, actual))
			break;
	}

	spin_unlock_irq(&uncore->lock);
}

/*
 * We give fast paths for the really cool registers. The second range includes
 * media domains (and the GSC starting from Xe_LPM+)
 */
#define NEEDS_FORCE_WAKE(reg) ({ \
	u32 __reg = (reg); \
	__reg < 0x40000 || __reg >= 0x116000; \
})
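
/*
 * For example, 0x2030 (the render ring's RING_TAIL) lands in the first
 * range and goes through the forcewake table lookup, while offsets in the
 * 0x40000 - 0x115fff window (display registers, among others) never do.
 */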

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({ \
	unsigned int start__ = 0, end__ = (num); \
	typeof(base) result__ = NULL; \
	while (start__ < end__) { \
		unsigned int mid__ = start__ + (end__ - start__) / 2; \
		int ret__ = (cmp)((key), (base) + mid__); \
		if (ret__ < 0) { \
			end__ = mid__; \
		} else if (ret__ > 0) { \
			start__ = mid__ + 1; \
		} else { \
			result__ = (base) + mid__; \
			break; \
		} \
	} \
	result__; \
})

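/*
 * find_fw_domain() below runs BSEARCH() with fw_range_cmp() over the
 * per-platform range table; this only works because the tables are kept
 * sorted by offset, as required by the notes further below.
 */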
static enum forcewake_domains
find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
	const struct intel_forcewake_range *entry;

	if (IS_GSI_REG(offset))
		offset += uncore->gsi_offset;

	entry = BSEARCH(offset,
			uncore->fw_domains_table,
			uncore->fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	/*
	 * The list of FW domains depends on the SKU in gen11+ so we
	 * can't determine it statically. We use FORCEWAKE_ALL and
	 * translate it here to the list of available domains.
	 */
	if (entry->domains == FORCEWAKE_ALL)
		return uncore->fw_domains;

	drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
		 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
		 entry->domains & ~uncore->fw_domains, offset);

	return entry->domains;
}

/*
 * Shadowed register tables describe special register ranges that i915 is
 * allowed to write to without acquiring forcewake. If these registers' power
 * wells are down, the hardware will save values written by i915 to a shadow
 * copy and automatically transfer them into the real register the next time
 * the power well is woken up. Shadowing only applies to writes; forcewake
 * must still be acquired when reading from registers in these ranges.
 *
 * The documentation for shadowed registers is somewhat spotty on older
 * platforms. However missing registers from these lists is non-fatal; it just
 * means we'll wake up the hardware for some register accesses where we didn't
 * really need to.
 *
 * The ranges listed in these tables must be sorted by offset.
 *
 * When adding new tables here, please also add them to
 * intel_shadow_table_check() in selftests/intel_uncore.c so that they will be
 * scanned for obvious mistakes or typos by the selftests.
 */

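/*
 * For example, 0x2030 (the render ring's RING_TAIL) appears in each of the
 * primary-GT tables below: a tail-pointer write posted while the render well
 * is asleep is latched by the hardware and replayed on wakeup, so the
 * submission path need not wake the well just to kick the ring.
 */
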
static const struct i915_range gen8_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0x12030, .end = 0x12030 },
	{ .start = 0x1a030, .end = 0x1a030 },
	{ .start = 0x22030, .end = 0x22030 },
};

static const struct i915_range gen11_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0x2550, .end = 0x2550 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0x22030, .end = 0x22030 },
	{ .start = 0x22230, .end = 0x22230 },
	{ .start = 0x22510, .end = 0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0230, .end = 0x1C0230 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4230, .end = 0x1C4230 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8230, .end = 0x1C8230 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0230, .end = 0x1D0230 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4230, .end = 0x1D4230 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8230, .end = 0x1D8230 },
	{ .start = 0x1D8510, .end = 0x1D8550 },
};

static const struct i915_range gen12_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0x2510, .end = 0x2550 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0xA188, .end = 0xA188 },
	{ .start = 0xA278, .end = 0xA278 },
	{ .start = 0xA540, .end = 0xA56C },
	{ .start = 0xC4C8, .end = 0xC4C8 },
	{ .start = 0xC4D4, .end = 0xC4D4 },
	{ .start = 0xC600, .end = 0xC600 },
	{ .start = 0x22030, .end = 0x22030 },
	{ .start = 0x22510, .end = 0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8510, .end = 0x1D8550 },

	/*
	 * The rest of these ranges are specific to Xe_HP and beyond, but
	 * are reserved/unused ranges on earlier gen12 platforms, so they can
	 * be safely added to the gen12 table.
	 */
	{ .start = 0x1E0030, .end = 0x1E0030 },
	{ .start = 0x1E0510, .end = 0x1E0550 },
	{ .start = 0x1E4030, .end = 0x1E4030 },
	{ .start = 0x1E4510, .end = 0x1E4550 },
	{ .start = 0x1E8030, .end = 0x1E8030 },
	{ .start = 0x1E8510, .end = 0x1E8550 },
	{ .start = 0x1F0030, .end = 0x1F0030 },
	{ .start = 0x1F0510, .end = 0x1F0550 },
	{ .start = 0x1F4030, .end = 0x1F4030 },
	{ .start = 0x1F4510, .end = 0x1F4550 },
	{ .start = 0x1F8030, .end = 0x1F8030 },
	{ .start = 0x1F8510, .end = 0x1F8550 },
};

static const struct i915_range dg2_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0x2510, .end = 0x2550 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0xA188, .end = 0xA188 },
	{ .start = 0xA278, .end = 0xA278 },
	{ .start = 0xA540, .end = 0xA56C },
	{ .start = 0xC4C8, .end = 0xC4C8 },
	{ .start = 0xC4E0, .end = 0xC4E0 },
	{ .start = 0xC600, .end = 0xC600 },
	{ .start = 0xC658, .end = 0xC658 },
	{ .start = 0x22030, .end = 0x22030 },
	{ .start = 0x22510, .end = 0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8510, .end = 0x1D8550 },
	{ .start = 0x1E0030, .end = 0x1E0030 },
	{ .start = 0x1E0510, .end = 0x1E0550 },
	{ .start = 0x1E4030, .end = 0x1E4030 },
	{ .start = 0x1E4510, .end = 0x1E4550 },
	{ .start = 0x1E8030, .end = 0x1E8030 },
	{ .start = 0x1E8510, .end = 0x1E8550 },
	{ .start = 0x1F0030, .end = 0x1F0030 },
	{ .start = 0x1F0510, .end = 0x1F0550 },
	{ .start = 0x1F4030, .end = 0x1F4030 },
	{ .start = 0x1F4510, .end = 0x1F4550 },
	{ .start = 0x1F8030, .end = 0x1F8030 },
	{ .start = 0x1F8510, .end = 0x1F8550 },
};

static const struct i915_range pvc_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0x2510, .end = 0x2550 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0xA188, .end = 0xA188 },
	{ .start = 0xA278, .end = 0xA278 },
	{ .start = 0xA540, .end = 0xA56C },
	{ .start = 0xC4C8, .end = 0xC4C8 },
	{ .start = 0xC4E0, .end = 0xC4E0 },
	{ .start = 0xC600, .end = 0xC600 },
	{ .start = 0xC658, .end = 0xC658 },
	{ .start = 0x22030, .end = 0x22030 },
	{ .start = 0x22510, .end = 0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8510, .end = 0x1D8550 },
	{ .start = 0x1E0030, .end = 0x1E0030 },
	{ .start = 0x1E0510, .end = 0x1E0550 },
	{ .start = 0x1E4030, .end = 0x1E4030 },
	{ .start = 0x1E4510, .end = 0x1E4550 },
	{ .start = 0x1E8030, .end = 0x1E8030 },
	{ .start = 0x1E8510, .end = 0x1E8550 },
	{ .start = 0x1F0030, .end = 0x1F0030 },
	{ .start = 0x1F0510, .end = 0x1F0550 },
	{ .start = 0x1F4030, .end = 0x1F4030 },
	{ .start = 0x1F4510, .end = 0x1F4550 },
	{ .start = 0x1F8030, .end = 0x1F8030 },
	{ .start = 0x1F8510, .end = 0x1F8550 },
};

static const struct i915_range mtl_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0x2510, .end = 0x2550 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0xA188, .end = 0xA188 },
	{ .start = 0xA278, .end = 0xA278 },
	{ .start = 0xA540, .end = 0xA56C },
	{ .start = 0xC050, .end = 0xC050 },
	{ .start = 0xC340, .end = 0xC340 },
	{ .start = 0xC4C8, .end = 0xC4C8 },
	{ .start = 0xC4E0, .end = 0xC4E0 },
	{ .start = 0xC600, .end = 0xC600 },
	{ .start = 0xC658, .end = 0xC658 },
	{ .start = 0xCFD4, .end = 0xCFDC },
	{ .start = 0x22030, .end = 0x22030 },
	{ .start = 0x22510, .end = 0x22550 },
};

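/*
 * Note how most of the Xe_LPM+ (media GT) entries below mirror the MTL
 * primary-GT entries above, shifted by the 0x380000 GSI offset: e.g.
 * 0xC050 above becomes 0x38C050 below.
 */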
static const struct i915_range xelpmp_shadowed_regs[] = {
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x38A008, .end = 0x38A00C },
	{ .start = 0x38A188, .end = 0x38A188 },
	{ .start = 0x38A278, .end = 0x38A278 },
	{ .start = 0x38A540, .end = 0x38A56C },
	{ .start = 0x38A618, .end = 0x38A618 },
	{ .start = 0x38C050, .end = 0x38C050 },
	{ .start = 0x38C340, .end = 0x38C340 },
	{ .start = 0x38C4C8, .end = 0x38C4C8 },
	{ .start = 0x38C4E0, .end = 0x38C4E4 },
	{ .start = 0x38C600, .end = 0x38C600 },
	{ .start = 0x38C658, .end = 0x38C658 },
	{ .start = 0x38CFD4, .end = 0x38CFDC },
};

static int mmio_range_cmp(u32 key, const struct i915_range *range)
{
	if (key < range->start)
		return -1;
	else if (key > range->end)
		return 1;
	else
		return 0;
}

static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
{
	if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
		return false;

	if (IS_GSI_REG(offset))
		offset += uncore->gsi_offset;

	return BSEARCH(offset,
		       uncore->shadowed_reg_table,
		       uncore->shadowed_reg_table_entries,
		       mmio_range_cmp);
}

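/*
 * Before gen8 there is only a single forcewake well, so any write that
 * needs forcewake at all needs FORCEWAKE_RENDER.
 */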
static enum forcewake_domains
gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{
	return FORCEWAKE_RENDER;
}

#define __fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	const u32 __offset = (offset); \
	if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \
		__fwd = find_fw_domain(uncore, __offset); \
	__fwd; \
})
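
/*
 * Note the asymmetry between the two lookups above: writes may skip
 * forcewake entirely when the register is shadowed, whereas reads always
 * need the power well up, so the read path never consults the shadow table.
 */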

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

/*
 * All platforms' forcewake tables below must be sorted by offset ranges.
 * Furthermore, new forcewake tables added should be "watertight" and have
 * no gaps between ranges.
 *
 * When there are multiple consecutive ranges listed in the bspec with
 * the same forcewake domain, it is customary to combine them into a single
 * row in the tables below to keep the tables small and lookups fast.
 * Likewise, reserved/unused ranges may be combined with the preceding and/or
 * following ranges since the driver will never be making MMIO accesses in
 * those ranges.
 *
 * For example, if the bspec were to list:
 *
 *    ...
 *    0x1000 - 0x1fff: GT
 *    0x2000 - 0x2cff: GT
 *    0x2d00 - 0x2fff: unused/reserved
 *    0x3000 - 0xffff: GT
 *    ...
 *
 * these could all be represented by a single line in the code:
 *
 *    GEN_FW_RANGE(0x1000, 0xffff, FORCEWAKE_GT)
 *
 * When adding new forcewake tables here, please also add them to
 * intel_uncore_mock_selftests in selftests/intel_uncore.c so that they will be
 * scanned for obvious mistakes or typos by the selftests.
 */

static const struct intel_forcewake_range __gen6_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x3ffff, FORCEWAKE_RENDER),
};

static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

static const struct intel_forcewake_range __gen11_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8800, 0x8bff, 0),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_GT),
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x95ff, 0),
	GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2407f, 0),
	GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
	GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
};

static const struct intel_forcewake_range __gen12_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x1fff, 0), /*
		0x0 - 0xaff: reserved
		0xb00 - 0x1fff: always on */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x27ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x2800, 0x2aff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2b00, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
		0x4000 - 0x48ff: gt
		0x4900 - 0x51ff: reserved */
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
		0x5200 - 0x53ff: render
		0x5400 - 0x54ff: reserved
		0x5500 - 0x7fff: render */
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
		0x8160 - 0x817f: reserved
		0x8180 - 0x81ff: always on */
	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x94cf, FORCEWAKE_GT), /*
		0x8500 - 0x87ff: gt
		0x8800 - 0x8fff: reserved
		0x9000 - 0x947f: gt
		0x9480 - 0x94cf: reserved */
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x97ff, 0), /*
		0x9560 - 0x95ff: always on
		0x9600 - 0x97ff: reserved */
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb3ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb400, 0xcfff, FORCEWAKE_GT), /*
		0xb400 - 0xbf7f: gt
		0xb480 - 0xbfff: reserved
		0xc000 - 0xcfff: gt */
	GEN_FW_RANGE(0xd000, 0xd7ff, 0),
	GEN_FW_RANGE(0xd800, 0xd8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd900, 0xdbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdc00, 0xefff, FORCEWAKE_RENDER), /*
		0xdc00 - 0xddff: render
		0xde00 - 0xde7f: reserved
		0xde80 - 0xe8ff: render
		0xe900 - 0xefff: reserved */
	GEN_FW_RANGE(0xf000, 0x147ff, FORCEWAKE_GT), /*
		0xf000 - 0xffff: gt
		0x10000 - 0x147ff: reserved */
	GEN_FW_RANGE(0x14800, 0x1ffff, FORCEWAKE_RENDER), /*
		0x14800 - 0x14fff: render
		0x15000 - 0x16dff: reserved
		0x16e00 - 0x1bfff: render
		0x1c000 - 0x1ffff: reserved */
	GEN_FW_RANGE(0x20000, 0x20fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x21000, 0x21fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
		0x24000 - 0x2407f: always on
		0x24080 - 0x2417f: reserved */
	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
		0x24180 - 0x241ff: gt
		0x24200 - 0x249ff: reserved */
	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
		0x24a00 - 0x24a7f: render
		0x24a80 - 0x251ff: reserved */
	GEN_FW_RANGE(0x25200, 0x255ff, FORCEWAKE_GT), /*
		0x25200 - 0x252ff: gt
		0x25300 - 0x255ff: reserved */
	GEN_FW_RANGE(0x25600, 0x2567f, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x25680, 0x259ff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x25680 - 0x256ff: VD2
		0x25700 - 0x259ff: reserved */
	GEN_FW_RANGE(0x25a00, 0x25a7f, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x25a80, 0x2ffff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x25a80 - 0x25aff: VD2
		0x25b00 - 0x2ffff: reserved */
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1c0000 - 0x1c2bff: VD0
		0x1c2c00 - 0x1c2cff: reserved
		0x1c2d00 - 0x1c2dff: VD0
		0x1c2e00 - 0x1c3eff: reserved
		0x1c3f00 - 0x1c3fff: VD0 */
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
		0x1c8000 - 0x1ca0ff: VE0
		0x1ca100 - 0x1cbeff: reserved
		0x1cbf00 - 0x1cbfff: VE0 */
	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1cc000 - 0x1ccfff: VD0
		0x1cd000 - 0x1cffff: reserved */
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x1d0000 - 0x1d2bff: VD2
		0x1d2c00 - 0x1d2cff: reserved
		0x1d2d00 - 0x1d2dff: VD2
		0x1d2e00 - 0x1d3eff: reserved
		0x1d3f00 - 0x1d3fff: VD2 */
};

/*
 * Graphics IP version 12.55 brings a slight change to the 0xd800 range,
 * switching it from the GT domain to the render domain.
 */
#define XEHP_FWRANGES(FW_RANGE_D800) \
	GEN_FW_RANGE(0x0, 0x1fff, 0), /* \
		0x0 - 0xaff: reserved \
		0xb00 - 0x1fff: always on */ \
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x4b00, 0x51ff, 0), /* \
		0x4b00 - 0x4fff: reserved \
		0x5000 - 0x51ff: always on */ \
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x8160, 0x81ff, 0), /* \
		0x8160 - 0x817f: reserved \
		0x8180 - 0x81ff: always on */ \
	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /* \
		0x8500 - 0x87ff: gt \
		0x8800 - 0x8c7f: reserved \
		0x8c80 - 0x8cff: gt (DG2 only) */ \
	GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /* \
		0x8d00 - 0x8dff: render (DG2 only) \
		0x8e00 - 0x8fff: reserved */ \
	GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /* \
		0x9000 - 0x947f: gt \
		0x9480 - 0x94cf: reserved */ \
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x9560, 0x967f, 0), /* \
		0x9560 - 0x95ff: always on \
		0x9600 - 0x967f: reserved */ \
	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /* \
		0x9680 - 0x96ff: render (DG2 only) \
		0x9700 - 0x97ff: reserved */ \
	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /* \
		0x9800 - 0xb4ff: gt \
		0xb500 - 0xbfff: reserved \
		0xc000 - 0xcfff: gt */ \
	GEN_FW_RANGE(0xd000, 0xd7ff, 0), \
	GEN_FW_RANGE(0xd800, 0xd87f, FW_RANGE_D800), \
	GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /* \
		0xdd00 - 0xddff: gt \
		0xde00 - 0xde7f: reserved */ \
	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /* \
		0xde80 - 0xdfff: render \
		0xe000 - 0xe0ff: reserved \
		0xe100 - 0xe8ff: render */ \
	GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /* \
		0xe900 - 0xe9ff: gt \
		0xea00 - 0xefff: reserved \
		0xf000 - 0xffff: gt */ \
	GEN_FW_RANGE(0x10000, 0x12fff, 0), /* \
		0x10000 - 0x11fff: reserved \
		0x12000 - 0x127ff: always on \
		0x12800 - 0x12fff: reserved */ \
	GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0), /* DG2 only */ \
	GEN_FW_RANGE(0x13200, 0x13fff, FORCEWAKE_MEDIA_VDBOX2), /* \
		0x13200 - 0x133ff: VD2 (DG2 only) \
		0x13400 - 0x13fff: reserved */ \
	GEN_FW_RANGE(0x14000, 0x141ff, FORCEWAKE_MEDIA_VDBOX0), /* XEHPSDV only */ \
	GEN_FW_RANGE(0x14200, 0x143ff, FORCEWAKE_MEDIA_VDBOX2), /* XEHPSDV only */ \
	GEN_FW_RANGE(0x14400, 0x145ff, FORCEWAKE_MEDIA_VDBOX4), /* XEHPSDV only */ \
	GEN_FW_RANGE(0x14600, 0x147ff, FORCEWAKE_MEDIA_VDBOX6), /* XEHPSDV only */ \
	GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /* \
		0x15000 - 0x15fff: gt (DG2 only) \
		0x16000 - 0x16dff: reserved */ \
	GEN_FW_RANGE(0x16e00, 0x1ffff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x20000, 0x21fff, FORCEWAKE_MEDIA_VDBOX0), /* \
		0x20000 - 0x20fff: VD0 (XEHPSDV only) \
		0x21000 - 0x21fff: reserved */ \
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x24000, 0x2417f, 0), /* \
		0x24000 - 0x2407f: always on \
		0x24080 - 0x2417f: reserved */ \
	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /* \
		0x24180 - 0x241ff: gt \
		0x24200 - 0x249ff: reserved */ \
	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /* \
		0x24a00 - 0x24a7f: render \
		0x24a80 - 0x251ff: reserved */ \
	GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /* \
		0x25200 - 0x252ff: gt \
		0x25300 - 0x25fff: reserved */ \
	GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /* \
		0x26000 - 0x27fff: render \
		0x28000 - 0x29fff: reserved \
		0x2a000 - 0x2ffff: undocumented */ \
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x40000, 0x1bffff, 0), \
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /* \
		0x1c0000 - 0x1c2bff: VD0 \
		0x1c2c00 - 0x1c2cff: reserved \
		0x1c2d00 - 0x1c2dff: VD0 \
		0x1c2e00 - 0x1c3eff: VD0 (DG2 only) \
		0x1c3f00 - 0x1c3fff: VD0 */ \
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /* \
		0x1c4000 - 0x1c6bff: VD1 \
		0x1c6c00 - 0x1c6cff: reserved \
		0x1c6d00 - 0x1c6dff: VD1 \
		0x1c6e00 - 0x1c7fff: reserved */ \
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /* \
		0x1c8000 - 0x1ca0ff: VE0 \
		0x1ca100 - 0x1cbfff: reserved */ \
	GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0), \
	GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2), \
	GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4), \
	GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6), \
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /* \
		0x1d0000 - 0x1d2bff: VD2 \
		0x1d2c00 - 0x1d2cff: reserved \
		0x1d2d00 - 0x1d2dff: VD2 \
		0x1d2e00 - 0x1d3dff: VD2 (DG2 only) \
		0x1d3e00 - 0x1d3eff: reserved \
		0x1d3f00 - 0x1d3fff: VD2 */ \
	GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /* \
		0x1d4000 - 0x1d6bff: VD3 \
		0x1d6c00 - 0x1d6cff: reserved \
		0x1d6d00 - 0x1d6dff: VD3 \
		0x1d6e00 - 0x1d7fff: reserved */ \
	GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /* \
		0x1d8000 - 0x1da0ff: VE1 \
		0x1da100 - 0x1dffff: reserved */ \
	GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /* \
		0x1e0000 - 0x1e2bff: VD4 \
		0x1e2c00 - 0x1e2cff: reserved \
		0x1e2d00 - 0x1e2dff: VD4 \
		0x1e2e00 - 0x1e3eff: reserved \
		0x1e3f00 - 0x1e3fff: VD4 */ \
	GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /* \
		0x1e4000 - 0x1e6bff: VD5 \
		0x1e6c00 - 0x1e6cff: reserved \
		0x1e6d00 - 0x1e6dff: VD5 \
		0x1e6e00 - 0x1e7fff: reserved */ \
	GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /* \
		0x1e8000 - 0x1ea0ff: VE2 \
		0x1ea100 - 0x1effff: reserved */ \
	GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /* \
		0x1f0000 - 0x1f2bff: VD6 \
		0x1f2c00 - 0x1f2cff: reserved \
		0x1f2d00 - 0x1f2dff: VD6 \
		0x1f2e00 - 0x1f3eff: reserved \
		0x1f3f00 - 0x1f3fff: VD6 */ \
	GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /* \
		0x1f4000 - 0x1f6bff: VD7 \
		0x1f6c00 - 0x1f6cff: reserved \
		0x1f6d00 - 0x1f6dff: VD7 \
		0x1f6e00 - 0x1f7fff: reserved */ \
	GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),

static const struct intel_forcewake_range __xehp_fw_ranges[] = {
	XEHP_FWRANGES(FORCEWAKE_GT)
};

static const struct intel_forcewake_range __dg2_fw_ranges[] = {
	XEHP_FWRANGES(FORCEWAKE_RENDER)
};
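
/*
 * The only difference between the two instantiations above is the owner of
 * the 0xd800 - 0xd87f range: GT on XEHPSDV, render on DG2 (GFX IP 12.55+),
 * per the comment above XEHP_FWRANGES().
 */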

static const struct intel_forcewake_range __pvc_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, 0),
	GEN_FW_RANGE(0xb00, 0xbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xc00, 0xfff, 0),
	GEN_FW_RANGE(0x1000, 0x1fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x813f, FORCEWAKE_GT), /*
		0x4000 - 0x4aff: gt
		0x4b00 - 0x4fff: reserved
		0x5000 - 0x51ff: gt
		0x5200 - 0x52ff: reserved
		0x5300 - 0x53ff: gt
		0x5400 - 0x7fff: reserved
		0x8000 - 0x813f: gt */
	GEN_FW_RANGE(0x8140, 0x817f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8180, 0x81ff, 0),
	GEN_FW_RANGE(0x8200, 0x94cf, FORCEWAKE_GT), /*
		0x8200 - 0x82ff: gt
		0x8300 - 0x84ff: reserved
		0x8500 - 0x887f: gt
		0x8880 - 0x8a7f: reserved
		0x8a80 - 0x8aff: gt
		0x8b00 - 0x8fff: reserved
		0x9000 - 0x947f: gt
		0x9480 - 0x94cf: reserved */
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x967f, 0), /*
		0x9560 - 0x95ff: always on
		0x9600 - 0x967f: reserved */
	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
		0x9680 - 0x96ff: render
		0x9700 - 0x97ff: reserved */
	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
		0x9800 - 0xb4ff: gt
		0xb500 - 0xbfff: reserved
		0xc000 - 0xcfff: gt */
	GEN_FW_RANGE(0xd000, 0xd3ff, 0),
	GEN_FW_RANGE(0xd400, 0xdbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
		0xdd00 - 0xddff: gt
		0xde00 - 0xde7f: reserved */
	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
		0xde80 - 0xdeff: render
		0xdf00 - 0xe1ff: reserved
		0xe200 - 0xe7ff: render
		0xe800 - 0xe8ff: reserved */
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT), /*
		0xe900 - 0xe9ff: gt
		0xea00 - 0xebff: reserved
		0xec00 - 0xffff: gt
		0x10000 - 0x11fff: reserved */
	GEN_FW_RANGE(0x12000, 0x12fff, 0), /*
		0x12000 - 0x127ff: always on
		0x12800 - 0x12fff: reserved */
	GEN_FW_RANGE(0x13000, 0x19fff, FORCEWAKE_GT), /*
		0x13000 - 0x135ff: gt
		0x13600 - 0x147ff: reserved
		0x14800 - 0x153ff: gt
		0x15400 - 0x19fff: reserved */
	GEN_FW_RANGE(0x1a000, 0x21fff, FORCEWAKE_RENDER), /*
		0x1a000 - 0x1ffff: render
		0x20000 - 0x21fff: reserved */
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
		0x24000 - 0x2407f: always on
		0x24080 - 0x2417f: reserved */
	GEN_FW_RANGE(0x24180, 0x25fff, FORCEWAKE_GT), /*
		0x24180 - 0x241ff: gt
		0x24200 - 0x251ff: reserved
		0x25200 - 0x252ff: gt
		0x25300 - 0x25fff: reserved */
	GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /*
		0x26000 - 0x27fff: render
		0x28000 - 0x2ffff: reserved */
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1c0000 - 0x1c2bff: VD0
		0x1c2c00 - 0x1c2cff: reserved
		0x1c2d00 - 0x1c2dff: VD0
		0x1c2e00 - 0x1c3eff: reserved
		0x1c3f00 - 0x1c3fff: VD0 */
	GEN_FW_RANGE(0x1c4000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX1), /*
		0x1c4000 - 0x1c6aff: VD1
		0x1c6b00 - 0x1c7eff: reserved
		0x1c7f00 - 0x1c7fff: VD1
		0x1c8000 - 0x1cffff: reserved */
	GEN_FW_RANGE(0x1d0000, 0x23ffff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x1d0000 - 0x1d2aff: VD2
		0x1d2b00 - 0x1d3eff: reserved
		0x1d3f00 - 0x1d3fff: VD2
		0x1d4000 - 0x23ffff: reserved */
	GEN_FW_RANGE(0x240000, 0x3dffff, 0),
	GEN_FW_RANGE(0x3e0000, 0x3effff, FORCEWAKE_GT),
};

static const struct intel_forcewake_range __mtl_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, 0),
	GEN_FW_RANGE(0xb00, 0xbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xc00, 0xfff, 0),
	GEN_FW_RANGE(0x1000, 0x1fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
		0x4000 - 0x48ff: gt
		0x4900 - 0x51ff: reserved */
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
		0x5200 - 0x53ff: render
		0x5400 - 0x54ff: reserved
		0x5500 - 0x7fff: render */
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8140, 0x817f, FORCEWAKE_RENDER), /*
		0x8140 - 0x815f: render
		0x8160 - 0x817f: reserved */
	GEN_FW_RANGE(0x8180, 0x81ff, 0),
	GEN_FW_RANGE(0x8200, 0x94cf, FORCEWAKE_GT), /*
		0x8200 - 0x87ff: gt
		0x8800 - 0x8dff: reserved
		0x8e00 - 0x8f7f: gt
		0x8f80 - 0x8fff: reserved
		0x9000 - 0x947f: gt
		0x9480 - 0x94cf: reserved */
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x967f, 0), /*
		0x9560 - 0x95ff: always on
		0x9600 - 0x967f: reserved */
	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
		0x9680 - 0x96ff: render
		0x9700 - 0x97ff: reserved */
	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
		0x9800 - 0xb4ff: gt
		0xb500 - 0xbfff: reserved
		0xc000 - 0xcfff: gt */
	GEN_FW_RANGE(0xd000, 0xd7ff, 0), /*
		0xd000 - 0xd3ff: always on
		0xd400 - 0xd7ff: reserved */
	GEN_FW_RANGE(0xd800, 0xd87f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
		0xdd00 - 0xddff: gt
		0xde00 - 0xde7f: reserved */
	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
		0xde80 - 0xdfff: render
		0xe000 - 0xe0ff: reserved
		0xe100 - 0xe8ff: render */
	GEN_FW_RANGE(0xe900, 0xe9ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xea00, 0x147ff, 0), /*
		0xea00 - 0x11fff: reserved
		0x12000 - 0x127ff: always on
		0x12800 - 0x147ff: reserved */
	GEN_FW_RANGE(0x14800, 0x19fff, FORCEWAKE_GT), /*
		0x14800 - 0x153ff: gt
		0x15400 - 0x19fff: reserved */
	GEN_FW_RANGE(0x1a000, 0x21fff, FORCEWAKE_RENDER), /*
		0x1a000 - 0x1bfff: render
		0x1c000 - 0x21fff: reserved */
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2ffff, 0), /*
		0x24000 - 0x2407f: always on
		0x24080 - 0x2ffff: reserved */
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT)
};

/*
 * Note that the register ranges here are the final offsets after
 * translation of the GSI block to the 0x380000 offset.
 *
 * NOTE: There are a couple MCR ranges near the bottom of this table
 * that need to power up either VD0 or VD2 depending on which replicated
 * instance of the register we're trying to access. Our forcewake logic
 * at the moment doesn't have a good way to take steering into consideration,
 * and the driver doesn't even access any registers in those ranges today,
 * so for now we just mark those ranges as FORCEWAKE_ALL. That will ensure
 * proper operation if we do start using the ranges in the future, and we
 * can determine at that time whether it's worth adding extra complexity to
 * the forcewake handling to take steering into consideration.
 */
static const struct intel_forcewake_range __xelpmp_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x115fff, 0), /* render GT range */
	GEN_FW_RANGE(0x116000, 0x11ffff, FORCEWAKE_GSC), /*
		0x116000 - 0x117fff: gsc
		0x118000 - 0x119fff: reserved
		0x11a000 - 0x11efff: gsc
		0x11f000 - 0x11ffff: reserved */
	GEN_FW_RANGE(0x120000, 0x1bffff, 0), /* non-GT range */
	GEN_FW_RANGE(0x1c0000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1c0000 - 0x1c3dff: VD0
		0x1c3e00 - 0x1c3eff: reserved
		0x1c3f00 - 0x1c3fff: VD0
		0x1c4000 - 0x1c7fff: reserved */
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
		0x1c8000 - 0x1ca0ff: VE0
		0x1ca100 - 0x1cbfff: reserved */
	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1cc000 - 0x1cdfff: VD0
		0x1ce000 - 0x1cffff: reserved */
	GEN_FW_RANGE(0x1d0000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x1d0000 - 0x1d3dff: VD2
		0x1d3e00 - 0x1d3eff: reserved
		0x1d4000 - 0x1d7fff: VD2 */
	GEN_FW_RANGE(0x1d8000, 0x1da0ff, FORCEWAKE_MEDIA_VEBOX1),
	GEN_FW_RANGE(0x1da100, 0x380aff, 0), /*
		0x1da100 - 0x23ffff: reserved
		0x240000 - 0x37ffff: non-GT range
		0x380000 - 0x380aff: reserved */
	GEN_FW_RANGE(0x380b00, 0x380bff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x380c00, 0x380fff, 0),
	GEN_FW_RANGE(0x381000, 0x38817f, FORCEWAKE_GT), /*
		0x381000 - 0x381fff: gt
		0x382000 - 0x383fff: reserved
		0x384000 - 0x384aff: gt
		0x384b00 - 0x3851ff: reserved
		0x385200 - 0x3871ff: gt
		0x387200 - 0x387fff: reserved
		0x388000 - 0x38813f: gt
		0x388140 - 0x38817f: reserved */
	GEN_FW_RANGE(0x388180, 0x3882ff, 0), /*
		0x388180 - 0x3881ff: always on
		0x388200 - 0x3882ff: reserved */
	GEN_FW_RANGE(0x388300, 0x38955f, FORCEWAKE_GT), /*
		0x388300 - 0x38887f: gt
		0x388880 - 0x388fff: reserved
		0x389000 - 0x38947f: gt
		0x389480 - 0x38955f: reserved */
	GEN_FW_RANGE(0x389560, 0x389fff, 0), /*
		0x389560 - 0x3895ff: always on
		0x389600 - 0x389fff: reserved */
	GEN_FW_RANGE(0x38a000, 0x38cfff, FORCEWAKE_GT), /*
		0x38a000 - 0x38afff: gt
		0x38b000 - 0x38bfff: reserved
		0x38c000 - 0x38cfff: gt */
	GEN_FW_RANGE(0x38d000, 0x38d11f, 0),
	GEN_FW_RANGE(0x38d120, 0x391fff, FORCEWAKE_GT), /*
		0x38d120 - 0x38dfff: gt
		0x38e000 - 0x38efff: reserved
		0x38f000 - 0x38ffff: gt
		0x390000 - 0x391fff: reserved */
	GEN_FW_RANGE(0x392000, 0x392fff, 0), /*
		0x392000 - 0x3927ff: always on
		0x392800 - 0x392fff: reserved */
	GEN_FW_RANGE(0x393000, 0x3931ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x393200, 0x39323f, FORCEWAKE_ALL), /* instance-based, see note above */
	GEN_FW_RANGE(0x393240, 0x3933ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x393400, 0x3934ff, FORCEWAKE_ALL), /* instance-based, see note above */
	GEN_FW_RANGE(0x393500, 0x393c7f, 0), /*
		0x393500 - 0x393bff: reserved
		0x393c00 - 0x393c7f: always on */
	GEN_FW_RANGE(0x393c80, 0x393dff, FORCEWAKE_GT),
};
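
/*
 * For illustration only: the range tables above are sorted by ascending
 * offset, so resolving an MMIO offset to its forcewake domains can be a
 * plain binary search. A minimal sketch, assuming a hypothetical helper
 * (the driver's real lookup lives elsewhere in this file):
 *
 *	static enum forcewake_domains
 *	fw_range_lookup(const struct intel_forcewake_range *ranges,
 *			unsigned int count, u32 offset)
 *	{
 *		unsigned int lo = 0, hi = count;
 *
 *		while (lo < hi) {
 *			unsigned int mid = lo + (hi - lo) / 2;
 *
 *			if (offset < ranges[mid].start)
 *				hi = mid;
 *			else if (offset > ranges[mid].end)
 *				lo = mid + 1;
 *			else
 *				return ranges[mid].domains;
 *		}
 *
 *		return 0;
 *	}
 */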

static void
ilk_dummy_write(struct intel_uncore *uncore)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_uncore_write32(uncore, RING_MI_MODE(RENDER_RING_BASE), 0);
}

static void
__unclaimed_reg_debug(struct intel_uncore *uncore,
		      const i915_reg_t reg,
		      const bool read)
{
	if (drm_WARN(&uncore->i915->drm,
		     check_for_unclaimed_mmio(uncore),
		     "Unclaimed %s register 0x%x\n",
		     read ? "read from" : "write to",
		     i915_mmio_reg_offset(reg)))
		/* Only report the first N failures */
		uncore->i915->params.mmio_debug--;
}

static void
__unclaimed_previous_reg_debug(struct intel_uncore *uncore,
			       const i915_reg_t reg,
			       const bool read)
{
	if (check_for_unclaimed_mmio(uncore))
		drm_dbg(&uncore->i915->drm,
			"Unclaimed access detected before %s register 0x%x\n",
			read ? "read from" : "write to",
			i915_mmio_reg_offset(reg));
}

static inline void
unclaimed_reg_debug(struct intel_uncore *uncore,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!uncore->i915->params.mmio_debug) || !uncore->debug)
		return;

	/* interrupts are disabled and re-enabled around uncore->lock usage */
	lockdep_assert_held(&uncore->lock);

	if (before) {
		spin_lock(&uncore->debug->lock);
		__unclaimed_previous_reg_debug(uncore, reg, read);
	} else {
		__unclaimed_reg_debug(uncore, reg, read);
		spin_unlock(&uncore->debug->lock);
	}
}

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	u##x val = __raw_uncore_read##x(uncore, reg); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val; \
}
__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(uncore); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(uncore, reg, true, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static noinline void ___force_wake_auto(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
		fw_domain_arm_timer(domain);

	fw_domains_get(uncore, fw_domains);
}

static inline void __force_wake_auto(struct intel_uncore *uncore,
				     enum forcewake_domains fw_domains)
{
	GEM_BUG_ON(!fw_domains);

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= uncore->fw_domains;
	fw_domains &= ~uncore->fw_domains_active;
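	/* e.g. RENDER|GT requested while GT is already active -> wake only RENDER */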

	if (fw_domains)
		___force_wake_auto(uncore, fw_domains);
}

#define __gen_fwtable_read(x) \
static u##x \
fwtable_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) \
{ \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __fwtable_reg_read_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN6_READ_FOOTER; \
}

static enum forcewake_domains
fwtable_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{
	return __fwtable_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg));
}

__gen_fwtable_read(8)
__gen_fwtable_read(16)
__gen_fwtable_read(32)
__gen_fwtable_read(64)
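
/*
 * For reference, a rough sketch of what fwtable_read32() above expands to
 * once GEN6_READ_HEADER/GEN6_READ_FOOTER are substituted (illustrative
 * only, not compiled):
 *
 *	static u32
 *	fwtable_read32(struct intel_uncore *uncore, i915_reg_t reg, bool trace)
 *	{
 *		enum forcewake_domains fw_engine;
 *		u32 offset = i915_mmio_reg_offset(reg);
 *		unsigned long irqflags;
 *		u32 val = 0;
 *
 *		assert_rpm_wakelock_held(uncore->rpm);
 *		spin_lock_irqsave(&uncore->lock, irqflags);
 *		unclaimed_reg_debug(uncore, reg, true, true);
 *		fw_engine = __fwtable_reg_read_fw_domains(uncore, offset);
 *		if (fw_engine)
 *			__force_wake_auto(uncore, fw_engine);
 *		val = __raw_uncore_read32(uncore, reg);
 *		unclaimed_reg_debug(uncore, reg, true, false);
 *		spin_unlock_irqrestore(&uncore->lock, irqflags);
 *		trace_i915_reg_rw(false, reg, val, sizeof(val), trace);
 *		return val;
 *	}
 */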

#undef __gen_fwtable_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(uncore, reg, false, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#define __gen_fwtable_write(x) \
static void \
fwtable_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}

static enum forcewake_domains
fwtable_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{
	return __fwtable_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg));
}

__gen_fwtable_write(8)
__gen_fwtable_write(16)
__gen_fwtable_write(32)

#undef __gen_fwtable_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define __vgpu_write(x) \
static void \
vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	__raw_uncore_write##x(uncore, reg, val); \
}
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)

#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_writeb = x##_write8; \
	(uncore)->funcs.mmio_writew = x##_write16; \
	(uncore)->funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_readb = x##_read8; \
	(uncore)->funcs.mmio_readw = x##_read16; \
	(uncore)->funcs.mmio_readl = x##_read32; \
	(uncore)->funcs.mmio_readq = x##_read64; \
} while (0)

#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
	(uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_READ_MMIO_VFUNCS((uncore), x); \
	(uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
} while (0)

static int __fw_domain_init(struct intel_uncore *uncore,
			    enum forcewake_domain_id domain_id,
			    i915_reg_t reg_set,
			    i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
	GEM_BUG_ON(uncore->fw_domain[domain_id]);

	if (i915_inject_probe_failure(uncore->i915))
		return -ENOMEM;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));

	d->uncore = uncore;
	d->wake_count = 0;
	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set) + uncore->gsi_offset;
	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack) + uncore->gsi_offset;

	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_GT != (1 << FW_DOMAIN_ID_GT));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX4 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX4));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX5 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX5));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX6 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX6));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX7 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX7));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX3));
	BUILD_BUG_ON(FORCEWAKE_GSC != (1 << FW_DOMAIN_ID_GSC));

	d->mask = BIT(domain_id);

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	uncore->fw_domains |= BIT(domain_id);

	fw_domain_reset(d);

	uncore->fw_domain[domain_id] = d;

	return 0;
}

static void fw_domain_fini(struct intel_uncore *uncore,
			   enum forcewake_domain_id domain_id)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);

	d = fetch_and_zero(&uncore->fw_domain[domain_id]);
	if (!d)
		return;

	uncore->fw_domains &= ~BIT(domain_id);
	drm_WARN_ON(&uncore->i915->drm, d->wake_count);
	drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
	kfree(d);
}

static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
{
	struct intel_uncore_forcewake_domain *d;
	int tmp;

	for_each_fw_domain(d, uncore, tmp)
		fw_domain_fini(uncore, d->id);
}

static const struct intel_uncore_fw_get uncore_get_fallback = {
	.force_wake_get = fw_domains_get_with_fallback
};

static const struct intel_uncore_fw_get uncore_get_normal = {
	.force_wake_get = fw_domains_get_normal,
};

static const struct intel_uncore_fw_get uncore_get_thread_status = {
	.force_wake_get = fw_domains_get_with_thread_status
};

static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret = 0;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

#define fw_domain_init(uncore__, id__, set__, ack__) \
	(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
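	/*
	 * Note: the ?: chaining in fw_domain_init() above makes each call
	 * below a no-op once an earlier one has failed: a non-zero ret is
	 * returned unchanged, otherwise __fw_domain_init() runs and its
	 * result is latched into ret.
	 */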

	if (GRAPHICS_VER(i915) >= 11) {
		intel_engine_mask_t emask;
		int i;

		/* we'll prune the domains of missing engines later */
		emask = uncore->gt->info.engine_mask;

		uncore->fw_get_funcs = &uncore_get_fallback;
		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
			fw_domain_init(uncore, FW_DOMAIN_ID_GT,
				       FORCEWAKE_GT_GEN9,
				       FORCEWAKE_ACK_GT_MTL);
		else
			fw_domain_init(uncore, FW_DOMAIN_ID_GT,
				       FORCEWAKE_GT_GEN9,
				       FORCEWAKE_ACK_GT_GEN9);

		if (RCS_MASK(uncore->gt) || CCS_MASK(uncore->gt))
			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE_RENDER_GEN9,
				       FORCEWAKE_ACK_RENDER_GEN9);

		for (i = 0; i < I915_MAX_VCS; i++) {
			if (!__HAS_ENGINE(emask, _VCS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
		}
		for (i = 0; i < I915_MAX_VECS; i++) {
			if (!__HAS_ENGINE(emask, _VECS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
		}

		if (uncore->gt->type == GT_MEDIA)
			fw_domain_init(uncore, FW_DOMAIN_ID_GSC,
				       FORCEWAKE_REQ_GSC, FORCEWAKE_ACK_GSC);
	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
		uncore->fw_get_funcs = &uncore_get_fallback;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
			       FORCEWAKE_GT_GEN9,
			       FORCEWAKE_ACK_GT_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		uncore->fw_get_funcs = &uncore_get_normal;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		uncore->fw_get_funcs = &uncore_get_thread_status;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(i915)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero, which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		uncore->fw_get_funcs = &uncore_get_thread_status;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. At this stage we don't know which flavour this
		 * ivb is, so it is better to also reset the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_uncore_write32(uncore, FORCEWAKE, 0);
		__raw_posting_read(uncore, ECOBUS);

		ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
		if (ret)
			goto out;

		spin_lock_irq(&uncore->lock);
		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
		ecobus = __raw_uncore_read32(uncore, ECOBUS);
		fw_domains_put(uncore, FORCEWAKE_RENDER);
		spin_unlock_irq(&uncore->lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
			drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
			fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (GRAPHICS_VER(i915) == 6) {
		uncore->fw_get_funcs = &uncore_get_thread_status;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

#undef fw_domain_init

	/* All future platforms are expected to require complex power gating */
	drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);

out:
	if (ret)
		intel_uncore_fw_domains_fini(uncore);

	return ret;
}

#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
{ \
	(uncore)->fw_domains_table = \
	    (struct intel_forcewake_range *)(d); \
	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
}
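/*
 * Note: the cast above drops the const qualifier, presumably so that the
 * const-qualified range tables can be stored in the non-const
 * fw_domains_table pointer.
 */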

#define ASSIGN_SHADOW_TABLE(uncore, d) \
{ \
	(uncore)->shadowed_reg_table = d; \
	(uncore)->shadowed_reg_table_entries = ARRAY_SIZE((d)); \
}

static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct intel_uncore *uncore = container_of(nb,
			struct intel_uncore, pmic_bus_access_nb);

	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * forcewake all now to make sure that we don't need to do a
		 * forcewake later, which on systems where this notifier gets
		 * called requires the punit to access the shared pmic i2c
		 * bus, which will be busy after this notification, leading to:
		 * "render: timed out waiting for forcewake ack request."
		 * errors.
		 *
		 * The notifier is unregistered during intel_runtime_suspend(),
		 * so it's ok to access the HW here without holding a RPM
		 * wake reference -> disable wakeref asserts for the time of
		 * the access.
		 */
		disable_rpm_wakeref_asserts(uncore->rpm);
		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
		enable_rpm_wakeref_asserts(uncore->rpm);
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
		break;
	}

	return NOTIFY_OK;
}

static void uncore_unmap_mmio(struct drm_device *drm, void *regs)
{
	iounmap(regs);
}

int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
{
	struct drm_i915_private *i915 = uncore->i915;
	int mmio_size;

	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap so that it
	 * does not clobber the GTT, which we map with ioremap_wc instead.
	 * Fortunately, the register BAR remains the same size for all the
	 * earlier generations up to Ironlake.
	 * For dgfx chips the register range is expanded to 4MB, and this
	 * larger range is also used for integrated gpus beginning with
	 * Meteor Lake.
	 */
	if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
		mmio_size = 4 * 1024 * 1024;
	else if (GRAPHICS_VER(i915) >= 5)
		mmio_size = 2 * 1024 * 1024;
	else
		mmio_size = 512 * 1024;

	uncore->regs = ioremap(phys_addr, mmio_size);
	if (uncore->regs == NULL) {
		drm_err(&i915->drm, "failed to map registers\n");
		return -EIO;
	}

	return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio, uncore->regs);
}

void intel_uncore_init_early(struct intel_uncore *uncore,
			     struct intel_gt *gt)
{
	spin_lock_init(&uncore->lock);
	uncore->i915 = gt->i915;
	uncore->gt = gt;
	uncore->rpm = &gt->i915->runtime_pm;
}

static void uncore_raw_init(struct intel_uncore *uncore)
{
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore));

	if (intel_vgpu_active(uncore->i915)) {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
	} else if (GRAPHICS_VER(uncore->i915) == 5) {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
	} else {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
	}
}

static int uncore_media_forcewake_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;

	if (MEDIA_VER(i915) >= 13) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __xelpmp_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, xelpmp_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else {
		MISSING_CASE(MEDIA_VER(i915));
		return -ENODEV;
	}

	return 0;
}

static int uncore_forcewake_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	ret = intel_uncore_fw_domains_init(uncore);
	if (ret)
		return ret;
	forcewake_early_sanitize(uncore, 0);

	ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);

	if (uncore->gt->type == GT_MEDIA)
		return uncore_media_forcewake_init(uncore);

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __mtl_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, mtl_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __pvc_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, pvc_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __xehp_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) >= 12) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) == 11) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen11_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_CHERRYVIEW(i915)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) == 8) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_VALLEYVIEW(i915)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
	} else if (IS_GRAPHICS_VER(i915, 6, 7)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
	}

	uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);

	return 0;
}

int intel_uncore_init_mmio(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	/*
	 * The boot firmware initializes local memory and assesses its health.
	 * If memory training fails, the punit will have been instructed to
	 * keep the GT powered down; we won't be able to communicate with it
	 * and we should not continue with driver initialization.
	 */
	if (IS_DGFX(i915) &&
	    !(__raw_uncore_read32(uncore, GU_CNTL) & LMEM_INIT)) {
		drm_err(&i915->drm, "LMEM not initialized by firmware\n");
		return -ENODEV;
	}

	if (GRAPHICS_VER(i915) > 5 && !intel_vgpu_active(i915))
		uncore->flags |= UNCORE_HAS_FORCEWAKE;

	if (!intel_uncore_has_forcewake(uncore)) {
		uncore_raw_init(uncore);
	} else {
		ret = uncore_forcewake_init(uncore);
		if (ret)
			return ret;
	}

	/* make sure fw funcs are set if and only if we have fw */
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->fw_get_funcs);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);

	if (HAS_FPGA_DBG_UNCLAIMED(i915))
		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;

	if (IS_GRAPHICS_VER(i915, 6, 7))
		uncore->flags |= UNCORE_HAS_FIFO;

	/* clear out unclaimed reg detection bit */
	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");

	return 0;
}

/*
 * We might have detected that some engines are fused off after we initialized
 * the forcewake domains. Prune them, to make sure they only reference existing
 * engines.
 */
void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
					  struct intel_gt *gt)
{
	enum forcewake_domains fw_domains = uncore->fw_domains;
	enum forcewake_domain_id domain_id;
	int i;

	if (!intel_uncore_has_forcewake(uncore) || GRAPHICS_VER(uncore->i915) < 11)
		return;

	for (i = 0; i < I915_MAX_VCS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;

		if (HAS_ENGINE(gt, _VCS(i)))
			continue;

		/*
		 * Starting with XeHP, the power well for an even-numbered
		 * VDBOX is also used for shared units within the
		 * media slice such as SFC. So even if the engine
		 * itself is fused off, we still need to initialize
		 * the forcewake domain if any of the other engines
		 * in the same media slice are present.
		 */
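		/*
		 * E.g. if VCS0 is fused off but VCS1 or VECS0 is still
		 * present, the VDBOX0 domain must be kept.
		 */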
		if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 50) && i % 2 == 0) {
			if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
				continue;

			if (HAS_ENGINE(gt, _VECS(i / 2)))
				continue;
		}

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}

	for (i = 0; i < I915_MAX_VECS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;

		if (HAS_ENGINE(gt, _VECS(i)))
			continue;

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}
}

/* Called via drm-managed action */
void intel_uncore_fini_mmio(struct drm_device *dev, void *data)
{
	struct intel_uncore *uncore = data;

	if (intel_uncore_has_forcewake(uncore)) {
		iosf_mbi_punit_acquire();
		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
			&uncore->pmic_bus_access_nb);
		intel_uncore_forcewake_reset(uncore);
		intel_uncore_fw_domains_fini(uncore);
		iosf_mbi_punit_release();
	}
}

/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold the register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_uncore_read_fw(uncore, reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must not be larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 reg_value = 0;
#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);
	GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}
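
/*
 * Illustrative (hypothetical) usage, spinning for up to 100us on a status
 * register with forcewake already held by the caller; STATUS_REG and
 * DONE_BIT are placeholders, not real definitions:
 *
 *	err = __intel_wait_for_register_fw(uncore, STATUS_REG,
 *					   DONE_BIT, DONE_BIT,
 *					   100, 0, NULL);
 */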

/**
 * __intel_wait_for_register - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold the register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_uncore_read(uncore, reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value)
{
	unsigned int fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
	u32 reg_value;
	int ret;

	might_sleep_if(slow_timeout_ms);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	ret = __intel_wait_for_register_fw(uncore,
					   reg, mask, value,
					   fast_timeout_us, 0, &reg_value);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irq(&uncore->lock);

	if (ret && slow_timeout_ms)
		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
								       reg),
				 (reg_value & mask) == value,
				 slow_timeout_ms * 1000, 10, 1000);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	if (out_value)
		*out_value = reg_value;

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret;

	if (!uncore->debug)
		return false;

	spin_lock_irq(&uncore->debug->lock);
	ret = check_for_unclaimed_mmio(uncore);
	spin_unlock_irq(&uncore->debug->lock);

	return ret;
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
	bool ret = false;

	if (drm_WARN_ON(&uncore->i915->drm, !uncore->debug))
		return false;

	spin_lock_irq(&uncore->debug->lock);

	if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
		goto out;

	if (unlikely(check_for_unclaimed_mmio(uncore))) {
		if (!uncore->i915->params.mmio_debug) {
			drm_dbg(&uncore->i915->drm,
				"Unclaimed register detected, "
				"enabling oneshot unclaimed register reporting. "
				"Please use i915.mmio_debug=N for more information.\n");
			uncore->i915->params.mmio_debug++;
		}
		uncore->debug->unclaimed_mmio_check--;
		ret = true;
	}

out:
	spin_unlock_irq(&uncore->debug->lock);

	return ret;
}

/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 *				    a register
 * @uncore: pointer to struct intel_uncore
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains that must be taken with, for example,
 * intel_uncore_forcewake_get() for the specified register to be accessible in
 * the specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires
 * the caller to do FIFO management on their own or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	drm_WARN_ON(&uncore->i915->drm, !op);

	if (!intel_uncore_has_forcewake(uncore))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = uncore->funcs.read_fw_domains(uncore, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);

	drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);

	return fw_domains;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif