Loading...
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#include "i915_drv.h"
25#include "intel_drv.h"
26#include "i915_vgpu.h"
27
28#include <linux/pm_runtime.h>
29
30#define FORCEWAKE_ACK_TIMEOUT_MS 50
31
32#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
33
34static const char * const forcewake_domain_names[] = {
35 "render",
36 "blitter",
37 "media",
38};
39
40const char *
41intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
42{
43 BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
44
45 if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
46 return forcewake_domain_names[id];
47
48 WARN_ON(id);
49
50 return "unknown";
51}
52
53static inline void
54fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
55{
56 WARN_ON(!i915_mmio_reg_valid(d->reg_set));
57 __raw_i915_write32(d->i915, d->reg_set, d->val_reset);
58}
59
60static inline void
61fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
62{
63 mod_timer_pinned(&d->timer, jiffies + 1);
64}
65
66static inline void
67fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
68{
69 if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
70 FORCEWAKE_KERNEL) == 0,
71 FORCEWAKE_ACK_TIMEOUT_MS))
72 DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
73 intel_uncore_forcewake_domain_to_str(d->id));
74}
75
76static inline void
77fw_domain_get(const struct intel_uncore_forcewake_domain *d)
78{
79 __raw_i915_write32(d->i915, d->reg_set, d->val_set);
80}
81
82static inline void
83fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
84{
85 if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
86 FORCEWAKE_KERNEL),
87 FORCEWAKE_ACK_TIMEOUT_MS))
88 DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
89 intel_uncore_forcewake_domain_to_str(d->id));
90}
91
92static inline void
93fw_domain_put(const struct intel_uncore_forcewake_domain *d)
94{
95 __raw_i915_write32(d->i915, d->reg_set, d->val_clear);
96}
97
98static inline void
99fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
100{
101 /* something from same cacheline, but not from the set register */
102 if (i915_mmio_reg_valid(d->reg_post))
103 __raw_posting_read(d->i915, d->reg_post);
104}
105
106static void
107fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
108{
109 struct intel_uncore_forcewake_domain *d;
110 enum forcewake_domain_id id;
111
112 for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
113 fw_domain_wait_ack_clear(d);
114 fw_domain_get(d);
115 fw_domain_wait_ack(d);
116 }
117}
118
119static void
120fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
121{
122 struct intel_uncore_forcewake_domain *d;
123 enum forcewake_domain_id id;
124
125 for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
126 fw_domain_put(d);
127 fw_domain_posting_read(d);
128 }
129}
130
131static void
132fw_domains_posting_read(struct drm_i915_private *dev_priv)
133{
134 struct intel_uncore_forcewake_domain *d;
135 enum forcewake_domain_id id;
136
137 /* No need to do for all, just do for first found */
138 for_each_fw_domain(d, dev_priv, id) {
139 fw_domain_posting_read(d);
140 break;
141 }
142}
143
144static void
145fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
146{
147 struct intel_uncore_forcewake_domain *d;
148 enum forcewake_domain_id id;
149
150 if (dev_priv->uncore.fw_domains == 0)
151 return;
152
153 for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
154 fw_domain_reset(d);
155
156 fw_domains_posting_read(dev_priv);
157}
158
159static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
160{
161 /* w/a for a sporadic read returning 0 by waiting for the GT
162 * thread to wake up.
163 */
164 if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
165 GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
166 DRM_ERROR("GT thread status wait timed out\n");
167}
168
169static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
170 enum forcewake_domains fw_domains)
171{
172 fw_domains_get(dev_priv, fw_domains);
173
174 /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
175 __gen6_gt_wait_for_thread_c0(dev_priv);
176}
177
178static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
179{
180 u32 gtfifodbg;
181
182 gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
183 if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
184 __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
185}
186
187static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
188 enum forcewake_domains fw_domains)
189{
190 fw_domains_put(dev_priv, fw_domains);
191 gen6_gt_check_fifodbg(dev_priv);
192}
193
194static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
195{
196 u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
197
198 return count & GT_FIFO_FREE_ENTRIES_MASK;
199}
200
201static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
202{
203 int ret = 0;
204
205 /* On VLV, FIFO will be shared by both SW and HW.
206 * So, we need to read the FREE_ENTRIES everytime */
207 if (IS_VALLEYVIEW(dev_priv->dev))
208 dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);
209
210 if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
211 int loop = 500;
212 u32 fifo = fifo_free_entries(dev_priv);
213
214 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
215 udelay(10);
216 fifo = fifo_free_entries(dev_priv);
217 }
218 if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
219 ++ret;
220 dev_priv->uncore.fifo_count = fifo;
221 }
222 dev_priv->uncore.fifo_count--;
223
224 return ret;
225}
226
227static void intel_uncore_fw_release_timer(unsigned long arg)
228{
229 struct intel_uncore_forcewake_domain *domain = (void *)arg;
230 unsigned long irqflags;
231
232 assert_rpm_device_not_suspended(domain->i915);
233
234 spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
235 if (WARN_ON(domain->wake_count == 0))
236 domain->wake_count++;
237
238 if (--domain->wake_count == 0)
239 domain->i915->uncore.funcs.force_wake_put(domain->i915,
240 1 << domain->id);
241
242 spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
243}
244
245void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
246{
247 struct drm_i915_private *dev_priv = dev->dev_private;
248 unsigned long irqflags;
249 struct intel_uncore_forcewake_domain *domain;
250 int retry_count = 100;
251 enum forcewake_domain_id id;
252 enum forcewake_domains fw = 0, active_domains;
253
254 /* Hold uncore.lock across reset to prevent any register access
255 * with forcewake not set correctly. Wait until all pending
256 * timers are run before holding.
257 */
258 while (1) {
259 active_domains = 0;
260
261 for_each_fw_domain(domain, dev_priv, id) {
262 if (del_timer_sync(&domain->timer) == 0)
263 continue;
264
265 intel_uncore_fw_release_timer((unsigned long)domain);
266 }
267
268 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
269
270 for_each_fw_domain(domain, dev_priv, id) {
271 if (timer_pending(&domain->timer))
272 active_domains |= (1 << id);
273 }
274
275 if (active_domains == 0)
276 break;
277
278 if (--retry_count == 0) {
279 DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
280 break;
281 }
282
283 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
284 cond_resched();
285 }
286
287 WARN_ON(active_domains);
288
289 for_each_fw_domain(domain, dev_priv, id)
290 if (domain->wake_count)
291 fw |= 1 << id;
292
293 if (fw)
294 dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
295
296 fw_domains_reset(dev_priv, FORCEWAKE_ALL);
297
298 if (restore) { /* If reset with a user forcewake, try to restore */
299 if (fw)
300 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
301
302 if (IS_GEN6(dev) || IS_GEN7(dev))
303 dev_priv->uncore.fifo_count =
304 fifo_free_entries(dev_priv);
305 }
306
307 if (!restore)
308 assert_forcewakes_inactive(dev_priv);
309
310 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
311}
312
313static void intel_uncore_ellc_detect(struct drm_device *dev)
314{
315 struct drm_i915_private *dev_priv = dev->dev_private;
316
317 if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
318 INTEL_INFO(dev)->gen >= 9) &&
319 (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
320 /* The docs do not explain exactly how the calculation can be
321 * made. It is somewhat guessable, but for now, it's always
322 * 128MB.
323 * NB: We can't write IDICR yet because we do not have gt funcs
324 * set up */
325 dev_priv->ellc_size = 128;
326 DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
327 }
328}
329
330static bool
331fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
332{
333 u32 dbg;
334
335 dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
336 if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
337 return false;
338
339 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
340
341 return true;
342}
343
344static bool
345vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
346{
347 u32 cer;
348
349 cer = __raw_i915_read32(dev_priv, CLAIM_ER);
350 if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
351 return false;
352
353 __raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);
354
355 return true;
356}
357
358static bool
359check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
360{
361 if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
362 return fpga_check_for_unclaimed_mmio(dev_priv);
363
364 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
365 return vlv_check_for_unclaimed_mmio(dev_priv);
366
367 return false;
368}
369
370static void __intel_uncore_early_sanitize(struct drm_device *dev,
371 bool restore_forcewake)
372{
373 struct drm_i915_private *dev_priv = dev->dev_private;
374
375 /* clear out unclaimed reg detection bit */
376 if (check_for_unclaimed_mmio(dev_priv))
377 DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
378
379 /* clear out old GT FIFO errors */
380 if (IS_GEN6(dev) || IS_GEN7(dev))
381 __raw_i915_write32(dev_priv, GTFIFODBG,
382 __raw_i915_read32(dev_priv, GTFIFODBG));
383
384 /* WaDisableShadowRegForCpd:chv */
385 if (IS_CHERRYVIEW(dev)) {
386 __raw_i915_write32(dev_priv, GTFIFOCTL,
387 __raw_i915_read32(dev_priv, GTFIFOCTL) |
388 GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
389 GT_FIFO_CTL_RC6_POLICY_STALL);
390 }
391
392 intel_uncore_forcewake_reset(dev, restore_forcewake);
393}
394
395void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
396{
397 __intel_uncore_early_sanitize(dev, restore_forcewake);
398 i915_check_and_clear_faults(dev);
399}
400
401void intel_uncore_sanitize(struct drm_device *dev)
402{
403 i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
404
405 /* BIOS often leaves RC6 enabled, but disable it for hw init */
406 intel_disable_gt_powersave(dev);
407}
408
409static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
410 enum forcewake_domains fw_domains)
411{
412 struct intel_uncore_forcewake_domain *domain;
413 enum forcewake_domain_id id;
414
415 if (!dev_priv->uncore.funcs.force_wake_get)
416 return;
417
418 fw_domains &= dev_priv->uncore.fw_domains;
419
420 for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
421 if (domain->wake_count++)
422 fw_domains &= ~(1 << id);
423 }
424
425 if (fw_domains)
426 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
427}
428
429/**
430 * intel_uncore_forcewake_get - grab forcewake domain references
431 * @dev_priv: i915 device instance
432 * @fw_domains: forcewake domains to get reference on
433 *
434 * This function can be used get GT's forcewake domain references.
435 * Normal register access will handle the forcewake domains automatically.
436 * However if some sequence requires the GT to not power down a particular
437 * forcewake domains this function should be called at the beginning of the
438 * sequence. And subsequently the reference should be dropped by symmetric
439 * call to intel_unforce_forcewake_put(). Usually caller wants all the domains
440 * to be kept awake so the @fw_domains would be then FORCEWAKE_ALL.
441 */
442void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
443 enum forcewake_domains fw_domains)
444{
445 unsigned long irqflags;
446
447 if (!dev_priv->uncore.funcs.force_wake_get)
448 return;
449
450 assert_rpm_wakelock_held(dev_priv);
451
452 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
453 __intel_uncore_forcewake_get(dev_priv, fw_domains);
454 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
455}
456
457/**
458 * intel_uncore_forcewake_get__locked - grab forcewake domain references
459 * @dev_priv: i915 device instance
460 * @fw_domains: forcewake domains to get reference on
461 *
462 * See intel_uncore_forcewake_get(). This variant places the onus
463 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
464 */
465void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
466 enum forcewake_domains fw_domains)
467{
468 assert_spin_locked(&dev_priv->uncore.lock);
469
470 if (!dev_priv->uncore.funcs.force_wake_get)
471 return;
472
473 __intel_uncore_forcewake_get(dev_priv, fw_domains);
474}
475
476static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
477 enum forcewake_domains fw_domains)
478{
479 struct intel_uncore_forcewake_domain *domain;
480 enum forcewake_domain_id id;
481
482 if (!dev_priv->uncore.funcs.force_wake_put)
483 return;
484
485 fw_domains &= dev_priv->uncore.fw_domains;
486
487 for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
488 if (WARN_ON(domain->wake_count == 0))
489 continue;
490
491 if (--domain->wake_count)
492 continue;
493
494 domain->wake_count++;
495 fw_domain_arm_timer(domain);
496 }
497}
498
499/**
500 * intel_uncore_forcewake_put - release a forcewake domain reference
501 * @dev_priv: i915 device instance
502 * @fw_domains: forcewake domains to put references
503 *
504 * This function drops the device-level forcewakes for specified
505 * domains obtained by intel_uncore_forcewake_get().
506 */
507void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
508 enum forcewake_domains fw_domains)
509{
510 unsigned long irqflags;
511
512 if (!dev_priv->uncore.funcs.force_wake_put)
513 return;
514
515 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
516 __intel_uncore_forcewake_put(dev_priv, fw_domains);
517 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
518}
519
520/**
521 * intel_uncore_forcewake_put__locked - grab forcewake domain references
522 * @dev_priv: i915 device instance
523 * @fw_domains: forcewake domains to get reference on
524 *
525 * See intel_uncore_forcewake_put(). This variant places the onus
526 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
527 */
528void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
529 enum forcewake_domains fw_domains)
530{
531 assert_spin_locked(&dev_priv->uncore.lock);
532
533 if (!dev_priv->uncore.funcs.force_wake_put)
534 return;
535
536 __intel_uncore_forcewake_put(dev_priv, fw_domains);
537}
538
539void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
540{
541 struct intel_uncore_forcewake_domain *domain;
542 enum forcewake_domain_id id;
543
544 if (!dev_priv->uncore.funcs.force_wake_get)
545 return;
546
547 for_each_fw_domain(domain, dev_priv, id)
548 WARN_ON(domain->wake_count);
549}
550
551/* We give fast paths for the really cool registers */
552#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
553
554#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
555
556#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
557 (REG_RANGE((reg), 0x2000, 0x4000) || \
558 REG_RANGE((reg), 0x5000, 0x8000) || \
559 REG_RANGE((reg), 0xB000, 0x12000) || \
560 REG_RANGE((reg), 0x2E000, 0x30000))
561
562#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
563 (REG_RANGE((reg), 0x12000, 0x14000) || \
564 REG_RANGE((reg), 0x22000, 0x24000) || \
565 REG_RANGE((reg), 0x30000, 0x40000))
566
567#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
568 (REG_RANGE((reg), 0x2000, 0x4000) || \
569 REG_RANGE((reg), 0x5200, 0x8000) || \
570 REG_RANGE((reg), 0x8300, 0x8500) || \
571 REG_RANGE((reg), 0xB000, 0xB480) || \
572 REG_RANGE((reg), 0xE000, 0xE800))
573
574#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
575 (REG_RANGE((reg), 0x8800, 0x8900) || \
576 REG_RANGE((reg), 0xD000, 0xD800) || \
577 REG_RANGE((reg), 0x12000, 0x14000) || \
578 REG_RANGE((reg), 0x1A000, 0x1C000) || \
579 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
580 REG_RANGE((reg), 0x30000, 0x38000))
581
582#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
583 (REG_RANGE((reg), 0x4000, 0x5000) || \
584 REG_RANGE((reg), 0x8000, 0x8300) || \
585 REG_RANGE((reg), 0x8500, 0x8600) || \
586 REG_RANGE((reg), 0x9000, 0xB000) || \
587 REG_RANGE((reg), 0xF000, 0x10000))
588
589#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
590 REG_RANGE((reg), 0xB00, 0x2000)
591
592#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
593 (REG_RANGE((reg), 0x2000, 0x2700) || \
594 REG_RANGE((reg), 0x3000, 0x4000) || \
595 REG_RANGE((reg), 0x5200, 0x8000) || \
596 REG_RANGE((reg), 0x8140, 0x8160) || \
597 REG_RANGE((reg), 0x8300, 0x8500) || \
598 REG_RANGE((reg), 0x8C00, 0x8D00) || \
599 REG_RANGE((reg), 0xB000, 0xB480) || \
600 REG_RANGE((reg), 0xE000, 0xE900) || \
601 REG_RANGE((reg), 0x24400, 0x24800))
602
603#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
604 (REG_RANGE((reg), 0x8130, 0x8140) || \
605 REG_RANGE((reg), 0x8800, 0x8A00) || \
606 REG_RANGE((reg), 0xD000, 0xD800) || \
607 REG_RANGE((reg), 0x12000, 0x14000) || \
608 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
609 REG_RANGE((reg), 0x30000, 0x40000))
610
611#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
612 REG_RANGE((reg), 0x9400, 0x9800)
613
614#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
615 ((reg) < 0x40000 && \
616 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
617 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
618 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
619 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
620
621static void
622ilk_dummy_write(struct drm_i915_private *dev_priv)
623{
624 /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
625 * the chip from rc6 before touching it for real. MI_MODE is masked,
626 * hence harmless to write 0 into. */
627 __raw_i915_write32(dev_priv, MI_MODE, 0);
628}
629
630static void
631__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
632 const i915_reg_t reg,
633 const bool read,
634 const bool before)
635{
636 /* XXX. We limit the auto arming traces for mmio
637 * debugs on these platforms. There are just too many
638 * revealed by these and CI/Bat suffers from the noise.
639 * Please fix and then re-enable the automatic traces.
640 */
641 if (i915.mmio_debug < 2 &&
642 (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
643 return;
644
645 if (WARN(check_for_unclaimed_mmio(dev_priv),
646 "Unclaimed register detected %s %s register 0x%x\n",
647 before ? "before" : "after",
648 read ? "reading" : "writing to",
649 i915_mmio_reg_offset(reg)))
650 i915.mmio_debug--; /* Only report the first N failures */
651}
652
653static inline void
654unclaimed_reg_debug(struct drm_i915_private *dev_priv,
655 const i915_reg_t reg,
656 const bool read,
657 const bool before)
658{
659 if (likely(!i915.mmio_debug))
660 return;
661
662 __unclaimed_reg_debug(dev_priv, reg, read, before);
663}
664
665#define GEN2_READ_HEADER(x) \
666 u##x val = 0; \
667 assert_rpm_wakelock_held(dev_priv);
668
669#define GEN2_READ_FOOTER \
670 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
671 return val
672
673#define __gen2_read(x) \
674static u##x \
675gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
676 GEN2_READ_HEADER(x); \
677 val = __raw_i915_read##x(dev_priv, reg); \
678 GEN2_READ_FOOTER; \
679}
680
681#define __gen5_read(x) \
682static u##x \
683gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
684 GEN2_READ_HEADER(x); \
685 ilk_dummy_write(dev_priv); \
686 val = __raw_i915_read##x(dev_priv, reg); \
687 GEN2_READ_FOOTER; \
688}
689
690__gen5_read(8)
691__gen5_read(16)
692__gen5_read(32)
693__gen5_read(64)
694__gen2_read(8)
695__gen2_read(16)
696__gen2_read(32)
697__gen2_read(64)
698
699#undef __gen5_read
700#undef __gen2_read
701
702#undef GEN2_READ_FOOTER
703#undef GEN2_READ_HEADER
704
705#define GEN6_READ_HEADER(x) \
706 u32 offset = i915_mmio_reg_offset(reg); \
707 unsigned long irqflags; \
708 u##x val = 0; \
709 assert_rpm_wakelock_held(dev_priv); \
710 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
711 unclaimed_reg_debug(dev_priv, reg, true, true)
712
713#define GEN6_READ_FOOTER \
714 unclaimed_reg_debug(dev_priv, reg, true, false); \
715 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
716 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
717 return val
718
719static inline void __force_wake_get(struct drm_i915_private *dev_priv,
720 enum forcewake_domains fw_domains)
721{
722 struct intel_uncore_forcewake_domain *domain;
723 enum forcewake_domain_id id;
724
725 if (WARN_ON(!fw_domains))
726 return;
727
728 /* Ideally GCC would be constant-fold and eliminate this loop */
729 for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
730 if (domain->wake_count) {
731 fw_domains &= ~(1 << id);
732 continue;
733 }
734
735 domain->wake_count++;
736 fw_domain_arm_timer(domain);
737 }
738
739 if (fw_domains)
740 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
741}
742
743#define __gen6_read(x) \
744static u##x \
745gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
746 GEN6_READ_HEADER(x); \
747 if (NEEDS_FORCE_WAKE(offset)) \
748 __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
749 val = __raw_i915_read##x(dev_priv, reg); \
750 GEN6_READ_FOOTER; \
751}
752
753#define __vlv_read(x) \
754static u##x \
755vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
756 enum forcewake_domains fw_engine = 0; \
757 GEN6_READ_HEADER(x); \
758 if (!NEEDS_FORCE_WAKE(offset)) \
759 fw_engine = 0; \
760 else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
761 fw_engine = FORCEWAKE_RENDER; \
762 else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
763 fw_engine = FORCEWAKE_MEDIA; \
764 if (fw_engine) \
765 __force_wake_get(dev_priv, fw_engine); \
766 val = __raw_i915_read##x(dev_priv, reg); \
767 GEN6_READ_FOOTER; \
768}
769
770#define __chv_read(x) \
771static u##x \
772chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
773 enum forcewake_domains fw_engine = 0; \
774 GEN6_READ_HEADER(x); \
775 if (!NEEDS_FORCE_WAKE(offset)) \
776 fw_engine = 0; \
777 else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
778 fw_engine = FORCEWAKE_RENDER; \
779 else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
780 fw_engine = FORCEWAKE_MEDIA; \
781 else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
782 fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
783 if (fw_engine) \
784 __force_wake_get(dev_priv, fw_engine); \
785 val = __raw_i915_read##x(dev_priv, reg); \
786 GEN6_READ_FOOTER; \
787}
788
789#define SKL_NEEDS_FORCE_WAKE(reg) \
790 ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
791
792#define __gen9_read(x) \
793static u##x \
794gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
795 enum forcewake_domains fw_engine; \
796 GEN6_READ_HEADER(x); \
797 if (!SKL_NEEDS_FORCE_WAKE(offset)) \
798 fw_engine = 0; \
799 else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
800 fw_engine = FORCEWAKE_RENDER; \
801 else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
802 fw_engine = FORCEWAKE_MEDIA; \
803 else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
804 fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
805 else \
806 fw_engine = FORCEWAKE_BLITTER; \
807 if (fw_engine) \
808 __force_wake_get(dev_priv, fw_engine); \
809 val = __raw_i915_read##x(dev_priv, reg); \
810 GEN6_READ_FOOTER; \
811}
812
813__gen9_read(8)
814__gen9_read(16)
815__gen9_read(32)
816__gen9_read(64)
817__chv_read(8)
818__chv_read(16)
819__chv_read(32)
820__chv_read(64)
821__vlv_read(8)
822__vlv_read(16)
823__vlv_read(32)
824__vlv_read(64)
825__gen6_read(8)
826__gen6_read(16)
827__gen6_read(32)
828__gen6_read(64)
829
830#undef __gen9_read
831#undef __chv_read
832#undef __vlv_read
833#undef __gen6_read
834#undef GEN6_READ_FOOTER
835#undef GEN6_READ_HEADER
836
837#define VGPU_READ_HEADER(x) \
838 unsigned long irqflags; \
839 u##x val = 0; \
840 assert_rpm_device_not_suspended(dev_priv); \
841 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
842
843#define VGPU_READ_FOOTER \
844 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
845 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
846 return val
847
848#define __vgpu_read(x) \
849static u##x \
850vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
851 VGPU_READ_HEADER(x); \
852 val = __raw_i915_read##x(dev_priv, reg); \
853 VGPU_READ_FOOTER; \
854}
855
856__vgpu_read(8)
857__vgpu_read(16)
858__vgpu_read(32)
859__vgpu_read(64)
860
861#undef __vgpu_read
862#undef VGPU_READ_FOOTER
863#undef VGPU_READ_HEADER
864
865#define GEN2_WRITE_HEADER \
866 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
867 assert_rpm_wakelock_held(dev_priv); \
868
869#define GEN2_WRITE_FOOTER
870
871#define __gen2_write(x) \
872static void \
873gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
874 GEN2_WRITE_HEADER; \
875 __raw_i915_write##x(dev_priv, reg, val); \
876 GEN2_WRITE_FOOTER; \
877}
878
879#define __gen5_write(x) \
880static void \
881gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
882 GEN2_WRITE_HEADER; \
883 ilk_dummy_write(dev_priv); \
884 __raw_i915_write##x(dev_priv, reg, val); \
885 GEN2_WRITE_FOOTER; \
886}
887
888__gen5_write(8)
889__gen5_write(16)
890__gen5_write(32)
891__gen5_write(64)
892__gen2_write(8)
893__gen2_write(16)
894__gen2_write(32)
895__gen2_write(64)
896
897#undef __gen5_write
898#undef __gen2_write
899
900#undef GEN2_WRITE_FOOTER
901#undef GEN2_WRITE_HEADER
902
903#define GEN6_WRITE_HEADER \
904 u32 offset = i915_mmio_reg_offset(reg); \
905 unsigned long irqflags; \
906 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
907 assert_rpm_wakelock_held(dev_priv); \
908 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
909 unclaimed_reg_debug(dev_priv, reg, false, true)
910
911#define GEN6_WRITE_FOOTER \
912 unclaimed_reg_debug(dev_priv, reg, false, false); \
913 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
914
915#define __gen6_write(x) \
916static void \
917gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
918 u32 __fifo_ret = 0; \
919 GEN6_WRITE_HEADER; \
920 if (NEEDS_FORCE_WAKE(offset)) { \
921 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
922 } \
923 __raw_i915_write##x(dev_priv, reg, val); \
924 if (unlikely(__fifo_ret)) { \
925 gen6_gt_check_fifodbg(dev_priv); \
926 } \
927 GEN6_WRITE_FOOTER; \
928}
929
930#define __hsw_write(x) \
931static void \
932hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
933 u32 __fifo_ret = 0; \
934 GEN6_WRITE_HEADER; \
935 if (NEEDS_FORCE_WAKE(offset)) { \
936 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
937 } \
938 __raw_i915_write##x(dev_priv, reg, val); \
939 if (unlikely(__fifo_ret)) { \
940 gen6_gt_check_fifodbg(dev_priv); \
941 } \
942 GEN6_WRITE_FOOTER; \
943}
944
945static const i915_reg_t gen8_shadowed_regs[] = {
946 FORCEWAKE_MT,
947 GEN6_RPNSWREQ,
948 GEN6_RC_VIDEO_FREQ,
949 RING_TAIL(RENDER_RING_BASE),
950 RING_TAIL(GEN6_BSD_RING_BASE),
951 RING_TAIL(VEBOX_RING_BASE),
952 RING_TAIL(BLT_RING_BASE),
953 /* TODO: Other registers are not yet used */
954};
955
956static bool is_gen8_shadowed(struct drm_i915_private *dev_priv,
957 i915_reg_t reg)
958{
959 int i;
960 for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
961 if (i915_mmio_reg_equal(reg, gen8_shadowed_regs[i]))
962 return true;
963
964 return false;
965}
966
967#define __gen8_write(x) \
968static void \
969gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
970 GEN6_WRITE_HEADER; \
971 if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
972 __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
973 __raw_i915_write##x(dev_priv, reg, val); \
974 GEN6_WRITE_FOOTER; \
975}
976
977#define __chv_write(x) \
978static void \
979chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
980 enum forcewake_domains fw_engine = 0; \
981 GEN6_WRITE_HEADER; \
982 if (!NEEDS_FORCE_WAKE(offset) || \
983 is_gen8_shadowed(dev_priv, reg)) \
984 fw_engine = 0; \
985 else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
986 fw_engine = FORCEWAKE_RENDER; \
987 else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
988 fw_engine = FORCEWAKE_MEDIA; \
989 else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
990 fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
991 if (fw_engine) \
992 __force_wake_get(dev_priv, fw_engine); \
993 __raw_i915_write##x(dev_priv, reg, val); \
994 GEN6_WRITE_FOOTER; \
995}
996
997static const i915_reg_t gen9_shadowed_regs[] = {
998 RING_TAIL(RENDER_RING_BASE),
999 RING_TAIL(GEN6_BSD_RING_BASE),
1000 RING_TAIL(VEBOX_RING_BASE),
1001 RING_TAIL(BLT_RING_BASE),
1002 FORCEWAKE_BLITTER_GEN9,
1003 FORCEWAKE_RENDER_GEN9,
1004 FORCEWAKE_MEDIA_GEN9,
1005 GEN6_RPNSWREQ,
1006 GEN6_RC_VIDEO_FREQ,
1007 /* TODO: Other registers are not yet used */
1008};
1009
1010static bool is_gen9_shadowed(struct drm_i915_private *dev_priv,
1011 i915_reg_t reg)
1012{
1013 int i;
1014 for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
1015 if (i915_mmio_reg_equal(reg, gen9_shadowed_regs[i]))
1016 return true;
1017
1018 return false;
1019}
1020
1021#define __gen9_write(x) \
1022static void \
1023gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
1024 bool trace) { \
1025 enum forcewake_domains fw_engine; \
1026 GEN6_WRITE_HEADER; \
1027 if (!SKL_NEEDS_FORCE_WAKE(offset) || \
1028 is_gen9_shadowed(dev_priv, reg)) \
1029 fw_engine = 0; \
1030 else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
1031 fw_engine = FORCEWAKE_RENDER; \
1032 else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
1033 fw_engine = FORCEWAKE_MEDIA; \
1034 else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
1035 fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
1036 else \
1037 fw_engine = FORCEWAKE_BLITTER; \
1038 if (fw_engine) \
1039 __force_wake_get(dev_priv, fw_engine); \
1040 __raw_i915_write##x(dev_priv, reg, val); \
1041 GEN6_WRITE_FOOTER; \
1042}
1043
1044__gen9_write(8)
1045__gen9_write(16)
1046__gen9_write(32)
1047__gen9_write(64)
1048__chv_write(8)
1049__chv_write(16)
1050__chv_write(32)
1051__chv_write(64)
1052__gen8_write(8)
1053__gen8_write(16)
1054__gen8_write(32)
1055__gen8_write(64)
1056__hsw_write(8)
1057__hsw_write(16)
1058__hsw_write(32)
1059__hsw_write(64)
1060__gen6_write(8)
1061__gen6_write(16)
1062__gen6_write(32)
1063__gen6_write(64)
1064
1065#undef __gen9_write
1066#undef __chv_write
1067#undef __gen8_write
1068#undef __hsw_write
1069#undef __gen6_write
1070#undef GEN6_WRITE_FOOTER
1071#undef GEN6_WRITE_HEADER
1072
1073#define VGPU_WRITE_HEADER \
1074 unsigned long irqflags; \
1075 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1076 assert_rpm_device_not_suspended(dev_priv); \
1077 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
1078
1079#define VGPU_WRITE_FOOTER \
1080 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
1081
1082#define __vgpu_write(x) \
1083static void vgpu_write##x(struct drm_i915_private *dev_priv, \
1084 i915_reg_t reg, u##x val, bool trace) { \
1085 VGPU_WRITE_HEADER; \
1086 __raw_i915_write##x(dev_priv, reg, val); \
1087 VGPU_WRITE_FOOTER; \
1088}
1089
1090__vgpu_write(8)
1091__vgpu_write(16)
1092__vgpu_write(32)
1093__vgpu_write(64)
1094
1095#undef __vgpu_write
1096#undef VGPU_WRITE_FOOTER
1097#undef VGPU_WRITE_HEADER
1098
1099#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
1100do { \
1101 dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
1102 dev_priv->uncore.funcs.mmio_writew = x##_write16; \
1103 dev_priv->uncore.funcs.mmio_writel = x##_write32; \
1104 dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
1105} while (0)
1106
1107#define ASSIGN_READ_MMIO_VFUNCS(x) \
1108do { \
1109 dev_priv->uncore.funcs.mmio_readb = x##_read8; \
1110 dev_priv->uncore.funcs.mmio_readw = x##_read16; \
1111 dev_priv->uncore.funcs.mmio_readl = x##_read32; \
1112 dev_priv->uncore.funcs.mmio_readq = x##_read64; \
1113} while (0)
1114
1115
1116static void fw_domain_init(struct drm_i915_private *dev_priv,
1117 enum forcewake_domain_id domain_id,
1118 i915_reg_t reg_set,
1119 i915_reg_t reg_ack)
1120{
1121 struct intel_uncore_forcewake_domain *d;
1122
1123 if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
1124 return;
1125
1126 d = &dev_priv->uncore.fw_domain[domain_id];
1127
1128 WARN_ON(d->wake_count);
1129
1130 d->wake_count = 0;
1131 d->reg_set = reg_set;
1132 d->reg_ack = reg_ack;
1133
1134 if (IS_GEN6(dev_priv)) {
1135 d->val_reset = 0;
1136 d->val_set = FORCEWAKE_KERNEL;
1137 d->val_clear = 0;
1138 } else {
1139 /* WaRsClearFWBitsAtReset:bdw,skl */
1140 d->val_reset = _MASKED_BIT_DISABLE(0xffff);
1141 d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
1142 d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
1143 }
1144
1145 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1146 d->reg_post = FORCEWAKE_ACK_VLV;
1147 else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
1148 d->reg_post = ECOBUS;
1149
1150 d->i915 = dev_priv;
1151 d->id = domain_id;
1152
1153 setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);
1154
1155 dev_priv->uncore.fw_domains |= (1 << domain_id);
1156
1157 fw_domain_reset(d);
1158}
1159
1160static void intel_uncore_fw_domains_init(struct drm_device *dev)
1161{
1162 struct drm_i915_private *dev_priv = dev->dev_private;
1163
1164 if (INTEL_INFO(dev_priv->dev)->gen <= 5)
1165 return;
1166
1167 if (IS_GEN9(dev)) {
1168 dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1169 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1170 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1171 FORCEWAKE_RENDER_GEN9,
1172 FORCEWAKE_ACK_RENDER_GEN9);
1173 fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
1174 FORCEWAKE_BLITTER_GEN9,
1175 FORCEWAKE_ACK_BLITTER_GEN9);
1176 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1177 FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
1178 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
1179 dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1180 if (!IS_CHERRYVIEW(dev))
1181 dev_priv->uncore.funcs.force_wake_put =
1182 fw_domains_put_with_fifo;
1183 else
1184 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1185 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1186 FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
1187 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1188 FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
1189 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
1190 dev_priv->uncore.funcs.force_wake_get =
1191 fw_domains_get_with_thread_status;
1192 if (IS_HASWELL(dev))
1193 dev_priv->uncore.funcs.force_wake_put =
1194 fw_domains_put_with_fifo;
1195 else
1196 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1197 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1198 FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
1199 } else if (IS_IVYBRIDGE(dev)) {
1200 u32 ecobus;
1201
1202 /* IVB configs may use multi-threaded forcewake */
1203
1204 /* A small trick here - if the bios hasn't configured
1205 * MT forcewake, and if the device is in RC6, then
1206 * force_wake_mt_get will not wake the device and the
1207 * ECOBUS read will return zero. Which will be
1208 * (correctly) interpreted by the test below as MT
1209 * forcewake being disabled.
1210 */
1211 dev_priv->uncore.funcs.force_wake_get =
1212 fw_domains_get_with_thread_status;
1213 dev_priv->uncore.funcs.force_wake_put =
1214 fw_domains_put_with_fifo;
1215
1216 /* We need to init first for ECOBUS access and then
1217 * determine later if we want to reinit, in case of MT access is
1218 * not working. In this stage we don't know which flavour this
1219 * ivb is, so it is better to reset also the gen6 fw registers
1220 * before the ecobus check.
1221 */
1222
1223 __raw_i915_write32(dev_priv, FORCEWAKE, 0);
1224 __raw_posting_read(dev_priv, ECOBUS);
1225
1226 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1227 FORCEWAKE_MT, FORCEWAKE_MT_ACK);
1228
1229 mutex_lock(&dev->struct_mutex);
1230 fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
1231 ecobus = __raw_i915_read32(dev_priv, ECOBUS);
1232 fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
1233 mutex_unlock(&dev->struct_mutex);
1234
1235 if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
1236 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
1237 DRM_INFO("when using vblank-synced partial screen updates.\n");
1238 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1239 FORCEWAKE, FORCEWAKE_ACK);
1240 }
1241 } else if (IS_GEN6(dev)) {
1242 dev_priv->uncore.funcs.force_wake_get =
1243 fw_domains_get_with_thread_status;
1244 dev_priv->uncore.funcs.force_wake_put =
1245 fw_domains_put_with_fifo;
1246 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1247 FORCEWAKE, FORCEWAKE_ACK);
1248 }
1249
1250 /* All future platforms are expected to require complex power gating */
1251 WARN_ON(dev_priv->uncore.fw_domains == 0);
1252}
1253
1254void intel_uncore_init(struct drm_device *dev)
1255{
1256 struct drm_i915_private *dev_priv = dev->dev_private;
1257
1258 i915_check_vgpu(dev);
1259
1260 intel_uncore_ellc_detect(dev);
1261 intel_uncore_fw_domains_init(dev);
1262 __intel_uncore_early_sanitize(dev, false);
1263
1264 dev_priv->uncore.unclaimed_mmio_check = 1;
1265
1266 switch (INTEL_INFO(dev)->gen) {
1267 default:
1268 case 9:
1269 ASSIGN_WRITE_MMIO_VFUNCS(gen9);
1270 ASSIGN_READ_MMIO_VFUNCS(gen9);
1271 break;
1272 case 8:
1273 if (IS_CHERRYVIEW(dev)) {
1274 ASSIGN_WRITE_MMIO_VFUNCS(chv);
1275 ASSIGN_READ_MMIO_VFUNCS(chv);
1276
1277 } else {
1278 ASSIGN_WRITE_MMIO_VFUNCS(gen8);
1279 ASSIGN_READ_MMIO_VFUNCS(gen6);
1280 }
1281 break;
1282 case 7:
1283 case 6:
1284 if (IS_HASWELL(dev)) {
1285 ASSIGN_WRITE_MMIO_VFUNCS(hsw);
1286 } else {
1287 ASSIGN_WRITE_MMIO_VFUNCS(gen6);
1288 }
1289
1290 if (IS_VALLEYVIEW(dev)) {
1291 ASSIGN_READ_MMIO_VFUNCS(vlv);
1292 } else {
1293 ASSIGN_READ_MMIO_VFUNCS(gen6);
1294 }
1295 break;
1296 case 5:
1297 ASSIGN_WRITE_MMIO_VFUNCS(gen5);
1298 ASSIGN_READ_MMIO_VFUNCS(gen5);
1299 break;
1300 case 4:
1301 case 3:
1302 case 2:
1303 ASSIGN_WRITE_MMIO_VFUNCS(gen2);
1304 ASSIGN_READ_MMIO_VFUNCS(gen2);
1305 break;
1306 }
1307
1308 if (intel_vgpu_active(dev)) {
1309 ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
1310 ASSIGN_READ_MMIO_VFUNCS(vgpu);
1311 }
1312
1313 i915_check_and_clear_faults(dev);
1314}
1315#undef ASSIGN_WRITE_MMIO_VFUNCS
1316#undef ASSIGN_READ_MMIO_VFUNCS
1317
1318void intel_uncore_fini(struct drm_device *dev)
1319{
1320 /* Paranoia: make sure we have disabled everything before we exit. */
1321 intel_uncore_sanitize(dev);
1322 intel_uncore_forcewake_reset(dev, false);
1323}
1324
1325#define GEN_RANGE(l, h) GENMASK(h, l)
1326
1327static const struct register_whitelist {
1328 i915_reg_t offset_ldw, offset_udw;
1329 uint32_t size;
1330 /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
1331 uint32_t gen_bitmask;
1332} whitelist[] = {
1333 { .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
1334 .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
1335 .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
1336};
1337
1338int i915_reg_read_ioctl(struct drm_device *dev,
1339 void *data, struct drm_file *file)
1340{
1341 struct drm_i915_private *dev_priv = dev->dev_private;
1342 struct drm_i915_reg_read *reg = data;
1343 struct register_whitelist const *entry = whitelist;
1344 unsigned size;
1345 i915_reg_t offset_ldw, offset_udw;
1346 int i, ret = 0;
1347
1348 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1349 if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
1350 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
1351 break;
1352 }
1353
1354 if (i == ARRAY_SIZE(whitelist))
1355 return -EINVAL;
1356
1357 /* We use the low bits to encode extra flags as the register should
1358 * be naturally aligned (and those that are not so aligned merely
1359 * limit the available flags for that register).
1360 */
1361 offset_ldw = entry->offset_ldw;
1362 offset_udw = entry->offset_udw;
1363 size = entry->size;
1364 size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);
1365
1366 intel_runtime_pm_get(dev_priv);
1367
1368 switch (size) {
1369 case 8 | 1:
1370 reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
1371 break;
1372 case 8:
1373 reg->val = I915_READ64(offset_ldw);
1374 break;
1375 case 4:
1376 reg->val = I915_READ(offset_ldw);
1377 break;
1378 case 2:
1379 reg->val = I915_READ16(offset_ldw);
1380 break;
1381 case 1:
1382 reg->val = I915_READ8(offset_ldw);
1383 break;
1384 default:
1385 ret = -EINVAL;
1386 goto out;
1387 }
1388
1389out:
1390 intel_runtime_pm_put(dev_priv);
1391 return ret;
1392}
1393
1394int i915_get_reset_stats_ioctl(struct drm_device *dev,
1395 void *data, struct drm_file *file)
1396{
1397 struct drm_i915_private *dev_priv = dev->dev_private;
1398 struct drm_i915_reset_stats *args = data;
1399 struct i915_ctx_hang_stats *hs;
1400 struct intel_context *ctx;
1401 int ret;
1402
1403 if (args->flags || args->pad)
1404 return -EINVAL;
1405
1406 if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
1407 return -EPERM;
1408
1409 ret = mutex_lock_interruptible(&dev->struct_mutex);
1410 if (ret)
1411 return ret;
1412
1413 ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
1414 if (IS_ERR(ctx)) {
1415 mutex_unlock(&dev->struct_mutex);
1416 return PTR_ERR(ctx);
1417 }
1418 hs = &ctx->hang_stats;
1419
1420 if (capable(CAP_SYS_ADMIN))
1421 args->reset_count = i915_reset_count(&dev_priv->gpu_error);
1422 else
1423 args->reset_count = 0;
1424
1425 args->batch_active = hs->batch_active;
1426 args->batch_pending = hs->batch_pending;
1427
1428 mutex_unlock(&dev->struct_mutex);
1429
1430 return 0;
1431}
1432
1433static int i915_reset_complete(struct drm_device *dev)
1434{
1435 u8 gdrst;
1436 pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
1437 return (gdrst & GRDOM_RESET_STATUS) == 0;
1438}
1439
1440static int i915_do_reset(struct drm_device *dev)
1441{
1442 /* assert reset for at least 20 usec */
1443 pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1444 udelay(20);
1445 pci_write_config_byte(dev->pdev, I915_GDRST, 0);
1446
1447 return wait_for(i915_reset_complete(dev), 500);
1448}
1449
1450static int g4x_reset_complete(struct drm_device *dev)
1451{
1452 u8 gdrst;
1453 pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
1454 return (gdrst & GRDOM_RESET_ENABLE) == 0;
1455}
1456
1457static int g33_do_reset(struct drm_device *dev)
1458{
1459 pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1460 return wait_for(g4x_reset_complete(dev), 500);
1461}
1462
1463static int g4x_do_reset(struct drm_device *dev)
1464{
1465 struct drm_i915_private *dev_priv = dev->dev_private;
1466 int ret;
1467
1468 pci_write_config_byte(dev->pdev, I915_GDRST,
1469 GRDOM_RENDER | GRDOM_RESET_ENABLE);
1470 ret = wait_for(g4x_reset_complete(dev), 500);
1471 if (ret)
1472 return ret;
1473
1474 /* WaVcpClkGateDisableForMediaReset:ctg,elk */
1475 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
1476 POSTING_READ(VDECCLK_GATE_D);
1477
1478 pci_write_config_byte(dev->pdev, I915_GDRST,
1479 GRDOM_MEDIA | GRDOM_RESET_ENABLE);
1480 ret = wait_for(g4x_reset_complete(dev), 500);
1481 if (ret)
1482 return ret;
1483
1484 /* WaVcpClkGateDisableForMediaReset:ctg,elk */
1485 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
1486 POSTING_READ(VDECCLK_GATE_D);
1487
1488 pci_write_config_byte(dev->pdev, I915_GDRST, 0);
1489
1490 return 0;
1491}
1492
1493static int ironlake_do_reset(struct drm_device *dev)
1494{
1495 struct drm_i915_private *dev_priv = dev->dev_private;
1496 int ret;
1497
1498 I915_WRITE(ILK_GDSR,
1499 ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
1500 ret = wait_for((I915_READ(ILK_GDSR) &
1501 ILK_GRDOM_RESET_ENABLE) == 0, 500);
1502 if (ret)
1503 return ret;
1504
1505 I915_WRITE(ILK_GDSR,
1506 ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
1507 ret = wait_for((I915_READ(ILK_GDSR) &
1508 ILK_GRDOM_RESET_ENABLE) == 0, 500);
1509 if (ret)
1510 return ret;
1511
1512 I915_WRITE(ILK_GDSR, 0);
1513
1514 return 0;
1515}
1516
1517static int gen6_do_reset(struct drm_device *dev)
1518{
1519 struct drm_i915_private *dev_priv = dev->dev_private;
1520 int ret;
1521
1522 /* Reset the chip */
1523
1524 /* GEN6_GDRST is not in the gt power well, no need to check
1525 * for fifo space for the write or forcewake the chip for
1526 * the read
1527 */
1528 __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);
1529
1530 /* Spin waiting for the device to ack the reset request */
1531 ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
1532
1533 intel_uncore_forcewake_reset(dev, true);
1534
1535 return ret;
1536}
1537
1538static int wait_for_register(struct drm_i915_private *dev_priv,
1539 i915_reg_t reg,
1540 const u32 mask,
1541 const u32 value,
1542 const unsigned long timeout_ms)
1543{
1544 return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
1545}
1546
1547static int gen8_do_reset(struct drm_device *dev)
1548{
1549 struct drm_i915_private *dev_priv = dev->dev_private;
1550 struct intel_engine_cs *engine;
1551 int i;
1552
1553 for_each_ring(engine, dev_priv, i) {
1554 I915_WRITE(RING_RESET_CTL(engine->mmio_base),
1555 _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
1556
1557 if (wait_for_register(dev_priv,
1558 RING_RESET_CTL(engine->mmio_base),
1559 RESET_CTL_READY_TO_RESET,
1560 RESET_CTL_READY_TO_RESET,
1561 700)) {
1562 DRM_ERROR("%s: reset request timeout\n", engine->name);
1563 goto not_ready;
1564 }
1565 }
1566
1567 return gen6_do_reset(dev);
1568
1569not_ready:
1570 for_each_ring(engine, dev_priv, i)
1571 I915_WRITE(RING_RESET_CTL(engine->mmio_base),
1572 _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
1573
1574 return -EIO;
1575}
1576
1577static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
1578{
1579 if (!i915.reset)
1580 return NULL;
1581
1582 if (INTEL_INFO(dev)->gen >= 8)
1583 return gen8_do_reset;
1584 else if (INTEL_INFO(dev)->gen >= 6)
1585 return gen6_do_reset;
1586 else if (IS_GEN5(dev))
1587 return ironlake_do_reset;
1588 else if (IS_G4X(dev))
1589 return g4x_do_reset;
1590 else if (IS_G33(dev))
1591 return g33_do_reset;
1592 else if (INTEL_INFO(dev)->gen >= 3)
1593 return i915_do_reset;
1594 else
1595 return NULL;
1596}
1597
1598int intel_gpu_reset(struct drm_device *dev)
1599{
1600 struct drm_i915_private *dev_priv = to_i915(dev);
1601 int (*reset)(struct drm_device *);
1602 int ret;
1603
1604 reset = intel_get_gpu_reset(dev);
1605 if (reset == NULL)
1606 return -ENODEV;
1607
1608 /* If the power well sleeps during the reset, the reset
1609 * request may be dropped and never completes (causing -EIO).
1610 */
1611 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1612 ret = reset(dev);
1613 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1614
1615 return ret;
1616}
1617
1618bool intel_has_gpu_reset(struct drm_device *dev)
1619{
1620 return intel_get_gpu_reset(dev) != NULL;
1621}
1622
1623bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
1624{
1625 return check_for_unclaimed_mmio(dev_priv);
1626}
1627
1628bool
1629intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
1630{
1631 if (unlikely(i915.mmio_debug ||
1632 dev_priv->uncore.unclaimed_mmio_check <= 0))
1633 return false;
1634
1635 if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
1636 DRM_DEBUG("Unclaimed register detected, "
1637 "enabling oneshot unclaimed register reporting. "
1638 "Please use i915.mmio_debug=N for more information.\n");
1639 i915.mmio_debug++;
1640 dev_priv->uncore.unclaimed_mmio_check--;
1641 return true;
1642 }
1643
1644 return false;
1645}
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#include <drm/drm_managed.h>
25#include <linux/pm_runtime.h>
26
27#include "gt/intel_gt.h"
28#include "gt/intel_engine_regs.h"
29#include "gt/intel_gt_regs.h"
30
31#include "i915_drv.h"
32#include "i915_iosf_mbi.h"
33#include "i915_reg.h"
34#include "i915_trace.h"
35#include "i915_vgpu.h"
36
37#define FORCEWAKE_ACK_TIMEOUT_MS 50
38#define GT_FIFO_TIMEOUT_MS 10
39
40#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
41
42static void
43fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
44{
45 uncore->fw_get_funcs->force_wake_get(uncore, fw_domains);
46}
47
48void
49intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915)
50{
51 spin_lock_init(&i915->mmio_debug.lock);
52 i915->mmio_debug.unclaimed_mmio_check = 1;
53
54 i915->uncore.debug = &i915->mmio_debug;
55}
56
57static void mmio_debug_suspend(struct intel_uncore *uncore)
58{
59 if (!uncore->debug)
60 return;
61
62 spin_lock(&uncore->debug->lock);
63
64 /* Save and disable mmio debugging for the user bypass */
65 if (!uncore->debug->suspend_count++) {
66 uncore->debug->saved_mmio_check = uncore->debug->unclaimed_mmio_check;
67 uncore->debug->unclaimed_mmio_check = 0;
68 }
69
70 spin_unlock(&uncore->debug->lock);
71}
72
73static bool check_for_unclaimed_mmio(struct intel_uncore *uncore);
74
75static void mmio_debug_resume(struct intel_uncore *uncore)
76{
77 if (!uncore->debug)
78 return;
79
80 spin_lock(&uncore->debug->lock);
81
82 if (!--uncore->debug->suspend_count)
83 uncore->debug->unclaimed_mmio_check = uncore->debug->saved_mmio_check;
84
85 if (check_for_unclaimed_mmio(uncore))
86 drm_info(&uncore->i915->drm,
87 "Invalid mmio detected during user access\n");
88
89 spin_unlock(&uncore->debug->lock);
90}
91
92static const char * const forcewake_domain_names[] = {
93 "render",
94 "gt",
95 "media",
96 "vdbox0",
97 "vdbox1",
98 "vdbox2",
99 "vdbox3",
100 "vdbox4",
101 "vdbox5",
102 "vdbox6",
103 "vdbox7",
104 "vebox0",
105 "vebox1",
106 "vebox2",
107 "vebox3",
108 "gsc",
109};
110
111const char *
112intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
113{
114 BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
115
116 if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
117 return forcewake_domain_names[id];
118
119 WARN_ON(id);
120
121 return "unknown";
122}
123
124#define fw_ack(d) readl((d)->reg_ack)
125#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
126#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
127
128static inline void
129fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
130{
131 /*
132 * We don't really know if the powerwell for the forcewake domain we are
133 * trying to reset here does exist at this point (engines could be fused
134 * off in ICL+), so no waiting for acks
135 */
136 /* WaRsClearFWBitsAtReset */
137 if (GRAPHICS_VER(d->uncore->i915) >= 12)
138 fw_clear(d, 0xefff);
139 else
140 fw_clear(d, 0xffff);
141}
142
143static inline void
144fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
145{
146 GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
147 d->uncore->fw_domains_timer |= d->mask;
148 d->wake_count++;
149 hrtimer_start_range_ns(&d->timer,
150 NSEC_PER_MSEC,
151 NSEC_PER_MSEC,
152 HRTIMER_MODE_REL);
153}
154
155static inline int
156__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
157 const u32 ack,
158 const u32 value)
159{
160 return wait_for_atomic((fw_ack(d) & ack) == value,
161 FORCEWAKE_ACK_TIMEOUT_MS);
162}
163
164static inline int
165wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
166 const u32 ack)
167{
168 return __wait_for_ack(d, ack, 0);
169}
170
171static inline int
172wait_ack_set(const struct intel_uncore_forcewake_domain *d,
173 const u32 ack)
174{
175 return __wait_for_ack(d, ack, ack);
176}
177
178static inline void
179fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
180{
181 if (!wait_ack_clear(d, FORCEWAKE_KERNEL))
182 return;
183
184 if (fw_ack(d) == ~0) {
185 drm_err(&d->uncore->i915->drm,
186 "%s: MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n",
187 intel_uncore_forcewake_domain_to_str(d->id));
188 intel_gt_set_wedged_async(d->uncore->gt);
189 } else {
190 drm_err(&d->uncore->i915->drm,
191 "%s: timed out waiting for forcewake ack to clear.\n",
192 intel_uncore_forcewake_domain_to_str(d->id));
193 }
194
195 add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
196}
197
198enum ack_type {
199 ACK_CLEAR = 0,
200 ACK_SET
201};
202
203static int
204fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
205 const enum ack_type type)
206{
207 const u32 ack_bit = FORCEWAKE_KERNEL;
208 const u32 value = type == ACK_SET ? ack_bit : 0;
209 unsigned int pass;
210 bool ack_detected;
211
212 /*
213 * There is a possibility of driver's wake request colliding
214 * with hardware's own wake requests and that can cause
215 * hardware to not deliver the driver's ack message.
216 *
217 * Use a fallback bit toggle to kick the gpu state machine
218 * in the hope that the original ack will be delivered along with
219 * the fallback ack.
220 *
221 * This workaround is described in HSDES #1604254524 and it's known as:
222 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
223 * although the name is a bit misleading.
224 */
225
226 pass = 1;
227 do {
228 wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);
229
230 fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
231 /* Give gt some time to relax before the polling frenzy */
232 udelay(10 * pass);
233 wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);
234
235 ack_detected = (fw_ack(d) & ack_bit) == value;
236
237 fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
238 } while (!ack_detected && pass++ < 10);
239
240 drm_dbg(&d->uncore->i915->drm,
241 "%s had to use fallback to %s ack, 0x%x (passes %u)\n",
242 intel_uncore_forcewake_domain_to_str(d->id),
243 type == ACK_SET ? "set" : "clear",
244 fw_ack(d),
245 pass);
246
247 return ack_detected ? 0 : -ETIMEDOUT;
248}
249
250static inline void
251fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
252{
253 if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
254 return;
255
256 if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
257 fw_domain_wait_ack_clear(d);
258}
259
260static inline void
261fw_domain_get(const struct intel_uncore_forcewake_domain *d)
262{
263 fw_set(d, FORCEWAKE_KERNEL);
264}
265
266static inline void
267fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
268{
269 if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
270 drm_err(&d->uncore->i915->drm,
271 "%s: timed out waiting for forcewake ack request.\n",
272 intel_uncore_forcewake_domain_to_str(d->id));
273 add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
274 }
275}
276
277static inline void
278fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
279{
280 if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
281 return;
282
283 if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
284 fw_domain_wait_ack_set(d);
285}
286
287static inline void
288fw_domain_put(const struct intel_uncore_forcewake_domain *d)
289{
290 fw_clear(d, FORCEWAKE_KERNEL);
291}
292
293static void
294fw_domains_get_normal(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
295{
296 struct intel_uncore_forcewake_domain *d;
297 unsigned int tmp;
298
299 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
300
301 for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
302 fw_domain_wait_ack_clear(d);
303 fw_domain_get(d);
304 }
305
306 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
307 fw_domain_wait_ack_set(d);
308
309 uncore->fw_domains_active |= fw_domains;
310}
311
312static void
313fw_domains_get_with_fallback(struct intel_uncore *uncore,
314 enum forcewake_domains fw_domains)
315{
316 struct intel_uncore_forcewake_domain *d;
317 unsigned int tmp;
318
319 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
320
321 for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
322 fw_domain_wait_ack_clear_fallback(d);
323 fw_domain_get(d);
324 }
325
326 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
327 fw_domain_wait_ack_set_fallback(d);
328
329 uncore->fw_domains_active |= fw_domains;
330}
331
332static void
333fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
334{
335 struct intel_uncore_forcewake_domain *d;
336 unsigned int tmp;
337
338 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
339
340 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
341 fw_domain_put(d);
342
343 uncore->fw_domains_active &= ~fw_domains;
344}
345
346static void
347fw_domains_reset(struct intel_uncore *uncore,
348 enum forcewake_domains fw_domains)
349{
350 struct intel_uncore_forcewake_domain *d;
351 unsigned int tmp;
352
353 if (!fw_domains)
354 return;
355
356 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
357
358 for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
359 fw_domain_reset(d);
360}
361
362static inline u32 gt_thread_status(struct intel_uncore *uncore)
363{
364 u32 val;
365
366 val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
367 val &= GEN6_GT_THREAD_STATUS_CORE_MASK;
368
369 return val;
370}
371
372static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
373{
374 /*
375 * w/a for a sporadic read returning 0 by waiting for the GT
376 * thread to wake up.
377 */
378 drm_WARN_ONCE(&uncore->i915->drm,
379 wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
380 "GT thread status wait timed out\n");
381}
382
383static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
384 enum forcewake_domains fw_domains)
385{
386 fw_domains_get_normal(uncore, fw_domains);
387
388 /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
389 __gen6_gt_wait_for_thread_c0(uncore);
390}
391
392static inline u32 fifo_free_entries(struct intel_uncore *uncore)
393{
394 u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);
395
396 return count & GT_FIFO_FREE_ENTRIES_MASK;
397}
398
399static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
400{
401 u32 n;
402
403 /* On VLV, FIFO will be shared by both SW and HW.
404 * So, we need to read the FREE_ENTRIES everytime */
405 if (IS_VALLEYVIEW(uncore->i915))
406 n = fifo_free_entries(uncore);
407 else
408 n = uncore->fifo_count;
409
410 if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
411 if (wait_for_atomic((n = fifo_free_entries(uncore)) >
412 GT_FIFO_NUM_RESERVED_ENTRIES,
413 GT_FIFO_TIMEOUT_MS)) {
414 drm_dbg(&uncore->i915->drm,
415 "GT_FIFO timeout, entries: %u\n", n);
416 return;
417 }
418 }
419
420 uncore->fifo_count = n - 1;
421}
422
423static enum hrtimer_restart
424intel_uncore_fw_release_timer(struct hrtimer *timer)
425{
426 struct intel_uncore_forcewake_domain *domain =
427 container_of(timer, struct intel_uncore_forcewake_domain, timer);
428 struct intel_uncore *uncore = domain->uncore;
429 unsigned long irqflags;
430
431 assert_rpm_device_not_suspended(uncore->rpm);
432
433 if (xchg(&domain->active, false))
434 return HRTIMER_RESTART;
435
436 spin_lock_irqsave(&uncore->lock, irqflags);
437
438 uncore->fw_domains_timer &= ~domain->mask;
439
440 GEM_BUG_ON(!domain->wake_count);
441 if (--domain->wake_count == 0)
442 fw_domains_put(uncore, domain->mask);
443
444 spin_unlock_irqrestore(&uncore->lock, irqflags);
445
446 return HRTIMER_NORESTART;
447}
448
449/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
450static unsigned int
451intel_uncore_forcewake_reset(struct intel_uncore *uncore)
452{
453 unsigned long irqflags;
454 struct intel_uncore_forcewake_domain *domain;
455 int retry_count = 100;
456 enum forcewake_domains fw, active_domains;
457
458 iosf_mbi_assert_punit_acquired();
459
460 /* Hold uncore.lock across reset to prevent any register access
461 * with forcewake not set correctly. Wait until all pending
462 * timers are run before holding.
463 */
464 while (1) {
465 unsigned int tmp;
466
467 active_domains = 0;
468
469 for_each_fw_domain(domain, uncore, tmp) {
470 smp_store_mb(domain->active, false);
471 if (hrtimer_cancel(&domain->timer) == 0)
472 continue;
473
474 intel_uncore_fw_release_timer(&domain->timer);
475 }
476
477 spin_lock_irqsave(&uncore->lock, irqflags);
478
479 for_each_fw_domain(domain, uncore, tmp) {
480 if (hrtimer_active(&domain->timer))
481 active_domains |= domain->mask;
482 }
483
484 if (active_domains == 0)
485 break;
486
487 if (--retry_count == 0) {
488 drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
489 break;
490 }
491
492 spin_unlock_irqrestore(&uncore->lock, irqflags);
493 cond_resched();
494 }
495
496 drm_WARN_ON(&uncore->i915->drm, active_domains);
497
498 fw = uncore->fw_domains_active;
499 if (fw)
500 fw_domains_put(uncore, fw);
501
502 fw_domains_reset(uncore, uncore->fw_domains);
503 assert_forcewakes_inactive(uncore);
504
505 spin_unlock_irqrestore(&uncore->lock, irqflags);
506
507 return fw; /* track the lost user forcewake domains */
508}
509
510static bool
511fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
512{
513 u32 dbg;
514
515 dbg = __raw_uncore_read32(uncore, FPGA_DBG);
516 if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
517 return false;
518
519 /*
520 * Bugs in PCI programming (or failing hardware) can occasionally cause
521 * us to lose access to the MMIO BAR. When this happens, register
522 * reads will come back with 0xFFFFFFFF for every register and things
523 * go bad very quickly. Let's try to detect that special case and at
524 * least try to print a more informative message about what has
525 * happened.
526 *
527 * During normal operation the FPGA_DBG register has several unused
528 * bits that will always read back as 0's so we can use them as canaries
529 * to recognize when MMIO accesses are just busted.
530 */
531 if (unlikely(dbg == ~0))
532 drm_err(&uncore->i915->drm,
533 "Lost access to MMIO BAR; all registers now read back as 0xFFFFFFFF!\n");
534
535 __raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
536
537 return true;
538}
539
540static bool
541vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
542{
543 u32 cer;
544
545 cer = __raw_uncore_read32(uncore, CLAIM_ER);
546 if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
547 return false;
548
549 __raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);
550
551 return true;
552}
553
554static bool
555gen6_check_for_fifo_debug(struct intel_uncore *uncore)
556{
557 u32 fifodbg;
558
559 fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
560
561 if (unlikely(fifodbg)) {
562 drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x08%x\n", fifodbg);
563 __raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
564 }
565
566 return fifodbg;
567}
568
569static bool
570check_for_unclaimed_mmio(struct intel_uncore *uncore)
571{
572 bool ret = false;
573
574 lockdep_assert_held(&uncore->debug->lock);
575
576 if (uncore->debug->suspend_count)
577 return false;
578
579 if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
580 ret |= fpga_check_for_unclaimed_mmio(uncore);
581
582 if (intel_uncore_has_dbg_unclaimed(uncore))
583 ret |= vlv_check_for_unclaimed_mmio(uncore);
584
585 if (intel_uncore_has_fifo(uncore))
586 ret |= gen6_check_for_fifo_debug(uncore);
587
588 return ret;
589}
590
591static void forcewake_early_sanitize(struct intel_uncore *uncore,
592 unsigned int restore_forcewake)
593{
594 GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
595
596 /* WaDisableShadowRegForCpd:chv */
597 if (IS_CHERRYVIEW(uncore->i915)) {
598 __raw_uncore_write32(uncore, GTFIFOCTL,
599 __raw_uncore_read32(uncore, GTFIFOCTL) |
600 GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
601 GT_FIFO_CTL_RC6_POLICY_STALL);
602 }
603
604 iosf_mbi_punit_acquire();
605 intel_uncore_forcewake_reset(uncore);
606 if (restore_forcewake) {
607 spin_lock_irq(&uncore->lock);
608 fw_domains_get(uncore, restore_forcewake);
609
610 if (intel_uncore_has_fifo(uncore))
611 uncore->fifo_count = fifo_free_entries(uncore);
612 spin_unlock_irq(&uncore->lock);
613 }
614 iosf_mbi_punit_release();
615}
616
617void intel_uncore_suspend(struct intel_uncore *uncore)
618{
619 if (!intel_uncore_has_forcewake(uncore))
620 return;
621
622 iosf_mbi_punit_acquire();
623 iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
624 &uncore->pmic_bus_access_nb);
625 uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
626 iosf_mbi_punit_release();
627}
628
629void intel_uncore_resume_early(struct intel_uncore *uncore)
630{
631 unsigned int restore_forcewake;
632
633 if (intel_uncore_unclaimed_mmio(uncore))
634 drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");
635
636 if (!intel_uncore_has_forcewake(uncore))
637 return;
638
639 restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
640 forcewake_early_sanitize(uncore, restore_forcewake);
641
642 iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
643}
644
645void intel_uncore_runtime_resume(struct intel_uncore *uncore)
646{
647 if (!intel_uncore_has_forcewake(uncore))
648 return;
649
650 iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
651}
652
653static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
654 enum forcewake_domains fw_domains)
655{
656 struct intel_uncore_forcewake_domain *domain;
657 unsigned int tmp;
658
659 fw_domains &= uncore->fw_domains;
660
661 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
662 if (domain->wake_count++) {
663 fw_domains &= ~domain->mask;
664 domain->active = true;
665 }
666 }
667
668 if (fw_domains)
669 fw_domains_get(uncore, fw_domains);
670}
671
672/**
673 * intel_uncore_forcewake_get - grab forcewake domain references
674 * @uncore: the intel_uncore structure
675 * @fw_domains: forcewake domains to get reference on
676 *
677 * This function can be used get GT's forcewake domain references.
678 * Normal register access will handle the forcewake domains automatically.
679 * However if some sequence requires the GT to not power down a particular
680 * forcewake domains this function should be called at the beginning of the
681 * sequence. And subsequently the reference should be dropped by symmetric
682 * call to intel_unforce_forcewake_put(). Usually caller wants all the domains
683 * to be kept awake so the @fw_domains would be then FORCEWAKE_ALL.
684 */
685void intel_uncore_forcewake_get(struct intel_uncore *uncore,
686 enum forcewake_domains fw_domains)
687{
688 unsigned long irqflags;
689
690 if (!uncore->fw_get_funcs)
691 return;
692
693 assert_rpm_wakelock_held(uncore->rpm);
694
695 spin_lock_irqsave(&uncore->lock, irqflags);
696 __intel_uncore_forcewake_get(uncore, fw_domains);
697 spin_unlock_irqrestore(&uncore->lock, irqflags);
698}
699
700/**
701 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
702 * @uncore: the intel_uncore structure
703 *
704 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
705 * the GT powerwell and in the process disable our debugging for the
706 * duration of userspace's bypass.
707 */
708void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
709{
710 spin_lock_irq(&uncore->lock);
711 if (!uncore->user_forcewake_count++) {
712 intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
713 mmio_debug_suspend(uncore);
714 }
715 spin_unlock_irq(&uncore->lock);
716}
717
718/**
719 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
720 * @uncore: the intel_uncore structure
721 *
722 * This function complements intel_uncore_forcewake_user_get() and releases
723 * the GT powerwell taken on behalf of the userspace bypass.
724 */
725void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
726{
727 spin_lock_irq(&uncore->lock);
728 if (!--uncore->user_forcewake_count) {
729 mmio_debug_resume(uncore);
730 intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
731 }
732 spin_unlock_irq(&uncore->lock);
733}
734
735/**
736 * intel_uncore_forcewake_get__locked - grab forcewake domain references
737 * @uncore: the intel_uncore structure
738 * @fw_domains: forcewake domains to get reference on
739 *
740 * See intel_uncore_forcewake_get(). This variant places the onus
741 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
742 */
743void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
744 enum forcewake_domains fw_domains)
745{
746 lockdep_assert_held(&uncore->lock);
747
748 if (!uncore->fw_get_funcs)
749 return;
750
751 __intel_uncore_forcewake_get(uncore, fw_domains);
752}
753
754static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
755 enum forcewake_domains fw_domains,
756 bool delayed)
757{
758 struct intel_uncore_forcewake_domain *domain;
759 unsigned int tmp;
760
761 fw_domains &= uncore->fw_domains;
762
763 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
764 GEM_BUG_ON(!domain->wake_count);
765
766 if (--domain->wake_count) {
767 domain->active = true;
768 continue;
769 }
770
771 if (delayed &&
772 !(domain->uncore->fw_domains_timer & domain->mask))
773 fw_domain_arm_timer(domain);
774 else
775 fw_domains_put(uncore, domain->mask);
776 }
777}
778
779/**
780 * intel_uncore_forcewake_put - release a forcewake domain reference
781 * @uncore: the intel_uncore structure
782 * @fw_domains: forcewake domains to put references
783 *
784 * This function drops the device-level forcewakes for specified
785 * domains obtained by intel_uncore_forcewake_get().
786 */
787void intel_uncore_forcewake_put(struct intel_uncore *uncore,
788 enum forcewake_domains fw_domains)
789{
790 unsigned long irqflags;
791
792 if (!uncore->fw_get_funcs)
793 return;
794
795 spin_lock_irqsave(&uncore->lock, irqflags);
796 __intel_uncore_forcewake_put(uncore, fw_domains, false);
797 spin_unlock_irqrestore(&uncore->lock, irqflags);
798}
799
800void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
801 enum forcewake_domains fw_domains)
802{
803 unsigned long irqflags;
804
805 if (!uncore->fw_get_funcs)
806 return;
807
808 spin_lock_irqsave(&uncore->lock, irqflags);
809 __intel_uncore_forcewake_put(uncore, fw_domains, true);
810 spin_unlock_irqrestore(&uncore->lock, irqflags);
811}
812
813/**
814 * intel_uncore_forcewake_flush - flush the delayed release
815 * @uncore: the intel_uncore structure
816 * @fw_domains: forcewake domains to flush
817 */
818void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
819 enum forcewake_domains fw_domains)
820{
821 struct intel_uncore_forcewake_domain *domain;
822 unsigned int tmp;
823
824 if (!uncore->fw_get_funcs)
825 return;
826
827 fw_domains &= uncore->fw_domains;
828 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
829 WRITE_ONCE(domain->active, false);
830 if (hrtimer_cancel(&domain->timer))
831 intel_uncore_fw_release_timer(&domain->timer);
832 }
833}
834
835/**
836 * intel_uncore_forcewake_put__locked - release forcewake domain references
837 * @uncore: the intel_uncore structure
838 * @fw_domains: forcewake domains to put references
839 *
840 * See intel_uncore_forcewake_put(). This variant places the onus
841 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
842 */
843void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
844 enum forcewake_domains fw_domains)
845{
846 lockdep_assert_held(&uncore->lock);
847
848 if (!uncore->fw_get_funcs)
849 return;
850
851 __intel_uncore_forcewake_put(uncore, fw_domains, false);
852}
853
854void assert_forcewakes_inactive(struct intel_uncore *uncore)
855{
856 if (!uncore->fw_get_funcs)
857 return;
858
859 drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
860 "Expected all fw_domains to be inactive, but %08x are still on\n",
861 uncore->fw_domains_active);
862}
863
864void assert_forcewakes_active(struct intel_uncore *uncore,
865 enum forcewake_domains fw_domains)
866{
867 struct intel_uncore_forcewake_domain *domain;
868 unsigned int tmp;
869
870 if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
871 return;
872
873 if (!uncore->fw_get_funcs)
874 return;
875
876 spin_lock_irq(&uncore->lock);
877
878 assert_rpm_wakelock_held(uncore->rpm);
879
880 fw_domains &= uncore->fw_domains;
881 drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
882 "Expected %08x fw_domains to be active, but %08x are off\n",
883 fw_domains, fw_domains & ~uncore->fw_domains_active);
884
885 /*
886 * Check that the caller has an explicit wakeref and we don't mistake
887 * it for the auto wakeref.
888 */
889 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
890 unsigned int actual = READ_ONCE(domain->wake_count);
891 unsigned int expect = 1;
892
893 if (uncore->fw_domains_timer & domain->mask)
894 expect++; /* pending automatic release */
895
896 if (drm_WARN(&uncore->i915->drm, actual < expect,
897 "Expected domain %d to be held awake by caller, count=%d\n",
898 domain->id, actual))
899 break;
900 }
901
902 spin_unlock_irq(&uncore->lock);
903}
904
905/*
906 * We give fast paths for the really cool registers. The second range includes
907 * media domains (and the GSC starting from Xe_LPM+)
908 */
909#define NEEDS_FORCE_WAKE(reg) ({ \
910 u32 __reg = (reg); \
911 __reg < 0x40000 || __reg >= 0x116000; \
912})
913
914static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
915{
916 if (offset < entry->start)
917 return -1;
918 else if (offset > entry->end)
919 return 1;
920 else
921 return 0;
922}
923
924/* Copied and "macroized" from lib/bsearch.c */
925#define BSEARCH(key, base, num, cmp) ({ \
926 unsigned int start__ = 0, end__ = (num); \
927 typeof(base) result__ = NULL; \
928 while (start__ < end__) { \
929 unsigned int mid__ = start__ + (end__ - start__) / 2; \
930 int ret__ = (cmp)((key), (base) + mid__); \
931 if (ret__ < 0) { \
932 end__ = mid__; \
933 } else if (ret__ > 0) { \
934 start__ = mid__ + 1; \
935 } else { \
936 result__ = (base) + mid__; \
937 break; \
938 } \
939 } \
940 result__; \
941})
942
943static enum forcewake_domains
944find_fw_domain(struct intel_uncore *uncore, u32 offset)
945{
946 const struct intel_forcewake_range *entry;
947
948 if (IS_GSI_REG(offset))
949 offset += uncore->gsi_offset;
950
951 entry = BSEARCH(offset,
952 uncore->fw_domains_table,
953 uncore->fw_domains_table_entries,
954 fw_range_cmp);
955
956 if (!entry)
957 return 0;
958
959 /*
960 * The list of FW domains depends on the SKU in gen11+ so we
961 * can't determine it statically. We use FORCEWAKE_ALL and
962 * translate it here to the list of available domains.
963 */
964 if (entry->domains == FORCEWAKE_ALL)
965 return uncore->fw_domains;
966
967 drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
968 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
969 entry->domains & ~uncore->fw_domains, offset);
970
971 return entry->domains;
972}
973
974/*
975 * Shadowed register tables describe special register ranges that i915 is
976 * allowed to write to without acquiring forcewake. If these registers' power
977 * wells are down, the hardware will save values written by i915 to a shadow
978 * copy and automatically transfer them into the real register the next time
979 * the power well is woken up. Shadowing only applies to writes; forcewake
980 * must still be acquired when reading from registers in these ranges.
981 *
982 * The documentation for shadowed registers is somewhat spotty on older
983 * platforms. However missing registers from these lists is non-fatal; it just
984 * means we'll wake up the hardware for some register accesses where we didn't
985 * really need to.
986 *
987 * The ranges listed in these tables must be sorted by offset.
988 *
989 * When adding new tables here, please also add them to
990 * intel_shadow_table_check() in selftests/intel_uncore.c so that they will be
991 * scanned for obvious mistakes or typos by the selftests.
992 */
993
994static const struct i915_range gen8_shadowed_regs[] = {
995 { .start = 0x2030, .end = 0x2030 },
996 { .start = 0xA008, .end = 0xA00C },
997 { .start = 0x12030, .end = 0x12030 },
998 { .start = 0x1a030, .end = 0x1a030 },
999 { .start = 0x22030, .end = 0x22030 },
1000};
1001
1002static const struct i915_range gen11_shadowed_regs[] = {
1003 { .start = 0x2030, .end = 0x2030 },
1004 { .start = 0x2550, .end = 0x2550 },
1005 { .start = 0xA008, .end = 0xA00C },
1006 { .start = 0x22030, .end = 0x22030 },
1007 { .start = 0x22230, .end = 0x22230 },
1008 { .start = 0x22510, .end = 0x22550 },
1009 { .start = 0x1C0030, .end = 0x1C0030 },
1010 { .start = 0x1C0230, .end = 0x1C0230 },
1011 { .start = 0x1C0510, .end = 0x1C0550 },
1012 { .start = 0x1C4030, .end = 0x1C4030 },
1013 { .start = 0x1C4230, .end = 0x1C4230 },
1014 { .start = 0x1C4510, .end = 0x1C4550 },
1015 { .start = 0x1C8030, .end = 0x1C8030 },
1016 { .start = 0x1C8230, .end = 0x1C8230 },
1017 { .start = 0x1C8510, .end = 0x1C8550 },
1018 { .start = 0x1D0030, .end = 0x1D0030 },
1019 { .start = 0x1D0230, .end = 0x1D0230 },
1020 { .start = 0x1D0510, .end = 0x1D0550 },
1021 { .start = 0x1D4030, .end = 0x1D4030 },
1022 { .start = 0x1D4230, .end = 0x1D4230 },
1023 { .start = 0x1D4510, .end = 0x1D4550 },
1024 { .start = 0x1D8030, .end = 0x1D8030 },
1025 { .start = 0x1D8230, .end = 0x1D8230 },
1026 { .start = 0x1D8510, .end = 0x1D8550 },
1027};
1028
1029static const struct i915_range gen12_shadowed_regs[] = {
1030 { .start = 0x2030, .end = 0x2030 },
1031 { .start = 0x2510, .end = 0x2550 },
1032 { .start = 0xA008, .end = 0xA00C },
1033 { .start = 0xA188, .end = 0xA188 },
1034 { .start = 0xA278, .end = 0xA278 },
1035 { .start = 0xA540, .end = 0xA56C },
1036 { .start = 0xC4C8, .end = 0xC4C8 },
1037 { .start = 0xC4D4, .end = 0xC4D4 },
1038 { .start = 0xC600, .end = 0xC600 },
1039 { .start = 0x22030, .end = 0x22030 },
1040 { .start = 0x22510, .end = 0x22550 },
1041 { .start = 0x1C0030, .end = 0x1C0030 },
1042 { .start = 0x1C0510, .end = 0x1C0550 },
1043 { .start = 0x1C4030, .end = 0x1C4030 },
1044 { .start = 0x1C4510, .end = 0x1C4550 },
1045 { .start = 0x1C8030, .end = 0x1C8030 },
1046 { .start = 0x1C8510, .end = 0x1C8550 },
1047 { .start = 0x1D0030, .end = 0x1D0030 },
1048 { .start = 0x1D0510, .end = 0x1D0550 },
1049 { .start = 0x1D4030, .end = 0x1D4030 },
1050 { .start = 0x1D4510, .end = 0x1D4550 },
1051 { .start = 0x1D8030, .end = 0x1D8030 },
1052 { .start = 0x1D8510, .end = 0x1D8550 },
1053
1054 /*
1055 * The rest of these ranges are specific to Xe_HP and beyond, but
1056 * are reserved/unused ranges on earlier gen12 platforms, so they can
1057 * be safely added to the gen12 table.
1058 */
1059 { .start = 0x1E0030, .end = 0x1E0030 },
1060 { .start = 0x1E0510, .end = 0x1E0550 },
1061 { .start = 0x1E4030, .end = 0x1E4030 },
1062 { .start = 0x1E4510, .end = 0x1E4550 },
1063 { .start = 0x1E8030, .end = 0x1E8030 },
1064 { .start = 0x1E8510, .end = 0x1E8550 },
1065 { .start = 0x1F0030, .end = 0x1F0030 },
1066 { .start = 0x1F0510, .end = 0x1F0550 },
1067 { .start = 0x1F4030, .end = 0x1F4030 },
1068 { .start = 0x1F4510, .end = 0x1F4550 },
1069 { .start = 0x1F8030, .end = 0x1F8030 },
1070 { .start = 0x1F8510, .end = 0x1F8550 },
1071};
1072
1073static const struct i915_range dg2_shadowed_regs[] = {
1074 { .start = 0x2030, .end = 0x2030 },
1075 { .start = 0x2510, .end = 0x2550 },
1076 { .start = 0xA008, .end = 0xA00C },
1077 { .start = 0xA188, .end = 0xA188 },
1078 { .start = 0xA278, .end = 0xA278 },
1079 { .start = 0xA540, .end = 0xA56C },
1080 { .start = 0xC4C8, .end = 0xC4C8 },
1081 { .start = 0xC4E0, .end = 0xC4E0 },
1082 { .start = 0xC600, .end = 0xC600 },
1083 { .start = 0xC658, .end = 0xC658 },
1084 { .start = 0x22030, .end = 0x22030 },
1085 { .start = 0x22510, .end = 0x22550 },
1086 { .start = 0x1C0030, .end = 0x1C0030 },
1087 { .start = 0x1C0510, .end = 0x1C0550 },
1088 { .start = 0x1C4030, .end = 0x1C4030 },
1089 { .start = 0x1C4510, .end = 0x1C4550 },
1090 { .start = 0x1C8030, .end = 0x1C8030 },
1091 { .start = 0x1C8510, .end = 0x1C8550 },
1092 { .start = 0x1D0030, .end = 0x1D0030 },
1093 { .start = 0x1D0510, .end = 0x1D0550 },
1094 { .start = 0x1D4030, .end = 0x1D4030 },
1095 { .start = 0x1D4510, .end = 0x1D4550 },
1096 { .start = 0x1D8030, .end = 0x1D8030 },
1097 { .start = 0x1D8510, .end = 0x1D8550 },
1098 { .start = 0x1E0030, .end = 0x1E0030 },
1099 { .start = 0x1E0510, .end = 0x1E0550 },
1100 { .start = 0x1E4030, .end = 0x1E4030 },
1101 { .start = 0x1E4510, .end = 0x1E4550 },
1102 { .start = 0x1E8030, .end = 0x1E8030 },
1103 { .start = 0x1E8510, .end = 0x1E8550 },
1104 { .start = 0x1F0030, .end = 0x1F0030 },
1105 { .start = 0x1F0510, .end = 0x1F0550 },
1106 { .start = 0x1F4030, .end = 0x1F4030 },
1107 { .start = 0x1F4510, .end = 0x1F4550 },
1108 { .start = 0x1F8030, .end = 0x1F8030 },
1109 { .start = 0x1F8510, .end = 0x1F8550 },
1110};
1111
1112static const struct i915_range mtl_shadowed_regs[] = {
1113 { .start = 0x2030, .end = 0x2030 },
1114 { .start = 0x2510, .end = 0x2550 },
1115 { .start = 0xA008, .end = 0xA00C },
1116 { .start = 0xA188, .end = 0xA188 },
1117 { .start = 0xA278, .end = 0xA278 },
1118 { .start = 0xA540, .end = 0xA56C },
1119 { .start = 0xC050, .end = 0xC050 },
1120 { .start = 0xC340, .end = 0xC340 },
1121 { .start = 0xC4C8, .end = 0xC4C8 },
1122 { .start = 0xC4E0, .end = 0xC4E0 },
1123 { .start = 0xC600, .end = 0xC600 },
1124 { .start = 0xC658, .end = 0xC658 },
1125 { .start = 0xCFD4, .end = 0xCFDC },
1126 { .start = 0x22030, .end = 0x22030 },
1127 { .start = 0x22510, .end = 0x22550 },
1128};
1129
1130static const struct i915_range xelpmp_shadowed_regs[] = {
1131 { .start = 0x1C0030, .end = 0x1C0030 },
1132 { .start = 0x1C0510, .end = 0x1C0550 },
1133 { .start = 0x1C8030, .end = 0x1C8030 },
1134 { .start = 0x1C8510, .end = 0x1C8550 },
1135 { .start = 0x1D0030, .end = 0x1D0030 },
1136 { .start = 0x1D0510, .end = 0x1D0550 },
1137 { .start = 0x38A008, .end = 0x38A00C },
1138 { .start = 0x38A188, .end = 0x38A188 },
1139 { .start = 0x38A278, .end = 0x38A278 },
1140 { .start = 0x38A540, .end = 0x38A56C },
1141 { .start = 0x38A618, .end = 0x38A618 },
1142 { .start = 0x38C050, .end = 0x38C050 },
1143 { .start = 0x38C340, .end = 0x38C340 },
1144 { .start = 0x38C4C8, .end = 0x38C4C8 },
1145 { .start = 0x38C4E0, .end = 0x38C4E4 },
1146 { .start = 0x38C600, .end = 0x38C600 },
1147 { .start = 0x38C658, .end = 0x38C658 },
1148 { .start = 0x38CFD4, .end = 0x38CFDC },
1149};
1150
1151static int mmio_range_cmp(u32 key, const struct i915_range *range)
1152{
1153 if (key < range->start)
1154 return -1;
1155 else if (key > range->end)
1156 return 1;
1157 else
1158 return 0;
1159}
1160
1161static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
1162{
1163 if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
1164 return false;
1165
1166 if (IS_GSI_REG(offset))
1167 offset += uncore->gsi_offset;
1168
1169 return BSEARCH(offset,
1170 uncore->shadowed_reg_table,
1171 uncore->shadowed_reg_table_entries,
1172 mmio_range_cmp);
1173}
1174
1175static enum forcewake_domains
1176gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
1177{
1178 return FORCEWAKE_RENDER;
1179}
1180
1181#define __fwtable_reg_read_fw_domains(uncore, offset) \
1182({ \
1183 enum forcewake_domains __fwd = 0; \
1184 if (NEEDS_FORCE_WAKE((offset))) \
1185 __fwd = find_fw_domain(uncore, offset); \
1186 __fwd; \
1187})
1188
1189#define __fwtable_reg_write_fw_domains(uncore, offset) \
1190({ \
1191 enum forcewake_domains __fwd = 0; \
1192 const u32 __offset = (offset); \
1193 if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \
1194 __fwd = find_fw_domain(uncore, __offset); \
1195 __fwd; \
1196})
1197
1198#define GEN_FW_RANGE(s, e, d) \
1199 { .start = (s), .end = (e), .domains = (d) }
1200
1201/*
1202 * All platforms' forcewake tables below must be sorted by offset ranges.
1203 * Furthermore, new forcewake tables added should be "watertight" and have
1204 * no gaps between ranges.
1205 *
1206 * When there are multiple consecutive ranges listed in the bspec with
1207 * the same forcewake domain, it is customary to combine them into a single
1208 * row in the tables below to keep the tables small and lookups fast.
1209 * Likewise, reserved/unused ranges may be combined with the preceding and/or
1210 * following ranges since the driver will never be making MMIO accesses in
1211 * those ranges.
1212 *
1213 * For example, if the bspec were to list:
1214 *
1215 * ...
1216 * 0x1000 - 0x1fff: GT
1217 * 0x2000 - 0x2cff: GT
1218 * 0x2d00 - 0x2fff: unused/reserved
1219 * 0x3000 - 0xffff: GT
1220 * ...
1221 *
1222 * these could all be represented by a single line in the code:
1223 *
1224 * GEN_FW_RANGE(0x1000, 0xffff, FORCEWAKE_GT)
1225 *
1226 * When adding new forcewake tables here, please also add them to
1227 * intel_uncore_mock_selftests in selftests/intel_uncore.c so that they will be
1228 * scanned for obvious mistakes or typos by the selftests.
1229 */
1230
1231static const struct intel_forcewake_range __gen6_fw_ranges[] = {
1232 GEN_FW_RANGE(0x0, 0x3ffff, FORCEWAKE_RENDER),
1233};
1234
1235static const struct intel_forcewake_range __vlv_fw_ranges[] = {
1236 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
1237 GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
1238 GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
1239 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1240 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
1241 GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
1242 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
1243};
1244
1245static const struct intel_forcewake_range __chv_fw_ranges[] = {
1246 GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
1247 GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1248 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1249 GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1250 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1251 GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1252 GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
1253 GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1254 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1255 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
1256 GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
1257 GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1258 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1259 GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
1260 GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
1261 GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
1262};
1263
1264static const struct intel_forcewake_range __gen9_fw_ranges[] = {
1265 GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT),
1266 GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
1267 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1268 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
1269 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1270 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
1271 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1272 GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_GT),
1273 GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
1274 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1275 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
1276 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1277 GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
1278 GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
1279 GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_GT),
1280 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
1281 GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_GT),
1282 GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1283 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
1284 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1285 GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_GT),
1286 GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
1287 GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_GT),
1288 GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
1289 GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT),
1290 GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1291 GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_GT),
1292 GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
1293 GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_GT),
1294 GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
1295 GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_GT),
1296 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
1297};
1298
1299static const struct intel_forcewake_range __gen11_fw_ranges[] = {
1300 GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
1301 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1302 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
1303 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1304 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
1305 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1306 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1307 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1308 GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
1309 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1310 GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
1311 GEN_FW_RANGE(0x8800, 0x8bff, 0),
1312 GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
1313 GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_GT),
1314 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1315 GEN_FW_RANGE(0x9560, 0x95ff, 0),
1316 GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_GT),
1317 GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1318 GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_GT),
1319 GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
1320 GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_GT),
1321 GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
1322 GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_GT),
1323 GEN_FW_RANGE(0x24000, 0x2407f, 0),
1324 GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_GT),
1325 GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
1326 GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_GT),
1327 GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
1328 GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_GT),
1329 GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1330 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
1331 GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
1332 GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
1333 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
1334 GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
1335};
1336
1337static const struct intel_forcewake_range __gen12_fw_ranges[] = {
1338 GEN_FW_RANGE(0x0, 0x1fff, 0), /*
1339 0x0 - 0xaff: reserved
1340 0xb00 - 0x1fff: always on */
1341 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1342 GEN_FW_RANGE(0x2700, 0x27ff, FORCEWAKE_GT),
1343 GEN_FW_RANGE(0x2800, 0x2aff, FORCEWAKE_RENDER),
1344 GEN_FW_RANGE(0x2b00, 0x2fff, FORCEWAKE_GT),
1345 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1346 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
1347 0x4000 - 0x48ff: gt
1348 0x4900 - 0x51ff: reserved */
1349 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
1350 0x5200 - 0x53ff: render
1351 0x5400 - 0x54ff: reserved
1352 0x5500 - 0x7fff: render */
1353 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1354 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1355 GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
1356 0x8160 - 0x817f: reserved
1357 0x8180 - 0x81ff: always on */
1358 GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
1359 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1360 GEN_FW_RANGE(0x8500, 0x94cf, FORCEWAKE_GT), /*
1361 0x8500 - 0x87ff: gt
1362 0x8800 - 0x8fff: reserved
1363 0x9000 - 0x947f: gt
1364 0x9480 - 0x94cf: reserved */
1365 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1366 GEN_FW_RANGE(0x9560, 0x97ff, 0), /*
1367 0x9560 - 0x95ff: always on
1368 0x9600 - 0x97ff: reserved */
1369 GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
1370 GEN_FW_RANGE(0xb000, 0xb3ff, FORCEWAKE_RENDER),
1371 GEN_FW_RANGE(0xb400, 0xcfff, FORCEWAKE_GT), /*
1372 0xb400 - 0xbf7f: gt
1373 0xb480 - 0xbfff: reserved
1374 0xc000 - 0xcfff: gt */
1375 GEN_FW_RANGE(0xd000, 0xd7ff, 0),
1376 GEN_FW_RANGE(0xd800, 0xd8ff, FORCEWAKE_RENDER),
1377 GEN_FW_RANGE(0xd900, 0xdbff, FORCEWAKE_GT),
1378 GEN_FW_RANGE(0xdc00, 0xefff, FORCEWAKE_RENDER), /*
1379 0xdc00 - 0xddff: render
1380 0xde00 - 0xde7f: reserved
1381 0xde80 - 0xe8ff: render
1382 0xe900 - 0xefff: reserved */
1383 GEN_FW_RANGE(0xf000, 0x147ff, FORCEWAKE_GT), /*
1384 0xf000 - 0xffff: gt
1385 0x10000 - 0x147ff: reserved */
1386 GEN_FW_RANGE(0x14800, 0x1ffff, FORCEWAKE_RENDER), /*
1387 0x14800 - 0x14fff: render
1388 0x15000 - 0x16dff: reserved
1389 0x16e00 - 0x1bfff: render
1390 0x1c000 - 0x1ffff: reserved */
1391 GEN_FW_RANGE(0x20000, 0x20fff, FORCEWAKE_MEDIA_VDBOX0),
1392 GEN_FW_RANGE(0x21000, 0x21fff, FORCEWAKE_MEDIA_VDBOX2),
1393 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
1394 GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
1395 0x24000 - 0x2407f: always on
1396 0x24080 - 0x2417f: reserved */
1397 GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
1398 0x24180 - 0x241ff: gt
1399 0x24200 - 0x249ff: reserved */
1400 GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
1401 0x24a00 - 0x24a7f: render
1402 0x24a80 - 0x251ff: reserved */
1403 GEN_FW_RANGE(0x25200, 0x255ff, FORCEWAKE_GT), /*
1404 0x25200 - 0x252ff: gt
1405 0x25300 - 0x255ff: reserved */
1406 GEN_FW_RANGE(0x25600, 0x2567f, FORCEWAKE_MEDIA_VDBOX0),
1407 GEN_FW_RANGE(0x25680, 0x259ff, FORCEWAKE_MEDIA_VDBOX2), /*
1408 0x25680 - 0x256ff: VD2
1409 0x25700 - 0x259ff: reserved */
1410 GEN_FW_RANGE(0x25a00, 0x25a7f, FORCEWAKE_MEDIA_VDBOX0),
1411 GEN_FW_RANGE(0x25a80, 0x2ffff, FORCEWAKE_MEDIA_VDBOX2), /*
1412 0x25a80 - 0x25aff: VD2
1413 0x25b00 - 0x2ffff: reserved */
1414 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
1415 GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1416 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
1417 0x1c0000 - 0x1c2bff: VD0
1418 0x1c2c00 - 0x1c2cff: reserved
1419 0x1c2d00 - 0x1c2dff: VD0
1420 0x1c2e00 - 0x1c3eff: reserved
1421 0x1c3f00 - 0x1c3fff: VD0 */
1422 GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
1423 GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
1424 0x1c8000 - 0x1ca0ff: VE0
1425 0x1ca100 - 0x1cbeff: reserved
1426 0x1cbf00 - 0x1cbfff: VE0 */
1427 GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
1428 0x1cc000 - 0x1ccfff: VD0
1429 0x1cd000 - 0x1cffff: reserved */
1430 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
1431 0x1d0000 - 0x1d2bff: VD2
1432 0x1d2c00 - 0x1d2cff: reserved
1433 0x1d2d00 - 0x1d2dff: VD2
1434 0x1d2e00 - 0x1d3eff: reserved
1435 0x1d3f00 - 0x1d3fff: VD2 */
1436};
1437
1438static const struct intel_forcewake_range __dg2_fw_ranges[] = {
1439 GEN_FW_RANGE(0x0, 0x1fff, 0), /*
1440 0x0 - 0xaff: reserved
1441 0xb00 - 0x1fff: always on */
1442 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1443 GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT),
1444 GEN_FW_RANGE(0x4b00, 0x51ff, 0), /*
1445 0x4b00 - 0x4fff: reserved
1446 0x5000 - 0x51ff: always on */
1447 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1448 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1449 GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1450 GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
1451 0x8160 - 0x817f: reserved
1452 0x8180 - 0x81ff: always on */
1453 GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
1454 GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1455 GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /*
1456 0x8500 - 0x87ff: gt
1457 0x8800 - 0x8c7f: reserved
1458 0x8c80 - 0x8cff: gt (DG2 only) */
1459 GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /*
1460 0x8d00 - 0x8dff: render (DG2 only)
1461 0x8e00 - 0x8fff: reserved */
1462 GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /*
1463 0x9000 - 0x947f: gt
1464 0x9480 - 0x94cf: reserved */
1465 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1466 GEN_FW_RANGE(0x9560, 0x967f, 0), /*
1467 0x9560 - 0x95ff: always on
1468 0x9600 - 0x967f: reserved */
1469 GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
1470 0x9680 - 0x96ff: render
1471 0x9700 - 0x97ff: reserved */
1472 GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
1473 0x9800 - 0xb4ff: gt
1474 0xb500 - 0xbfff: reserved
1475 0xc000 - 0xcfff: gt */
1476 GEN_FW_RANGE(0xd000, 0xd7ff, 0),
1477 GEN_FW_RANGE(0xd800, 0xd87f, FORCEWAKE_RENDER),
1478 GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT),
1479 GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
1480 GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
1481 0xdd00 - 0xddff: gt
1482 0xde00 - 0xde7f: reserved */
1483 GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
1484 0xde80 - 0xdfff: render
1485 0xe000 - 0xe0ff: reserved
1486 0xe100 - 0xe8ff: render */
1487 GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /*
1488 0xe900 - 0xe9ff: gt
1489 0xea00 - 0xefff: reserved
1490 0xf000 - 0xffff: gt */
1491 GEN_FW_RANGE(0x10000, 0x12fff, 0), /*
1492 0x10000 - 0x11fff: reserved
1493 0x12000 - 0x127ff: always on
1494 0x12800 - 0x12fff: reserved */
1495 GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0),
1496 GEN_FW_RANGE(0x13200, 0x147ff, FORCEWAKE_MEDIA_VDBOX2), /*
1497 0x13200 - 0x133ff: VD2 (DG2 only)
1498 0x13400 - 0x147ff: reserved */
1499 GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER),
1500 GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /*
1501 0x15000 - 0x15fff: gt (DG2 only)
1502 0x16000 - 0x16dff: reserved */
1503 GEN_FW_RANGE(0x16e00, 0x21fff, FORCEWAKE_RENDER), /*
1504 0x16e00 - 0x1ffff: render
1505 0x20000 - 0x21fff: reserved */
1506 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
1507 GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
1508 0x24000 - 0x2407f: always on
1509 0x24080 - 0x2417f: reserved */
1510 GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
1511 0x24180 - 0x241ff: gt
1512 0x24200 - 0x249ff: reserved */
1513 GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
1514 0x24a00 - 0x24a7f: render
1515 0x24a80 - 0x251ff: reserved */
1516 GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /*
1517 0x25200 - 0x252ff: gt
1518 0x25300 - 0x25fff: reserved */
1519 GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /*
1520 0x26000 - 0x27fff: render
1521 0x28000 - 0x29fff: reserved
1522 0x2a000 - 0x2ffff: undocumented */
1523 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
1524 GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1525 GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
1526 0x1c0000 - 0x1c2bff: VD0
1527 0x1c2c00 - 0x1c2cff: reserved
1528 0x1c2d00 - 0x1c2dff: VD0
1529 0x1c2e00 - 0x1c3eff: VD0
1530 0x1c3f00 - 0x1c3fff: VD0 */
1531 GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /*
1532 0x1c4000 - 0x1c6bff: VD1
1533 0x1c6c00 - 0x1c6cff: reserved
1534 0x1c6d00 - 0x1c6dff: VD1
1535 0x1c6e00 - 0x1c7fff: reserved */
1536 GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
1537 0x1c8000 - 0x1ca0ff: VE0
1538 0x1ca100 - 0x1cbfff: reserved */
1539 GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0),
1540 GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2),
1541 GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4),
1542 GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6),
1543 GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
1544 0x1d0000 - 0x1d2bff: VD2
1545 0x1d2c00 - 0x1d2cff: reserved
1546 0x1d2d00 - 0x1d2dff: VD2
1547 0x1d2e00 - 0x1d3dff: VD2
1548 0x1d3e00 - 0x1d3eff: reserved
1549 0x1d3f00 - 0x1d3fff: VD2 */
1550 GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /*
1551 0x1d4000 - 0x1d6bff: VD3
1552 0x1d6c00 - 0x1d6cff: reserved
1553 0x1d6d00 - 0x1d6dff: VD3
1554 0x1d6e00 - 0x1d7fff: reserved */
1555 GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /*
1556 0x1d8000 - 0x1da0ff: VE1
1557 0x1da100 - 0x1dffff: reserved */
1558 GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /*
1559 0x1e0000 - 0x1e2bff: VD4
1560 0x1e2c00 - 0x1e2cff: reserved
1561 0x1e2d00 - 0x1e2dff: VD4
1562 0x1e2e00 - 0x1e3eff: reserved
1563 0x1e3f00 - 0x1e3fff: VD4 */
1564 GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /*
1565 0x1e4000 - 0x1e6bff: VD5
1566 0x1e6c00 - 0x1e6cff: reserved
1567 0x1e6d00 - 0x1e6dff: VD5
1568 0x1e6e00 - 0x1e7fff: reserved */
1569 GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /*
1570 0x1e8000 - 0x1ea0ff: VE2
1571 0x1ea100 - 0x1effff: reserved */
1572 GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /*
1573 0x1f0000 - 0x1f2bff: VD6
1574 0x1f2c00 - 0x1f2cff: reserved
1575 0x1f2d00 - 0x1f2dff: VD6
1576 0x1f2e00 - 0x1f3eff: reserved
1577 0x1f3f00 - 0x1f3fff: VD6 */
1578 GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /*
1579 0x1f4000 - 0x1f6bff: VD7
1580 0x1f6c00 - 0x1f6cff: reserved
1581 0x1f6d00 - 0x1f6dff: VD7
1582 0x1f6e00 - 0x1f7fff: reserved */
1583 GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),
1584};
1585
1586static const struct intel_forcewake_range __mtl_fw_ranges[] = {
1587 GEN_FW_RANGE(0x0, 0xaff, 0),
1588 GEN_FW_RANGE(0xb00, 0xbff, FORCEWAKE_GT),
1589 GEN_FW_RANGE(0xc00, 0xfff, 0),
1590 GEN_FW_RANGE(0x1000, 0x1fff, FORCEWAKE_GT),
1591 GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1592 GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
1593 GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1594 GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
1595 0x4000 - 0x48ff: render
1596 0x4900 - 0x51ff: reserved */
1597 GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
1598 0x5200 - 0x53ff: render
1599 0x5400 - 0x54ff: reserved
1600 0x5500 - 0x7fff: render */
1601 GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1602 GEN_FW_RANGE(0x8140, 0x817f, FORCEWAKE_RENDER), /*
1603 0x8140 - 0x815f: render
1604 0x8160 - 0x817f: reserved */
1605 GEN_FW_RANGE(0x8180, 0x81ff, 0),
1606 GEN_FW_RANGE(0x8200, 0x94cf, FORCEWAKE_GT), /*
1607 0x8200 - 0x87ff: gt
1608 0x8800 - 0x8dff: reserved
1609 0x8e00 - 0x8f7f: gt
1610 0x8f80 - 0x8fff: reserved
1611 0x9000 - 0x947f: gt
1612 0x9480 - 0x94cf: reserved */
1613 GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1614 GEN_FW_RANGE(0x9560, 0x967f, 0), /*
1615 0x9560 - 0x95ff: always on
1616 0x9600 - 0x967f: reserved */
1617 GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
1618 0x9680 - 0x96ff: render
1619 0x9700 - 0x97ff: reserved */
1620 GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
1621 0x9800 - 0xb4ff: gt
1622 0xb500 - 0xbfff: reserved
1623 0xc000 - 0xcfff: gt */
1624 GEN_FW_RANGE(0xd000, 0xd7ff, 0), /*
1625 0xd000 - 0xd3ff: always on
1626 0xd400 - 0xd7ff: reserved */
1627 GEN_FW_RANGE(0xd800, 0xd87f, FORCEWAKE_RENDER),
1628 GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT),
1629 GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
1630 GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
1631 0xdd00 - 0xddff: gt
1632 0xde00 - 0xde7f: reserved */
1633 GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
1634 0xde80 - 0xdfff: render
1635 0xe000 - 0xe0ff: reserved
1636 0xe100 - 0xe8ff: render */
1637 GEN_FW_RANGE(0xe900, 0xe9ff, FORCEWAKE_GT),
1638 GEN_FW_RANGE(0xea00, 0x147ff, 0), /*
1639 0xea00 - 0x11fff: reserved
1640 0x12000 - 0x127ff: always on
1641 0x12800 - 0x147ff: reserved */
1642 GEN_FW_RANGE(0x14800, 0x19fff, FORCEWAKE_GT), /*
1643 0x14800 - 0x153ff: gt
1644 0x15400 - 0x19fff: reserved */
1645 GEN_FW_RANGE(0x1a000, 0x21fff, FORCEWAKE_RENDER), /*
1646 0x1a000 - 0x1bfff: render
1647 0x1c000 - 0x21fff: reserved */
1648 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
1649 GEN_FW_RANGE(0x24000, 0x2ffff, 0), /*
1650 0x24000 - 0x2407f: always on
1651 0x24080 - 0x2ffff: reserved */
1652 GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
1653 GEN_FW_RANGE(0x40000, 0x1901ef, 0),
1654 GEN_FW_RANGE(0x1901f0, 0x1901f3, FORCEWAKE_GT)
1655 /* FIXME: WA to wake GT while triggering H2G */
1656};
1657
1658/*
1659 * Note that the register ranges here are the final offsets after
1660 * translation of the GSI block to the 0x380000 offset.
1661 *
1662 * NOTE: There are a couple MCR ranges near the bottom of this table
1663 * that need to power up either VD0 or VD2 depending on which replicated
1664 * instance of the register we're trying to access. Our forcewake logic
1665 * at the moment doesn't have a good way to take steering into consideration,
1666 * and the driver doesn't even access any registers in those ranges today,
1667 * so for now we just mark those ranges as FORCEWAKE_ALL. That will ensure
1668 * proper operation if we do start using the ranges in the future, and we
1669 * can determine at that time whether it's worth adding extra complexity to
1670 * the forcewake handling to take steering into consideration.
1671 */
1672static const struct intel_forcewake_range __xelpmp_fw_ranges[] = {
1673 GEN_FW_RANGE(0x0, 0x115fff, 0), /* render GT range */
1674 GEN_FW_RANGE(0x116000, 0x11ffff, FORCEWAKE_GSC), /*
1675 0x116000 - 0x117fff: gsc
1676 0x118000 - 0x119fff: reserved
1677 0x11a000 - 0x11efff: gsc
1678 0x11f000 - 0x11ffff: reserved */
1679 GEN_FW_RANGE(0x120000, 0x1bffff, 0), /* non-GT range */
1680 GEN_FW_RANGE(0x1c0000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX0), /*
1681 0x1c0000 - 0x1c3dff: VD0
1682 0x1c3e00 - 0x1c3eff: reserved
1683 0x1c3f00 - 0x1c3fff: VD0
1684 0x1c4000 - 0x1c7fff: reserved */
1685 GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
1686 0x1c8000 - 0x1ca0ff: VE0
1687 0x1ca100 - 0x1cbfff: reserved */
1688 GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
1689 0x1cc000 - 0x1cdfff: VD0
1690 0x1ce000 - 0x1cffff: reserved */
1691 GEN_FW_RANGE(0x1d0000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX2), /*
1692 0x1d0000 - 0x1d3dff: VD2
1693 0x1d3e00 - 0x1d3eff: reserved
1694 0x1d4000 - 0x1d7fff: VD2 */
1695 GEN_FW_RANGE(0x1d8000, 0x1da0ff, FORCEWAKE_MEDIA_VEBOX1),
1696 GEN_FW_RANGE(0x1da100, 0x380aff, 0), /*
1697 0x1da100 - 0x23ffff: reserved
1698 0x240000 - 0x37ffff: non-GT range
1699 0x380000 - 0x380aff: reserved */
1700 GEN_FW_RANGE(0x380b00, 0x380bff, FORCEWAKE_GT),
1701 GEN_FW_RANGE(0x380c00, 0x380fff, 0),
1702 GEN_FW_RANGE(0x381000, 0x38817f, FORCEWAKE_GT), /*
1703 0x381000 - 0x381fff: gt
1704 0x382000 - 0x383fff: reserved
1705 0x384000 - 0x384aff: gt
1706 0x384b00 - 0x3851ff: reserved
1707 0x385200 - 0x3871ff: gt
1708 0x387200 - 0x387fff: reserved
1709 0x388000 - 0x38813f: gt
1710 0x388140 - 0x38817f: reserved */
1711 GEN_FW_RANGE(0x388180, 0x3882ff, 0), /*
1712 0x388180 - 0x3881ff: always on
1713 0x388200 - 0x3882ff: reserved */
1714 GEN_FW_RANGE(0x388300, 0x38955f, FORCEWAKE_GT), /*
1715 0x388300 - 0x38887f: gt
1716 0x388880 - 0x388fff: reserved
1717 0x389000 - 0x38947f: gt
1718 0x389480 - 0x38955f: reserved */
1719 GEN_FW_RANGE(0x389560, 0x389fff, 0), /*
1720 0x389560 - 0x3895ff: always on
1721 0x389600 - 0x389fff: reserved */
1722 GEN_FW_RANGE(0x38a000, 0x38cfff, FORCEWAKE_GT), /*
1723 0x38a000 - 0x38afff: gt
1724 0x38b000 - 0x38bfff: reserved
1725 0x38c000 - 0x38cfff: gt */
1726 GEN_FW_RANGE(0x38d000, 0x38d11f, 0),
1727 GEN_FW_RANGE(0x38d120, 0x391fff, FORCEWAKE_GT), /*
1728 0x38d120 - 0x38dfff: gt
1729 0x38e000 - 0x38efff: reserved
1730 0x38f000 - 0x38ffff: gt
1731 0x389000 - 0x391fff: reserved */
1732 GEN_FW_RANGE(0x392000, 0x392fff, 0), /*
1733 0x392000 - 0x3927ff: always on
1734 0x392800 - 0x292fff: reserved */
1735 GEN_FW_RANGE(0x393000, 0x3931ff, FORCEWAKE_GT),
1736 GEN_FW_RANGE(0x393200, 0x39323f, FORCEWAKE_ALL), /* instance-based, see note above */
1737 GEN_FW_RANGE(0x393240, 0x3933ff, FORCEWAKE_GT),
1738 GEN_FW_RANGE(0x393400, 0x3934ff, FORCEWAKE_ALL), /* instance-based, see note above */
1739 GEN_FW_RANGE(0x393500, 0x393c7f, 0), /*
1740 0x393500 - 0x393bff: reserved
1741 0x393c00 - 0x393c7f: always on */
1742 GEN_FW_RANGE(0x393c80, 0x393dff, FORCEWAKE_GT),
1743};
1744
1745static void
1746ilk_dummy_write(struct intel_uncore *uncore)
1747{
1748 /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
1749 * the chip from rc6 before touching it for real. MI_MODE is masked,
1750 * hence harmless to write 0 into. */
1751 __raw_uncore_write32(uncore, RING_MI_MODE(RENDER_RING_BASE), 0);
1752}
1753
1754static void
1755__unclaimed_reg_debug(struct intel_uncore *uncore,
1756 const i915_reg_t reg,
1757 const bool read)
1758{
1759 if (drm_WARN(&uncore->i915->drm,
1760 check_for_unclaimed_mmio(uncore),
1761 "Unclaimed %s register 0x%x\n",
1762 read ? "read from" : "write to",
1763 i915_mmio_reg_offset(reg)))
1764 /* Only report the first N failures */
1765 uncore->i915->params.mmio_debug--;
1766}
1767
1768static void
1769__unclaimed_previous_reg_debug(struct intel_uncore *uncore,
1770 const i915_reg_t reg,
1771 const bool read)
1772{
1773 if (check_for_unclaimed_mmio(uncore))
1774 drm_dbg(&uncore->i915->drm,
1775 "Unclaimed access detected before %s register 0x%x\n",
1776 read ? "read from" : "write to",
1777 i915_mmio_reg_offset(reg));
1778}
1779
1780static inline bool __must_check
1781unclaimed_reg_debug_header(struct intel_uncore *uncore,
1782 const i915_reg_t reg, const bool read)
1783{
1784 if (likely(!uncore->i915->params.mmio_debug) || !uncore->debug)
1785 return false;
1786
1787 /* interrupts are disabled and re-enabled around uncore->lock usage */
1788 lockdep_assert_held(&uncore->lock);
1789
1790 spin_lock(&uncore->debug->lock);
1791 __unclaimed_previous_reg_debug(uncore, reg, read);
1792
1793 return true;
1794}
1795
1796static inline void
1797unclaimed_reg_debug_footer(struct intel_uncore *uncore,
1798 const i915_reg_t reg, const bool read)
1799{
1800 /* interrupts are disabled and re-enabled around uncore->lock usage */
1801 lockdep_assert_held(&uncore->lock);
1802
1803 __unclaimed_reg_debug(uncore, reg, read);
1804 spin_unlock(&uncore->debug->lock);
1805}
1806
1807#define __vgpu_read(x) \
1808static u##x \
1809vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1810 u##x val = __raw_uncore_read##x(uncore, reg); \
1811 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1812 return val; \
1813}
1814__vgpu_read(8)
1815__vgpu_read(16)
1816__vgpu_read(32)
1817__vgpu_read(64)
1818
1819#define GEN2_READ_HEADER(x) \
1820 u##x val = 0; \
1821 assert_rpm_wakelock_held(uncore->rpm);
1822
1823#define GEN2_READ_FOOTER \
1824 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1825 return val
1826
1827#define __gen2_read(x) \
1828static u##x \
1829gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1830 GEN2_READ_HEADER(x); \
1831 val = __raw_uncore_read##x(uncore, reg); \
1832 GEN2_READ_FOOTER; \
1833}
1834
1835#define __gen5_read(x) \
1836static u##x \
1837gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1838 GEN2_READ_HEADER(x); \
1839 ilk_dummy_write(uncore); \
1840 val = __raw_uncore_read##x(uncore, reg); \
1841 GEN2_READ_FOOTER; \
1842}
1843
1844__gen5_read(8)
1845__gen5_read(16)
1846__gen5_read(32)
1847__gen5_read(64)
1848__gen2_read(8)
1849__gen2_read(16)
1850__gen2_read(32)
1851__gen2_read(64)
1852
1853#undef __gen5_read
1854#undef __gen2_read
1855
1856#undef GEN2_READ_FOOTER
1857#undef GEN2_READ_HEADER
1858
1859#define GEN6_READ_HEADER(x) \
1860 u32 offset = i915_mmio_reg_offset(reg); \
1861 unsigned long irqflags; \
1862 bool unclaimed_reg_debug; \
1863 u##x val = 0; \
1864 assert_rpm_wakelock_held(uncore->rpm); \
1865 spin_lock_irqsave(&uncore->lock, irqflags); \
1866 unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, true)
1867
1868#define GEN6_READ_FOOTER \
1869 if (unclaimed_reg_debug) \
1870 unclaimed_reg_debug_footer(uncore, reg, true); \
1871 spin_unlock_irqrestore(&uncore->lock, irqflags); \
1872 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1873 return val
1874
1875static noinline void ___force_wake_auto(struct intel_uncore *uncore,
1876 enum forcewake_domains fw_domains)
1877{
1878 struct intel_uncore_forcewake_domain *domain;
1879 unsigned int tmp;
1880
1881 GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
1882
1883 for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
1884 fw_domain_arm_timer(domain);
1885
1886 fw_domains_get(uncore, fw_domains);
1887}
1888
1889static inline void __force_wake_auto(struct intel_uncore *uncore,
1890 enum forcewake_domains fw_domains)
1891{
1892 GEM_BUG_ON(!fw_domains);
1893
1894 /* Turn on all requested but inactive supported forcewake domains. */
1895 fw_domains &= uncore->fw_domains;
1896 fw_domains &= ~uncore->fw_domains_active;
1897
1898 if (fw_domains)
1899 ___force_wake_auto(uncore, fw_domains);
1900}
1901
1902#define __gen_fwtable_read(x) \
1903static u##x \
1904fwtable_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) \
1905{ \
1906 enum forcewake_domains fw_engine; \
1907 GEN6_READ_HEADER(x); \
1908 fw_engine = __fwtable_reg_read_fw_domains(uncore, offset); \
1909 if (fw_engine) \
1910 __force_wake_auto(uncore, fw_engine); \
1911 val = __raw_uncore_read##x(uncore, reg); \
1912 GEN6_READ_FOOTER; \
1913}
1914
1915static enum forcewake_domains
1916fwtable_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) {
1917 return __fwtable_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg));
1918}
1919
1920__gen_fwtable_read(8)
1921__gen_fwtable_read(16)
1922__gen_fwtable_read(32)
1923__gen_fwtable_read(64)
1924
1925#undef __gen_fwtable_read
1926#undef GEN6_READ_FOOTER
1927#undef GEN6_READ_HEADER
1928
1929#define GEN2_WRITE_HEADER \
1930 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1931 assert_rpm_wakelock_held(uncore->rpm); \
1932
1933#define GEN2_WRITE_FOOTER
1934
1935#define __gen2_write(x) \
1936static void \
1937gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1938 GEN2_WRITE_HEADER; \
1939 __raw_uncore_write##x(uncore, reg, val); \
1940 GEN2_WRITE_FOOTER; \
1941}
1942
1943#define __gen5_write(x) \
1944static void \
1945gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1946 GEN2_WRITE_HEADER; \
1947 ilk_dummy_write(uncore); \
1948 __raw_uncore_write##x(uncore, reg, val); \
1949 GEN2_WRITE_FOOTER; \
1950}
1951
1952__gen5_write(8)
1953__gen5_write(16)
1954__gen5_write(32)
1955__gen2_write(8)
1956__gen2_write(16)
1957__gen2_write(32)
1958
1959#undef __gen5_write
1960#undef __gen2_write
1961
1962#undef GEN2_WRITE_FOOTER
1963#undef GEN2_WRITE_HEADER
1964
1965#define GEN6_WRITE_HEADER \
1966 u32 offset = i915_mmio_reg_offset(reg); \
1967 unsigned long irqflags; \
1968 bool unclaimed_reg_debug; \
1969 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1970 assert_rpm_wakelock_held(uncore->rpm); \
1971 spin_lock_irqsave(&uncore->lock, irqflags); \
1972 unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, false)
1973
1974#define GEN6_WRITE_FOOTER \
1975 if (unclaimed_reg_debug) \
1976 unclaimed_reg_debug_footer(uncore, reg, false); \
1977 spin_unlock_irqrestore(&uncore->lock, irqflags)
1978
1979#define __gen6_write(x) \
1980static void \
1981gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1982 GEN6_WRITE_HEADER; \
1983 if (NEEDS_FORCE_WAKE(offset)) \
1984 __gen6_gt_wait_for_fifo(uncore); \
1985 __raw_uncore_write##x(uncore, reg, val); \
1986 GEN6_WRITE_FOOTER; \
1987}
1988__gen6_write(8)
1989__gen6_write(16)
1990__gen6_write(32)
1991
1992#define __gen_fwtable_write(x) \
1993static void \
1994fwtable_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1995 enum forcewake_domains fw_engine; \
1996 GEN6_WRITE_HEADER; \
1997 fw_engine = __fwtable_reg_write_fw_domains(uncore, offset); \
1998 if (fw_engine) \
1999 __force_wake_auto(uncore, fw_engine); \
2000 __raw_uncore_write##x(uncore, reg, val); \
2001 GEN6_WRITE_FOOTER; \
2002}
2003
2004static enum forcewake_domains
2005fwtable_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
2006{
2007 return __fwtable_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg));
2008}
2009
2010__gen_fwtable_write(8)
2011__gen_fwtable_write(16)
2012__gen_fwtable_write(32)
2013
2014#undef __gen_fwtable_write
2015#undef GEN6_WRITE_FOOTER
2016#undef GEN6_WRITE_HEADER
2017
2018#define __vgpu_write(x) \
2019static void \
2020vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
2021 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
2022 __raw_uncore_write##x(uncore, reg, val); \
2023}
2024__vgpu_write(8)
2025__vgpu_write(16)
2026__vgpu_write(32)
2027
2028#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
2029do { \
2030 (uncore)->funcs.mmio_writeb = x##_write8; \
2031 (uncore)->funcs.mmio_writew = x##_write16; \
2032 (uncore)->funcs.mmio_writel = x##_write32; \
2033} while (0)
2034
2035#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
2036do { \
2037 (uncore)->funcs.mmio_readb = x##_read8; \
2038 (uncore)->funcs.mmio_readw = x##_read16; \
2039 (uncore)->funcs.mmio_readl = x##_read32; \
2040 (uncore)->funcs.mmio_readq = x##_read64; \
2041} while (0)
2042
2043#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
2044do { \
2045 ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
2046 (uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
2047} while (0)
2048
2049#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
2050do { \
2051 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
2052 (uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
2053} while (0)
2054
2055static int __fw_domain_init(struct intel_uncore *uncore,
2056 enum forcewake_domain_id domain_id,
2057 i915_reg_t reg_set,
2058 i915_reg_t reg_ack)
2059{
2060 struct intel_uncore_forcewake_domain *d;
2061
2062 GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
2063 GEM_BUG_ON(uncore->fw_domain[domain_id]);
2064
2065 if (i915_inject_probe_failure(uncore->i915))
2066 return -ENOMEM;
2067
2068 d = kzalloc(sizeof(*d), GFP_KERNEL);
2069 if (!d)
2070 return -ENOMEM;
2071
2072 drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
2073 drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));
2074
2075 d->uncore = uncore;
2076 d->wake_count = 0;
2077 d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set) + uncore->gsi_offset;
2078 d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack) + uncore->gsi_offset;
2079
2080 d->id = domain_id;
2081
2082 BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
2083 BUILD_BUG_ON(FORCEWAKE_GT != (1 << FW_DOMAIN_ID_GT));
2084 BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
2085 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
2086 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
2087 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
2088 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
2089 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX4 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX4));
2090 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX5 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX5));
2091 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX6 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX6));
2092 BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX7 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX7));
2093 BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
2094 BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
2095 BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX2));
2096 BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX3));
2097 BUILD_BUG_ON(FORCEWAKE_GSC != (1 << FW_DOMAIN_ID_GSC));
2098
2099 d->mask = BIT(domain_id);
2100
2101 hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2102 d->timer.function = intel_uncore_fw_release_timer;
2103
2104 uncore->fw_domains |= BIT(domain_id);
2105
2106 fw_domain_reset(d);
2107
2108 uncore->fw_domain[domain_id] = d;
2109
2110 return 0;
2111}
2112
2113static void fw_domain_fini(struct intel_uncore *uncore,
2114 enum forcewake_domain_id domain_id)
2115{
2116 struct intel_uncore_forcewake_domain *d;
2117
2118 GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
2119
2120 d = fetch_and_zero(&uncore->fw_domain[domain_id]);
2121 if (!d)
2122 return;
2123
2124 uncore->fw_domains &= ~BIT(domain_id);
2125 drm_WARN_ON(&uncore->i915->drm, d->wake_count);
2126 drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
2127 kfree(d);
2128}
2129
2130static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
2131{
2132 struct intel_uncore_forcewake_domain *d;
2133 int tmp;
2134
2135 for_each_fw_domain(d, uncore, tmp)
2136 fw_domain_fini(uncore, d->id);
2137}
2138
2139static const struct intel_uncore_fw_get uncore_get_fallback = {
2140 .force_wake_get = fw_domains_get_with_fallback
2141};
2142
2143static const struct intel_uncore_fw_get uncore_get_normal = {
2144 .force_wake_get = fw_domains_get_normal,
2145};
2146
2147static const struct intel_uncore_fw_get uncore_get_thread_status = {
2148 .force_wake_get = fw_domains_get_with_thread_status
2149};
2150
2151static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
2152{
2153 struct drm_i915_private *i915 = uncore->i915;
2154 int ret = 0;
2155
2156 GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
2157
2158#define fw_domain_init(uncore__, id__, set__, ack__) \
2159 (ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
2160
2161 if (GRAPHICS_VER(i915) >= 11) {
2162 intel_engine_mask_t emask;
2163 int i;
2164
2165 /* we'll prune the domains of missing engines later */
2166 emask = uncore->gt->info.engine_mask;
2167
2168 uncore->fw_get_funcs = &uncore_get_fallback;
2169 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
2170 fw_domain_init(uncore, FW_DOMAIN_ID_GT,
2171 FORCEWAKE_GT_GEN9,
2172 FORCEWAKE_ACK_GT_MTL);
2173 else
2174 fw_domain_init(uncore, FW_DOMAIN_ID_GT,
2175 FORCEWAKE_GT_GEN9,
2176 FORCEWAKE_ACK_GT_GEN9);
2177
2178 if (RCS_MASK(uncore->gt) || CCS_MASK(uncore->gt))
2179 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2180 FORCEWAKE_RENDER_GEN9,
2181 FORCEWAKE_ACK_RENDER_GEN9);
2182
2183 for (i = 0; i < I915_MAX_VCS; i++) {
2184 if (!__HAS_ENGINE(emask, _VCS(i)))
2185 continue;
2186
2187 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
2188 FORCEWAKE_MEDIA_VDBOX_GEN11(i),
2189 FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
2190 }
2191 for (i = 0; i < I915_MAX_VECS; i++) {
2192 if (!__HAS_ENGINE(emask, _VECS(i)))
2193 continue;
2194
2195 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
2196 FORCEWAKE_MEDIA_VEBOX_GEN11(i),
2197 FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
2198 }
2199
2200 if (uncore->gt->type == GT_MEDIA)
2201 fw_domain_init(uncore, FW_DOMAIN_ID_GSC,
2202 FORCEWAKE_REQ_GSC, FORCEWAKE_ACK_GSC);
2203 } else if (IS_GRAPHICS_VER(i915, 9, 10)) {
2204 uncore->fw_get_funcs = &uncore_get_fallback;
2205 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2206 FORCEWAKE_RENDER_GEN9,
2207 FORCEWAKE_ACK_RENDER_GEN9);
2208 fw_domain_init(uncore, FW_DOMAIN_ID_GT,
2209 FORCEWAKE_GT_GEN9,
2210 FORCEWAKE_ACK_GT_GEN9);
2211 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
2212 FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
2213 } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
2214 uncore->fw_get_funcs = &uncore_get_normal;
2215 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2216 FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
2217 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
2218 FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
2219 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2220 uncore->fw_get_funcs = &uncore_get_thread_status;
2221 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2222 FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
2223 } else if (IS_IVYBRIDGE(i915)) {
2224 u32 ecobus;
2225
2226 /* IVB configs may use multi-threaded forcewake */
2227
2228 /* A small trick here - if the bios hasn't configured
2229 * MT forcewake, and if the device is in RC6, then
2230 * force_wake_mt_get will not wake the device and the
2231 * ECOBUS read will return zero. Which will be
2232 * (correctly) interpreted by the test below as MT
2233 * forcewake being disabled.
2234 */
2235 uncore->fw_get_funcs = &uncore_get_thread_status;
2236
2237 /* We need to init first for ECOBUS access and then
2238 * determine later if we want to reinit, in case of MT access is
2239 * not working. In this stage we don't know which flavour this
2240 * ivb is, so it is better to reset also the gen6 fw registers
2241 * before the ecobus check.
2242 */
2243
2244 __raw_uncore_write32(uncore, FORCEWAKE, 0);
2245 __raw_posting_read(uncore, ECOBUS);
2246
2247 ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2248 FORCEWAKE_MT, FORCEWAKE_MT_ACK);
2249 if (ret)
2250 goto out;
2251
2252 spin_lock_irq(&uncore->lock);
2253 fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
2254 ecobus = __raw_uncore_read32(uncore, ECOBUS);
2255 fw_domains_put(uncore, FORCEWAKE_RENDER);
2256 spin_unlock_irq(&uncore->lock);
2257
2258 if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
2259 drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
2260 drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
2261 fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
2262 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2263 FORCEWAKE, FORCEWAKE_ACK);
2264 }
2265 } else if (GRAPHICS_VER(i915) == 6) {
2266 uncore->fw_get_funcs = &uncore_get_thread_status;
2267 fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2268 FORCEWAKE, FORCEWAKE_ACK);
2269 }
2270
2271#undef fw_domain_init
2272
2273 /* All future platforms are expected to require complex power gating */
2274 drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);
2275
2276out:
2277 if (ret)
2278 intel_uncore_fw_domains_fini(uncore);
2279
2280 return ret;
2281}
2282
2283#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
2284{ \
2285 (uncore)->fw_domains_table = \
2286 (struct intel_forcewake_range *)(d); \
2287 (uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
2288}
2289
2290#define ASSIGN_SHADOW_TABLE(uncore, d) \
2291{ \
2292 (uncore)->shadowed_reg_table = d; \
2293 (uncore)->shadowed_reg_table_entries = ARRAY_SIZE((d)); \
2294}
2295
2296static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
2297 unsigned long action, void *data)
2298{
2299 struct intel_uncore *uncore = container_of(nb,
2300 struct intel_uncore, pmic_bus_access_nb);
2301
2302 switch (action) {
2303 case MBI_PMIC_BUS_ACCESS_BEGIN:
2304 /*
2305 * forcewake all now to make sure that we don't need to do a
2306 * forcewake later which on systems where this notifier gets
2307 * called requires the punit to access to the shared pmic i2c
2308 * bus, which will be busy after this notification, leading to:
2309 * "render: timed out waiting for forcewake ack request."
2310 * errors.
2311 *
2312 * The notifier is unregistered during intel_runtime_suspend(),
2313 * so it's ok to access the HW here without holding a RPM
2314 * wake reference -> disable wakeref asserts for the time of
2315 * the access.
2316 */
2317 disable_rpm_wakeref_asserts(uncore->rpm);
2318 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
2319 enable_rpm_wakeref_asserts(uncore->rpm);
2320 break;
2321 case MBI_PMIC_BUS_ACCESS_END:
2322 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
2323 break;
2324 }
2325
2326 return NOTIFY_OK;
2327}
2328
2329static void uncore_unmap_mmio(struct drm_device *drm, void *regs)
2330{
2331 iounmap((void __iomem *)regs);
2332}
2333
2334int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
2335{
2336 struct drm_i915_private *i915 = uncore->i915;
2337 int mmio_size;
2338
2339 /*
2340 * Before gen4, the registers and the GTT are behind different BARs.
2341 * However, from gen4 onwards, the registers and the GTT are shared
2342 * in the same BAR, so we want to restrict this ioremap from
2343 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
2344 * the register BAR remains the same size for all the earlier
2345 * generations up to Ironlake.
2346 * For dgfx chips register range is expanded to 4MB, and this larger
2347 * range is also used for integrated gpus beginning with Meteor Lake.
2348 */
2349 if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
2350 mmio_size = 4 * 1024 * 1024;
2351 else if (GRAPHICS_VER(i915) >= 5)
2352 mmio_size = 2 * 1024 * 1024;
2353 else
2354 mmio_size = 512 * 1024;
2355
2356 uncore->regs = ioremap(phys_addr, mmio_size);
2357 if (uncore->regs == NULL) {
2358 drm_err(&i915->drm, "failed to map registers\n");
2359 return -EIO;
2360 }
2361
2362 return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio,
2363 (void __force *)uncore->regs);
2364}
2365
2366void intel_uncore_init_early(struct intel_uncore *uncore,
2367 struct intel_gt *gt)
2368{
2369 spin_lock_init(&uncore->lock);
2370 uncore->i915 = gt->i915;
2371 uncore->gt = gt;
2372 uncore->rpm = >->i915->runtime_pm;
2373}
2374
2375static void uncore_raw_init(struct intel_uncore *uncore)
2376{
2377 GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
2378
2379 if (intel_vgpu_active(uncore->i915)) {
2380 ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
2381 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
2382 } else if (GRAPHICS_VER(uncore->i915) == 5) {
2383 ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
2384 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
2385 } else {
2386 ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
2387 ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
2388 }
2389}
2390
2391static int uncore_media_forcewake_init(struct intel_uncore *uncore)
2392{
2393 struct drm_i915_private *i915 = uncore->i915;
2394
2395 if (MEDIA_VER(i915) >= 13) {
2396 ASSIGN_FW_DOMAINS_TABLE(uncore, __xelpmp_fw_ranges);
2397 ASSIGN_SHADOW_TABLE(uncore, xelpmp_shadowed_regs);
2398 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2399 } else {
2400 MISSING_CASE(MEDIA_VER(i915));
2401 return -ENODEV;
2402 }
2403
2404 return 0;
2405}
2406
2407static int uncore_forcewake_init(struct intel_uncore *uncore)
2408{
2409 struct drm_i915_private *i915 = uncore->i915;
2410 int ret;
2411
2412 GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
2413
2414 ret = intel_uncore_fw_domains_init(uncore);
2415 if (ret)
2416 return ret;
2417 forcewake_early_sanitize(uncore, 0);
2418
2419 ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
2420
2421 if (uncore->gt->type == GT_MEDIA)
2422 return uncore_media_forcewake_init(uncore);
2423
2424 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
2425 ASSIGN_FW_DOMAINS_TABLE(uncore, __mtl_fw_ranges);
2426 ASSIGN_SHADOW_TABLE(uncore, mtl_shadowed_regs);
2427 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2428 } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
2429 ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
2430 ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
2431 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2432 } else if (GRAPHICS_VER(i915) >= 12) {
2433 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
2434 ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
2435 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2436 } else if (GRAPHICS_VER(i915) == 11) {
2437 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
2438 ASSIGN_SHADOW_TABLE(uncore, gen11_shadowed_regs);
2439 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2440 } else if (IS_GRAPHICS_VER(i915, 9, 10)) {
2441 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
2442 ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
2443 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2444 } else if (IS_CHERRYVIEW(i915)) {
2445 ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
2446 ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
2447 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2448 } else if (GRAPHICS_VER(i915) == 8) {
2449 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
2450 ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
2451 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2452 } else if (IS_VALLEYVIEW(i915)) {
2453 ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
2454 ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
2455 } else if (IS_GRAPHICS_VER(i915, 6, 7)) {
2456 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
2457 ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
2458 }
2459
2460 uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
2461 iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
2462
2463 return 0;
2464}
2465
2466static int sanity_check_mmio_access(struct intel_uncore *uncore)
2467{
2468 struct drm_i915_private *i915 = uncore->i915;
2469
2470 if (GRAPHICS_VER(i915) < 8)
2471 return 0;
2472
2473 /*
2474 * Sanitycheck that MMIO access to the device is working properly. If
2475 * the CPU is unable to communcate with a PCI device, BAR reads will
2476 * return 0xFFFFFFFF. Let's make sure the device isn't in this state
2477 * before we start trying to access registers.
2478 *
2479 * We use the primary GT's forcewake register as our guinea pig since
2480 * it's been around since HSW and it's a masked register so the upper
2481 * 16 bits can never read back as 1's if device access is operating
2482 * properly.
2483 *
2484 * If MMIO isn't working, we'll wait up to 2 seconds to see if it
2485 * recovers, then give up.
2486 */
2487#define COND (__raw_uncore_read32(uncore, FORCEWAKE_MT) != ~0)
2488 if (wait_for(COND, 2000) == -ETIMEDOUT) {
2489 drm_err(&i915->drm, "Device is non-operational; MMIO access returns 0xFFFFFFFF!\n");
2490 return -EIO;
2491 }
2492
2493 return 0;
2494}
2495
2496int intel_uncore_init_mmio(struct intel_uncore *uncore)
2497{
2498 struct drm_i915_private *i915 = uncore->i915;
2499 int ret;
2500
2501 ret = sanity_check_mmio_access(uncore);
2502 if (ret)
2503 return ret;
2504
2505 /*
2506 * The boot firmware initializes local memory and assesses its health.
2507 * If memory training fails, the punit will have been instructed to
2508 * keep the GT powered down; we won't be able to communicate with it
2509 * and we should not continue with driver initialization.
2510 */
2511 if (IS_DGFX(i915) &&
2512 !(__raw_uncore_read32(uncore, GU_CNTL) & LMEM_INIT)) {
2513 drm_err(&i915->drm, "LMEM not initialized by firmware\n");
2514 return -ENODEV;
2515 }
2516
2517 if (GRAPHICS_VER(i915) > 5 && !intel_vgpu_active(i915))
2518 uncore->flags |= UNCORE_HAS_FORCEWAKE;
2519
2520 if (!intel_uncore_has_forcewake(uncore)) {
2521 uncore_raw_init(uncore);
2522 } else {
2523 ret = uncore_forcewake_init(uncore);
2524 if (ret)
2525 return ret;
2526 }
2527
2528 /* make sure fw funcs are set if and only if we have fw*/
2529 GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->fw_get_funcs);
2530 GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
2531 GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);
2532
2533 if (HAS_FPGA_DBG_UNCLAIMED(i915))
2534 uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;
2535
2536 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
2537 uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;
2538
2539 if (IS_GRAPHICS_VER(i915, 6, 7))
2540 uncore->flags |= UNCORE_HAS_FIFO;
2541
2542 /* clear out unclaimed reg detection bit */
2543 if (intel_uncore_unclaimed_mmio(uncore))
2544 drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");
2545
2546 return 0;
2547}
2548
2549/*
2550 * We might have detected that some engines are fused off after we initialized
2551 * the forcewake domains. Prune them, to make sure they only reference existing
2552 * engines.
2553 */
2554void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
2555 struct intel_gt *gt)
2556{
2557 enum forcewake_domains fw_domains = uncore->fw_domains;
2558 enum forcewake_domain_id domain_id;
2559 int i;
2560
2561 if (!intel_uncore_has_forcewake(uncore) || GRAPHICS_VER(uncore->i915) < 11)
2562 return;
2563
2564 for (i = 0; i < I915_MAX_VCS; i++) {
2565 domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
2566
2567 if (HAS_ENGINE(gt, _VCS(i)))
2568 continue;
2569
2570 /*
2571 * Starting with XeHP, the power well for an even-numbered
2572 * VDBOX is also used for shared units within the
2573 * media slice such as SFC. So even if the engine
2574 * itself is fused off, we still need to initialize
2575 * the forcewake domain if any of the other engines
2576 * in the same media slice are present.
2577 */
2578 if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 55) && i % 2 == 0) {
2579 if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
2580 continue;
2581
2582 if (HAS_ENGINE(gt, _VECS(i / 2)))
2583 continue;
2584 }
2585
2586 if (fw_domains & BIT(domain_id))
2587 fw_domain_fini(uncore, domain_id);
2588 }
2589
2590 for (i = 0; i < I915_MAX_VECS; i++) {
2591 domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
2592
2593 if (HAS_ENGINE(gt, _VECS(i)))
2594 continue;
2595
2596 if (fw_domains & BIT(domain_id))
2597 fw_domain_fini(uncore, domain_id);
2598 }
2599
2600 if ((fw_domains & BIT(FW_DOMAIN_ID_GSC)) && !HAS_ENGINE(gt, GSC0))
2601 fw_domain_fini(uncore, FW_DOMAIN_ID_GSC);
2602}
2603
2604/*
2605 * The driver-initiated FLR is the highest level of reset that we can trigger
2606 * from within the driver. It is different from the PCI FLR in that it doesn't
2607 * fully reset the SGUnit and doesn't modify the PCI config space and therefore
2608 * it doesn't require a re-enumeration of the PCI BARs. However, the
2609 * driver-initiated FLR does still cause a reset of both GT and display and a
2610 * memory wipe of local and stolen memory, so recovery would require a full HW
2611 * re-init and saving/restoring (or re-populating) the wiped memory. Since we
2612 * perform the FLR as the very last action before releasing access to the HW
2613 * during the driver release flow, we don't attempt recovery at all, because
2614 * if/when a new instance of i915 is bound to the device it will do a full
2615 * re-init anyway.
2616 */
2617static void driver_initiated_flr(struct intel_uncore *uncore)
2618{
2619 struct drm_i915_private *i915 = uncore->i915;
2620 unsigned int flr_timeout_ms;
2621 int ret;
2622
2623 drm_dbg(&i915->drm, "Triggering Driver-FLR\n");
2624
2625 /*
2626 * The specification recommends a 3 seconds FLR reset timeout. To be
2627 * cautious, we will extend this to 9 seconds, three times the specified
2628 * timeout.
2629 */
2630 flr_timeout_ms = 9000;
2631
2632 /*
2633 * Make sure any pending FLR requests have cleared by waiting for the
2634 * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS
2635 * to make sure it's not still set from a prior attempt (it's a write to
2636 * clear bit).
2637 * Note that we should never be in a situation where a previous attempt
2638 * is still pending (unless the HW is totally dead), but better to be
2639 * safe in case something unexpected happens
2640 */
2641 ret = intel_wait_for_register_fw(uncore, GU_CNTL, DRIVERFLR, 0, flr_timeout_ms);
2642 if (ret) {
2643 drm_err(&i915->drm,
2644 "Failed to wait for Driver-FLR bit to clear! %d\n",
2645 ret);
2646 return;
2647 }
2648 intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);
2649
2650 /* Trigger the actual Driver-FLR */
2651 intel_uncore_rmw_fw(uncore, GU_CNTL, 0, DRIVERFLR);
2652
2653 /* Wait for hardware teardown to complete */
2654 ret = intel_wait_for_register_fw(uncore, GU_CNTL,
2655 DRIVERFLR, 0,
2656 flr_timeout_ms);
2657 if (ret) {
2658 drm_err(&i915->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
2659 return;
2660 }
2661
2662 /* Wait for hardware/firmware re-init to complete */
2663 ret = intel_wait_for_register_fw(uncore, GU_DEBUG,
2664 DRIVERFLR_STATUS, DRIVERFLR_STATUS,
2665 flr_timeout_ms);
2666 if (ret) {
2667 drm_err(&i915->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
2668 return;
2669 }
2670
2671 /* Clear sticky completion status */
2672 intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);
2673}
2674
2675/* Called via drm-managed action */
2676void intel_uncore_fini_mmio(struct drm_device *dev, void *data)
2677{
2678 struct intel_uncore *uncore = data;
2679
2680 if (intel_uncore_has_forcewake(uncore)) {
2681 iosf_mbi_punit_acquire();
2682 iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
2683 &uncore->pmic_bus_access_nb);
2684 intel_uncore_forcewake_reset(uncore);
2685 intel_uncore_fw_domains_fini(uncore);
2686 iosf_mbi_punit_release();
2687 }
2688
2689 if (intel_uncore_needs_flr_on_fini(uncore))
2690 driver_initiated_flr(uncore);
2691}
2692
2693/**
2694 * __intel_wait_for_register_fw - wait until register matches expected state
2695 * @uncore: the struct intel_uncore
2696 * @reg: the register to read
2697 * @mask: mask to apply to register value
2698 * @value: expected value
2699 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
2700 * @slow_timeout_ms: slow timeout in millisecond
2701 * @out_value: optional placeholder to hold registry value
2702 *
2703 * This routine waits until the target register @reg contains the expected
2704 * @value after applying the @mask, i.e. it waits until ::
2705 *
2706 * (intel_uncore_read_fw(uncore, reg) & mask) == value
2707 *
2708 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
2709 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
2710 * must be not larger than 20,0000 microseconds.
2711 *
2712 * Note that this routine assumes the caller holds forcewake asserted, it is
2713 * not suitable for very long waits. See intel_wait_for_register() if you
2714 * wish to wait without holding forcewake for the duration (i.e. you expect
2715 * the wait to be slow).
2716 *
2717 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
2718 */
2719int __intel_wait_for_register_fw(struct intel_uncore *uncore,
2720 i915_reg_t reg,
2721 u32 mask,
2722 u32 value,
2723 unsigned int fast_timeout_us,
2724 unsigned int slow_timeout_ms,
2725 u32 *out_value)
2726{
2727 u32 reg_value = 0;
2728#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
2729 int ret;
2730
2731 /* Catch any overuse of this function */
2732 might_sleep_if(slow_timeout_ms);
2733 GEM_BUG_ON(fast_timeout_us > 20000);
2734 GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);
2735
2736 ret = -ETIMEDOUT;
2737 if (fast_timeout_us && fast_timeout_us <= 20000)
2738 ret = _wait_for_atomic(done, fast_timeout_us, 0);
2739 if (ret && slow_timeout_ms)
2740 ret = wait_for(done, slow_timeout_ms);
2741
2742 if (out_value)
2743 *out_value = reg_value;
2744
2745 return ret;
2746#undef done
2747}
2748
2749/**
2750 * __intel_wait_for_register - wait until register matches expected state
2751 * @uncore: the struct intel_uncore
2752 * @reg: the register to read
2753 * @mask: mask to apply to register value
2754 * @value: expected value
2755 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
2756 * @slow_timeout_ms: slow timeout in millisecond
2757 * @out_value: optional placeholder to hold registry value
2758 *
2759 * This routine waits until the target register @reg contains the expected
2760 * @value after applying the @mask, i.e. it waits until ::
2761 *
2762 * (intel_uncore_read(uncore, reg) & mask) == value
2763 *
2764 * Otherwise, the wait will timeout after @timeout_ms milliseconds.
2765 *
2766 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
2767 */
2768int __intel_wait_for_register(struct intel_uncore *uncore,
2769 i915_reg_t reg,
2770 u32 mask,
2771 u32 value,
2772 unsigned int fast_timeout_us,
2773 unsigned int slow_timeout_ms,
2774 u32 *out_value)
2775{
2776 unsigned fw =
2777 intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
2778 u32 reg_value;
2779 int ret;
2780
2781 might_sleep_if(slow_timeout_ms);
2782
2783 spin_lock_irq(&uncore->lock);
2784 intel_uncore_forcewake_get__locked(uncore, fw);
2785
2786 ret = __intel_wait_for_register_fw(uncore,
2787 reg, mask, value,
2788 fast_timeout_us, 0, ®_value);
2789
2790 intel_uncore_forcewake_put__locked(uncore, fw);
2791 spin_unlock_irq(&uncore->lock);
2792
2793 if (ret && slow_timeout_ms)
2794 ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
2795 reg),
2796 (reg_value & mask) == value,
2797 slow_timeout_ms * 1000, 10, 1000);
2798
2799 /* just trace the final value */
2800 trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
2801
2802 if (out_value)
2803 *out_value = reg_value;
2804
2805 return ret;
2806}
2807
2808bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
2809{
2810 bool ret;
2811
2812 if (!uncore->debug)
2813 return false;
2814
2815 spin_lock_irq(&uncore->debug->lock);
2816 ret = check_for_unclaimed_mmio(uncore);
2817 spin_unlock_irq(&uncore->debug->lock);
2818
2819 return ret;
2820}
2821
2822bool
2823intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
2824{
2825 bool ret = false;
2826
2827 if (drm_WARN_ON(&uncore->i915->drm, !uncore->debug))
2828 return false;
2829
2830 spin_lock_irq(&uncore->debug->lock);
2831
2832 if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
2833 goto out;
2834
2835 if (unlikely(check_for_unclaimed_mmio(uncore))) {
2836 if (!uncore->i915->params.mmio_debug) {
2837 drm_dbg(&uncore->i915->drm,
2838 "Unclaimed register detected, "
2839 "enabling oneshot unclaimed register reporting. "
2840 "Please use i915.mmio_debug=N for more information.\n");
2841 uncore->i915->params.mmio_debug++;
2842 }
2843 uncore->debug->unclaimed_mmio_check--;
2844 ret = true;
2845 }
2846
2847out:
2848 spin_unlock_irq(&uncore->debug->lock);
2849
2850 return ret;
2851}
2852
2853/**
2854 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
2855 * a register
2856 * @uncore: pointer to struct intel_uncore
2857 * @reg: register in question
2858 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
2859 *
2860 * Returns a set of forcewake domains required to be taken with for example
2861 * intel_uncore_forcewake_get for the specified register to be accessible in the
2862 * specified mode (read, write or read/write) with raw mmio accessors.
2863 *
2864 * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the
2865 * callers to do FIFO management on their own or risk losing writes.
2866 */
2867enum forcewake_domains
2868intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
2869 i915_reg_t reg, unsigned int op)
2870{
2871 enum forcewake_domains fw_domains = 0;
2872
2873 drm_WARN_ON(&uncore->i915->drm, !op);
2874
2875 if (!intel_uncore_has_forcewake(uncore))
2876 return 0;
2877
2878 if (op & FW_REG_READ)
2879 fw_domains = uncore->funcs.read_fw_domains(uncore, reg);
2880
2881 if (op & FW_REG_WRITE)
2882 fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
2883
2884 drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);
2885
2886 return fw_domains;
2887}
2888
2889#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2890#include "selftests/mock_uncore.c"
2891#include "selftests/intel_uncore.c"
2892#endif