/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(!i915_mmio_reg_valid(d->reg_set));
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	mod_timer_pinned(&d->timer, jiffies + 1);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from same cacheline, but not from the set register */
	if (i915_mmio_reg_valid(d->reg_post))
		__raw_posting_read(d->i915, d->reg_post);
}

static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
		fw_domain_wait_ack(d);
	}
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	/* No need to do for all, just do for first found */
	for_each_fw_domain(d, dev_priv, id) {
		fw_domain_posting_read(d);
		break;
	}
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, the FIFO is shared by both SW and HW, so we need to
	 * read FREE_ENTRIES every time. */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

static void intel_uncore_fw_release_timer(unsigned long arg)
{
	struct intel_uncore_forcewake_domain *domain = (void *)arg;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(domain->i915);

	spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		domain->i915->uncore.funcs.force_wake_put(domain->i915,
							  1 << domain->id);

	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
}

void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domain_id id;
	enum forcewake_domains fw = 0, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, id) {
			if (del_timer_sync(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer((unsigned long)domain);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv, id) {
			if (timer_pending(&domain->timer))
				active_domains |= (1 << id);
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	for_each_fw_domain(domain, dev_priv, id)
		if (domain->wake_count)
			fw |= 1 << id;

	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void intel_uncore_ellc_detect(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
	     INTEL_INFO(dev)->gen >= 9) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}
}

static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 dbg;

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 cer;

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		return fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return vlv_check_for_unclaimed_mmio(dev_priv);

	return false;
}

static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev, restore_forcewake);
	i915_check_and_clear_faults(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count++)
			fw_domains &= ~(1 << id);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down a particular
 * forcewake domain this function should be called at the beginning of the
 * sequence. The reference should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, in which case @fw_domains would be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
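
/*
 * Usage sketch (illustrative only, not part of the driver): a sequence of
 * raw reads that must not race with render power-down wraps the accesses
 * in a get/put pair. I915_READ_FW() is the raw accessor that skips the
 * implicit per-register forcewake handling.
 */
#if 0	/* example only */
static u32 example_read_with_forcewake(struct drm_i915_private *dev_priv)
{
	u32 val;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
	val = I915_READ_FW(GEN6_RPNSWREQ);	/* raw access, forcewake held */
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);

	return val;
}
#endif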

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * This function drops the device-level forcewakes for the specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	for_each_fw_domain(domain, dev_priv, id)
		WARN_ON(domain->wake_count);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x38000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xF000, 0x10000))

#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0xB00, 0x2000)

#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x2700) || \
	 REG_RANGE((reg), 0x3000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8140, 0x8160) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE900) || \
	 REG_RANGE((reg), 0x24400, 0x24800))

#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8130, 0x8140) || \
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0x9400, 0x9800)

#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
	((reg) < 0x40000 && \
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	/* XXX. We limit the auto arming traces for mmio
	 * debugs on these platforms. There are just too many
	 * revealed by these and CI/Bat suffers from the noise.
	 * Please fix and then re-enable the automatic traces.
	 */
	if (i915.mmio_debug < 2 &&
	    (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	if (WARN(check_for_unclaimed_mmio(dev_priv),
		 "Unclaimed register detected %s %s register 0x%x\n",
		 before ? "before" : "after",
		 read ? "reading" : "writing to",
		 i915_mmio_reg_offset(reg)))
		i915.mmio_debug--; /* Only report the first N failures */
}

static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915.mmio_debug))
		return;

	__unclaimed_reg_debug(dev_priv, reg, read, before);
}

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, true, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static inline void __force_wake_get(struct drm_i915_private *dev_priv,
				    enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (WARN_ON(!fw_domains))
		return;

	/* Ideally GCC would constant-fold and eliminate this loop */
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count) {
			fw_domains &= ~(1 << id);
			continue;
		}

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (NEEDS_FORCE_WAKE(offset)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine = 0; \
	GEN6_READ_HEADER(x); \
	if (!NEEDS_FORCE_WAKE(offset)) \
		fw_engine = 0; \
	else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine = 0; \
	GEN6_READ_HEADER(x); \
	if (!NEEDS_FORCE_WAKE(offset)) \
		fw_engine = 0; \
	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define SKL_NEEDS_FORCE_WAKE(reg) \
	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	if (!SKL_NEEDS_FORCE_WAKE(offset)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define VGPU_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	VGPU_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	VGPU_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)

#undef __vgpu_read
#undef VGPU_READ_FOOTER
#undef VGPU_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)
__gen2_write(64)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, false, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

static const i915_reg_t gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv,
			     i915_reg_t reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (i915_mmio_reg_equal(reg, gen8_shadowed_regs[i]))
			return true;

	return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine = 0; \
	GEN6_WRITE_HEADER; \
	if (!NEEDS_FORCE_WAKE(offset) || \
	    is_gen8_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

static const i915_reg_t gen9_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	FORCEWAKE_BLITTER_GEN9,
	FORCEWAKE_RENDER_GEN9,
	FORCEWAKE_MEDIA_GEN9,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	/* TODO: Other registers are not yet used */
};

static bool is_gen9_shadowed(struct drm_i915_private *dev_priv,
			     i915_reg_t reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
		if (i915_mmio_reg_equal(reg, gen9_shadowed_regs[i]))
			return true;

	return false;
}

#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
	      bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	if (!SKL_NEEDS_FORCE_WAKE(offset) || \
	    is_gen9_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define VGPU_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  i915_reg_t reg, u##x val, bool trace) { \
	VGPU_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	VGPU_WRITE_FOOTER; \
}

__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
__vgpu_write(64)

#undef __vgpu_write
#undef VGPU_WRITE_FOOTER
#undef VGPU_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)

static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;

	d->i915 = dev_priv;
	d->id = domain_id;

	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}

static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
		return;

	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		if (IS_HASWELL(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. At this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		mutex_lock(&dev->struct_mutex);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_check_vgpu(dev);

	intel_uncore_ellc_detect(dev);
	intel_uncore_fw_domains_init(dev);
	__intel_uncore_early_sanitize(dev, false);

	dev_priv->uncore.unclaimed_mmio_check = 1;

	switch (INTEL_INFO(dev)->gen) {
	default:
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	if (intel_vgpu_active(dev)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	i915_reg_t offset_ldw, offset_udw;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	  .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	  .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	i915_reg_t offset_ldw, offset_udw;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset_ldw = entry->offset_ldw;
	offset_udw = entry->offset_udw;
	size = entry->size;
	size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);

	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
		break;
	case 8:
		reg->val = I915_READ64(offset_ldw);
		break;
	case 4:
		reg->val = I915_READ(offset_ldw);
		break;
	case 2:
		reg->val = I915_READ16(offset_ldw);
		break;
	case 1:
		reg->val = I915_READ8(offset_ldw);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
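
/*
 * Userspace view (illustrative sketch, not driver code): the whitelist above
 * currently only exposes the render ring timestamp. A client would read it
 * roughly as below; error handling is elided and drm_fd is assumed to be an
 * open i915 DRM device node.
 */
#if 0	/* example only */
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static __u64 read_render_timestamp(int drm_fd)
{
	struct drm_i915_reg_read r = {
		.offset = 0x2358,	/* RING_TIMESTAMP(RENDER_RING_BASE) */
	};

	ioctl(drm_fd, DRM_IOCTL_I915_REG_READ, &r);
	return r.val;
}
#endif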

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
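
/*
 * Userspace view (illustrative sketch, not driver code): querying how often
 * a context's batches were active or pending across GPU resets. ctx_id 0 is
 * the default context, which as enforced above requires CAP_SYS_ADMIN.
 */
#if 0	/* example only */
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int query_reset_stats(int drm_fd, __u32 ctx_id,
			     struct drm_i915_reset_stats *stats)
{
	stats->ctx_id = ctx_id;
	stats->flags = 0;
	stats->pad = 0;

	return ioctl(drm_fd, DRM_IOCTL_I915_GET_RESET_STATS, stats);
}
#endif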

static int i915_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_device *dev)
{
	/* assert reset for at least 20 usec */
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(dev), 500);
}

static int g4x_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_device *dev)
{
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(dev), 500);
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

static int wait_for_register(struct drm_i915_private *dev_priv,
			     i915_reg_t reg,
			     const u32 mask,
			     const u32 value,
			     const unsigned long timeout_ms)
{
	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
}

static int gen8_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int i;

	for_each_ring(engine, dev_priv, i) {
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
			   _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

		if (wait_for_register(dev_priv,
				      RING_RESET_CTL(engine->mmio_base),
				      RESET_CTL_READY_TO_RESET,
				      RESET_CTL_READY_TO_RESET,
				      700)) {
			DRM_ERROR("%s: reset request timeout\n", engine->name);
			goto not_ready;
		}
	}

	return gen6_do_reset(dev);

not_ready:
	for_each_ring(engine, dev_priv, i)
		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
			   _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));

	return -EIO;
}

static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
{
	if (!i915.reset)
		return NULL;

	if (INTEL_INFO(dev)->gen >= 8)
		return gen8_do_reset;
	else if (INTEL_INFO(dev)->gen >= 6)
		return gen6_do_reset;
	else if (IS_GEN5(dev))
		return ironlake_do_reset;
	else if (IS_G4X(dev))
		return g4x_do_reset;
	else if (IS_G33(dev))
		return g33_do_reset;
	else if (INTEL_INFO(dev)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int intel_gpu_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int (*reset)(struct drm_device *);
	int ret;

	reset = intel_get_gpu_reset(dev);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = reset(dev);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_device *dev)
{
	return intel_get_gpu_reset(dev) != NULL;
}

bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	return check_for_unclaimed_mmio(dev_priv);
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
	if (unlikely(i915.mmio_debug ||
		     dev_priv->uncore.unclaimed_mmio_check <= 0))
		return false;

	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		i915.mmio_debug++;
		dev_priv->uncore.unclaimed_mmio_check--;
		return true;
	}

	return false;
}
/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/pm_runtime.h>
#include <asm/iosf_mbi.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_pm.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS	 10

#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))

void
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
{
	spin_lock_init(&mmio_debug->lock);
	mmio_debug->unclaimed_mmio_check = 1;
}

static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
{
	lockdep_assert_held(&mmio_debug->lock);

	/* Save and disable mmio debugging for the user bypass */
	if (!mmio_debug->suspend_count++) {
		mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
		mmio_debug->unclaimed_mmio_check = 0;
	}
}

static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
{
	lockdep_assert_held(&mmio_debug->lock);

	if (!--mmio_debug->suspend_count)
		mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
}

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
	"vdbox0",
	"vdbox1",
	"vdbox2",
	"vdbox3",
	"vebox0",
	"vebox1",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

#define fw_ack(d) readl((d)->reg_ack)
#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	/*
	 * We don't really know if the powerwell for the forcewake domain we are
	 * trying to reset here does exist at this point (engines could be fused
	 * off in ICL+), so no waiting for acks
	 */
	/* WaRsClearFWBitsAtReset:bdw,skl */
	fw_clear(d, 0xffff);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
	d->uncore->fw_domains_timer |= d->mask;
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline int
__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack,
	       const u32 value)
{
	return wait_for_atomic((fw_ack(d) & ack) == value,
			       FORCEWAKE_ACK_TIMEOUT_MS);
}

static inline int
wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack)
{
	return __wait_for_ack(d, ack, 0);
}

static inline int
wait_ack_set(const struct intel_uncore_forcewake_domain *d,
	     const u32 ack)
{
	return __wait_for_ack(d, ack, ack);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
	}
}

enum ack_type {
	ACK_CLEAR = 0,
	ACK_SET
};

static int
fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	unsigned int pass;
	bool ack_detected;

	/*
	 * There is a possibility of driver's wake request colliding
	 * with hardware's own wake requests and that can cause
	 * hardware to not deliver the driver's ack message.
	 *
	 * Use a fallback bit toggle to kick the gpu state machine
	 * in the hope that the original ack will be delivered along with
	 * the fallback ack.
	 *
	 * This workaround is described in HSDES #1604254524 and it's known as:
	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
	 * although the name is a bit misleading.
	 */

	pass = 1;
	do {
		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);

		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
		/* Give gt some time to relax before the polling frenzy */
		udelay(10 * pass);
		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);

		ack_detected = (fw_ack(d) & ack_bit) == value;

		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
	} while (!ack_detected && pass++ < 10);

	DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
			 intel_uncore_forcewake_domain_to_str(d->id),
			 type == ACK_SET ? "set" : "clear",
			 fw_ack(d),
			 pass);

	return ack_detected ? 0 : -ETIMEDOUT;
}

static inline void
fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
		fw_domain_wait_ack_clear(d);
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	fw_set(d, FORCEWAKE_KERNEL);
}

static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
	}
}

static inline void
fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
		fw_domain_wait_ack_set(d);
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	fw_clear(d, FORCEWAKE_KERNEL);
}

static void
fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_get_with_fallback(struct intel_uncore *uncore,
			     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear_fallback(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set_fallback(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_put(d);

	uncore->fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct intel_uncore *uncore,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_reset(d);
}
310
311static inline u32 gt_thread_status(struct intel_uncore *uncore)
312{
313 u32 val;
314
315 val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
316 val &= GEN6_GT_THREAD_STATUS_CORE_MASK;
317
318 return val;
319}
320
321static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
322{
323 /*
324 * w/a for a sporadic read returning 0 by waiting for the GT
325 * thread to wake up.
326 */
327 WARN_ONCE(wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
328 "GT thread status wait timed out\n");
329}
330
331static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
332 enum forcewake_domains fw_domains)
333{
334 fw_domains_get(uncore, fw_domains);
335
336 /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
337 __gen6_gt_wait_for_thread_c0(uncore);
338}
339
340static inline u32 fifo_free_entries(struct intel_uncore *uncore)
341{
342 u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);
343
344 return count & GT_FIFO_FREE_ENTRIES_MASK;
345}
346
347static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
348{
349 u32 n;
350
	/*
	 * On VLV, the FIFO is shared by both SW and HW, so we need to
	 * read the FREE_ENTRIES count every time.
	 */
	if (IS_VALLEYVIEW(uncore->i915))
		n = fifo_free_entries(uncore);
	else
		n = uncore->fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	uncore->fifo_count = n - 1;
}

static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct intel_uncore *uncore = domain->uncore;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(uncore->rpm);

	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&uncore->lock, irqflags);

	uncore->fw_domains_timer &= ~domain->mask;

	GEM_BUG_ON(!domain->wake_count);
	if (--domain->wake_count == 0)
		uncore->funcs.force_wake_put(uncore, domain->mask);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return HRTIMER_NORESTART;
}

/* Note: callers must have acquired the PUNIT->PMIC bus before calling this. */
static unsigned int
intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	iosf_mbi_assert_punit_acquired();

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, uncore, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&uncore->lock, irqflags);

		for_each_fw_domain(domain, uncore, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&uncore->lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	fw = uncore->fw_domains_active;
	if (fw)
		uncore->funcs.force_wake_put(uncore, fw);

	fw_domains_reset(uncore, uncore->fw_domains);
	assert_forcewakes_inactive(uncore);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return fw; /* track the lost user forcewake domains */
}

static bool
fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 dbg;

	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 cer;

	cer = __raw_uncore_read32(uncore, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
gen6_check_for_fifo_debug(struct intel_uncore *uncore)
{
	u32 fifodbg;

	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);

	if (unlikely(fifodbg)) {
		DRM_DEBUG_DRIVER("GTFIFODBG = 0x%08x\n", fifodbg);
		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
	}

	return fifodbg;
}

static bool
check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret = false;

	lockdep_assert_held(&uncore->debug->lock);

	if (uncore->debug->suspend_count)
		return false;

	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
		ret |= fpga_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_dbg_unclaimed(uncore))
		ret |= vlv_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_fifo(uncore))
		ret |= gen6_check_for_fifo_debug(uncore);

	return ret;
}

static void forcewake_early_sanitize(struct intel_uncore *uncore,
				     unsigned int restore_forcewake)
{
	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(uncore->i915)) {
		__raw_uncore_write32(uncore, GTFIFOCTL,
				     __raw_uncore_read32(uncore, GTFIFOCTL) |
				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				     GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(uncore);
	if (restore_forcewake) {
		spin_lock_irq(&uncore->lock);
		uncore->funcs.force_wake_get(uncore, restore_forcewake);

		if (intel_uncore_has_fifo(uncore))
			uncore->fifo_count = fifo_free_entries(uncore);
		spin_unlock_irq(&uncore->lock);
	}
	iosf_mbi_punit_release();
}

void intel_uncore_suspend(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();
}

void intel_uncore_resume_early(struct intel_uncore *uncore)
{
	unsigned int restore_forcewake;

	if (intel_uncore_unclaimed_mmio(uncore))
		DRM_DEBUG("unclaimed mmio detected on resume, clearing\n");

	if (!intel_uncore_has_forcewake(uncore))
		return;

	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
	forcewake_early_sanitize(uncore, restore_forcewake);

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		uncore->funcs.force_wake_get(uncore, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to grab the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down particular
 * forcewake domains, this function should be called at the beginning of
 * the sequence, and the references should subsequently be dropped by a
 * symmetric call to intel_uncore_forcewake_put(). Usually the caller wants
 * all the domains to be kept awake, so @fw_domains would then be
 * FORCEWAKE_ALL.
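 *
 * A minimal usage sketch (illustrative only; HYPOTHETICAL_REG is a
 * placeholder, not a real register definition)::
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 *	val = intel_uncore_read_fw(uncore, HYPOTHETICAL_REG);
 *	intel_uncore_write_fw(uncore, HYPOTHETICAL_REG, val);
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);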
 */
void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(uncore->rpm);

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_get(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}

/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
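 *
 * A sketch of the expected pairing (illustrative only)::
 *
 *	intel_uncore_forcewake_user_get(uncore);
 *	... userspace performs its raw mmio accesses ...
 *	intel_uncore_forcewake_user_put(uncore);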
 */
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!uncore->user_forcewake_count++) {
		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
		spin_lock(&uncore->debug->lock);
		mmio_debug_suspend(uncore->debug);
		spin_unlock(&uncore->debug->lock);
	}
	spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!--uncore->user_forcewake_count) {
		spin_lock(&uncore->debug->lock);
		mmio_debug_resume(uncore->debug);

		if (check_for_unclaimed_mmio(uncore))
			dev_info(uncore->i915->drm.dev,
				 "Invalid mmio detected during user access\n");
		spin_unlock(&uncore->debug->lock);

		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the uncore->lock spinlock.
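 *
 * A sketch of the expected calling pattern (illustrative only)::
 *
 *	spin_lock_irq(&uncore->lock);
 *	intel_uncore_forcewake_get__locked(uncore, fw_domains);
 *	... raw register accesses ...
 *	intel_uncore_forcewake_put__locked(uncore, fw_domains);
 *	spin_unlock_irq(&uncore->lock);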
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(uncore, fw_domains);
}

static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		GEM_BUG_ON(!domain->wake_count);

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references on
 *
 * This function drops the device-level forcewakes for the specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->funcs.force_wake_put)
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the uncore->lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(uncore, fw_domains);
}

void assert_forcewakes_inactive(struct intel_uncore *uncore)
{
	if (!uncore->funcs.force_wake_get)
		return;

	WARN(uncore->fw_domains_active,
	     "Expected all fw_domains to be inactive, but %08x are still on\n",
	     uncore->fw_domains_active);
}

void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		return;

	if (!uncore->funcs.force_wake_get)
		return;

	spin_lock_irq(&uncore->lock);

	assert_rpm_wakelock_held(uncore->rpm);

	fw_domains &= uncore->fw_domains;
	WARN(fw_domains & ~uncore->fw_domains_active,
	     "Expected %08x fw_domains to be active, but %08x are off\n",
	     fw_domains, fw_domains & ~uncore->fw_domains_active);

	/*
	 * Check that the caller has an explicit wakeref and we don't mistake
	 * it for the auto wakeref.
	 */
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		unsigned int actual = READ_ONCE(domain->wake_count);
		unsigned int expect = 1;

		if (uncore->fw_domains_timer & domain->mask)
			expect++; /* pending automatic release */

		if (WARN(actual < expect,
			 "Expected domain %d to be held awake by caller, count=%d\n",
			 domain->id, actual))
			break;
	}

	spin_unlock_irq(&uncore->lock);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define GEN11_NEEDS_FORCE_WAKE(reg) \
	((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000))

#define __gen6_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({                                 \
	unsigned int start__ = 0, end__ = (num);                        \
	typeof(base) result__ = NULL;                                   \
	while (start__ < end__) {                                       \
		unsigned int mid__ = start__ + (end__ - start__) / 2;   \
		int ret__ = (cmp)((key), (base) + mid__);               \
		if (ret__ < 0) {                                        \
			end__ = mid__;                                  \
		} else if (ret__ > 0) {                                 \
			start__ = mid__ + 1;                            \
		} else {                                                \
			result__ = (base) + mid__;                      \
			break;                                          \
		}                                                       \
	}                                                               \
	result__;                                                       \
})

static enum forcewake_domains
find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			uncore->fw_domains_table,
			uncore->fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	/*
	 * The list of FW domains depends on the SKU in gen11+ so we
	 * can't determine it statically. We use FORCEWAKE_ALL and
	 * translate it here to the list of available domains.
	 */
	if (entry->domains == FORCEWAKE_ALL)
		return uncore->fw_domains;

	WARN(entry->domains & ~uncore->fw_domains,
	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
	     entry->domains & ~uncore->fw_domains, offset);

	return entry->domains;
}

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(INTEL_GEN(dev_priv) >= 9 || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (GEN11_NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};

static const i915_reg_t gen11_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),		/* 0x2000 (base) */
	GEN6_RPNSWREQ,				/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,			/* 0xA00C */
	RING_TAIL(BLT_RING_BASE),		/* 0x22000 (base) */
	RING_TAIL(GEN11_BSD_RING_BASE),		/* 0x1C0000 (base) */
	RING_TAIL(GEN11_BSD2_RING_BASE),	/* 0x1C4000 (base) */
	RING_TAIL(GEN11_VEBOX_RING_BASE),	/* 0x1C8000 (base) */
	RING_TAIL(GEN11_BSD3_RING_BASE),	/* 0x1D0000 (base) */
	RING_TAIL(GEN11_BSD4_RING_BASE),	/* 0x1D4000 (base) */
	RING_TAIL(GEN11_VEBOX2_RING_BASE),	/* 0x1D8000 (base) */
	/* TODO: Other registers are not yet used */
};

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

#define __is_genX_shadowed(x) \
static bool is_gen##x##_shadowed(u32 offset) \
{ \
	const i915_reg_t *regs = gen##x##_shadowed_regs; \
	return BSEARCH(offset, regs, ARRAY_SIZE(gen##x##_shadowed_regs), \
		       mmio_reg_cmp); \
}

__is_genX_shadowed(8)
__is_genX_shadowed(11)

static enum forcewake_domains
gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{
	return FORCEWAKE_RENDER;
}

#define __gen8_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen11_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
	GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
};

static void
ilk_dummy_write(struct intel_uncore *uncore)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_uncore_write32(uncore, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct intel_uncore *uncore,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (WARN(check_for_unclaimed_mmio(uncore) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
		 i915_mmio_reg_offset(reg)))
		/* Only report the first N failures */
		i915_modparams.mmio_debug--;
}

static inline void
unclaimed_reg_debug(struct intel_uncore *uncore,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915_modparams.mmio_debug))
		return;

	/* interrupts are disabled and re-enabled around uncore->lock usage */
	lockdep_assert_held(&uncore->lock);

	if (before)
		spin_lock(&uncore->debug->lock);

	__unclaimed_reg_debug(uncore, reg, read, before);

	if (!before)
		spin_unlock(&uncore->debug->lock);
}

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(uncore); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(uncore, reg, true, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static noinline void ___force_wake_auto(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
		fw_domain_arm_timer(domain);

	uncore->funcs.force_wake_get(uncore, fw_domains);
}

static inline void __force_wake_auto(struct intel_uncore *uncore,
				     enum forcewake_domains fw_domains)
{
	GEM_BUG_ON(!fw_domains);

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= uncore->fw_domains;
	fw_domains &= ~uncore->fw_domains_active;

	if (fw_domains)
		___force_wake_auto(uncore, fw_domains);
}

#define __gen_read(func, x) \
static u##x \
func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN6_READ_FOOTER; \
}

#define __gen_reg_read_funcs(func) \
static enum forcewake_domains \
func##_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
	return __##func##_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
} \
\
__gen_read(func, 8) \
__gen_read(func, 16) \
__gen_read(func, 32) \
__gen_read(func, 64)

__gen_reg_read_funcs(gen11_fwtable);
__gen_reg_read_funcs(fwtable);
__gen_reg_read_funcs(gen6);

#undef __gen_reg_read_funcs
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(uncore, reg, false, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#define __gen_write(func, x) \
static void \
func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __gen_reg_write_funcs(func) \
static enum forcewake_domains \
func##_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
	return __##func##_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
} \
\
__gen_write(func, 8) \
__gen_write(func, 16) \
__gen_write(func, 32)

__gen_reg_write_funcs(gen11_fwtable);
__gen_reg_write_funcs(fwtable);
__gen_reg_write_funcs(gen8);

#undef __gen_reg_write_funcs
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_writeb = x##_write8; \
	(uncore)->funcs.mmio_writew = x##_write16; \
	(uncore)->funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_readb = x##_read8; \
	(uncore)->funcs.mmio_readw = x##_read16; \
	(uncore)->funcs.mmio_readl = x##_read32; \
	(uncore)->funcs.mmio_readq = x##_read64; \
} while (0)

#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
	(uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
	(uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
} while (0)

static int __fw_domain_init(struct intel_uncore *uncore,
			    enum forcewake_domain_id domain_id,
			    i915_reg_t reg_set,
			    i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
	GEM_BUG_ON(uncore->fw_domain[domain_id]);

	if (i915_inject_probe_failure(uncore->i915))
		return -ENOMEM;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	WARN_ON(!i915_mmio_reg_valid(reg_set));
	WARN_ON(!i915_mmio_reg_valid(reg_ack));

	d->uncore = uncore;
	d->wake_count = 0;
	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);

	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));

	d->mask = BIT(domain_id);

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	uncore->fw_domains |= BIT(domain_id);

	fw_domain_reset(d);

	uncore->fw_domain[domain_id] = d;

	return 0;
}

static void fw_domain_fini(struct intel_uncore *uncore,
			   enum forcewake_domain_id domain_id)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);

	d = fetch_and_zero(&uncore->fw_domain[domain_id]);
	if (!d)
		return;

	uncore->fw_domains &= ~BIT(domain_id);
	WARN_ON(d->wake_count);
	WARN_ON(hrtimer_cancel(&d->timer));
	kfree(d);
}

static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
{
	struct intel_uncore_forcewake_domain *d;
	int tmp;

	for_each_fw_domain(d, uncore, tmp)
		fw_domain_fini(uncore, d->id);
}

static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret = 0;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

#define fw_domain_init(uncore__, id__, set__, ack__) \
	(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))

	if (INTEL_GEN(i915) >= 11) {
		int i;

		uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);

		for (i = 0; i < I915_MAX_VCS; i++) {
			if (!HAS_ENGINE(i915, _VCS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
		}
		for (i = 0; i < I915_MAX_VECS; i++) {
			if (!HAS_ENGINE(i915, _VECS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
		}
	} else if (IS_GEN_RANGE(i915, 9, 10)) {
		uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		uncore->funcs.force_wake_get = fw_domains_get;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(i915)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero, which will be (correctly)
		 * interpreted by the test below as MT forcewake being
		 * disabled.
		 */
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. At this stage we don't know which flavour this
		 * ivb is, so it is better to also reset the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_uncore_write32(uncore, FORCEWAKE, 0);
		__raw_posting_read(uncore, ECOBUS);

		ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
		if (ret)
			goto out;

		spin_lock_irq(&uncore->lock);
		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
		ecobus = __raw_uncore_read32(uncore, ECOBUS);
		fw_domains_put(uncore, FORCEWAKE_RENDER);
		spin_unlock_irq(&uncore->lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN(i915, 6)) {
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

#undef fw_domain_init

	/* All future platforms are expected to require complex power gating */
	WARN_ON(!ret && uncore->fw_domains == 0);

out:
	if (ret)
		intel_uncore_fw_domains_fini(uncore);

	return ret;
}

#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
{ \
	(uncore)->fw_domains_table = \
			(struct intel_forcewake_range *)(d); \
	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
}

static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct intel_uncore *uncore = container_of(nb,
			struct intel_uncore, pmic_bus_access_nb);

	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * Forcewake all now to make sure that we don't need to do a
		 * forcewake later, which on systems where this notifier gets
		 * called requires the punit to access the shared pmic i2c
		 * bus; that bus will be busy after this notification, leading
		 * to "render: timed out waiting for forcewake ack request."
		 * errors.
		 *
		 * The notifier is unregistered during intel_runtime_suspend(),
		 * so it's ok to access the HW here without holding a RPM
		 * wake reference -> disable wakeref asserts for the time of
		 * the access.
		 */
		disable_rpm_wakeref_asserts(uncore->rpm);
		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
		enable_rpm_wakeref_asserts(uncore->rpm);
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
		break;
	}

	return NOTIFY_OK;
}

static int uncore_mmio_setup(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	struct pci_dev *pdev = i915->drm.pdev;
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN(i915, 2) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap to avoid
	 * clobbering the GTT, which we want to map with ioremap_wc instead.
	 * Fortunately, the register BAR remains the same size for all the
	 * earlier generations up to Ironlake.
	 */
	if (INTEL_GEN(i915) < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (uncore->regs == NULL) {
		DRM_ERROR("failed to map registers\n");

		return -EIO;
	}

	return 0;
}

static void uncore_mmio_cleanup(struct intel_uncore *uncore)
{
	struct pci_dev *pdev = uncore->i915->drm.pdev;

	pci_iounmap(pdev, uncore->regs);
}

void intel_uncore_init_early(struct intel_uncore *uncore,
			     struct drm_i915_private *i915)
{
	spin_lock_init(&uncore->lock);
	uncore->i915 = i915;
	uncore->rpm = &i915->runtime_pm;
	uncore->debug = &i915->mmio_debug;
}

static void uncore_raw_init(struct intel_uncore *uncore)
{
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore));

	if (IS_GEN(uncore->i915, 5)) {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
	} else {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
	}
}

static int uncore_forcewake_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	ret = intel_uncore_fw_domains_init(uncore);
	if (ret)
		return ret;
	forcewake_early_sanitize(uncore, 0);

	if (IS_GEN_RANGE(i915, 6, 7)) {
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);

		if (IS_VALLEYVIEW(i915)) {
			ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
		}
	} else if (IS_GEN(i915, 8)) {
		if (IS_CHERRYVIEW(i915)) {
			ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
		}
	} else if (IS_GEN_RANGE(i915, 9, 10)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
	} else {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
	}

	uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);

	return 0;
}

int intel_uncore_init_mmio(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	ret = uncore_mmio_setup(uncore);
	if (ret)
		return ret;

	if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
		uncore->flags |= UNCORE_HAS_FORCEWAKE;

	if (!intel_uncore_has_forcewake(uncore)) {
		uncore_raw_init(uncore);
	} else {
		ret = uncore_forcewake_init(uncore);
		if (ret)
			goto out_mmio_cleanup;
	}

	/* make sure fw funcs are set if and only if we have fw */
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);

	if (HAS_FPGA_DBG_UNCLAIMED(i915))
		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;

	if (IS_GEN_RANGE(i915, 6, 7))
		uncore->flags |= UNCORE_HAS_FIFO;

	/* clear out unclaimed reg detection bit */
	if (intel_uncore_unclaimed_mmio(uncore))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	return 0;

out_mmio_cleanup:
	uncore_mmio_cleanup(uncore);

	return ret;
}

/*
 * We might have detected that some engines are fused off after we initialized
 * the forcewake domains. Prune them, to make sure they only reference existing
 * engines.
 */
void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	enum forcewake_domains fw_domains = uncore->fw_domains;
	enum forcewake_domain_id domain_id;
	int i;

	if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(i915) < 11)
		return;

	for (i = 0; i < I915_MAX_VCS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;

		if (HAS_ENGINE(i915, _VCS(i)))
			continue;

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}

	for (i = 0; i < I915_MAX_VECS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;

		if (HAS_ENGINE(i915, _VECS(i)))
			continue;

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}
}

void intel_uncore_fini_mmio(struct intel_uncore *uncore)
{
	if (intel_uncore_has_forcewake(uncore)) {
		iosf_mbi_punit_acquire();
		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
			&uncore->pmic_bus_access_nb);
		intel_uncore_forcewake_reset(uncore);
		intel_uncore_fw_domains_fini(uncore);
		iosf_mbi_punit_release();
	}

	uncore_mmio_cleanup(uncore);
}

static const struct reg_whitelist {
	i915_reg_t offset_ldw;
	i915_reg_t offset_udw;
	u16 gen_mask;
	u8 size;
} reg_read_whitelist[] = { {
	.offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	.gen_mask = INTEL_GEN_MASK(4, 12),
	.size = 8
} };

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_uncore *uncore = &i915->uncore;
	struct drm_i915_reg_read *reg = data;
	struct reg_whitelist const *entry;
	intel_wakeref_t wakeref;
	unsigned int flags;
	int remain;
	int ret = 0;

	entry = reg_read_whitelist;
	remain = ARRAY_SIZE(reg_read_whitelist);
	while (remain) {
		u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);

		GEM_BUG_ON(!is_power_of_2(entry->size));
		GEM_BUG_ON(entry->size > 8);
		GEM_BUG_ON(entry_offset & (entry->size - 1));

		if (INTEL_INFO(i915)->gen_mask & entry->gen_mask &&
		    entry_offset == (reg->offset & -entry->size))
			break;
		entry++;
		remain--;
	}

	if (!remain)
		return -EINVAL;

	flags = reg->offset & (entry->size - 1);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
			reg->val = intel_uncore_read64_2x32(uncore,
							    entry->offset_ldw,
							    entry->offset_udw);
		else if (entry->size == 8 && flags == 0)
			reg->val = intel_uncore_read64(uncore,
						       entry->offset_ldw);
		else if (entry->size == 4 && flags == 0)
			reg->val = intel_uncore_read(uncore, entry->offset_ldw);
		else if (entry->size == 2 && flags == 0)
			reg->val = intel_uncore_read16(uncore,
						       entry->offset_ldw);
		else if (entry->size == 1 && flags == 0)
			reg->val = intel_uncore_read8(uncore,
						      entry->offset_ldw);
		else
			ret = -EINVAL;
	}

	return ret;
}

/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
 * @slow_timeout_ms: slow timeout in milliseconds
 * @out_value: optional placeholder to hold the register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *	(I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must not be larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted; it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
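 *
 * For example, a caller could poll briefly for the GT threads to idle (an
 * illustrative call, not taken from a real user)::
 *
 *	ret = __intel_wait_for_register_fw(uncore, GEN6_GT_THREAD_STATUS_REG,
 *					   GEN6_GT_THREAD_STATUS_CORE_MASK, 0,
 *					   500, 0, NULL);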
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 uninitialized_var(reg_value);
#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}

/**
 * __intel_wait_for_register - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
 * @slow_timeout_ms: slow timeout in milliseconds
 * @out_value: optional placeholder to hold the register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *	(I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value)
{
	unsigned fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
	u32 reg_value;
	int ret;

	might_sleep_if(slow_timeout_ms);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	ret = __intel_wait_for_register_fw(uncore,
					   reg, mask, value,
					   fast_timeout_us, 0, &reg_value);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irq(&uncore->lock);

	if (ret && slow_timeout_ms)
		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
								       reg),
				 (reg_value & mask) == value,
				 slow_timeout_ms * 1000, 10, 1000);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	if (out_value)
		*out_value = reg_value;

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret;

	spin_lock_irq(&uncore->debug->lock);
	ret = check_for_unclaimed_mmio(uncore);
	spin_unlock_irq(&uncore->debug->lock);

	return ret;
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
	bool ret = false;

	spin_lock_irq(&uncore->debug->lock);

	if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
		goto out;

	if (unlikely(check_for_unclaimed_mmio(uncore))) {
		if (!i915_modparams.mmio_debug) {
			DRM_DEBUG("Unclaimed register detected, "
				  "enabling oneshot unclaimed register reporting. "
				  "Please use i915.mmio_debug=N for more information.\n");
			i915_modparams.mmio_debug++;
		}
		uncore->debug->unclaimed_mmio_check--;
		ret = true;
	}

out:
	spin_unlock_irq(&uncore->debug->lock);

	return ret;
}

/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 * a register
 * @uncore: pointer to struct intel_uncore
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains that must be taken with, for example,
 * intel_uncore_forcewake_get() for the specified register to be accessible
 * in the specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER)
 * requires callers to do FIFO management on their own or risk losing writes.
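 *
 * A sketch of the intended use with raw accessors (illustrative only)::
 *
 *	fw = intel_uncore_forcewake_for_reg(uncore, reg,
 *					    FW_REG_READ | FW_REG_WRITE);
 *	intel_uncore_forcewake_get(uncore, fw);
 *	val = intel_uncore_read_fw(uncore, reg);
 *	intel_uncore_write_fw(uncore, reg, val);
 *	intel_uncore_forcewake_put(uncore, fw);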
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	WARN_ON(!op);

	if (!intel_uncore_has_forcewake(uncore))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = uncore->funcs.read_fw_domains(uncore, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);

	WARN_ON(fw_domains & ~uncore->fw_domains);

	return fw_domains;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif