1/* SPDX-License-Identifier: MIT */
2/*
3 * Copyright © 2019 Intel Corporation
4 */
5
6#include "display/intel_crt.h"
7
8#include "i915_drv.h"
9#include "i915_irq.h"
10#include "intel_cdclk.h"
11#include "intel_combo_phy.h"
12#include "intel_display_power.h"
13#include "intel_de.h"
14#include "intel_display_types.h"
15#include "intel_dmc.h"
16#include "intel_dpio_phy.h"
17#include "intel_hotplug.h"
18#include "intel_pm.h"
19#include "intel_pps.h"
20#include "intel_sideband.h"
21#include "intel_tc.h"
22#include "intel_vga.h"
23
24bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
25 enum i915_power_well_id power_well_id);
26
27const char *
28intel_display_power_domain_str(enum intel_display_power_domain domain)
29{
30 switch (domain) {
31 case POWER_DOMAIN_DISPLAY_CORE:
32 return "DISPLAY_CORE";
33 case POWER_DOMAIN_PIPE_A:
34 return "PIPE_A";
35 case POWER_DOMAIN_PIPE_B:
36 return "PIPE_B";
37 case POWER_DOMAIN_PIPE_C:
38 return "PIPE_C";
39 case POWER_DOMAIN_PIPE_D:
40 return "PIPE_D";
41 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
42 return "PIPE_A_PANEL_FITTER";
43 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
44 return "PIPE_B_PANEL_FITTER";
45 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
46 return "PIPE_C_PANEL_FITTER";
47 case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
48 return "PIPE_D_PANEL_FITTER";
49 case POWER_DOMAIN_TRANSCODER_A:
50 return "TRANSCODER_A";
51 case POWER_DOMAIN_TRANSCODER_B:
52 return "TRANSCODER_B";
53 case POWER_DOMAIN_TRANSCODER_C:
54 return "TRANSCODER_C";
55 case POWER_DOMAIN_TRANSCODER_D:
56 return "TRANSCODER_D";
57 case POWER_DOMAIN_TRANSCODER_EDP:
58 return "TRANSCODER_EDP";
59 case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
60 return "TRANSCODER_VDSC_PW2";
61 case POWER_DOMAIN_TRANSCODER_DSI_A:
62 return "TRANSCODER_DSI_A";
63 case POWER_DOMAIN_TRANSCODER_DSI_C:
64 return "TRANSCODER_DSI_C";
65 case POWER_DOMAIN_PORT_DDI_A_LANES:
66 return "PORT_DDI_A_LANES";
67 case POWER_DOMAIN_PORT_DDI_B_LANES:
68 return "PORT_DDI_B_LANES";
69 case POWER_DOMAIN_PORT_DDI_C_LANES:
70 return "PORT_DDI_C_LANES";
71 case POWER_DOMAIN_PORT_DDI_D_LANES:
72 return "PORT_DDI_D_LANES";
73 case POWER_DOMAIN_PORT_DDI_E_LANES:
74 return "PORT_DDI_E_LANES";
75 case POWER_DOMAIN_PORT_DDI_F_LANES:
76 return "PORT_DDI_F_LANES";
77 case POWER_DOMAIN_PORT_DDI_G_LANES:
78 return "PORT_DDI_G_LANES";
79 case POWER_DOMAIN_PORT_DDI_H_LANES:
80 return "PORT_DDI_H_LANES";
81 case POWER_DOMAIN_PORT_DDI_I_LANES:
82 return "PORT_DDI_I_LANES";
83 case POWER_DOMAIN_PORT_DDI_A_IO:
84 return "PORT_DDI_A_IO";
85 case POWER_DOMAIN_PORT_DDI_B_IO:
86 return "PORT_DDI_B_IO";
87 case POWER_DOMAIN_PORT_DDI_C_IO:
88 return "PORT_DDI_C_IO";
89 case POWER_DOMAIN_PORT_DDI_D_IO:
90 return "PORT_DDI_D_IO";
91 case POWER_DOMAIN_PORT_DDI_E_IO:
92 return "PORT_DDI_E_IO";
93 case POWER_DOMAIN_PORT_DDI_F_IO:
94 return "PORT_DDI_F_IO";
95 case POWER_DOMAIN_PORT_DDI_G_IO:
96 return "PORT_DDI_G_IO";
97 case POWER_DOMAIN_PORT_DDI_H_IO:
98 return "PORT_DDI_H_IO";
99 case POWER_DOMAIN_PORT_DDI_I_IO:
100 return "PORT_DDI_I_IO";
101 case POWER_DOMAIN_PORT_DSI:
102 return "PORT_DSI";
103 case POWER_DOMAIN_PORT_CRT:
104 return "PORT_CRT";
105 case POWER_DOMAIN_PORT_OTHER:
106 return "PORT_OTHER";
107 case POWER_DOMAIN_VGA:
108 return "VGA";
109 case POWER_DOMAIN_AUDIO:
110 return "AUDIO";
111 case POWER_DOMAIN_AUX_A:
112 return "AUX_A";
113 case POWER_DOMAIN_AUX_B:
114 return "AUX_B";
115 case POWER_DOMAIN_AUX_C:
116 return "AUX_C";
117 case POWER_DOMAIN_AUX_D:
118 return "AUX_D";
119 case POWER_DOMAIN_AUX_E:
120 return "AUX_E";
121 case POWER_DOMAIN_AUX_F:
122 return "AUX_F";
123 case POWER_DOMAIN_AUX_G:
124 return "AUX_G";
125 case POWER_DOMAIN_AUX_H:
126 return "AUX_H";
127 case POWER_DOMAIN_AUX_I:
128 return "AUX_I";
129 case POWER_DOMAIN_AUX_IO_A:
130 return "AUX_IO_A";
131 case POWER_DOMAIN_AUX_C_TBT:
132 return "AUX_C_TBT";
133 case POWER_DOMAIN_AUX_D_TBT:
134 return "AUX_D_TBT";
135 case POWER_DOMAIN_AUX_E_TBT:
136 return "AUX_E_TBT";
137 case POWER_DOMAIN_AUX_F_TBT:
138 return "AUX_F_TBT";
139 case POWER_DOMAIN_AUX_G_TBT:
140 return "AUX_G_TBT";
141 case POWER_DOMAIN_AUX_H_TBT:
142 return "AUX_H_TBT";
143 case POWER_DOMAIN_AUX_I_TBT:
144 return "AUX_I_TBT";
145 case POWER_DOMAIN_GMBUS:
146 return "GMBUS";
147 case POWER_DOMAIN_INIT:
148 return "INIT";
149 case POWER_DOMAIN_MODESET:
150 return "MODESET";
151 case POWER_DOMAIN_GT_IRQ:
152 return "GT_IRQ";
153 case POWER_DOMAIN_DPLL_DC_OFF:
154 return "DPLL_DC_OFF";
155 case POWER_DOMAIN_TC_COLD_OFF:
156 return "TC_COLD_OFF";
157 default:
158 MISSING_CASE(domain);
159 return "?";
160 }
161}
162
163static void intel_power_well_enable(struct drm_i915_private *dev_priv,
164 struct i915_power_well *power_well)
165{
166 drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
167 power_well->desc->ops->enable(dev_priv, power_well);
168 power_well->hw_enabled = true;
169}
170
171static void intel_power_well_disable(struct drm_i915_private *dev_priv,
172 struct i915_power_well *power_well)
173{
174 drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
175 power_well->hw_enabled = false;
176 power_well->desc->ops->disable(dev_priv, power_well);
177}
178
179static void intel_power_well_get(struct drm_i915_private *dev_priv,
180 struct i915_power_well *power_well)
181{
182 if (!power_well->count++)
183 intel_power_well_enable(dev_priv, power_well);
184}
185
186static void intel_power_well_put(struct drm_i915_private *dev_priv,
187 struct i915_power_well *power_well)
188{
189 drm_WARN(&dev_priv->drm, !power_well->count,
190 "Use count on power well %s is already zero",
191 power_well->desc->name);
192
193 if (!--power_well->count)
194 intel_power_well_disable(dev_priv, power_well);
195}
196
197/**
198 * __intel_display_power_is_enabled - unlocked check for a power domain
199 * @dev_priv: i915 device instance
200 * @domain: power domain to check
201 *
202 * This is the unlocked version of intel_display_power_is_enabled() and should
203 * only be used from error capture and recovery code where deadlocks are
204 * possible.
205 *
206 * Returns:
207 * True when the power domain is enabled, false otherwise.
208 */
209bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
210 enum intel_display_power_domain domain)
211{
212 struct i915_power_well *power_well;
213 bool is_enabled;
214
215 if (dev_priv->runtime_pm.suspended)
216 return false;
217
218 is_enabled = true;
219
220 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
221 if (power_well->desc->always_on)
222 continue;
223
224 if (!power_well->hw_enabled) {
225 is_enabled = false;
226 break;
227 }
228 }
229
230 return is_enabled;
231}
232
233/**
234 * intel_display_power_is_enabled - check for a power domain
235 * @dev_priv: i915 device instance
236 * @domain: power domain to check
237 *
238 * This function can be used to check the hw power domain state. It is mostly
239 * used in hardware state readout functions. Everywhere else code should rely
240 * upon explicit power domain reference counting to ensure that the hardware
241 * block is powered up before accessing it.
242 *
243 * Callers must hold the relevant modesetting locks to ensure that concurrent
244 * threads can't disable the power well while the caller tries to read a few
245 * registers.
246 *
247 * Returns:
248 * True when the power domain is enabled, false otherwise.
249 */
250bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
251 enum intel_display_power_domain domain)
252{
253 struct i915_power_domains *power_domains;
254 bool ret;
255
256 power_domains = &dev_priv->power_domains;
257
258 mutex_lock(&power_domains->lock);
259 ret = __intel_display_power_is_enabled(dev_priv, domain);
260 mutex_unlock(&power_domains->lock);
261
262 return ret;
263}
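
/*
 * Illustrative usage sketch (editorial note, hypothetical caller): hardware
 * state readout code typically guards register access with this check, e.g.
 *
 *	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
 *		return false;
 *	... read pipe A registers ...
 *
 * whereas normal code paths should instead hold an explicit reference via
 * intel_display_power_get()/intel_display_power_put(), as the kernel-doc
 * above explains.
 */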
264
265/*
266 * Starting with Haswell, we have a "Power Down Well" that can be turned off
267 * when not needed anymore. We have 4 registers that can request the power well
268 * to be enabled, and it will only be disabled if none of the registers is
269 * requesting it to be enabled.
270 */
271static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
272 u8 irq_pipe_mask, bool has_vga)
273{
274 if (has_vga)
275 intel_vga_reset_io_mem(dev_priv);
276
277 if (irq_pipe_mask)
278 gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
279}
280
281static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
282 u8 irq_pipe_mask)
283{
284 if (irq_pipe_mask)
285 gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
286}
287
288#define ICL_AUX_PW_TO_CH(pw_idx) \
289 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
290
291#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
292 ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
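
/*
 * Worked example (editorial note, assuming the usual consecutive layout of
 * the AUX power well indices and aux_ch values): on ICL,
 * ICL_AUX_PW_TO_CH(ICL_PW_CTL_IDX_AUX_B) evaluates to AUX_CH_B, while
 * ICL_TBT_AUX_PW_TO_CH(ICL_PW_CTL_IDX_AUX_TBT2) evaluates to AUX_CH_D,
 * i.e. the TBT AUX power wells map onto channels C and up.
 */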
293
294static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
295{
296 int pw_idx = power_well->desc->hsw.idx;
297
298 return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
299 ICL_AUX_PW_TO_CH(pw_idx);
300}
301
302static struct intel_digital_port *
303aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
304 enum aux_ch aux_ch)
305{
306 struct intel_digital_port *dig_port = NULL;
307 struct intel_encoder *encoder;
308
309 for_each_intel_encoder(&dev_priv->drm, encoder) {
310 /* We'll check the MST primary port */
311 if (encoder->type == INTEL_OUTPUT_DP_MST)
312 continue;
313
314 dig_port = enc_to_dig_port(encoder);
315 if (!dig_port)
316 continue;
317
318 if (dig_port->aux_ch != aux_ch) {
319 dig_port = NULL;
320 continue;
321 }
322
323 break;
324 }
325
326 return dig_port;
327}
328
329static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
330 const struct i915_power_well *power_well)
331{
332 enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
333 struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
334
335 return intel_port_to_phy(i915, dig_port->base.port);
336}
337
338static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
339 struct i915_power_well *power_well,
340 bool timeout_expected)
341{
342 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
343 int pw_idx = power_well->desc->hsw.idx;
344
345 /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
346 if (intel_de_wait_for_set(dev_priv, regs->driver,
347 HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
348 drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
349 power_well->desc->name);
350
351 drm_WARN_ON(&dev_priv->drm, !timeout_expected);
352
353 }
354}
355
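/*
 * Editorial note: the value returned below encodes which register(s) still
 * request the power well: bit 0 = BIOS, bit 1 = driver, bit 2 = KVMR,
 * bit 3 = DEBUG, matching the "(bios:%d driver:%d kvmr:%d debug:%d)"
 * diagnostic printed by hsw_wait_for_power_well_disable().
 */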
356static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
357 const struct i915_power_well_regs *regs,
358 int pw_idx)
359{
360 u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
361 u32 ret;
362
363 ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
364 ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
365 if (regs->kvmr.reg)
366 ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
367 ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
368
369 return ret;
370}
371
372static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
373 struct i915_power_well *power_well)
374{
375 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
376 int pw_idx = power_well->desc->hsw.idx;
377 bool disabled;
378 u32 reqs;
379
380 /*
381 * Bspec doesn't require waiting for PWs to get disabled, but still do
382 * this for paranoia. The known cases where a PW will be forced on:
383 * - a KVMR request on any power well via the KVMR request register
384 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
385 * DEBUG request registers
386 * Skip the wait in case any of the request bits are set and print a
387 * diagnostic message.
388 */
389 wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
390 HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
391 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
392 if (disabled)
393 return;
394
395 drm_dbg_kms(&dev_priv->drm,
396 "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
397 power_well->desc->name,
398 !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
399}
400
401static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
402 enum skl_power_gate pg)
403{
404 /* Timeout 5us for PG#0, for other PGs 1us */
405 drm_WARN_ON(&dev_priv->drm,
406 intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
407 SKL_FUSE_PG_DIST_STATUS(pg), 1));
408}
409
410static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
411 struct i915_power_well *power_well)
412{
413 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
414 int pw_idx = power_well->desc->hsw.idx;
415 u32 val;
416
417 if (power_well->desc->hsw.has_fuses) {
418 enum skl_power_gate pg;
419
420 pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
421 SKL_PW_CTL_IDX_TO_PG(pw_idx);
422 /*
423 * For PW1 we have to wait for the PW0/PG0 fuse state before
424 * enabling the power well, and for PW1/PG1's own fuse state
425 * after enabling it. For all other power wells with fuses we
426 * only have to wait for that PW/PG's fuse state after the
427 * enabling.
428 */
429 if (pg == SKL_PG1)
430 gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
431 }
432
433 val = intel_de_read(dev_priv, regs->driver);
434 intel_de_write(dev_priv, regs->driver,
435 val | HSW_PWR_WELL_CTL_REQ(pw_idx));
436
437 hsw_wait_for_power_well_enable(dev_priv, power_well, false);
438
439 /* Display WA #1178: cnl */
440 if (IS_CANNONLAKE(dev_priv) &&
441 pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
442 pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
443 u32 val;
444
445 val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
446 val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
447 intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
448 }
449
450 if (power_well->desc->hsw.has_fuses) {
451 enum skl_power_gate pg;
452
453 pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
454 SKL_PW_CTL_IDX_TO_PG(pw_idx);
455 gen9_wait_for_power_well_fuses(dev_priv, pg);
456 }
457
458 hsw_power_well_post_enable(dev_priv,
459 power_well->desc->hsw.irq_pipe_mask,
460 power_well->desc->hsw.has_vga);
461}
462
463static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
464 struct i915_power_well *power_well)
465{
466 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
467 int pw_idx = power_well->desc->hsw.idx;
468 u32 val;
469
470 hsw_power_well_pre_disable(dev_priv,
471 power_well->desc->hsw.irq_pipe_mask);
472
473 val = intel_de_read(dev_priv, regs->driver);
474 intel_de_write(dev_priv, regs->driver,
475 val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
476 hsw_wait_for_power_well_disable(dev_priv, power_well);
477}
478
479static void
480icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
481 struct i915_power_well *power_well)
482{
483 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
484 int pw_idx = power_well->desc->hsw.idx;
485 enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
486 u32 val;
487
488 drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
489
490 val = intel_de_read(dev_priv, regs->driver);
491 intel_de_write(dev_priv, regs->driver,
492 val | HSW_PWR_WELL_CTL_REQ(pw_idx));
493
494 if (DISPLAY_VER(dev_priv) < 12) {
495 val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
496 intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
497 val | ICL_LANE_ENABLE_AUX);
498 }
499
500 hsw_wait_for_power_well_enable(dev_priv, power_well, false);
501
502 /* Display WA #1178: icl */
503 if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
504 !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
505 val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
506 val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
507 intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
508 }
509}
510
511static void
512icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
513 struct i915_power_well *power_well)
514{
515 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
516 int pw_idx = power_well->desc->hsw.idx;
517 enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
518 u32 val;
519
520 drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
521
522 val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
523 intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
524 val & ~ICL_LANE_ENABLE_AUX);
525
526 val = intel_de_read(dev_priv, regs->driver);
527 intel_de_write(dev_priv, regs->driver,
528 val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
529
530 hsw_wait_for_power_well_disable(dev_priv, power_well);
531}
532
533#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
534
535static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
536
537static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
538 struct i915_power_well *power_well)
539{
540 int refs = hweight64(power_well->desc->domains &
541 async_put_domains_mask(&dev_priv->power_domains));
542
543 drm_WARN_ON(&dev_priv->drm, refs > power_well->count);
544
545 return refs;
546}
547
548static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
549 struct i915_power_well *power_well,
550 struct intel_digital_port *dig_port)
551{
552 /* Bypass the check if all references are released asynchronously */
553 if (power_well_async_ref_count(dev_priv, power_well) ==
554 power_well->count)
555 return;
556
557 if (drm_WARN_ON(&dev_priv->drm, !dig_port))
558 return;
559
560 if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port)
561 return;
562
563 drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
564}
565
566#else
567
568static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
569 struct i915_power_well *power_well,
570 struct intel_digital_port *dig_port)
571{
572}
573
574#endif
575
576#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
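
/*
 * Worked example (editorial note, assuming the TGL AUX TC power well
 * indices are consecutive): TGL_AUX_PW_TO_TC_PORT(TGL_PW_CTL_IDX_AUX_TC3)
 * evaluates to 2, i.e. the zero-based index of the third TypeC port.
 */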
577
578static void icl_tc_cold_exit(struct drm_i915_private *i915)
579{
580 int ret, tries = 0;
581
582 while (1) {
583 ret = sandybridge_pcode_write_timeout(i915,
584 ICL_PCODE_EXIT_TCCOLD,
585 0, 250, 1);
586 if (ret != -EAGAIN || ++tries == 3)
587 break;
588 msleep(1);
589 }
590
591 /* Spec states that TC cold exit can take up to 1ms to complete */
592 if (!ret)
593 msleep(1);
594
595 /* TODO: turn failure into an error as soon as i915 CI updates ICL IFWI */
596 drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
597 "succeeded");
598}
599
600static void
601icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
602 struct i915_power_well *power_well)
603{
604 enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
605 struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
606 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
607 bool is_tbt = power_well->desc->hsw.is_tc_tbt;
608 bool timeout_expected;
609 u32 val;
610
611 icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
612
613 val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
614 val &= ~DP_AUX_CH_CTL_TBT_IO;
615 if (is_tbt)
616 val |= DP_AUX_CH_CTL_TBT_IO;
617 intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
618
619 val = intel_de_read(dev_priv, regs->driver);
620 intel_de_write(dev_priv, regs->driver,
621 val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx));
622
623 /*
624 * An AUX timeout is expected if the TBT DP tunnel is down, or if we
625 * need to enable AUX on a legacy TypeC port as part of the TC-cold
626 * exit sequence.
627 */
628 timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
629 if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port)
630 icl_tc_cold_exit(dev_priv);
631
632 hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
633
634 if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
635 enum tc_port tc_port;
636
637 tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
638 intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
639 HIP_INDEX_VAL(tc_port, 0x2));
640
641 if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
642 DKL_CMN_UC_DW27_UC_HEALTH, 1))
643 drm_warn(&dev_priv->drm,
644 "Timeout waiting TC uC health\n");
645 }
646}
647
648static void
649icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
650 struct i915_power_well *power_well)
651{
652 enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
653 struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
654
655 icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
656
657 hsw_power_well_disable(dev_priv, power_well);
658}
659
660static void
661icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
662 struct i915_power_well *power_well)
663{
664 enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
665
666 if (intel_phy_is_tc(dev_priv, phy))
667 return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
668 else if (IS_ICELAKE(dev_priv))
669 return icl_combo_phy_aux_power_well_enable(dev_priv,
670 power_well);
671 else
672 return hsw_power_well_enable(dev_priv, power_well);
673}
674
675static void
676icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
677 struct i915_power_well *power_well)
678{
679 enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
680
681 if (intel_phy_is_tc(dev_priv, phy))
682 return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
683 else if (IS_ICELAKE(dev_priv))
684 return icl_combo_phy_aux_power_well_disable(dev_priv,
685 power_well);
686 else
687 return hsw_power_well_disable(dev_priv, power_well);
688}
689
690/*
691 * We should only use the power well if we explicitly asked the hardware to
692 * enable it, so check if it's enabled and also check if we've requested it to
693 * be enabled.
694 */
695static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
696 struct i915_power_well *power_well)
697{
698 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
699 enum i915_power_well_id id = power_well->desc->id;
700 int pw_idx = power_well->desc->hsw.idx;
701 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
702 HSW_PWR_WELL_CTL_STATE(pw_idx);
703 u32 val;
704
705 val = intel_de_read(dev_priv, regs->driver);
706
707 /*
708 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
709 * and the MISC_IO PW will not be restored, so check instead for the
710 * BIOS's own request bits, which are forced-on for these power wells
711 * when exiting DC5/6.
712 */
713 if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
714 (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
715 val |= intel_de_read(dev_priv, regs->bios);
716
717 return (val & mask) == mask;
718}
719
720static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
721{
722 drm_WARN_ONCE(&dev_priv->drm,
723 (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
724 "DC9 already programmed to be enabled.\n");
725 drm_WARN_ONCE(&dev_priv->drm,
726 intel_de_read(dev_priv, DC_STATE_EN) &
727 DC_STATE_EN_UPTO_DC5,
728 "DC5 still not disabled to enable DC9.\n");
729 drm_WARN_ONCE(&dev_priv->drm,
730 intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
731 HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
732 "Power well 2 on.\n");
733 drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
734 "Interrupts not disabled yet.\n");
735
736 /*
737 * TODO: check for the following to verify the conditions to enter DC9
738 * state are satisfied:
739 * 1] Check relevant display engine registers to verify if mode set
740 * disable sequence was followed.
741 * 2] Check if display uninitialize sequence is initialized.
742 */
743}
744
745static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
746{
747 drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
748 "Interrupts not disabled yet.\n");
749 drm_WARN_ONCE(&dev_priv->drm,
750 intel_de_read(dev_priv, DC_STATE_EN) &
751 DC_STATE_EN_UPTO_DC5,
752 "DC5 still not disabled.\n");
753
754 /*
755 * TODO: check for the following to verify DC9 state was indeed
756 * entered before programming to disable it:
757 * 1] Check relevant display engine registers to verify if mode
758 * set disable sequence was followed.
759 * 2] Check if display uninitialize sequence is initialized.
760 */
761}
762
763static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
764 u32 state)
765{
766 int rewrites = 0;
767 int rereads = 0;
768 u32 v;
769
770 intel_de_write(dev_priv, DC_STATE_EN, state);
771
772 /* It has been observed that disabling the dc6 state sometimes
773 * doesn't stick and the DMC keeps returning the old value. Make sure
774 * the write really sticks enough times and also force rewrite until
775 * we are confident that state is exactly what we want.
776 */
777 do {
778 v = intel_de_read(dev_priv, DC_STATE_EN);
779
780 if (v != state) {
781 intel_de_write(dev_priv, DC_STATE_EN, state);
782 rewrites++;
783 rereads = 0;
784 } else if (rereads++ > 5) {
785 break;
786 }
787
788 } while (rewrites < 100);
789
790 if (v != state)
791 drm_err(&dev_priv->drm,
792 "Writing dc state to 0x%x failed, now 0x%x\n",
793 state, v);
794
795 /* Most of the time we need just one retry, avoid spam */
796 if (rewrites > 1)
797 drm_dbg_kms(&dev_priv->drm,
798 "Rewrote dc state to 0x%x %d times\n",
799 state, rewrites);
800}
801
802static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
803{
804 u32 mask;
805
806 mask = DC_STATE_EN_UPTO_DC5;
807
808 if (DISPLAY_VER(dev_priv) >= 12)
809 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
810 | DC_STATE_EN_DC9;
811 else if (DISPLAY_VER(dev_priv) == 11)
812 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
813 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
814 mask |= DC_STATE_EN_DC9;
815 else
816 mask |= DC_STATE_EN_UPTO_DC6;
817
818 return mask;
819}
820
821static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
822{
823 u32 val;
824
825 if (!HAS_DISPLAY(dev_priv))
826 return;
827
828 val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
829
830 drm_dbg_kms(&dev_priv->drm,
831 "Resetting DC state tracking from %02x to %02x\n",
832 dev_priv->dmc.dc_state, val);
833 dev_priv->dmc.dc_state = val;
834}
835
836/**
837 * gen9_set_dc_state - set target display C power state
838 * @dev_priv: i915 device instance
839 * @state: target DC power state
840 * - DC_STATE_DISABLE
841 * - DC_STATE_EN_UPTO_DC5
842 * - DC_STATE_EN_UPTO_DC6
843 * - DC_STATE_EN_DC9
844 *
845 * Signal to DMC firmware/HW the target DC power state passed in @state.
846 * DMC/HW can turn off individual display clocks and power rails when entering
847 * a deeper DC power state (higher in number) and turns them back on when exiting
848 * that state to a shallower power state (lower in number). The HW will decide
849 * when to actually enter a given state on an on-demand basis, for instance
850 * depending on the active state of display pipes. The state of display
851 * registers backed by affected power rails are saved/restored as needed.
852 *
853 * Based on the above, enabling a deeper DC power state is asynchronous wrt.
854 * the HW actually entering it. Disabling a deeper power state is synchronous:
855 * for instance setting %DC_STATE_DISABLE won't complete until all HW resources
856 * are turned back on and register state is restored. This is guaranteed by the
857 * MMIO write to DC_STATE_EN blocking until the state is restored.
858 */
859static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
860{
861 u32 val;
862 u32 mask;
863
864 if (!HAS_DISPLAY(dev_priv))
865 return;
866
867 if (drm_WARN_ON_ONCE(&dev_priv->drm,
868 state & ~dev_priv->dmc.allowed_dc_mask))
869 state &= dev_priv->dmc.allowed_dc_mask;
870
871 val = intel_de_read(dev_priv, DC_STATE_EN);
872 mask = gen9_dc_mask(dev_priv);
873 drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
874 val & mask, state);
875
876 /* Check if DMC is ignoring our DC state requests */
877 if ((val & mask) != dev_priv->dmc.dc_state)
878 drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
879 dev_priv->dmc.dc_state, val & mask);
880
881 val &= ~mask;
882 val |= state;
883
884 gen9_write_dc_state(dev_priv, val);
885
886 dev_priv->dmc.dc_state = val & mask;
887}
888
889static u32
890sanitize_target_dc_state(struct drm_i915_private *dev_priv,
891 u32 target_dc_state)
892{
893 u32 states[] = {
894 DC_STATE_EN_UPTO_DC6,
895 DC_STATE_EN_UPTO_DC5,
896 DC_STATE_EN_DC3CO,
897 DC_STATE_DISABLE,
898 };
899 int i;
900
901 for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
902 if (target_dc_state != states[i])
903 continue;
904
905 if (dev_priv->dmc.allowed_dc_mask & target_dc_state)
906 break;
907
908 target_dc_state = states[i + 1];
909 }
910
911 return target_dc_state;
912}
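
/*
 * Worked example (editorial note): if allowed_dc_mask contains only
 * DC_STATE_EN_UPTO_DC5, a request for DC_STATE_EN_UPTO_DC6 is demoted one
 * step by the loop above and DC_STATE_EN_UPTO_DC5 is returned; if none of
 * the listed states is allowed, the demotion continues down to
 * DC_STATE_DISABLE.
 */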
913
914static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
915{
916 drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
917 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
918}
919
920static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
921{
922 u32 val;
923
924 drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
925 val = intel_de_read(dev_priv, DC_STATE_EN);
926 val &= ~DC_STATE_DC3CO_STATUS;
927 intel_de_write(dev_priv, DC_STATE_EN, val);
928 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
929 /*
930 * Delay of 200us for DC3CO exit time, Bspec 49196
931 */
932 usleep_range(200, 210);
933}
934
935static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
936{
937 assert_can_enable_dc9(dev_priv);
938
939 drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
940 /*
941 * Power sequencer reset is not needed on
942 * platforms with South Display Engine on PCH,
943 * because PPS registers are always on.
944 */
945 if (!HAS_PCH_SPLIT(dev_priv))
946 intel_pps_reset_all(dev_priv);
947 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
948}
949
950static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
951{
952 assert_can_disable_dc9(dev_priv);
953
954 drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
955
956 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
957
958 intel_pps_unlock_regs_wa(dev_priv);
959}
960
961static void assert_dmc_loaded(struct drm_i915_private *dev_priv)
962{
963 drm_WARN_ONCE(&dev_priv->drm,
964 !intel_de_read(dev_priv, DMC_PROGRAM(0)),
965 "DMC program storage start is NULL\n");
966 drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_SSP_BASE),
967 "DMC SSP Base Not fine\n");
968 drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_HTP_SKL),
969 "DMC HTP Not fine\n");
970}
971
972static struct i915_power_well *
973lookup_power_well(struct drm_i915_private *dev_priv,
974 enum i915_power_well_id power_well_id)
975{
976 struct i915_power_well *power_well;
977
978 for_each_power_well(dev_priv, power_well)
979 if (power_well->desc->id == power_well_id)
980 return power_well;
981
982 /*
983 * It's not feasible to add error checking code to the callers since
984 * this condition really shouldn't happen and it doesn't even make sense
985 * to abort things like display initialization sequences. Just return
986 * the first power well and hope the WARN gets reported so we can fix
987 * our driver.
988 */
989 drm_WARN(&dev_priv->drm, 1,
990 "Power well %d not defined for this platform\n",
991 power_well_id);
992 return &dev_priv->power_domains.power_wells[0];
993}
994
995/**
996 * intel_display_power_set_target_dc_state - Set target dc state.
997 * @dev_priv: i915 device
998 * @state: state which needs to be set as target_dc_state.
999 *
1000 * This function sets the "DC off" power well's target_dc_state;
1001 * based on this target_dc_state, the "DC off" power well will
1002 * enable the desired DC state.
1003 */
1004void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
1005 u32 state)
1006{
1007 struct i915_power_well *power_well;
1008 bool dc_off_enabled;
1009 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1010
1011 mutex_lock(&power_domains->lock);
1012 power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
1013
1014 if (drm_WARN_ON(&dev_priv->drm, !power_well))
1015 goto unlock;
1016
1017 state = sanitize_target_dc_state(dev_priv, state);
1018
1019 if (state == dev_priv->dmc.target_dc_state)
1020 goto unlock;
1021
1022 dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
1023 power_well);
1024 /*
1025 * If the DC off power well is disabled, we need to enable and then
1026 * disable it again for the new target DC state to take effect.
1027 */
1028 if (!dc_off_enabled)
1029 power_well->desc->ops->enable(dev_priv, power_well);
1030
1031 dev_priv->dmc.target_dc_state = state;
1032
1033 if (!dc_off_enabled)
1034 power_well->desc->ops->disable(dev_priv, power_well);
1035
1036unlock:
1037 mutex_unlock(&power_domains->lock);
1038}
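
/*
 * Illustrative usage sketch (editorial note, hypothetical caller): a feature
 * that can tolerate DC3CO but not the deeper DC5/DC6 states could request
 *
 *	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
 *
 * and later restore its previous target. The requested value is passed
 * through sanitize_target_dc_state() against dmc.allowed_dc_mask before it
 * is applied.
 */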
1039
1040static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
1041{
1042 enum i915_power_well_id high_pg;
1043
1044 /* Power wells at this level and above must be disabled for DC5 entry */
1045 if (DISPLAY_VER(dev_priv) == 12)
1046 high_pg = ICL_DISP_PW_3;
1047 else
1048 high_pg = SKL_DISP_PW_2;
1049
1050 drm_WARN_ONCE(&dev_priv->drm,
1051 intel_display_power_well_is_enabled(dev_priv, high_pg),
1052 "Power wells above platform's DC5 limit still enabled.\n");
1053
1054 drm_WARN_ONCE(&dev_priv->drm,
1055 (intel_de_read(dev_priv, DC_STATE_EN) &
1056 DC_STATE_EN_UPTO_DC5),
1057 "DC5 already programmed to be enabled.\n");
1058 assert_rpm_wakelock_held(&dev_priv->runtime_pm);
1059
1060 assert_dmc_loaded(dev_priv);
1061}
1062
1063static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
1064{
1065 assert_can_enable_dc5(dev_priv);
1066
1067 drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
1068
1069 /* Wa Display #1183: skl,kbl,cfl */
1070 if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
1071 intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
1072 intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
1073
1074 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
1075}
1076
1077static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
1078{
1079 drm_WARN_ONCE(&dev_priv->drm,
1080 intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
1081 "Backlight is not disabled.\n");
1082 drm_WARN_ONCE(&dev_priv->drm,
1083 (intel_de_read(dev_priv, DC_STATE_EN) &
1084 DC_STATE_EN_UPTO_DC6),
1085 "DC6 already programmed to be enabled.\n");
1086
1087 assert_dmc_loaded(dev_priv);
1088}
1089
1090static void skl_enable_dc6(struct drm_i915_private *dev_priv)
1091{
1092 assert_can_enable_dc6(dev_priv);
1093
1094 drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
1095
1096 /* Wa Display #1183: skl,kbl,cfl */
1097 if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
1098 intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
1099 intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
1100
1101 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1102}
1103
1104static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
1105 struct i915_power_well *power_well)
1106{
1107 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
1108 int pw_idx = power_well->desc->hsw.idx;
1109 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
1110 u32 bios_req = intel_de_read(dev_priv, regs->bios);
1111
1112 /* Take over the request bit if set by BIOS. */
1113 if (bios_req & mask) {
1114 u32 drv_req = intel_de_read(dev_priv, regs->driver);
1115
1116 if (!(drv_req & mask))
1117 intel_de_write(dev_priv, regs->driver, drv_req | mask);
1118 intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
1119 }
1120}
1121
1122static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1123 struct i915_power_well *power_well)
1124{
1125 bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
1126}
1127
1128static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1129 struct i915_power_well *power_well)
1130{
1131 bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
1132}
1133
1134static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
1135 struct i915_power_well *power_well)
1136{
1137 return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
1138}
1139
1140static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
1141{
1142 struct i915_power_well *power_well;
1143
1144 power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
1145 if (power_well->count > 0)
1146 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1147
1148 power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1149 if (power_well->count > 0)
1150 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1151
1152 if (IS_GEMINILAKE(dev_priv)) {
1153 power_well = lookup_power_well(dev_priv,
1154 GLK_DISP_PW_DPIO_CMN_C);
1155 if (power_well->count > 0)
1156 bxt_ddi_phy_verify_state(dev_priv,
1157 power_well->desc->bxt.phy);
1158 }
1159}
1160
1161static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
1162 struct i915_power_well *power_well)
1163{
1164 return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
1165 (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
1166}
1167
1168static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
1169{
1170 u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
1171 u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;
1172
1173 drm_WARN(&dev_priv->drm,
1174 hw_enabled_dbuf_slices != enabled_dbuf_slices,
1175 "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
1176 hw_enabled_dbuf_slices,
1177 enabled_dbuf_slices);
1178}
1179
1180static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
1181{
1182 struct intel_cdclk_config cdclk_config = {};
1183
1184 if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) {
1185 tgl_disable_dc3co(dev_priv);
1186 return;
1187 }
1188
1189 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1190
1191 if (!HAS_DISPLAY(dev_priv))
1192 return;
1193
1194 dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
1195 /* Can't read out voltage_level so can't use intel_cdclk_changed() */
1196 drm_WARN_ON(&dev_priv->drm,
1197 intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
1198 &cdclk_config));
1199
1200 gen9_assert_dbuf_enabled(dev_priv);
1201
1202 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
1203 bxt_verify_ddi_phy_power_wells(dev_priv);
1204
1205 if (DISPLAY_VER(dev_priv) >= 11)
1206 /*
1207 * DMC retains HW context only for port A, the other combo
1208 * PHY's HW context for port B is lost after DC transitions,
1209 * so we need to restore it manually.
1210 */
1211 intel_combo_phy_init(dev_priv);
1212}
1213
1214static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
1215 struct i915_power_well *power_well)
1216{
1217 gen9_disable_dc_states(dev_priv);
1218}
1219
1220static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
1221 struct i915_power_well *power_well)
1222{
1223 if (!intel_dmc_has_payload(dev_priv))
1224 return;
1225
1226 switch (dev_priv->dmc.target_dc_state) {
1227 case DC_STATE_EN_DC3CO:
1228 tgl_enable_dc3co(dev_priv);
1229 break;
1230 case DC_STATE_EN_UPTO_DC6:
1231 skl_enable_dc6(dev_priv);
1232 break;
1233 case DC_STATE_EN_UPTO_DC5:
1234 gen9_enable_dc5(dev_priv);
1235 break;
1236 }
1237}
1238
1239static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
1240 struct i915_power_well *power_well)
1241{
1242}
1243
1244static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
1245 struct i915_power_well *power_well)
1246{
1247}
1248
1249static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
1250 struct i915_power_well *power_well)
1251{
1252 return true;
1253}
1254
1255static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
1256 struct i915_power_well *power_well)
1257{
1258 if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
1259 i830_enable_pipe(dev_priv, PIPE_A);
1260 if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
1261 i830_enable_pipe(dev_priv, PIPE_B);
1262}
1263
1264static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
1265 struct i915_power_well *power_well)
1266{
1267 i830_disable_pipe(dev_priv, PIPE_B);
1268 i830_disable_pipe(dev_priv, PIPE_A);
1269}
1270
1271static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1272 struct i915_power_well *power_well)
1273{
1274 return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1275 intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
1276}
1277
1278static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
1279 struct i915_power_well *power_well)
1280{
1281 if (power_well->count > 0)
1282 i830_pipes_power_well_enable(dev_priv, power_well);
1283 else
1284 i830_pipes_power_well_disable(dev_priv, power_well);
1285}
1286
1287static void vlv_set_power_well(struct drm_i915_private *dev_priv,
1288 struct i915_power_well *power_well, bool enable)
1289{
1290 int pw_idx = power_well->desc->vlv.idx;
1291 u32 mask;
1292 u32 state;
1293 u32 ctrl;
1294
1295 mask = PUNIT_PWRGT_MASK(pw_idx);
1296 state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
1297 PUNIT_PWRGT_PWR_GATE(pw_idx);
1298
1299 vlv_punit_get(dev_priv);
1300
1301#define COND \
1302 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
1303
1304 if (COND)
1305 goto out;
1306
1307 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
1308 ctrl &= ~mask;
1309 ctrl |= state;
1310 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
1311
1312 if (wait_for(COND, 100))
1313 drm_err(&dev_priv->drm,
1314 "timeout setting power well state %08x (%08x)\n",
1315 state,
1316 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
1317
1318#undef COND
1319
1320out:
1321 vlv_punit_put(dev_priv);
1322}
1323
1324static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
1325 struct i915_power_well *power_well)
1326{
1327 vlv_set_power_well(dev_priv, power_well, true);
1328}
1329
1330static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
1331 struct i915_power_well *power_well)
1332{
1333 vlv_set_power_well(dev_priv, power_well, false);
1334}
1335
1336static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
1337 struct i915_power_well *power_well)
1338{
1339 int pw_idx = power_well->desc->vlv.idx;
1340 bool enabled = false;
1341 u32 mask;
1342 u32 state;
1343 u32 ctrl;
1344
1345 mask = PUNIT_PWRGT_MASK(pw_idx);
1346 ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
1347
1348 vlv_punit_get(dev_priv);
1349
1350 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
1351 /*
1352 * We only ever set the power-on and power-gate states, anything
1353 * else is unexpected.
1354 */
1355 drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
1356 state != PUNIT_PWRGT_PWR_GATE(pw_idx));
1357 if (state == ctrl)
1358 enabled = true;
1359
1360 /*
1361 * A transient state at this point would mean some unexpected party
1362 * is poking at the power controls too.
1363 */
1364 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
1365 drm_WARN_ON(&dev_priv->drm, ctrl != state);
1366
1367 vlv_punit_put(dev_priv);
1368
1369 return enabled;
1370}
1371
1372static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
1373{
1374 u32 val;
1375
1376 /*
1377 * On driver load, a pipe may be active and driving a DSI display.
1378 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1379 * (and never recovering) in this case. intel_dsi_post_disable() will
1380 * clear it when we turn off the display.
1381 */
1382 val = intel_de_read(dev_priv, DSPCLK_GATE_D);
1383 val &= DPOUNIT_CLOCK_GATE_DISABLE;
1384 val |= VRHUNIT_CLOCK_GATE_DISABLE;
1385 intel_de_write(dev_priv, DSPCLK_GATE_D, val);
1386
1387 /*
1388 * Disable trickle feed and enable pnd deadline calculation
1389 */
1390 intel_de_write(dev_priv, MI_ARB_VLV,
1391 MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1392 intel_de_write(dev_priv, CBR1_VLV, 0);
1393
1394 drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
1395 intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
1396 DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
1397 1000));
1398}
1399
1400static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1401{
1402 struct intel_encoder *encoder;
1403 enum pipe pipe;
1404
1405 /*
1406 * Enable the CRI clock source so we can get at the
1407 * display and the reference clock for VGA
1408 * hotplug / manual detection. Supposedly DSI also
1409 * needs the ref clock up and running.
1410 *
1411 * CHV DPLL B/C have some issues if VGA mode is enabled.
1412 */
1413 for_each_pipe(dev_priv, pipe) {
1414 u32 val = intel_de_read(dev_priv, DPLL(pipe));
1415
1416 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1417 if (pipe != PIPE_A)
1418 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1419
1420 intel_de_write(dev_priv, DPLL(pipe), val);
1421 }
1422
1423 vlv_init_display_clock_gating(dev_priv);
1424
1425 spin_lock_irq(&dev_priv->irq_lock);
1426 valleyview_enable_display_irqs(dev_priv);
1427 spin_unlock_irq(&dev_priv->irq_lock);
1428
1429 /*
1430 * During driver initialization/resume we can avoid restoring the
1431 * part of the HW/SW state that will be inited anyway explicitly.
1432 */
1433 if (dev_priv->power_domains.initializing)
1434 return;
1435
1436 intel_hpd_init(dev_priv);
1437 intel_hpd_poll_disable(dev_priv);
1438
1439 /* Re-enable the ADPA, if we have one */
1440 for_each_intel_encoder(&dev_priv->drm, encoder) {
1441 if (encoder->type == INTEL_OUTPUT_ANALOG)
1442 intel_crt_reset(&encoder->base);
1443 }
1444
1445 intel_vga_redisable_power_on(dev_priv);
1446
1447 intel_pps_unlock_regs_wa(dev_priv);
1448}
1449
1450static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1451{
1452 spin_lock_irq(&dev_priv->irq_lock);
1453 valleyview_disable_display_irqs(dev_priv);
1454 spin_unlock_irq(&dev_priv->irq_lock);
1455
1456 /* make sure we're done processing display irqs */
1457 intel_synchronize_irq(dev_priv);
1458
1459 intel_pps_reset_all(dev_priv);
1460
1461 /* Prevent us from re-enabling polling by accident in late suspend */
1462 if (!dev_priv->drm.dev->power.is_suspended)
1463 intel_hpd_poll_enable(dev_priv);
1464}
1465
1466static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1467 struct i915_power_well *power_well)
1468{
1469 vlv_set_power_well(dev_priv, power_well, true);
1470
1471 vlv_display_power_well_init(dev_priv);
1472}
1473
1474static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1475 struct i915_power_well *power_well)
1476{
1477 vlv_display_power_well_deinit(dev_priv);
1478
1479 vlv_set_power_well(dev_priv, power_well, false);
1480}
1481
1482static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1483 struct i915_power_well *power_well)
1484{
1485 /* since ref/cri clock was enabled */
1486 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1487
1488 vlv_set_power_well(dev_priv, power_well, true);
1489
1490 /*
1491 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1492 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
1493 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
1494 * b. The other bits such as sfr settings / modesel may all
1495 * be set to 0.
1496 *
1497 * This should only be done on init and resume from S3 with
1498 * both PLLs disabled, or we risk losing DPIO and PLL
1499 * synchronization.
1500 */
1501 intel_de_write(dev_priv, DPIO_CTL,
1502 intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
1503}
1504
1505static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1506 struct i915_power_well *power_well)
1507{
1508 enum pipe pipe;
1509
1510 for_each_pipe(dev_priv, pipe)
1511 assert_pll_disabled(dev_priv, pipe);
1512
1513 /* Assert common reset */
1514 intel_de_write(dev_priv, DPIO_CTL,
1515 intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
1516
1517 vlv_set_power_well(dev_priv, power_well, false);
1518}
1519
1520#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1521
1522#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1523
1524static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1525{
1526 struct i915_power_well *cmn_bc =
1527 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1528 struct i915_power_well *cmn_d =
1529 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1530 u32 phy_control = dev_priv->chv_phy_control;
1531 u32 phy_status = 0;
1532 u32 phy_status_mask = 0xffffffff;
1533
1534 /*
1535 * The BIOS can leave the PHY in some weird state
1536 * where it doesn't fully power down some parts.
1537 * Disable the asserts until the PHY has been fully
1538 * reset (ie. the power well has been disabled at
1539 * least once).
1540 */
1541 if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1542 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1543 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1544 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1545 PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1546 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1547 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1548
1549 if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1550 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1551 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1552 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1553
1554 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
1555 phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1556
1557 /* this assumes override is only used to enable lanes */
1558 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1559 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1560
1561 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1562 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1563
1564 /* CL1 is on whenever anything is on in either channel */
1565 if (BITS_SET(phy_control,
1566 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1567 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1568 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1569
1570 /*
1571 * The DPLLB check accounts for the pipe B + port A usage
1572 * with CL2 powered up but all the lanes in the second channel
1573 * powered down.
1574 */
1575 if (BITS_SET(phy_control,
1576 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1577 (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1578 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
1579
1580 if (BITS_SET(phy_control,
1581 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1582 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1583 if (BITS_SET(phy_control,
1584 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1585 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1586
1587 if (BITS_SET(phy_control,
1588 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1589 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1590 if (BITS_SET(phy_control,
1591 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1592 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1593 }
1594
1595 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
1596 phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1597
1598 /* this assumes override is only used to enable lanes */
1599 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1600 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1601
1602 if (BITS_SET(phy_control,
1603 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1604 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1605
1606 if (BITS_SET(phy_control,
1607 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1608 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1609 if (BITS_SET(phy_control,
1610 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1611 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1612 }
1613
1614 phy_status &= phy_status_mask;
1615
1616 /*
1617 * The PHY may be busy with some initial calibration and whatnot,
1618 * so the power state can take a while to actually change.
1619 */
1620 if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
1621 phy_status_mask, phy_status, 10))
1622 drm_err(&dev_priv->drm,
1623 "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1624 intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
1625 phy_status, dev_priv->chv_phy_control);
1626}
1627
1628#undef BITS_SET
1629
1630static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1631 struct i915_power_well *power_well)
1632{
1633 enum dpio_phy phy;
1634 enum pipe pipe;
1635 u32 tmp;
1636
1637 drm_WARN_ON_ONCE(&dev_priv->drm,
1638 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1639 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1640
1641 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1642 pipe = PIPE_A;
1643 phy = DPIO_PHY0;
1644 } else {
1645 pipe = PIPE_C;
1646 phy = DPIO_PHY1;
1647 }
1648
1649 /* since ref/cri clock was enabled */
1650 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1651 vlv_set_power_well(dev_priv, power_well, true);
1652
1653 /* Poll for phypwrgood signal */
1654 if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
1655 PHY_POWERGOOD(phy), 1))
1656 drm_err(&dev_priv->drm, "Display PHY %d is not power up\n",
1657 phy);
1658
1659 vlv_dpio_get(dev_priv);
1660
1661 /* Enable dynamic power down */
1662 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1663 tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1664 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1665 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1666
1667 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1668 tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1669 tmp |= DPIO_DYNPWRDOWNEN_CH1;
1670 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1671 } else {
1672 /*
1673 * Force the non-existing CL2 off. BXT does this
1674 * too, so maybe it saves some power even though
1675 * CL2 doesn't exist?
1676 */
1677 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1678 tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1679 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1680 }
1681
1682 vlv_dpio_put(dev_priv);
1683
1684 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1685 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1686 dev_priv->chv_phy_control);
1687
1688 drm_dbg_kms(&dev_priv->drm,
1689 "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1690 phy, dev_priv->chv_phy_control);
1691
1692 assert_chv_phy_status(dev_priv);
1693}
1694
1695static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1696 struct i915_power_well *power_well)
1697{
1698 enum dpio_phy phy;
1699
1700 drm_WARN_ON_ONCE(&dev_priv->drm,
1701 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1702 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1703
1704 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1705 phy = DPIO_PHY0;
1706 assert_pll_disabled(dev_priv, PIPE_A);
1707 assert_pll_disabled(dev_priv, PIPE_B);
1708 } else {
1709 phy = DPIO_PHY1;
1710 assert_pll_disabled(dev_priv, PIPE_C);
1711 }
1712
1713 dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1714 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1715 dev_priv->chv_phy_control);
1716
1717 vlv_set_power_well(dev_priv, power_well, false);
1718
1719 drm_dbg_kms(&dev_priv->drm,
1720 "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1721 phy, dev_priv->chv_phy_control);
1722
1723 /* PHY is fully reset now, so we can enable the PHY state asserts */
1724 dev_priv->chv_phy_assert[phy] = true;
1725
1726 assert_chv_phy_status(dev_priv);
1727}
1728
1729static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1730 enum dpio_channel ch, bool override, unsigned int mask)
1731{
1732 enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1733 u32 reg, val, expected, actual;
1734
1735 /*
1736 * The BIOS can leave the PHY in some weird state
1737 * where it doesn't fully power down some parts.
1738 * Disable the asserts until the PHY has been fully
1739 * reset (ie. the power well has been disabled at
1740 * least once).
1741 */
1742 if (!dev_priv->chv_phy_assert[phy])
1743 return;
1744
1745 if (ch == DPIO_CH0)
1746 reg = _CHV_CMN_DW0_CH0;
1747 else
1748 reg = _CHV_CMN_DW6_CH1;
1749
1750 vlv_dpio_get(dev_priv);
1751 val = vlv_dpio_read(dev_priv, pipe, reg);
1752 vlv_dpio_put(dev_priv);
1753
1754 /*
1755 * This assumes !override is only used when the port is disabled.
1756 * All lanes should power down even without the override when
1757 * the port is disabled.
1758 */
1759 if (!override || mask == 0xf) {
1760 expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1761 /*
1762 * If CH1 common lane is not active anymore
1763 * (eg. for pipe B DPLL) the entire channel will
1764 * shut down, which causes the common lane registers
1765 * to read as 0. That means we can't actually check
1766 * the lane power down status bits, but as the entire
1767 * register reads as 0 it's a good indication that the
1768 * channel is indeed entirely powered down.
1769 */
1770 if (ch == DPIO_CH1 && val == 0)
1771 expected = 0;
1772 } else if (mask != 0x0) {
1773 expected = DPIO_ANYDL_POWERDOWN;
1774 } else {
1775 expected = 0;
1776 }
1777
1778 if (ch == DPIO_CH0)
1779 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1780 else
1781 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1782 actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1783
1784 drm_WARN(&dev_priv->drm, actual != expected,
1785 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1786 !!(actual & DPIO_ALLDL_POWERDOWN),
1787 !!(actual & DPIO_ANYDL_POWERDOWN),
1788 !!(expected & DPIO_ALLDL_POWERDOWN),
1789 !!(expected & DPIO_ANYDL_POWERDOWN),
1790 reg, val);
1791}
1792
1793bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1794 enum dpio_channel ch, bool override)
1795{
1796 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1797 bool was_override;
1798
1799 mutex_lock(&power_domains->lock);
1800
1801 was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1802
1803 if (override == was_override)
1804 goto out;
1805
1806 if (override)
1807 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1808 else
1809 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1810
1811 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1812 dev_priv->chv_phy_control);
1813
1814 drm_dbg_kms(&dev_priv->drm,
1815 "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1816 phy, ch, dev_priv->chv_phy_control);
1817
1818 assert_chv_phy_status(dev_priv);
1819
1820out:
1821 mutex_unlock(&power_domains->lock);
1822
1823 return was_override;
1824}
1825
1826void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1827 bool override, unsigned int mask)
1828{
1829 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1830 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1831 enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
1832 enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
1833
1834 mutex_lock(&power_domains->lock);
1835
1836 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1837 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1838
1839 if (override)
1840 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1841 else
1842 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1843
1844 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1845 dev_priv->chv_phy_control);
1846
1847 drm_dbg_kms(&dev_priv->drm,
1848 "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1849 phy, ch, mask, dev_priv->chv_phy_control);
1850
1851 assert_chv_phy_status(dev_priv);
1852
1853 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1854
1855 mutex_unlock(&power_domains->lock);
1856}
1857
1858static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1859 struct i915_power_well *power_well)
1860{
1861 enum pipe pipe = PIPE_A;
1862 bool enabled;
1863 u32 state, ctrl;
1864
1865 vlv_punit_get(dev_priv);
1866
1867 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
1868 /*
1869 	 * We only ever set the power-on and power-gate states; anything
1870 	 * else is unexpected.
1871 */
1872 drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
1873 state != DP_SSS_PWR_GATE(pipe));
1874 enabled = state == DP_SSS_PWR_ON(pipe);
1875
1876 /*
1877 * A transient state at this point would mean some unexpected party
1878 * is poking at the power controls too.
1879 */
1880 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
1881 drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
1882
1883 vlv_punit_put(dev_priv);
1884
1885 return enabled;
1886}
1887
1888static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1889 struct i915_power_well *power_well,
1890 bool enable)
1891{
1892 enum pipe pipe = PIPE_A;
1893 u32 state;
1894 u32 ctrl;
1895
1896 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1897
1898 vlv_punit_get(dev_priv);
1899
1900#define COND \
1901 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
1902
1903 if (COND)
1904 goto out;
1905
1906 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
1907 ctrl &= ~DP_SSC_MASK(pipe);
1908 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1909 vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
1910
1911 if (wait_for(COND, 100))
1912 drm_err(&dev_priv->drm,
1913 "timeout setting power well state %08x (%08x)\n",
1914 state,
1915 vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
1916
1917#undef COND
1918
1919out:
1920 vlv_punit_put(dev_priv);
1921}
1922
1923static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
1924 struct i915_power_well *power_well)
1925{
1926 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1927 dev_priv->chv_phy_control);
1928}
1929
1930static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1931 struct i915_power_well *power_well)
1932{
1933 chv_set_pipe_power_well(dev_priv, power_well, true);
1934
1935 vlv_display_power_well_init(dev_priv);
1936}
1937
1938static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1939 struct i915_power_well *power_well)
1940{
1941 vlv_display_power_well_deinit(dev_priv);
1942
1943 chv_set_pipe_power_well(dev_priv, power_well, false);
1944}
1945
1946static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
1947{
1948 return power_domains->async_put_domains[0] |
1949 power_domains->async_put_domains[1];
1950}
1951
1952#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1953
1954static bool
1955assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1956{
1957 struct drm_i915_private *i915 = container_of(power_domains,
1958 struct drm_i915_private,
1959 power_domains);
1960 return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] &
1961 power_domains->async_put_domains[1]);
1962}
1963
1964static bool
1965__async_put_domains_state_ok(struct i915_power_domains *power_domains)
1966{
1967 struct drm_i915_private *i915 = container_of(power_domains,
1968 struct drm_i915_private,
1969 power_domains);
1970 enum intel_display_power_domain domain;
1971 bool err = false;
1972
1973 err |= !assert_async_put_domain_masks_disjoint(power_domains);
1974 err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref !=
1975 !!__async_put_domains_mask(power_domains));
1976
1977 for_each_power_domain(domain, __async_put_domains_mask(power_domains))
1978 err |= drm_WARN_ON(&i915->drm,
1979 power_domains->domain_use_count[domain] != 1);
1980
1981 return !err;
1982}
1983
1984static void print_power_domains(struct i915_power_domains *power_domains,
1985 const char *prefix, u64 mask)
1986{
1987 struct drm_i915_private *i915 = container_of(power_domains,
1988 struct drm_i915_private,
1989 power_domains);
1990 enum intel_display_power_domain domain;
1991
1992 drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask));
1993 for_each_power_domain(domain, mask)
1994 drm_dbg(&i915->drm, "%s use_count %d\n",
1995 intel_display_power_domain_str(domain),
1996 power_domains->domain_use_count[domain]);
1997}
1998
1999static void
2000print_async_put_domains_state(struct i915_power_domains *power_domains)
2001{
2002 struct drm_i915_private *i915 = container_of(power_domains,
2003 struct drm_i915_private,
2004 power_domains);
2005
2006 drm_dbg(&i915->drm, "async_put_wakeref %u\n",
2007 power_domains->async_put_wakeref);
2008
2009 print_power_domains(power_domains, "async_put_domains[0]",
2010 power_domains->async_put_domains[0]);
2011 print_power_domains(power_domains, "async_put_domains[1]",
2012 power_domains->async_put_domains[1]);
2013}
2014
2015static void
2016verify_async_put_domains_state(struct i915_power_domains *power_domains)
2017{
2018 if (!__async_put_domains_state_ok(power_domains))
2019 print_async_put_domains_state(power_domains);
2020}
2021
2022#else
2023
2024static void
2025assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
2026{
2027}
2028
2029static void
2030verify_async_put_domains_state(struct i915_power_domains *power_domains)
2031{
2032}
2033
2034#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
2035
2036static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
2037{
2038 assert_async_put_domain_masks_disjoint(power_domains);
2039
2040 return __async_put_domains_mask(power_domains);
2041}
2042
2043static void
2044async_put_domains_clear_domain(struct i915_power_domains *power_domains,
2045 enum intel_display_power_domain domain)
2046{
2047 assert_async_put_domain_masks_disjoint(power_domains);
2048
2049 power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
2050 power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
2051}
2052
2053static bool
2054intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
2055 enum intel_display_power_domain domain)
2056{
2057 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2058 bool ret = false;
2059
2060 if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
2061 goto out_verify;
2062
2063 async_put_domains_clear_domain(power_domains, domain);
2064
2065 ret = true;
2066
2067 if (async_put_domains_mask(power_domains))
2068 goto out_verify;
2069
2070 cancel_delayed_work(&power_domains->async_put_work);
2071 intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
2072 fetch_and_zero(&power_domains->async_put_wakeref));
2073out_verify:
2074 verify_async_put_domains_state(power_domains);
2075
2076 return ret;
2077}
2078
2079static void
2080__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
2081 enum intel_display_power_domain domain)
2082{
2083 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2084 struct i915_power_well *power_well;
2085
2086 if (intel_display_power_grab_async_put_ref(dev_priv, domain))
2087 return;
2088
2089 for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
2090 intel_power_well_get(dev_priv, power_well);
2091
2092 power_domains->domain_use_count[domain]++;
2093}
2094
2095/**
2096 * intel_display_power_get - grab a power domain reference
2097 * @dev_priv: i915 device instance
2098 * @domain: power domain to reference
2099 *
2100 * This function grabs a power domain reference for @domain and ensures that the
2101 * power domain and all its parents are powered up. Therefore users should only
2102 * grab a reference to the innermost power domain they need.
2103 *
2104 * Any power domain reference obtained by this function must have a symmetric
2105 * call to intel_display_power_put() to release the reference again.
2106 */
2107intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
2108 enum intel_display_power_domain domain)
2109{
2110 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2111 intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2112
2113 mutex_lock(&power_domains->lock);
2114 __intel_display_power_get_domain(dev_priv, domain);
2115 mutex_unlock(&power_domains->lock);
2116
2117 return wakeref;
2118}
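
/*
 * Illustrative usage sketch (not a path taken in this file): a caller pairs
 * the get with a matching put using the returned wakeref, e.g.
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	... access PIPE_A hardware ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 */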
2119
2120/**
2121 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
2122 * @dev_priv: i915 device instance
2123 * @domain: power domain to reference
2124 *
2125  * This function grabs a power domain reference for @domain only if the device
2126  * is runtime resumed and the domain is already enabled; it does not power
2127  * anything up itself. Users should only grab the innermost domain they need.
2128  *
2129  * Returns a wakeref on success, or 0 if the domain was not enabled. A non-zero
2130  * wakeref must be released with a symmetric intel_display_power_put() call.
2131 */
2132intel_wakeref_t
2133intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
2134 enum intel_display_power_domain domain)
2135{
2136 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2137 intel_wakeref_t wakeref;
2138 bool is_enabled;
2139
2140 wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
2141 	if (!wakeref)
2142 		return 0;
2143
2144 mutex_lock(&power_domains->lock);
2145
2146 if (__intel_display_power_is_enabled(dev_priv, domain)) {
2147 __intel_display_power_get_domain(dev_priv, domain);
2148 is_enabled = true;
2149 } else {
2150 is_enabled = false;
2151 }
2152
2153 mutex_unlock(&power_domains->lock);
2154
2155 if (!is_enabled) {
2156 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2157 wakeref = 0;
2158 }
2159
2160 return wakeref;
2161}
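
/*
 * Illustrative usage sketch: the returned wakeref must be checked, since the
 * reference is only grabbed when the domain is already enabled, e.g.
 *
 *	wakeref = intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PIPE_A);
 *	if (!wakeref)
 *		return;
 *	... read out the current hardware state ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 */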
2162
2163static void
2164__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
2165 enum intel_display_power_domain domain)
2166{
2167 struct i915_power_domains *power_domains;
2168 struct i915_power_well *power_well;
2169 const char *name = intel_display_power_domain_str(domain);
2170
2171 power_domains = &dev_priv->power_domains;
2172
2173 drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
2174 "Use count on domain %s is already zero\n",
2175 name);
2176 drm_WARN(&dev_priv->drm,
2177 async_put_domains_mask(power_domains) & BIT_ULL(domain),
2178 "Async disabling of domain %s is pending\n",
2179 name);
2180
2181 power_domains->domain_use_count[domain]--;
2182
2183 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
2184 intel_power_well_put(dev_priv, power_well);
2185}
2186
2187static void __intel_display_power_put(struct drm_i915_private *dev_priv,
2188 enum intel_display_power_domain domain)
2189{
2190 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2191
2192 mutex_lock(&power_domains->lock);
2193 __intel_display_power_put_domain(dev_priv, domain);
2194 mutex_unlock(&power_domains->lock);
2195}
2196
2197static void
2198queue_async_put_domains_work(struct i915_power_domains *power_domains,
2199 intel_wakeref_t wakeref)
2200{
2201 struct drm_i915_private *i915 = container_of(power_domains,
2202 struct drm_i915_private,
2203 power_domains);
2204 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
2205 power_domains->async_put_wakeref = wakeref;
2206 drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
2207 &power_domains->async_put_work,
2208 msecs_to_jiffies(100)));
2209}
2210
2211static void
2212release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
2213{
2214 struct drm_i915_private *dev_priv =
2215 container_of(power_domains, struct drm_i915_private,
2216 power_domains);
2217 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2218 enum intel_display_power_domain domain;
2219 intel_wakeref_t wakeref;
2220
2221 /*
2222 	 * The caller must already hold a raw wakeref; upgrade that to a proper
2223 * wakeref to make the state checker happy about the HW access during
2224 * power well disabling.
2225 */
2226 assert_rpm_raw_wakeref_held(rpm);
2227 wakeref = intel_runtime_pm_get(rpm);
2228
2229 for_each_power_domain(domain, mask) {
2230 /* Clear before put, so put's sanity check is happy. */
2231 async_put_domains_clear_domain(power_domains, domain);
2232 __intel_display_power_put_domain(dev_priv, domain);
2233 }
2234
2235 intel_runtime_pm_put(rpm, wakeref);
2236}
2237
2238static void
2239intel_display_power_put_async_work(struct work_struct *work)
2240{
2241 struct drm_i915_private *dev_priv =
2242 container_of(work, struct drm_i915_private,
2243 power_domains.async_put_work.work);
2244 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2245 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2246 intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
2247 intel_wakeref_t old_work_wakeref = 0;
2248
2249 mutex_lock(&power_domains->lock);
2250
2251 /*
2252 * Bail out if all the domain refs pending to be released were grabbed
2253 * by subsequent gets or a flush_work.
2254 */
2255 old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2256 if (!old_work_wakeref)
2257 goto out_verify;
2258
2259 release_async_put_domains(power_domains,
2260 power_domains->async_put_domains[0]);
2261
2262 /* Requeue the work if more domains were async put meanwhile. */
2263 if (power_domains->async_put_domains[1]) {
2264 power_domains->async_put_domains[0] =
2265 fetch_and_zero(&power_domains->async_put_domains[1]);
2266 queue_async_put_domains_work(power_domains,
2267 fetch_and_zero(&new_work_wakeref));
2268 } else {
2269 /*
2270 * Cancel the work that got queued after this one got dequeued,
2271 * since here we released the corresponding async-put reference.
2272 */
2273 cancel_delayed_work(&power_domains->async_put_work);
2274 }
2275
2276out_verify:
2277 verify_async_put_domains_state(power_domains);
2278
2279 mutex_unlock(&power_domains->lock);
2280
2281 if (old_work_wakeref)
2282 intel_runtime_pm_put_raw(rpm, old_work_wakeref);
2283 if (new_work_wakeref)
2284 intel_runtime_pm_put_raw(rpm, new_work_wakeref);
2285}
2286
2287/**
2288 * intel_display_power_put_async - release a power domain reference asynchronously
2289 * @i915: i915 device instance
2290 * @domain: power domain to reference
2291 * @wakeref: wakeref acquired for the reference that is being released
2292 *
2293 * This function drops the power domain reference obtained by
2294 * intel_display_power_get*() and schedules a work to power down the
2295 * corresponding hardware block if this is the last reference.
2296 */
2297void __intel_display_power_put_async(struct drm_i915_private *i915,
2298 enum intel_display_power_domain domain,
2299 intel_wakeref_t wakeref)
2300{
2301 struct i915_power_domains *power_domains = &i915->power_domains;
2302 struct intel_runtime_pm *rpm = &i915->runtime_pm;
2303 intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
2304
2305 mutex_lock(&power_domains->lock);
2306
2307 if (power_domains->domain_use_count[domain] > 1) {
2308 __intel_display_power_put_domain(i915, domain);
2309
2310 goto out_verify;
2311 }
2312
2313 drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
2314
2315 /* Let a pending work requeue itself or queue a new one. */
2316 if (power_domains->async_put_wakeref) {
2317 power_domains->async_put_domains[1] |= BIT_ULL(domain);
2318 } else {
2319 power_domains->async_put_domains[0] |= BIT_ULL(domain);
2320 queue_async_put_domains_work(power_domains,
2321 fetch_and_zero(&work_wakeref));
2322 }
2323
2324out_verify:
2325 verify_async_put_domains_state(power_domains);
2326
2327 mutex_unlock(&power_domains->lock);
2328
2329 if (work_wakeref)
2330 intel_runtime_pm_put_raw(rpm, work_wakeref);
2331
2332 intel_runtime_pm_put(rpm, wakeref);
2333}
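
/*
 * Illustrative usage sketch, assuming the intel_display_power_put_async()
 * wrapper from the header resolves to this function: the put is deferred to
 * the delayed work instead of powering the domain down synchronously, e.g.
 *
 *	wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUDIO);
 *	... short hardware access ...
 *	intel_display_power_put_async(i915, POWER_DOMAIN_AUDIO, wakeref);
 */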
2334
2335/**
2336 * intel_display_power_flush_work - flushes the async display power disabling work
2337 * @i915: i915 device instance
2338 *
2339 * Flushes any pending work that was scheduled by a preceding
2340 * intel_display_power_put_async() call, completing the disabling of the
2341 * corresponding power domains.
2342 *
2343 * Note that the work handler function may still be running after this
2344 * function returns; to ensure that the work handler isn't running use
2345 * intel_display_power_flush_work_sync() instead.
2346 */
2347void intel_display_power_flush_work(struct drm_i915_private *i915)
2348{
2349 struct i915_power_domains *power_domains = &i915->power_domains;
2350 intel_wakeref_t work_wakeref;
2351
2352 mutex_lock(&power_domains->lock);
2353
2354 work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2355 if (!work_wakeref)
2356 goto out_verify;
2357
2358 release_async_put_domains(power_domains,
2359 async_put_domains_mask(power_domains));
2360 cancel_delayed_work(&power_domains->async_put_work);
2361
2362out_verify:
2363 verify_async_put_domains_state(power_domains);
2364
2365 mutex_unlock(&power_domains->lock);
2366
2367 if (work_wakeref)
2368 intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
2369}
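
/*
 * Illustrative sketch, assuming a preceding intel_display_power_put_async()
 * call on the same device: flushing guarantees the deferred put has taken
 * effect before continuing, for instance prior to suspend:
 *
 *	intel_display_power_put_async(i915, POWER_DOMAIN_AUDIO, wakeref);
 *	...
 *	intel_display_power_flush_work(i915);
 */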
2370
2371/**
2372 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
2373 * @i915: i915 device instance
2374 *
2375  * Like intel_display_power_flush_work(), but also ensures that the work
2376  * handler function is no longer running when this function returns.
2377 */
2378static void
2379intel_display_power_flush_work_sync(struct drm_i915_private *i915)
2380{
2381 struct i915_power_domains *power_domains = &i915->power_domains;
2382
2383 intel_display_power_flush_work(i915);
2384 cancel_delayed_work_sync(&power_domains->async_put_work);
2385
2386 verify_async_put_domains_state(power_domains);
2387
2388 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
2389}
2390
2391#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2392/**
2393 * intel_display_power_put - release a power domain reference
2394 * @dev_priv: i915 device instance
2395 * @domain: power domain to reference
2396 * @wakeref: wakeref acquired for the reference that is being released
2397 *
2398 * This function drops the power domain reference obtained by
2399 * intel_display_power_get() and might power down the corresponding hardware
2400 * block right away if this is the last reference.
2401 */
2402void intel_display_power_put(struct drm_i915_private *dev_priv,
2403 enum intel_display_power_domain domain,
2404 intel_wakeref_t wakeref)
2405{
2406 __intel_display_power_put(dev_priv, domain);
2407 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2408}
2409#else
2410/**
2411 * intel_display_power_put_unchecked - release an unchecked power domain reference
2412 * @dev_priv: i915 device instance
2413 * @domain: power domain to reference
2414 *
2415 * This function drops the power domain reference obtained by
2416 * intel_display_power_get() and might power down the corresponding hardware
2417 * block right away if this is the last reference.
2418 *
2419 * This function is only for the power domain code's internal use to suppress wakeref
2420  * tracking when the corresponding debug kconfig option is disabled, and
2421  * should not be used otherwise.
2422 */
2423void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
2424 enum intel_display_power_domain domain)
2425{
2426 __intel_display_power_put(dev_priv, domain);
2427 intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
2428}
2429#endif
2430
2431void
2432intel_display_power_get_in_set(struct drm_i915_private *i915,
2433 struct intel_display_power_domain_set *power_domain_set,
2434 enum intel_display_power_domain domain)
2435{
2436 intel_wakeref_t __maybe_unused wf;
2437
2438 drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
2439
2440 wf = intel_display_power_get(i915, domain);
2441#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2442 power_domain_set->wakerefs[domain] = wf;
2443#endif
2444 power_domain_set->mask |= BIT_ULL(domain);
2445}
2446
2447bool
2448intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
2449 struct intel_display_power_domain_set *power_domain_set,
2450 enum intel_display_power_domain domain)
2451{
2452 intel_wakeref_t wf;
2453
2454 drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
2455
2456 wf = intel_display_power_get_if_enabled(i915, domain);
2457 if (!wf)
2458 return false;
2459
2460#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2461 power_domain_set->wakerefs[domain] = wf;
2462#endif
2463 power_domain_set->mask |= BIT_ULL(domain);
2464
2465 return true;
2466}
2467
2468void
2469intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
2470 struct intel_display_power_domain_set *power_domain_set,
2471 u64 mask)
2472{
2473 enum intel_display_power_domain domain;
2474
2475 drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask);
2476
2477 for_each_power_domain(domain, mask) {
2478 intel_wakeref_t __maybe_unused wf = -1;
2479
2480#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2481 wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
2482#endif
2483 intel_display_power_put(i915, domain, wf);
2484 power_domain_set->mask &= ~BIT_ULL(domain);
2485 }
2486}
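
/*
 * Illustrative usage sketch for the power domain set helpers: references
 * accumulated into a set are tracked via its mask and can be dropped together,
 * e.g.
 *
 *	struct intel_display_power_domain_set set = {};
 *
 *	intel_display_power_get_in_set(i915, &set, POWER_DOMAIN_PIPE_A);
 *	intel_display_power_get_in_set(i915, &set, POWER_DOMAIN_TRANSCODER_A);
 *	... use the hardware ...
 *	intel_display_power_put_mask_in_set(i915, &set, set.mask);
 */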
2487
2488#define I830_PIPES_POWER_DOMAINS ( \
2489 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
2490 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2491 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2492 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2493 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2494 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2495 BIT_ULL(POWER_DOMAIN_INIT))
2496
2497#define VLV_DISPLAY_POWER_DOMAINS ( \
2498 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
2499 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
2500 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2501 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2502 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2503 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2504 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2505 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2506 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2507 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
2508 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
2509 BIT_ULL(POWER_DOMAIN_VGA) | \
2510 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2511 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2512 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2513 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2514 BIT_ULL(POWER_DOMAIN_INIT))
2515
2516#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
2517 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2518 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2519 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
2520 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2521 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2522 BIT_ULL(POWER_DOMAIN_INIT))
2523
2524#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
2525 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2526 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2527 BIT_ULL(POWER_DOMAIN_INIT))
2528
2529#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
2530 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2531 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2532 BIT_ULL(POWER_DOMAIN_INIT))
2533
2534#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
2535 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2536 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2537 BIT_ULL(POWER_DOMAIN_INIT))
2538
2539#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
2540 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2541 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2542 BIT_ULL(POWER_DOMAIN_INIT))
2543
2544#define CHV_DISPLAY_POWER_DOMAINS ( \
2545 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
2546 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
2547 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2548 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2549 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2550 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2551 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2552 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2553 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2554 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2555 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2556 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2557 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2558 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
2559 BIT_ULL(POWER_DOMAIN_VGA) | \
2560 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2561 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2562 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2563 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2564 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2565 BIT_ULL(POWER_DOMAIN_INIT))
2566
2567#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
2568 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2569 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2570 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2571 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2572 BIT_ULL(POWER_DOMAIN_INIT))
2573
2574#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
2575 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2576 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2577 BIT_ULL(POWER_DOMAIN_INIT))
2578
2579#define HSW_DISPLAY_POWER_DOMAINS ( \
2580 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2581 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2582 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2583 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2584 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2585 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2586 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2587 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2588 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2589 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2590 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2591 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
2592 BIT_ULL(POWER_DOMAIN_VGA) | \
2593 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2594 BIT_ULL(POWER_DOMAIN_INIT))
2595
2596#define BDW_DISPLAY_POWER_DOMAINS ( \
2597 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2598 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2599 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2600 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2601 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2602 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2603 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2604 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2605 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2606 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2607 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
2608 BIT_ULL(POWER_DOMAIN_VGA) | \
2609 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2610 BIT_ULL(POWER_DOMAIN_INIT))
2611
2612#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2613 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2614 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2615 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2616 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2617 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2618 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2619 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2620 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2621 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2622 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2623 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
2624 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2625 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2626 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2627 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2628 BIT_ULL(POWER_DOMAIN_VGA) | \
2629 BIT_ULL(POWER_DOMAIN_INIT))
2630#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
2631 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
2632 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
2633 BIT_ULL(POWER_DOMAIN_INIT))
2634#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
2635 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
2636 BIT_ULL(POWER_DOMAIN_INIT))
2637#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
2638 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
2639 BIT_ULL(POWER_DOMAIN_INIT))
2640#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
2641 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
2642 BIT_ULL(POWER_DOMAIN_INIT))
2643#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2644 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
2645 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
2646 BIT_ULL(POWER_DOMAIN_MODESET) | \
2647 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2648 BIT_ULL(POWER_DOMAIN_INIT))
2649
2650#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2651 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2652 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2653 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2654 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2655 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2656 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2657 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2658 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2659 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2660 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2661 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2662 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2663 BIT_ULL(POWER_DOMAIN_VGA) | \
2664 BIT_ULL(POWER_DOMAIN_INIT))
2665#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2666 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
2667 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
2668 BIT_ULL(POWER_DOMAIN_MODESET) | \
2669 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2670 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2671 BIT_ULL(POWER_DOMAIN_INIT))
2672#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
2673 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
2674 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2675 BIT_ULL(POWER_DOMAIN_INIT))
2676#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
2677 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2678 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2679 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2680 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2681 BIT_ULL(POWER_DOMAIN_INIT))
2682
2683#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2684 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2685 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2686 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2687 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2688 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2689 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2690 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2691 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2692 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2693 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2694 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2695 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2696 BIT_ULL(POWER_DOMAIN_VGA) | \
2697 BIT_ULL(POWER_DOMAIN_INIT))
2698#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \
2699 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2700#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
2701 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2702#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
2703 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2704#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
2705 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
2706 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2707 BIT_ULL(POWER_DOMAIN_INIT))
2708#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
2709 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2710 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2711 BIT_ULL(POWER_DOMAIN_INIT))
2712#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
2713 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2714 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2715 BIT_ULL(POWER_DOMAIN_INIT))
2716#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
2717 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2718 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
2719 BIT_ULL(POWER_DOMAIN_INIT))
2720#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
2721 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2722 BIT_ULL(POWER_DOMAIN_INIT))
2723#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
2724 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2725 BIT_ULL(POWER_DOMAIN_INIT))
2726#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2727 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
2728 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
2729 BIT_ULL(POWER_DOMAIN_MODESET) | \
2730 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2731 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2732 BIT_ULL(POWER_DOMAIN_INIT))
2733
2734#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2735 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2736 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2737 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2738 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2739 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2740 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2741 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2742 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2743 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2744 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2745 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
2746 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2747 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2748 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2749 BIT_ULL(POWER_DOMAIN_AUX_F) | \
2750 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2751 BIT_ULL(POWER_DOMAIN_VGA) | \
2752 BIT_ULL(POWER_DOMAIN_INIT))
2753#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \
2754 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
2755 BIT_ULL(POWER_DOMAIN_INIT))
2756#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \
2757 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
2758 BIT_ULL(POWER_DOMAIN_INIT))
2759#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \
2760 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
2761 BIT_ULL(POWER_DOMAIN_INIT))
2762#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \
2763 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
2764 BIT_ULL(POWER_DOMAIN_INIT))
2765#define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \
2766 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2767 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
2768 BIT_ULL(POWER_DOMAIN_INIT))
2769#define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \
2770 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2771 BIT_ULL(POWER_DOMAIN_INIT))
2772#define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \
2773 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2774 BIT_ULL(POWER_DOMAIN_INIT))
2775#define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \
2776 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2777 BIT_ULL(POWER_DOMAIN_INIT))
2778#define CNL_DISPLAY_AUX_F_POWER_DOMAINS ( \
2779 BIT_ULL(POWER_DOMAIN_AUX_F) | \
2780 BIT_ULL(POWER_DOMAIN_INIT))
2781#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS ( \
2782 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
2783 BIT_ULL(POWER_DOMAIN_INIT))
2784#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2785 CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
2786 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
2787 BIT_ULL(POWER_DOMAIN_MODESET) | \
2788 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2789 BIT_ULL(POWER_DOMAIN_INIT))
2790
2791/*
2792 * ICL PW_0/PG_0 domains (HW/DMC control):
2793 * - PCI
2794 * - clocks except port PLL
2795 * - central power except FBC
2796 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2797 * ICL PW_1/PG_1 domains (HW/DMC control):
2798 * - DBUF function
2799 * - PIPE_A and its planes, except VGA
2800 * - transcoder EDP + PSR
2801 * - transcoder DSI
2802 * - DDI_A
2803 * - FBC
2804 */
2805#define ICL_PW_4_POWER_DOMAINS ( \
2806 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2807 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2808 BIT_ULL(POWER_DOMAIN_INIT))
2809 /* VDSC/joining */
2810#define ICL_PW_3_POWER_DOMAINS ( \
2811 ICL_PW_4_POWER_DOMAINS | \
2812 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2813 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2814 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2815 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2816 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2817 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2818 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2819 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2820 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
2821 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
2822 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2823 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2824 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2825 BIT_ULL(POWER_DOMAIN_AUX_E) | \
2826 BIT_ULL(POWER_DOMAIN_AUX_F) | \
2827 BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \
2828 BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
2829 BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
2830 BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
2831 BIT_ULL(POWER_DOMAIN_VGA) | \
2832 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2833 BIT_ULL(POWER_DOMAIN_INIT))
2834 /*
2835 * - transcoder WD
2836 * - KVMR (HW control)
2837 */
2838#define ICL_PW_2_POWER_DOMAINS ( \
2839 ICL_PW_3_POWER_DOMAINS | \
2840 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
2841 BIT_ULL(POWER_DOMAIN_INIT))
2842 /*
2843 * - KVMR (HW control)
2844 */
2845#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2846 ICL_PW_2_POWER_DOMAINS | \
2847 BIT_ULL(POWER_DOMAIN_MODESET) | \
2848 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2849 BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \
2850 BIT_ULL(POWER_DOMAIN_INIT))
2851
2852#define ICL_DDI_IO_A_POWER_DOMAINS ( \
2853 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2854#define ICL_DDI_IO_B_POWER_DOMAINS ( \
2855 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2856#define ICL_DDI_IO_C_POWER_DOMAINS ( \
2857 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2858#define ICL_DDI_IO_D_POWER_DOMAINS ( \
2859 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2860#define ICL_DDI_IO_E_POWER_DOMAINS ( \
2861 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2862#define ICL_DDI_IO_F_POWER_DOMAINS ( \
2863 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2864
2865#define ICL_AUX_A_IO_POWER_DOMAINS ( \
2866 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
2867 BIT_ULL(POWER_DOMAIN_AUX_A))
2868#define ICL_AUX_B_IO_POWER_DOMAINS ( \
2869 BIT_ULL(POWER_DOMAIN_AUX_B))
2870#define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \
2871 BIT_ULL(POWER_DOMAIN_AUX_C))
2872#define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \
2873 BIT_ULL(POWER_DOMAIN_AUX_D))
2874#define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \
2875 BIT_ULL(POWER_DOMAIN_AUX_E))
2876#define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \
2877 BIT_ULL(POWER_DOMAIN_AUX_F))
2878#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \
2879 BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
2880#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \
2881 BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2882#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \
2883 BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2884#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \
2885 BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2886
2887#define TGL_PW_5_POWER_DOMAINS ( \
2888 BIT_ULL(POWER_DOMAIN_PIPE_D) | \
2889 BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \
2890 BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \
2891 BIT_ULL(POWER_DOMAIN_INIT))
2892
2893#define TGL_PW_4_POWER_DOMAINS ( \
2894 TGL_PW_5_POWER_DOMAINS | \
2895 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2896 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2897 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2898 BIT_ULL(POWER_DOMAIN_INIT))
2899
2900#define TGL_PW_3_POWER_DOMAINS ( \
2901 TGL_PW_4_POWER_DOMAINS | \
2902 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2903 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2904 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2905 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \
2906 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \
2907 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \
2908 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \
2909 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC5) | \
2910 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC6) | \
2911 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
2912 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
2913 BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \
2914 BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \
2915 BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \
2916 BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \
2917 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
2918 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
2919 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
2920 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
2921 BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \
2922 BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \
2923 BIT_ULL(POWER_DOMAIN_VGA) | \
2924 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2925 BIT_ULL(POWER_DOMAIN_INIT))
2926
2927#define TGL_PW_2_POWER_DOMAINS ( \
2928 TGL_PW_3_POWER_DOMAINS | \
2929 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
2930 BIT_ULL(POWER_DOMAIN_INIT))
2931
2932#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2933 TGL_PW_3_POWER_DOMAINS | \
2934 BIT_ULL(POWER_DOMAIN_MODESET) | \
2935 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2936 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2937 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2938 BIT_ULL(POWER_DOMAIN_INIT))
2939
2940#define TGL_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1)
2941#define TGL_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2)
2942#define TGL_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3)
2943#define TGL_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4)
2944#define TGL_DDI_IO_TC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC5)
2945#define TGL_DDI_IO_TC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC6)
2946
2947#define TGL_AUX_A_IO_POWER_DOMAINS ( \
2948 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
2949 BIT_ULL(POWER_DOMAIN_AUX_A))
2950#define TGL_AUX_B_IO_POWER_DOMAINS ( \
2951 BIT_ULL(POWER_DOMAIN_AUX_B))
2952#define TGL_AUX_C_IO_POWER_DOMAINS ( \
2953 BIT_ULL(POWER_DOMAIN_AUX_C))
2954
2955#define TGL_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1)
2956#define TGL_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2)
2957#define TGL_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3)
2958#define TGL_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4)
2959#define TGL_AUX_IO_USBC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC5)
2960#define TGL_AUX_IO_USBC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC6)
2961
2962#define TGL_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1)
2963#define TGL_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2)
2964#define TGL_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3)
2965#define TGL_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4)
2966#define TGL_AUX_IO_TBT5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT5)
2967#define TGL_AUX_IO_TBT6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT6)
2968
2969#define TGL_TC_COLD_OFF_POWER_DOMAINS ( \
2970 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
2971 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
2972 BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \
2973 BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \
2974 BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \
2975 BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \
2976 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
2977 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
2978 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
2979 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
2980 BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \
2981 BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \
2982 BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
2983
2984#define RKL_PW_4_POWER_DOMAINS ( \
2985 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2986 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2987 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2988 BIT_ULL(POWER_DOMAIN_INIT))
2989
2990#define RKL_PW_3_POWER_DOMAINS ( \
2991 RKL_PW_4_POWER_DOMAINS | \
2992 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2993 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2994 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2995 BIT_ULL(POWER_DOMAIN_VGA) | \
2996 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2997 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \
2998 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \
2999 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
3000 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
3001 BIT_ULL(POWER_DOMAIN_INIT))
3002
3003/*
3004 * There is no PW_2/PG_2 on RKL.
3005 *
3006 * RKL PW_1/PG_1 domains (under HW/DMC control):
3007 * - DBUF function (note: registers are in PW0)
3008 * - PIPE_A and its planes and VDSC/joining, except VGA
3009 * - transcoder A
3010 * - DDI_A and DDI_B
3011 * - FBC
3012 *
3013 * RKL PW_0/PG_0 domains (under HW/DMC control):
3014 * - PCI
3015 * - clocks except port PLL
3016 * - shared functions:
3017 * * interrupts except pipe interrupts
3018 * * MBus except PIPE_MBUS_DBOX_CTL
3019 * * DBUF registers
3020 * - central power except FBC
3021 * - top-level GTC (DDI-level GTC is in the well associated with the DDI)
3022 */
3023
3024#define RKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
3025 RKL_PW_3_POWER_DOMAINS | \
3026 BIT_ULL(POWER_DOMAIN_MODESET) | \
3027 BIT_ULL(POWER_DOMAIN_AUX_A) | \
3028 BIT_ULL(POWER_DOMAIN_AUX_B) | \
3029 BIT_ULL(POWER_DOMAIN_INIT))
3030
3031/*
3032 * XE_LPD Power Domains
3033 *
3034 * Previous platforms required that PG(n-1) be enabled before PG(n). That
3035 * dependency chain turns into a dependency tree on XE_LPD:
3036 *
3037 * PG0
3038 * |
3039 * --PG1--
3040 * / \
3041 * PGA --PG2--
3042 * / | \
3043 * PGB PGC PGD
3044 *
3045 * Power wells must be enabled from top to bottom and disabled from bottom
3046 * to top. This allows pipes to be power gated independently.
3047 */
3048
3049#define XELPD_PW_D_POWER_DOMAINS ( \
3050 BIT_ULL(POWER_DOMAIN_PIPE_D) | \
3051 BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \
3052 BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \
3053 BIT_ULL(POWER_DOMAIN_INIT))
3054
3055#define XELPD_PW_C_POWER_DOMAINS ( \
3056 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
3057 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
3058 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
3059 BIT_ULL(POWER_DOMAIN_INIT))
3060
3061#define XELPD_PW_B_POWER_DOMAINS ( \
3062 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
3063 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
3064 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
3065 BIT_ULL(POWER_DOMAIN_INIT))
3066
3067#define XELPD_PW_A_POWER_DOMAINS ( \
3068 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
3069 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
3070 BIT_ULL(POWER_DOMAIN_INIT))
3071
3072#define XELPD_PW_2_POWER_DOMAINS ( \
3073 XELPD_PW_B_POWER_DOMAINS | \
3074 XELPD_PW_C_POWER_DOMAINS | \
3075 XELPD_PW_D_POWER_DOMAINS | \
3076 BIT_ULL(POWER_DOMAIN_AUDIO) | \
3077 BIT_ULL(POWER_DOMAIN_VGA) | \
3078 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
3079 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D_XELPD) | \
3080 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E_XELPD) | \
3081 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \
3082 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \
3083 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \
3084 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \
3085 BIT_ULL(POWER_DOMAIN_AUX_C) | \
3086 BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) | \
3087 BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) | \
3088 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
3089 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
3090 BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \
3091 BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \
3092 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
3093 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
3094 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
3095 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
3096 BIT_ULL(POWER_DOMAIN_INIT))
3097
3098/*
3099 * XELPD PW_1/PG_1 domains (under HW/DMC control):
3100 * - DBUF function (registers are in PW0)
3101 * - Transcoder A
3102 * - DDI_A and DDI_B
3103 *
3104  * XELPD PW_0/PG_0 domains (under HW/DMC control):
3105 * - PCI
3106 * - Clocks except port PLL
3107 * - Shared functions:
3108 * * interrupts except pipe interrupts
3109 * * MBus except PIPE_MBUS_DBOX_CTL
3110 * * DBUF registers
3111 * - Central power except FBC
3112 * - Top-level GTC (DDI-level GTC is in the well associated with the DDI)
3113 */
3114
3115#define XELPD_DISPLAY_DC_OFF_POWER_DOMAINS ( \
3116 XELPD_PW_2_POWER_DOMAINS | \
3117 BIT_ULL(POWER_DOMAIN_MODESET) | \
3118 BIT_ULL(POWER_DOMAIN_AUX_A) | \
3119 BIT_ULL(POWER_DOMAIN_AUX_B) | \
3120 BIT_ULL(POWER_DOMAIN_INIT))
3121
3122#define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D_XELPD)
3123#define XELPD_AUX_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_E_XELPD)
3124#define XELPD_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1)
3125#define XELPD_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2)
3126#define XELPD_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3)
3127#define XELPD_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4)
3128
3129#define XELPD_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1)
3130#define XELPD_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2)
3131#define XELPD_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3)
3132#define XELPD_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4)
3133
3134#define XELPD_DDI_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_D_XELPD)
3135#define XELPD_DDI_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_E_XELPD)
3136#define XELPD_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1)
3137#define XELPD_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2)
3138#define XELPD_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3)
3139#define XELPD_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4)
3140
3141static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
3142 .sync_hw = i9xx_power_well_sync_hw_noop,
3143 .enable = i9xx_always_on_power_well_noop,
3144 .disable = i9xx_always_on_power_well_noop,
3145 .is_enabled = i9xx_always_on_power_well_enabled,
3146};
3147
3148static const struct i915_power_well_ops chv_pipe_power_well_ops = {
3149 .sync_hw = chv_pipe_power_well_sync_hw,
3150 .enable = chv_pipe_power_well_enable,
3151 .disable = chv_pipe_power_well_disable,
3152 .is_enabled = chv_pipe_power_well_enabled,
3153};
3154
3155static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
3156 .sync_hw = i9xx_power_well_sync_hw_noop,
3157 .enable = chv_dpio_cmn_power_well_enable,
3158 .disable = chv_dpio_cmn_power_well_disable,
3159 .is_enabled = vlv_power_well_enabled,
3160};
3161
3162static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
3163 {
3164 .name = "always-on",
3165 .always_on = true,
3166 .domains = POWER_DOMAIN_MASK,
3167 .ops = &i9xx_always_on_power_well_ops,
3168 .id = DISP_PW_ID_NONE,
3169 },
3170};
3171
3172static const struct i915_power_well_ops i830_pipes_power_well_ops = {
3173 .sync_hw = i830_pipes_power_well_sync_hw,
3174 .enable = i830_pipes_power_well_enable,
3175 .disable = i830_pipes_power_well_disable,
3176 .is_enabled = i830_pipes_power_well_enabled,
3177};
3178
3179static const struct i915_power_well_desc i830_power_wells[] = {
3180 {
3181 .name = "always-on",
3182 .always_on = true,
3183 .domains = POWER_DOMAIN_MASK,
3184 .ops = &i9xx_always_on_power_well_ops,
3185 .id = DISP_PW_ID_NONE,
3186 },
3187 {
3188 .name = "pipes",
3189 .domains = I830_PIPES_POWER_DOMAINS,
3190 .ops = &i830_pipes_power_well_ops,
3191 .id = DISP_PW_ID_NONE,
3192 },
3193};
3194
3195static const struct i915_power_well_ops hsw_power_well_ops = {
3196 .sync_hw = hsw_power_well_sync_hw,
3197 .enable = hsw_power_well_enable,
3198 .disable = hsw_power_well_disable,
3199 .is_enabled = hsw_power_well_enabled,
3200};
3201
3202static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
3203 .sync_hw = i9xx_power_well_sync_hw_noop,
3204 .enable = gen9_dc_off_power_well_enable,
3205 .disable = gen9_dc_off_power_well_disable,
3206 .is_enabled = gen9_dc_off_power_well_enabled,
3207};
3208
3209static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
3210 .sync_hw = i9xx_power_well_sync_hw_noop,
3211 .enable = bxt_dpio_cmn_power_well_enable,
3212 .disable = bxt_dpio_cmn_power_well_disable,
3213 .is_enabled = bxt_dpio_cmn_power_well_enabled,
3214};
3215
3216static const struct i915_power_well_regs hsw_power_well_regs = {
3217 .bios = HSW_PWR_WELL_CTL1,
3218 .driver = HSW_PWR_WELL_CTL2,
3219 .kvmr = HSW_PWR_WELL_CTL3,
3220 .debug = HSW_PWR_WELL_CTL4,
3221};
3222
3223static const struct i915_power_well_desc hsw_power_wells[] = {
3224 {
3225 .name = "always-on",
3226 .always_on = true,
3227 .domains = POWER_DOMAIN_MASK,
3228 .ops = &i9xx_always_on_power_well_ops,
3229 .id = DISP_PW_ID_NONE,
3230 },
3231 {
3232 .name = "display",
3233 .domains = HSW_DISPLAY_POWER_DOMAINS,
3234 .ops = &hsw_power_well_ops,
3235 .id = HSW_DISP_PW_GLOBAL,
3236 {
3237 .hsw.regs = &hsw_power_well_regs,
3238 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
3239 .hsw.has_vga = true,
3240 },
3241 },
3242};
3243
3244static const struct i915_power_well_desc bdw_power_wells[] = {
3245 {
3246 .name = "always-on",
3247 .always_on = true,
3248 .domains = POWER_DOMAIN_MASK,
3249 .ops = &i9xx_always_on_power_well_ops,
3250 .id = DISP_PW_ID_NONE,
3251 },
3252 {
3253 .name = "display",
3254 .domains = BDW_DISPLAY_POWER_DOMAINS,
3255 .ops = &hsw_power_well_ops,
3256 .id = HSW_DISP_PW_GLOBAL,
3257 {
3258 .hsw.regs = &hsw_power_well_regs,
3259 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
3260 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3261 .hsw.has_vga = true,
3262 },
3263 },
3264};
3265
3266static const struct i915_power_well_ops vlv_display_power_well_ops = {
3267 .sync_hw = i9xx_power_well_sync_hw_noop,
3268 .enable = vlv_display_power_well_enable,
3269 .disable = vlv_display_power_well_disable,
3270 .is_enabled = vlv_power_well_enabled,
3271};
3272
3273static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
3274 .sync_hw = i9xx_power_well_sync_hw_noop,
3275 .enable = vlv_dpio_cmn_power_well_enable,
3276 .disable = vlv_dpio_cmn_power_well_disable,
3277 .is_enabled = vlv_power_well_enabled,
3278};
3279
3280static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
3281 .sync_hw = i9xx_power_well_sync_hw_noop,
3282 .enable = vlv_power_well_enable,
3283 .disable = vlv_power_well_disable,
3284 .is_enabled = vlv_power_well_enabled,
3285};
3286
3287static const struct i915_power_well_desc vlv_power_wells[] = {
3288 {
3289 .name = "always-on",
3290 .always_on = true,
3291 .domains = POWER_DOMAIN_MASK,
3292 .ops = &i9xx_always_on_power_well_ops,
3293 .id = DISP_PW_ID_NONE,
3294 },
3295 {
3296 .name = "display",
3297 .domains = VLV_DISPLAY_POWER_DOMAINS,
3298 .ops = &vlv_display_power_well_ops,
3299 .id = VLV_DISP_PW_DISP2D,
3300 {
3301 .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
3302 },
3303 },
3304 {
3305 .name = "dpio-tx-b-01",
3306 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3307 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3308 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3309 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3310 .ops = &vlv_dpio_power_well_ops,
3311 .id = DISP_PW_ID_NONE,
3312 {
3313 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
3314 },
3315 },
3316 {
3317 .name = "dpio-tx-b-23",
3318 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3319 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3320 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3321 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3322 .ops = &vlv_dpio_power_well_ops,
3323 .id = DISP_PW_ID_NONE,
3324 {
3325 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
3326 },
3327 },
3328 {
3329 .name = "dpio-tx-c-01",
3330 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3331 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3332 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3333 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3334 .ops = &vlv_dpio_power_well_ops,
3335 .id = DISP_PW_ID_NONE,
3336 {
3337 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
3338 },
3339 },
3340 {
3341 .name = "dpio-tx-c-23",
3342 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3343 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3344 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3345 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3346 .ops = &vlv_dpio_power_well_ops,
3347 .id = DISP_PW_ID_NONE,
3348 {
3349 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
3350 },
3351 },
3352 {
3353 .name = "dpio-common",
3354 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
3355 .ops = &vlv_dpio_cmn_power_well_ops,
3356 .id = VLV_DISP_PW_DPIO_CMN_BC,
3357 {
3358 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3359 },
3360 },
3361};
3362
3363static const struct i915_power_well_desc chv_power_wells[] = {
3364 {
3365 .name = "always-on",
3366 .always_on = true,
3367 .domains = POWER_DOMAIN_MASK,
3368 .ops = &i9xx_always_on_power_well_ops,
3369 .id = DISP_PW_ID_NONE,
3370 },
3371 {
3372 .name = "display",
3373 /*
3374 * Pipe A power well is the new disp2d well. Pipe B and C
3375 * power wells don't actually exist. Pipe A power well is
3376 * required for any pipe to work.
3377 */
3378 .domains = CHV_DISPLAY_POWER_DOMAINS,
3379 .ops = &chv_pipe_power_well_ops,
3380 .id = DISP_PW_ID_NONE,
3381 },
3382 {
3383 .name = "dpio-common-bc",
3384 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
3385 .ops = &chv_dpio_cmn_power_well_ops,
3386 .id = VLV_DISP_PW_DPIO_CMN_BC,
3387 {
3388 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3389 },
3390 },
3391 {
3392 .name = "dpio-common-d",
3393 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
3394 .ops = &chv_dpio_cmn_power_well_ops,
3395 .id = CHV_DISP_PW_DPIO_CMN_D,
3396 {
3397 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
3398 },
3399 },
3400};
3401
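/*
 * Return whether the power well identified by @power_well_id is currently
 * enabled, as reported by its ->is_enabled() hook.
 */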
3402bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
3403 enum i915_power_well_id power_well_id)
3404{
3405 struct i915_power_well *power_well;
3406 bool ret;
3407
3408 power_well = lookup_power_well(dev_priv, power_well_id);
3409 ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
3410
3411 return ret;
3412}
3413
3414static const struct i915_power_well_desc skl_power_wells[] = {
3415 {
3416 .name = "always-on",
3417 .always_on = true,
3418 .domains = POWER_DOMAIN_MASK,
3419 .ops = &i9xx_always_on_power_well_ops,
3420 .id = DISP_PW_ID_NONE,
3421 },
3422 {
3423 .name = "power well 1",
3424 /* Handled by the DMC firmware */
3425 .always_on = true,
3426 .domains = 0,
3427 .ops = &hsw_power_well_ops,
3428 .id = SKL_DISP_PW_1,
3429 {
3430 .hsw.regs = &hsw_power_well_regs,
3431 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3432 .hsw.has_fuses = true,
3433 },
3434 },
3435 {
3436 .name = "MISC IO power well",
3437 /* Handled by the DMC firmware */
3438 .always_on = true,
3439 .domains = 0,
3440 .ops = &hsw_power_well_ops,
3441 .id = SKL_DISP_PW_MISC_IO,
3442 {
3443 .hsw.regs = &hsw_power_well_regs,
3444 .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
3445 },
3446 },
3447 {
3448 .name = "DC off",
3449 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
3450 .ops = &gen9_dc_off_power_well_ops,
3451 .id = SKL_DISP_DC_OFF,
3452 },
3453 {
3454 .name = "power well 2",
3455 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3456 .ops = &hsw_power_well_ops,
3457 .id = SKL_DISP_PW_2,
3458 {
3459 .hsw.regs = &hsw_power_well_regs,
3460 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3461 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3462 .hsw.has_vga = true,
3463 .hsw.has_fuses = true,
3464 },
3465 },
3466 {
3467 .name = "DDI A/E IO power well",
3468 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
3469 .ops = &hsw_power_well_ops,
3470 .id = DISP_PW_ID_NONE,
3471 {
3472 .hsw.regs = &hsw_power_well_regs,
3473 .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
3474 },
3475 },
3476 {
3477 .name = "DDI B IO power well",
3478 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3479 .ops = &hsw_power_well_ops,
3480 .id = DISP_PW_ID_NONE,
3481 {
3482 .hsw.regs = &hsw_power_well_regs,
3483 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3484 },
3485 },
3486 {
3487 .name = "DDI C IO power well",
3488 .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3489 .ops = &hsw_power_well_ops,
3490 .id = DISP_PW_ID_NONE,
3491 {
3492 .hsw.regs = &hsw_power_well_regs,
3493 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3494 },
3495 },
3496 {
3497 .name = "DDI D IO power well",
3498 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
3499 .ops = &hsw_power_well_ops,
3500 .id = DISP_PW_ID_NONE,
3501 {
3502 .hsw.regs = &hsw_power_well_regs,
3503 .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3504 },
3505 },
3506};
3507
3508static const struct i915_power_well_desc bxt_power_wells[] = {
3509 {
3510 .name = "always-on",
3511 .always_on = true,
3512 .domains = POWER_DOMAIN_MASK,
3513 .ops = &i9xx_always_on_power_well_ops,
3514 .id = DISP_PW_ID_NONE,
3515 },
3516 {
3517 .name = "power well 1",
3518 /* Handled by the DMC firmware */
3519 .always_on = true,
3520 .domains = 0,
3521 .ops = &hsw_power_well_ops,
3522 .id = SKL_DISP_PW_1,
3523 {
3524 .hsw.regs = &hsw_power_well_regs,
3525 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3526 .hsw.has_fuses = true,
3527 },
3528 },
3529 {
3530 .name = "DC off",
3531 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
3532 .ops = &gen9_dc_off_power_well_ops,
3533 .id = SKL_DISP_DC_OFF,
3534 },
3535 {
3536 .name = "power well 2",
3537 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3538 .ops = &hsw_power_well_ops,
3539 .id = SKL_DISP_PW_2,
3540 {
3541 .hsw.regs = &hsw_power_well_regs,
3542 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3543 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3544 .hsw.has_vga = true,
3545 .hsw.has_fuses = true,
3546 },
3547 },
3548 {
3549 .name = "dpio-common-a",
3550 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
3551 .ops = &bxt_dpio_cmn_power_well_ops,
3552 .id = BXT_DISP_PW_DPIO_CMN_A,
3553 {
3554 .bxt.phy = DPIO_PHY1,
3555 },
3556 },
3557 {
3558 .name = "dpio-common-bc",
3559 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
3560 .ops = &bxt_dpio_cmn_power_well_ops,
3561 .id = VLV_DISP_PW_DPIO_CMN_BC,
3562 {
3563 .bxt.phy = DPIO_PHY0,
3564 },
3565 },
3566};
3567
3568static const struct i915_power_well_desc glk_power_wells[] = {
3569 {
3570 .name = "always-on",
3571 .always_on = true,
3572 .domains = POWER_DOMAIN_MASK,
3573 .ops = &i9xx_always_on_power_well_ops,
3574 .id = DISP_PW_ID_NONE,
3575 },
3576 {
3577 .name = "power well 1",
3578 /* Handled by the DMC firmware */
3579 .always_on = true,
3580 .domains = 0,
3581 .ops = &hsw_power_well_ops,
3582 .id = SKL_DISP_PW_1,
3583 {
3584 .hsw.regs = &hsw_power_well_regs,
3585 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3586 .hsw.has_fuses = true,
3587 },
3588 },
3589 {
3590 .name = "DC off",
3591 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
3592 .ops = &gen9_dc_off_power_well_ops,
3593 .id = SKL_DISP_DC_OFF,
3594 },
3595 {
3596 .name = "power well 2",
3597 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3598 .ops = &hsw_power_well_ops,
3599 .id = SKL_DISP_PW_2,
3600 {
3601 .hsw.regs = &hsw_power_well_regs,
3602 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3603 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3604 .hsw.has_vga = true,
3605 .hsw.has_fuses = true,
3606 },
3607 },
3608 {
3609 .name = "dpio-common-a",
3610 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
3611 .ops = &bxt_dpio_cmn_power_well_ops,
3612 .id = BXT_DISP_PW_DPIO_CMN_A,
3613 {
3614 .bxt.phy = DPIO_PHY1,
3615 },
3616 },
3617 {
3618 .name = "dpio-common-b",
3619 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
3620 .ops = &bxt_dpio_cmn_power_well_ops,
3621 .id = VLV_DISP_PW_DPIO_CMN_BC,
3622 {
3623 .bxt.phy = DPIO_PHY0,
3624 },
3625 },
3626 {
3627 .name = "dpio-common-c",
3628 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
3629 .ops = &bxt_dpio_cmn_power_well_ops,
3630 .id = GLK_DISP_PW_DPIO_CMN_C,
3631 {
3632 .bxt.phy = DPIO_PHY2,
3633 },
3634 },
3635 {
3636 .name = "AUX A",
3637 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
3638 .ops = &hsw_power_well_ops,
3639 .id = DISP_PW_ID_NONE,
3640 {
3641 .hsw.regs = &hsw_power_well_regs,
3642 .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3643 },
3644 },
3645 {
3646 .name = "AUX B",
3647 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
3648 .ops = &hsw_power_well_ops,
3649 .id = DISP_PW_ID_NONE,
3650 {
3651 .hsw.regs = &hsw_power_well_regs,
3652 .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3653 },
3654 },
3655 {
3656 .name = "AUX C",
3657 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
3658 .ops = &hsw_power_well_ops,
3659 .id = DISP_PW_ID_NONE,
3660 {
3661 .hsw.regs = &hsw_power_well_regs,
3662 .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3663 },
3664 },
3665 {
3666 .name = "DDI A IO power well",
3667 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
3668 .ops = &hsw_power_well_ops,
3669 .id = DISP_PW_ID_NONE,
3670 {
3671 .hsw.regs = &hsw_power_well_regs,
3672 .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3673 },
3674 },
3675 {
3676 .name = "DDI B IO power well",
3677 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3678 .ops = &hsw_power_well_ops,
3679 .id = DISP_PW_ID_NONE,
3680 {
3681 .hsw.regs = &hsw_power_well_regs,
3682 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3683 },
3684 },
3685 {
3686 .name = "DDI C IO power well",
3687 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3688 .ops = &hsw_power_well_ops,
3689 .id = DISP_PW_ID_NONE,
3690 {
3691 .hsw.regs = &hsw_power_well_regs,
3692 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3693 },
3694 },
3695};
3696
3697static const struct i915_power_well_desc cnl_power_wells[] = {
3698 {
3699 .name = "always-on",
3700 .always_on = true,
3701 .domains = POWER_DOMAIN_MASK,
3702 .ops = &i9xx_always_on_power_well_ops,
3703 .id = DISP_PW_ID_NONE,
3704 },
3705 {
3706 .name = "power well 1",
3707 /* Handled by the DMC firmware */
3708 .always_on = true,
3709 .domains = 0,
3710 .ops = &hsw_power_well_ops,
3711 .id = SKL_DISP_PW_1,
3712 {
3713 .hsw.regs = &hsw_power_well_regs,
3714 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3715 .hsw.has_fuses = true,
3716 },
3717 },
3718 {
3719 .name = "AUX A",
3720 .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
3721 .ops = &hsw_power_well_ops,
3722 .id = DISP_PW_ID_NONE,
3723 {
3724 .hsw.regs = &hsw_power_well_regs,
3725 .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3726 },
3727 },
3728 {
3729 .name = "AUX B",
3730 .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
3731 .ops = &hsw_power_well_ops,
3732 .id = DISP_PW_ID_NONE,
3733 {
3734 .hsw.regs = &hsw_power_well_regs,
3735 .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3736 },
3737 },
3738 {
3739 .name = "AUX C",
3740 .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
3741 .ops = &hsw_power_well_ops,
3742 .id = DISP_PW_ID_NONE,
3743 {
3744 .hsw.regs = &hsw_power_well_regs,
3745 .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3746 },
3747 },
3748 {
3749 .name = "AUX D",
3750 .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
3751 .ops = &hsw_power_well_ops,
3752 .id = DISP_PW_ID_NONE,
3753 {
3754 .hsw.regs = &hsw_power_well_regs,
3755 .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
3756 },
3757 },
3758 {
3759 .name = "DC off",
3760 .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
3761 .ops = &gen9_dc_off_power_well_ops,
3762 .id = SKL_DISP_DC_OFF,
3763 },
3764 {
3765 .name = "power well 2",
3766 .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3767 .ops = &hsw_power_well_ops,
3768 .id = SKL_DISP_PW_2,
3769 {
3770 .hsw.regs = &hsw_power_well_regs,
3771 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3772 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3773 .hsw.has_vga = true,
3774 .hsw.has_fuses = true,
3775 },
3776 },
3777 {
3778 .name = "DDI A IO power well",
3779 .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
3780 .ops = &hsw_power_well_ops,
3781 .id = DISP_PW_ID_NONE,
3782 {
3783 .hsw.regs = &hsw_power_well_regs,
3784 .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3785 },
3786 },
3787 {
3788 .name = "DDI B IO power well",
3789 .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
3790 .ops = &hsw_power_well_ops,
3791 .id = DISP_PW_ID_NONE,
3792 {
3793 .hsw.regs = &hsw_power_well_regs,
3794 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3795 },
3796 },
3797 {
3798 .name = "DDI C IO power well",
3799 .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
3800 .ops = &hsw_power_well_ops,
3801 .id = DISP_PW_ID_NONE,
3802 {
3803 .hsw.regs = &hsw_power_well_regs,
3804 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3805 },
3806 },
3807 {
3808 .name = "DDI D IO power well",
3809 .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
3810 .ops = &hsw_power_well_ops,
3811 .id = DISP_PW_ID_NONE,
3812 {
3813 .hsw.regs = &hsw_power_well_regs,
3814 .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3815 },
3816 },
3817 {
3818 .name = "DDI F IO power well",
3819 .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
3820 .ops = &hsw_power_well_ops,
3821 .id = CNL_DISP_PW_DDI_F_IO,
3822 {
3823 .hsw.regs = &hsw_power_well_regs,
3824 .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
3825 },
3826 },
3827 {
3828 .name = "AUX F",
3829 .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
3830 .ops = &hsw_power_well_ops,
3831 .id = CNL_DISP_PW_DDI_F_AUX,
3832 {
3833 .hsw.regs = &hsw_power_well_regs,
3834 .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
3835 },
3836 },
3837};
3838
3839static const struct i915_power_well_ops icl_aux_power_well_ops = {
3840 .sync_hw = hsw_power_well_sync_hw,
3841 .enable = icl_aux_power_well_enable,
3842 .disable = icl_aux_power_well_disable,
3843 .is_enabled = hsw_power_well_enabled,
3844};
3845
3846static const struct i915_power_well_regs icl_aux_power_well_regs = {
3847 .bios = ICL_PWR_WELL_CTL_AUX1,
3848 .driver = ICL_PWR_WELL_CTL_AUX2,
3849 .debug = ICL_PWR_WELL_CTL_AUX4,
3850};
3851
3852static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3853 .bios = ICL_PWR_WELL_CTL_DDI1,
3854 .driver = ICL_PWR_WELL_CTL_DDI2,
3855 .debug = ICL_PWR_WELL_CTL_DDI4,
3856};
3857
3858static const struct i915_power_well_desc icl_power_wells[] = {
3859 {
3860 .name = "always-on",
3861 .always_on = true,
3862 .domains = POWER_DOMAIN_MASK,
3863 .ops = &i9xx_always_on_power_well_ops,
3864 .id = DISP_PW_ID_NONE,
3865 },
3866 {
3867 .name = "power well 1",
3868 /* Handled by the DMC firmware */
3869 .always_on = true,
3870 .domains = 0,
3871 .ops = &hsw_power_well_ops,
3872 .id = SKL_DISP_PW_1,
3873 {
3874 .hsw.regs = &hsw_power_well_regs,
3875 .hsw.idx = ICL_PW_CTL_IDX_PW_1,
3876 .hsw.has_fuses = true,
3877 },
3878 },
3879 {
3880 .name = "DC off",
3881 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3882 .ops = &gen9_dc_off_power_well_ops,
3883 .id = SKL_DISP_DC_OFF,
3884 },
3885 {
3886 .name = "power well 2",
3887 .domains = ICL_PW_2_POWER_DOMAINS,
3888 .ops = &hsw_power_well_ops,
3889 .id = SKL_DISP_PW_2,
3890 {
3891 .hsw.regs = &hsw_power_well_regs,
3892 .hsw.idx = ICL_PW_CTL_IDX_PW_2,
3893 .hsw.has_fuses = true,
3894 },
3895 },
3896 {
3897 .name = "power well 3",
3898 .domains = ICL_PW_3_POWER_DOMAINS,
3899 .ops = &hsw_power_well_ops,
3900 .id = ICL_DISP_PW_3,
3901 {
3902 .hsw.regs = &hsw_power_well_regs,
3903 .hsw.idx = ICL_PW_CTL_IDX_PW_3,
3904 .hsw.irq_pipe_mask = BIT(PIPE_B),
3905 .hsw.has_vga = true,
3906 .hsw.has_fuses = true,
3907 },
3908 },
3909 {
3910 .name = "DDI A IO",
3911 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
3912 .ops = &hsw_power_well_ops,
3913 .id = DISP_PW_ID_NONE,
3914 {
3915 .hsw.regs = &icl_ddi_power_well_regs,
3916 .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3917 },
3918 },
3919 {
3920 .name = "DDI B IO",
3921 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
3922 .ops = &hsw_power_well_ops,
3923 .id = DISP_PW_ID_NONE,
3924 {
3925 .hsw.regs = &icl_ddi_power_well_regs,
3926 .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3927 },
3928 },
3929 {
3930 .name = "DDI C IO",
3931 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
3932 .ops = &hsw_power_well_ops,
3933 .id = DISP_PW_ID_NONE,
3934 {
3935 .hsw.regs = &icl_ddi_power_well_regs,
3936 .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3937 },
3938 },
3939 {
3940 .name = "DDI D IO",
3941 .domains = ICL_DDI_IO_D_POWER_DOMAINS,
3942 .ops = &hsw_power_well_ops,
3943 .id = DISP_PW_ID_NONE,
3944 {
3945 .hsw.regs = &icl_ddi_power_well_regs,
3946 .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3947 },
3948 },
3949 {
3950 .name = "DDI E IO",
3951 .domains = ICL_DDI_IO_E_POWER_DOMAINS,
3952 .ops = &hsw_power_well_ops,
3953 .id = DISP_PW_ID_NONE,
3954 {
3955 .hsw.regs = &icl_ddi_power_well_regs,
3956 .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3957 },
3958 },
3959 {
3960 .name = "DDI F IO",
3961 .domains = ICL_DDI_IO_F_POWER_DOMAINS,
3962 .ops = &hsw_power_well_ops,
3963 .id = DISP_PW_ID_NONE,
3964 {
3965 .hsw.regs = &icl_ddi_power_well_regs,
3966 .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
3967 },
3968 },
3969 {
3970 .name = "AUX A",
3971 .domains = ICL_AUX_A_IO_POWER_DOMAINS,
3972 .ops = &icl_aux_power_well_ops,
3973 .id = DISP_PW_ID_NONE,
3974 {
3975 .hsw.regs = &icl_aux_power_well_regs,
3976 .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3977 },
3978 },
3979 {
3980 .name = "AUX B",
3981 .domains = ICL_AUX_B_IO_POWER_DOMAINS,
3982 .ops = &icl_aux_power_well_ops,
3983 .id = DISP_PW_ID_NONE,
3984 {
3985 .hsw.regs = &icl_aux_power_well_regs,
3986 .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3987 },
3988 },
3989 {
3990 .name = "AUX C TC1",
3991 .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
3992 .ops = &icl_aux_power_well_ops,
3993 .id = DISP_PW_ID_NONE,
3994 {
3995 .hsw.regs = &icl_aux_power_well_regs,
3996 .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3997 .hsw.is_tc_tbt = false,
3998 },
3999 },
4000 {
4001 .name = "AUX D TC2",
4002 .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
4003 .ops = &icl_aux_power_well_ops,
4004 .id = DISP_PW_ID_NONE,
4005 {
4006 .hsw.regs = &icl_aux_power_well_regs,
4007 .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
4008 .hsw.is_tc_tbt = false,
4009 },
4010 },
4011 {
4012 .name = "AUX E TC3",
4013 .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
4014 .ops = &icl_aux_power_well_ops,
4015 .id = DISP_PW_ID_NONE,
4016 {
4017 .hsw.regs = &icl_aux_power_well_regs,
4018 .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
4019 .hsw.is_tc_tbt = false,
4020 },
4021 },
4022 {
4023 .name = "AUX F TC4",
4024 .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
4025 .ops = &icl_aux_power_well_ops,
4026 .id = DISP_PW_ID_NONE,
4027 {
4028 .hsw.regs = &icl_aux_power_well_regs,
4029 .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
4030 .hsw.is_tc_tbt = false,
4031 },
4032 },
4033 {
4034 .name = "AUX C TBT1",
4035 .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
4036 .ops = &icl_aux_power_well_ops,
4037 .id = DISP_PW_ID_NONE,
4038 {
4039 .hsw.regs = &icl_aux_power_well_regs,
4040 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
4041 .hsw.is_tc_tbt = true,
4042 },
4043 },
4044 {
4045 .name = "AUX D TBT2",
4046 .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
4047 .ops = &icl_aux_power_well_ops,
4048 .id = DISP_PW_ID_NONE,
4049 {
4050 .hsw.regs = &icl_aux_power_well_regs,
4051 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
4052 .hsw.is_tc_tbt = true,
4053 },
4054 },
4055 {
4056 .name = "AUX E TBT3",
4057 .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
4058 .ops = &icl_aux_power_well_ops,
4059 .id = DISP_PW_ID_NONE,
4060 {
4061 .hsw.regs = &icl_aux_power_well_regs,
4062 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
4063 .hsw.is_tc_tbt = true,
4064 },
4065 },
4066 {
4067 .name = "AUX F TBT4",
4068 .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
4069 .ops = &icl_aux_power_well_ops,
4070 .id = DISP_PW_ID_NONE,
4071 {
4072 .hsw.regs = &icl_aux_power_well_regs,
4073 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
4074 .hsw.is_tc_tbt = true,
4075 },
4076 },
4077 {
4078 .name = "power well 4",
4079 .domains = ICL_PW_4_POWER_DOMAINS,
4080 .ops = &hsw_power_well_ops,
4081 .id = DISP_PW_ID_NONE,
4082 {
4083 .hsw.regs = &hsw_power_well_regs,
4084 .hsw.idx = ICL_PW_CTL_IDX_PW_4,
4085 .hsw.has_fuses = true,
4086 .hsw.irq_pipe_mask = BIT(PIPE_C),
4087 },
4088 },
4089};
4090
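/*
 * Ask PCODE to block or unblock TC-cold. The request is retried up to
 * three times with a 1 ms sleep in between, and the outcome is logged.
 */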
4091static void
4092tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
4093{
4094 u8 tries = 0;
4095 int ret;
4096
4097 while (1) {
4098 u32 low_val;
4099 u32 high_val = 0;
4100
4101 if (block)
4102 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
4103 else
4104 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;
4105
4106 /*
4107 * The spec states that we should time out the request after 200us,
4108 * but the function below will time out after 500us.
4109 */
4110 ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val,
4111 &high_val);
4112 if (ret == 0) {
4113 if (block &&
4114 (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
4115 ret = -EIO;
4116 else
4117 break;
4118 }
4119
4120 if (++tries == 3)
4121 break;
4122
4123 msleep(1);
4124 }
4125
4126 if (ret)
4127 drm_err(&i915->drm, "TC cold %sblock failed\n",
4128 block ? "" : "un");
4129 else
4130 drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
4131 block ? "" : "un");
4132}
4133
4134static void
4135tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
4136 struct i915_power_well *power_well)
4137{
4138 tgl_tc_cold_request(i915, true);
4139}
4140
4141static void
4142tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
4143 struct i915_power_well *power_well)
4144{
4145 tgl_tc_cold_request(i915, false);
4146}
4147
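/* Re-issue the TC-cold block/unblock request to match the current refcount. */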
4148static void
4149tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
4150 struct i915_power_well *power_well)
4151{
4152 if (power_well->count > 0)
4153 tgl_tc_cold_off_power_well_enable(i915, power_well);
4154 else
4155 tgl_tc_cold_off_power_well_disable(i915, power_well);
4156}
4157
4158static bool
4159tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
4160 struct i915_power_well *power_well)
4161{
4162 /*
4163 * Not the correct implementation, but there is no way to just read the
4164 * state back from PCODE, so return the refcount to avoid state mismatch errors.
4165 */
4166 return power_well->count;
4167}
4168
4169static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
4170 .sync_hw = tgl_tc_cold_off_power_well_sync_hw,
4171 .enable = tgl_tc_cold_off_power_well_enable,
4172 .disable = tgl_tc_cold_off_power_well_disable,
4173 .is_enabled = tgl_tc_cold_off_power_well_is_enabled,
4174};
4175
4176static const struct i915_power_well_desc tgl_power_wells[] = {
4177 {
4178 .name = "always-on",
4179 .always_on = true,
4180 .domains = POWER_DOMAIN_MASK,
4181 .ops = &i9xx_always_on_power_well_ops,
4182 .id = DISP_PW_ID_NONE,
4183 },
4184 {
4185 .name = "power well 1",
4186 /* Handled by the DMC firmware */
4187 .always_on = true,
4188 .domains = 0,
4189 .ops = &hsw_power_well_ops,
4190 .id = SKL_DISP_PW_1,
4191 {
4192 .hsw.regs = &hsw_power_well_regs,
4193 .hsw.idx = ICL_PW_CTL_IDX_PW_1,
4194 .hsw.has_fuses = true,
4195 },
4196 },
4197 {
4198 .name = "DC off",
4199 .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
4200 .ops = &gen9_dc_off_power_well_ops,
4201 .id = SKL_DISP_DC_OFF,
4202 },
4203 {
4204 .name = "power well 2",
4205 .domains = TGL_PW_2_POWER_DOMAINS,
4206 .ops = &hsw_power_well_ops,
4207 .id = SKL_DISP_PW_2,
4208 {
4209 .hsw.regs = &hsw_power_well_regs,
4210 .hsw.idx = ICL_PW_CTL_IDX_PW_2,
4211 .hsw.has_fuses = true,
4212 },
4213 },
4214 {
4215 .name = "power well 3",
4216 .domains = TGL_PW_3_POWER_DOMAINS,
4217 .ops = &hsw_power_well_ops,
4218 .id = ICL_DISP_PW_3,
4219 {
4220 .hsw.regs = &hsw_power_well_regs,
4221 .hsw.idx = ICL_PW_CTL_IDX_PW_3,
4222 .hsw.irq_pipe_mask = BIT(PIPE_B),
4223 .hsw.has_vga = true,
4224 .hsw.has_fuses = true,
4225 },
4226 },
4227 {
4228 .name = "DDI A IO",
4229 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
4230 .ops = &hsw_power_well_ops,
4231 .id = DISP_PW_ID_NONE,
4232 {
4233 .hsw.regs = &icl_ddi_power_well_regs,
4234 .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4235 }
4236 },
4237 {
4238 .name = "DDI B IO",
4239 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
4240 .ops = &hsw_power_well_ops,
4241 .id = DISP_PW_ID_NONE,
4242 {
4243 .hsw.regs = &icl_ddi_power_well_regs,
4244 .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4245 }
4246 },
4247 {
4248 .name = "DDI C IO",
4249 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
4250 .ops = &hsw_power_well_ops,
4251 .id = DISP_PW_ID_NONE,
4252 {
4253 .hsw.regs = &icl_ddi_power_well_regs,
4254 .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
4255 }
4256 },
4257 {
4258 .name = "DDI IO TC1",
4259 .domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
4260 .ops = &hsw_power_well_ops,
4261 .id = DISP_PW_ID_NONE,
4262 {
4263 .hsw.regs = &icl_ddi_power_well_regs,
4264 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4265 },
4266 },
4267 {
4268 .name = "DDI IO TC2",
4269 .domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
4270 .ops = &hsw_power_well_ops,
4271 .id = DISP_PW_ID_NONE,
4272 {
4273 .hsw.regs = &icl_ddi_power_well_regs,
4274 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4275 },
4276 },
4277 {
4278 .name = "DDI IO TC3",
4279 .domains = TGL_DDI_IO_TC3_POWER_DOMAINS,
4280 .ops = &hsw_power_well_ops,
4281 .id = DISP_PW_ID_NONE,
4282 {
4283 .hsw.regs = &icl_ddi_power_well_regs,
4284 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
4285 },
4286 },
4287 {
4288 .name = "DDI IO TC4",
4289 .domains = TGL_DDI_IO_TC4_POWER_DOMAINS,
4290 .ops = &hsw_power_well_ops,
4291 .id = DISP_PW_ID_NONE,
4292 {
4293 .hsw.regs = &icl_ddi_power_well_regs,
4294 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
4295 },
4296 },
4297 {
4298 .name = "DDI IO TC5",
4299 .domains = TGL_DDI_IO_TC5_POWER_DOMAINS,
4300 .ops = &hsw_power_well_ops,
4301 .id = DISP_PW_ID_NONE,
4302 {
4303 .hsw.regs = &icl_ddi_power_well_regs,
4304 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
4305 },
4306 },
4307 {
4308 .name = "DDI IO TC6",
4309 .domains = TGL_DDI_IO_TC6_POWER_DOMAINS,
4310 .ops = &hsw_power_well_ops,
4311 .id = DISP_PW_ID_NONE,
4312 {
4313 .hsw.regs = &icl_ddi_power_well_regs,
4314 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
4315 },
4316 },
4317 {
4318 .name = "TC cold off",
4319 .domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
4320 .ops = &tgl_tc_cold_off_ops,
4321 .id = TGL_DISP_PW_TC_COLD_OFF,
4322 },
4323 {
4324 .name = "AUX A",
4325 .domains = TGL_AUX_A_IO_POWER_DOMAINS,
4326 .ops = &icl_aux_power_well_ops,
4327 .id = DISP_PW_ID_NONE,
4328 {
4329 .hsw.regs = &icl_aux_power_well_regs,
4330 .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4331 },
4332 },
4333 {
4334 .name = "AUX B",
4335 .domains = TGL_AUX_B_IO_POWER_DOMAINS,
4336 .ops = &icl_aux_power_well_ops,
4337 .id = DISP_PW_ID_NONE,
4338 {
4339 .hsw.regs = &icl_aux_power_well_regs,
4340 .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4341 },
4342 },
4343 {
4344 .name = "AUX C",
4345 .domains = TGL_AUX_C_IO_POWER_DOMAINS,
4346 .ops = &icl_aux_power_well_ops,
4347 .id = DISP_PW_ID_NONE,
4348 {
4349 .hsw.regs = &icl_aux_power_well_regs,
4350 .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
4351 },
4352 },
4353 {
4354 .name = "AUX USBC1",
4355 .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
4356 .ops = &icl_aux_power_well_ops,
4357 .id = DISP_PW_ID_NONE,
4358 {
4359 .hsw.regs = &icl_aux_power_well_regs,
4360 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4361 .hsw.is_tc_tbt = false,
4362 },
4363 },
4364 {
4365 .name = "AUX USBC2",
4366 .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
4367 .ops = &icl_aux_power_well_ops,
4368 .id = DISP_PW_ID_NONE,
4369 {
4370 .hsw.regs = &icl_aux_power_well_regs,
4371 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4372 .hsw.is_tc_tbt = false,
4373 },
4374 },
4375 {
4376 .name = "AUX USBC3",
4377 .domains = TGL_AUX_IO_USBC3_POWER_DOMAINS,
4378 .ops = &icl_aux_power_well_ops,
4379 .id = DISP_PW_ID_NONE,
4380 {
4381 .hsw.regs = &icl_aux_power_well_regs,
4382 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
4383 .hsw.is_tc_tbt = false,
4384 },
4385 },
4386 {
4387 .name = "AUX USBC4",
4388 .domains = TGL_AUX_IO_USBC4_POWER_DOMAINS,
4389 .ops = &icl_aux_power_well_ops,
4390 .id = DISP_PW_ID_NONE,
4391 {
4392 .hsw.regs = &icl_aux_power_well_regs,
4393 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
4394 .hsw.is_tc_tbt = false,
4395 },
4396 },
4397 {
4398 .name = "AUX USBC5",
4399 .domains = TGL_AUX_IO_USBC5_POWER_DOMAINS,
4400 .ops = &icl_aux_power_well_ops,
4401 .id = DISP_PW_ID_NONE,
4402 {
4403 .hsw.regs = &icl_aux_power_well_regs,
4404 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
4405 .hsw.is_tc_tbt = false,
4406 },
4407 },
4408 {
4409 .name = "AUX USBC6",
4410 .domains = TGL_AUX_IO_USBC6_POWER_DOMAINS,
4411 .ops = &icl_aux_power_well_ops,
4412 .id = DISP_PW_ID_NONE,
4413 {
4414 .hsw.regs = &icl_aux_power_well_regs,
4415 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
4416 .hsw.is_tc_tbt = false,
4417 },
4418 },
4419 {
4420 .name = "AUX TBT1",
4421 .domains = TGL_AUX_IO_TBT1_POWER_DOMAINS,
4422 .ops = &icl_aux_power_well_ops,
4423 .id = DISP_PW_ID_NONE,
4424 {
4425 .hsw.regs = &icl_aux_power_well_regs,
4426 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
4427 .hsw.is_tc_tbt = true,
4428 },
4429 },
4430 {
4431 .name = "AUX TBT2",
4432 .domains = TGL_AUX_IO_TBT2_POWER_DOMAINS,
4433 .ops = &icl_aux_power_well_ops,
4434 .id = DISP_PW_ID_NONE,
4435 {
4436 .hsw.regs = &icl_aux_power_well_regs,
4437 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
4438 .hsw.is_tc_tbt = true,
4439 },
4440 },
4441 {
4442 .name = "AUX TBT3",
4443 .domains = TGL_AUX_IO_TBT3_POWER_DOMAINS,
4444 .ops = &icl_aux_power_well_ops,
4445 .id = DISP_PW_ID_NONE,
4446 {
4447 .hsw.regs = &icl_aux_power_well_regs,
4448 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
4449 .hsw.is_tc_tbt = true,
4450 },
4451 },
4452 {
4453 .name = "AUX TBT4",
4454 .domains = TGL_AUX_IO_TBT4_POWER_DOMAINS,
4455 .ops = &icl_aux_power_well_ops,
4456 .id = DISP_PW_ID_NONE,
4457 {
4458 .hsw.regs = &icl_aux_power_well_regs,
4459 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
4460 .hsw.is_tc_tbt = true,
4461 },
4462 },
4463 {
4464 .name = "AUX TBT5",
4465 .domains = TGL_AUX_IO_TBT5_POWER_DOMAINS,
4466 .ops = &icl_aux_power_well_ops,
4467 .id = DISP_PW_ID_NONE,
4468 {
4469 .hsw.regs = &icl_aux_power_well_regs,
4470 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
4471 .hsw.is_tc_tbt = true,
4472 },
4473 },
4474 {
4475 .name = "AUX TBT6",
4476 .domains = TGL_AUX_IO_TBT6_POWER_DOMAINS,
4477 .ops = &icl_aux_power_well_ops,
4478 .id = DISP_PW_ID_NONE,
4479 {
4480 .hsw.regs = &icl_aux_power_well_regs,
4481 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
4482 .hsw.is_tc_tbt = true,
4483 },
4484 },
4485 {
4486 .name = "power well 4",
4487 .domains = TGL_PW_4_POWER_DOMAINS,
4488 .ops = &hsw_power_well_ops,
4489 .id = DISP_PW_ID_NONE,
4490 {
4491 .hsw.regs = &hsw_power_well_regs,
4492 .hsw.idx = ICL_PW_CTL_IDX_PW_4,
4493 .hsw.has_fuses = true,
4494 .hsw.irq_pipe_mask = BIT(PIPE_C),
4495 }
4496 },
4497 {
4498 .name = "power well 5",
4499 .domains = TGL_PW_5_POWER_DOMAINS,
4500 .ops = &hsw_power_well_ops,
4501 .id = DISP_PW_ID_NONE,
4502 {
4503 .hsw.regs = &hsw_power_well_regs,
4504 .hsw.idx = TGL_PW_CTL_IDX_PW_5,
4505 .hsw.has_fuses = true,
4506 .hsw.irq_pipe_mask = BIT(PIPE_D),
4507 },
4508 },
4509};
4510
4511static const struct i915_power_well_desc rkl_power_wells[] = {
4512 {
4513 .name = "always-on",
4514 .always_on = true,
4515 .domains = POWER_DOMAIN_MASK,
4516 .ops = &i9xx_always_on_power_well_ops,
4517 .id = DISP_PW_ID_NONE,
4518 },
4519 {
4520 .name = "power well 1",
4521 /* Handled by the DMC firmware */
4522 .always_on = true,
4523 .domains = 0,
4524 .ops = &hsw_power_well_ops,
4525 .id = SKL_DISP_PW_1,
4526 {
4527 .hsw.regs = &hsw_power_well_regs,
4528 .hsw.idx = ICL_PW_CTL_IDX_PW_1,
4529 .hsw.has_fuses = true,
4530 },
4531 },
4532 {
4533 .name = "DC off",
4534 .domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS,
4535 .ops = &gen9_dc_off_power_well_ops,
4536 .id = SKL_DISP_DC_OFF,
4537 },
4538 {
4539 .name = "power well 3",
4540 .domains = RKL_PW_3_POWER_DOMAINS,
4541 .ops = &hsw_power_well_ops,
4542 .id = ICL_DISP_PW_3,
4543 {
4544 .hsw.regs = &hsw_power_well_regs,
4545 .hsw.idx = ICL_PW_CTL_IDX_PW_3,
4546 .hsw.irq_pipe_mask = BIT(PIPE_B),
4547 .hsw.has_vga = true,
4548 .hsw.has_fuses = true,
4549 },
4550 },
4551 {
4552 .name = "power well 4",
4553 .domains = RKL_PW_4_POWER_DOMAINS,
4554 .ops = &hsw_power_well_ops,
4555 .id = DISP_PW_ID_NONE,
4556 {
4557 .hsw.regs = &hsw_power_well_regs,
4558 .hsw.idx = ICL_PW_CTL_IDX_PW_4,
4559 .hsw.has_fuses = true,
4560 .hsw.irq_pipe_mask = BIT(PIPE_C),
4561 }
4562 },
4563 {
4564 .name = "DDI A IO",
4565 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
4566 .ops = &hsw_power_well_ops,
4567 .id = DISP_PW_ID_NONE,
4568 {
4569 .hsw.regs = &icl_ddi_power_well_regs,
4570 .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4571 }
4572 },
4573 {
4574 .name = "DDI B IO",
4575 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
4576 .ops = &hsw_power_well_ops,
4577 .id = DISP_PW_ID_NONE,
4578 {
4579 .hsw.regs = &icl_ddi_power_well_regs,
4580 .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4581 }
4582 },
4583 {
4584 .name = "DDI IO TC1",
4585 .domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
4586 .ops = &hsw_power_well_ops,
4587 .id = DISP_PW_ID_NONE,
4588 {
4589 .hsw.regs = &icl_ddi_power_well_regs,
4590 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4591 },
4592 },
4593 {
4594 .name = "DDI IO TC2",
4595 .domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
4596 .ops = &hsw_power_well_ops,
4597 .id = DISP_PW_ID_NONE,
4598 {
4599 .hsw.regs = &icl_ddi_power_well_regs,
4600 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4601 },
4602 },
4603 {
4604 .name = "AUX A",
4605 .domains = ICL_AUX_A_IO_POWER_DOMAINS,
4606 .ops = &icl_aux_power_well_ops,
4607 .id = DISP_PW_ID_NONE,
4608 {
4609 .hsw.regs = &icl_aux_power_well_regs,
4610 .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4611 },
4612 },
4613 {
4614 .name = "AUX B",
4615 .domains = ICL_AUX_B_IO_POWER_DOMAINS,
4616 .ops = &icl_aux_power_well_ops,
4617 .id = DISP_PW_ID_NONE,
4618 {
4619 .hsw.regs = &icl_aux_power_well_regs,
4620 .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4621 },
4622 },
4623 {
4624 .name = "AUX USBC1",
4625 .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
4626 .ops = &icl_aux_power_well_ops,
4627 .id = DISP_PW_ID_NONE,
4628 {
4629 .hsw.regs = &icl_aux_power_well_regs,
4630 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4631 },
4632 },
4633 {
4634 .name = "AUX USBC2",
4635 .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
4636 .ops = &icl_aux_power_well_ops,
4637 .id = DISP_PW_ID_NONE,
4638 {
4639 .hsw.regs = &icl_aux_power_well_regs,
4640 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4641 },
4642 },
4643};
4644
4645static const struct i915_power_well_desc xelpd_power_wells[] = {
4646 {
4647 .name = "always-on",
4648 .always_on = true,
4649 .domains = POWER_DOMAIN_MASK,
4650 .ops = &i9xx_always_on_power_well_ops,
4651 .id = DISP_PW_ID_NONE,
4652 },
4653 {
4654 .name = "power well 1",
4655 /* Handled by the DMC firmware */
4656 .always_on = true,
4657 .domains = 0,
4658 .ops = &hsw_power_well_ops,
4659 .id = SKL_DISP_PW_1,
4660 {
4661 .hsw.regs = &hsw_power_well_regs,
4662 .hsw.idx = ICL_PW_CTL_IDX_PW_1,
4663 .hsw.has_fuses = true,
4664 },
4665 },
4666 {
4667 .name = "DC off",
4668 .domains = XELPD_DISPLAY_DC_OFF_POWER_DOMAINS,
4669 .ops = &gen9_dc_off_power_well_ops,
4670 .id = SKL_DISP_DC_OFF,
4671 },
4672 {
4673 .name = "power well 2",
4674 .domains = XELPD_PW_2_POWER_DOMAINS,
4675 .ops = &hsw_power_well_ops,
4676 .id = SKL_DISP_PW_2,
4677 {
4678 .hsw.regs = &hsw_power_well_regs,
4679 .hsw.idx = ICL_PW_CTL_IDX_PW_2,
4680 .hsw.has_vga = true,
4681 .hsw.has_fuses = true,
4682 },
4683 },
4684 {
4685 .name = "power well A",
4686 .domains = XELPD_PW_A_POWER_DOMAINS,
4687 .ops = &hsw_power_well_ops,
4688 .id = DISP_PW_ID_NONE,
4689 {
4690 .hsw.regs = &hsw_power_well_regs,
4691 .hsw.idx = XELPD_PW_CTL_IDX_PW_A,
4692 .hsw.irq_pipe_mask = BIT(PIPE_A),
4693 .hsw.has_fuses = true,
4694 },
4695 },
4696 {
4697 .name = "power well B",
4698 .domains = XELPD_PW_B_POWER_DOMAINS,
4699 .ops = &hsw_power_well_ops,
4700 .id = DISP_PW_ID_NONE,
4701 {
4702 .hsw.regs = &hsw_power_well_regs,
4703 .hsw.idx = XELPD_PW_CTL_IDX_PW_B,
4704 .hsw.irq_pipe_mask = BIT(PIPE_B),
4705 .hsw.has_fuses = true,
4706 },
4707 },
4708 {
4709 .name = "power well C",
4710 .domains = XELPD_PW_C_POWER_DOMAINS,
4711 .ops = &hsw_power_well_ops,
4712 .id = DISP_PW_ID_NONE,
4713 {
4714 .hsw.regs = &hsw_power_well_regs,
4715 .hsw.idx = XELPD_PW_CTL_IDX_PW_C,
4716 .hsw.irq_pipe_mask = BIT(PIPE_C),
4717 .hsw.has_fuses = true,
4718 },
4719 },
4720 {
4721 .name = "power well D",
4722 .domains = XELPD_PW_D_POWER_DOMAINS,
4723 .ops = &hsw_power_well_ops,
4724 .id = DISP_PW_ID_NONE,
4725 {
4726 .hsw.regs = &hsw_power_well_regs,
4727 .hsw.idx = XELPD_PW_CTL_IDX_PW_D,
4728 .hsw.irq_pipe_mask = BIT(PIPE_D),
4729 .hsw.has_fuses = true,
4730 },
4731 },
4732 {
4733 .name = "DDI A IO",
4734 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
4735 .ops = &hsw_power_well_ops,
4736 .id = DISP_PW_ID_NONE,
4737 {
4738 .hsw.regs = &icl_ddi_power_well_regs,
4739 .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4740 }
4741 },
4742 {
4743 .name = "DDI B IO",
4744 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
4745 .ops = &hsw_power_well_ops,
4746 .id = DISP_PW_ID_NONE,
4747 {
4748 .hsw.regs = &icl_ddi_power_well_regs,
4749 .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4750 }
4751 },
4752 {
4753 .name = "DDI C IO",
4754 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
4755 .ops = &hsw_power_well_ops,
4756 .id = DISP_PW_ID_NONE,
4757 {
4758 .hsw.regs = &icl_ddi_power_well_regs,
4759 .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
4760 }
4761 },
4762 {
4763 .name = "DDI IO D_XELPD",
4764 .domains = XELPD_DDI_IO_D_XELPD_POWER_DOMAINS,
4765 .ops = &hsw_power_well_ops,
4766 .id = DISP_PW_ID_NONE,
4767 {
4768 .hsw.regs = &icl_ddi_power_well_regs,
4769 .hsw.idx = XELPD_PW_CTL_IDX_DDI_D,
4770 }
4771 },
4772 {
4773 .name = "DDI IO E_XELPD",
4774 .domains = XELPD_DDI_IO_E_XELPD_POWER_DOMAINS,
4775 .ops = &hsw_power_well_ops,
4776 .id = DISP_PW_ID_NONE,
4777 {
4778 .hsw.regs = &icl_ddi_power_well_regs,
4779 .hsw.idx = XELPD_PW_CTL_IDX_DDI_E,
4780 }
4781 },
4782 {
4783 .name = "DDI IO TC1",
4784 .domains = XELPD_DDI_IO_TC1_POWER_DOMAINS,
4785 .ops = &hsw_power_well_ops,
4786 .id = DISP_PW_ID_NONE,
4787 {
4788 .hsw.regs = &icl_ddi_power_well_regs,
4789 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4790 }
4791 },
4792 {
4793 .name = "DDI IO TC2",
4794 .domains = XELPD_DDI_IO_TC2_POWER_DOMAINS,
4795 .ops = &hsw_power_well_ops,
4796 .id = DISP_PW_ID_NONE,
4797 {
4798 .hsw.regs = &icl_ddi_power_well_regs,
4799 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4800 }
4801 },
4802 {
4803 .name = "DDI IO TC3",
4804 .domains = XELPD_DDI_IO_TC3_POWER_DOMAINS,
4805 .ops = &hsw_power_well_ops,
4806 .id = DISP_PW_ID_NONE,
4807 {
4808 .hsw.regs = &icl_ddi_power_well_regs,
4809 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
4810 }
4811 },
4812 {
4813 .name = "DDI IO TC4",
4814 .domains = XELPD_DDI_IO_TC4_POWER_DOMAINS,
4815 .ops = &hsw_power_well_ops,
4816 .id = DISP_PW_ID_NONE,
4817 {
4818 .hsw.regs = &icl_ddi_power_well_regs,
4819 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
4820 }
4821 },
4822 {
4823 .name = "AUX A",
4824 .domains = ICL_AUX_A_IO_POWER_DOMAINS,
4825 .ops = &icl_aux_power_well_ops,
4826 .id = DISP_PW_ID_NONE,
4827 {
4828 .hsw.regs = &icl_aux_power_well_regs,
4829 .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4830 },
4831 },
4832 {
4833 .name = "AUX B",
4834 .domains = ICL_AUX_B_IO_POWER_DOMAINS,
4835 .ops = &icl_aux_power_well_ops,
4836 .id = DISP_PW_ID_NONE,
4837 {
4838 .hsw.regs = &icl_aux_power_well_regs,
4839 .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4840 },
4841 },
4842 {
4843 .name = "AUX C",
4844 .domains = TGL_AUX_C_IO_POWER_DOMAINS,
4845 .ops = &icl_aux_power_well_ops,
4846 .id = DISP_PW_ID_NONE,
4847 {
4848 .hsw.regs = &icl_aux_power_well_regs,
4849 .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
4850 },
4851 },
4852 {
4853 .name = "AUX D_XELPD",
4854 .domains = XELPD_AUX_IO_D_XELPD_POWER_DOMAINS,
4855 .ops = &icl_aux_power_well_ops,
4856 .id = DISP_PW_ID_NONE,
4857 {
4858 .hsw.regs = &icl_aux_power_well_regs,
4859 .hsw.idx = XELPD_PW_CTL_IDX_AUX_D,
4860 },
4861 },
4862 {
4863 .name = "AUX E_XELPD",
4864 .domains = XELPD_AUX_IO_E_XELPD_POWER_DOMAINS,
4865 .ops = &icl_aux_power_well_ops,
4866 .id = DISP_PW_ID_NONE,
4867 {
4868 .hsw.regs = &icl_aux_power_well_regs,
4869 .hsw.idx = XELPD_PW_CTL_IDX_AUX_E,
4870 },
4871 },
4872 {
4873 .name = "AUX USBC1",
4874 .domains = XELPD_AUX_IO_USBC1_POWER_DOMAINS,
4875 .ops = &icl_aux_power_well_ops,
4876 .id = DISP_PW_ID_NONE,
4877 {
4878 .hsw.regs = &icl_aux_power_well_regs,
4879 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4880 },
4881 },
4882 {
4883 .name = "AUX USBC2",
4884 .domains = XELPD_AUX_IO_USBC2_POWER_DOMAINS,
4885 .ops = &icl_aux_power_well_ops,
4886 .id = DISP_PW_ID_NONE,
4887 {
4888 .hsw.regs = &icl_aux_power_well_regs,
4889 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4890 },
4891 },
4892 {
4893 .name = "AUX USBC3",
4894 .domains = XELPD_AUX_IO_USBC3_POWER_DOMAINS,
4895 .ops = &icl_aux_power_well_ops,
4896 .id = DISP_PW_ID_NONE,
4897 {
4898 .hsw.regs = &icl_aux_power_well_regs,
4899 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
4900 },
4901 },
4902 {
4903 .name = "AUX USBC4",
4904 .domains = XELPD_AUX_IO_USBC4_POWER_DOMAINS,
4905 .ops = &icl_aux_power_well_ops,
4906 .id = DISP_PW_ID_NONE,
4907 {
4908 .hsw.regs = &icl_aux_power_well_regs,
4909 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
4910 },
4911 },
4912 {
4913 .name = "AUX TBT1",
4914 .domains = XELPD_AUX_IO_TBT1_POWER_DOMAINS,
4915 .ops = &icl_aux_power_well_ops,
4916 .id = DISP_PW_ID_NONE,
4917 {
4918 .hsw.regs = &icl_aux_power_well_regs,
4919 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
4920 .hsw.is_tc_tbt = true,
4921 },
4922 },
4923 {
4924 .name = "AUX TBT2",
4925 .domains = XELPD_AUX_IO_TBT2_POWER_DOMAINS,
4926 .ops = &icl_aux_power_well_ops,
4927 .id = DISP_PW_ID_NONE,
4928 {
4929 .hsw.regs = &icl_aux_power_well_regs,
4930 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
4931 .hsw.is_tc_tbt = true,
4932 },
4933 },
4934 {
4935 .name = "AUX TBT3",
4936 .domains = XELPD_AUX_IO_TBT3_POWER_DOMAINS,
4937 .ops = &icl_aux_power_well_ops,
4938 .id = DISP_PW_ID_NONE,
4939 {
4940 .hsw.regs = &icl_aux_power_well_regs,
4941 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
4942 .hsw.is_tc_tbt = true,
4943 },
4944 },
4945 {
4946 .name = "AUX TBT4",
4947 .domains = XELPD_AUX_IO_TBT4_POWER_DOMAINS,
4948 .ops = &icl_aux_power_well_ops,
4949 .id = DISP_PW_ID_NONE,
4950 {
4951 .hsw.regs = &icl_aux_power_well_regs,
4952 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
4953 .hsw.is_tc_tbt = true,
4954 },
4955 },
4956};
4957
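/*
 * Normalize the disable_power_well module parameter: explicit values are
 * clamped to 0/1, while negative values select the default of 1.
 */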
4958static int
4959sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
4960 int disable_power_well)
4961{
4962 if (disable_power_well >= 0)
4963 return !!disable_power_well;
4964
4965 return 1;
4966}
4967
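/*
 * Compute the mask of DC states the driver is allowed to use, based on the
 * platform's maximum DC state and the enable_dc module parameter. DC9 is
 * added separately since it does not depend on the DMC firmware.
 */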
4968static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
4969 int enable_dc)
4970{
4971 u32 mask;
4972 int requested_dc;
4973 int max_dc;
4974
4975 if (!HAS_DISPLAY(dev_priv))
4976 return 0;
4977
4978 if (IS_DG1(dev_priv))
4979 max_dc = 3;
4980 else if (DISPLAY_VER(dev_priv) >= 12)
4981 max_dc = 4;
4982 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4983 max_dc = 1;
4984 else if (DISPLAY_VER(dev_priv) >= 9)
4985 max_dc = 2;
4986 else
4987 max_dc = 0;
4988
4989 /*
4990 * DC9 has a separate HW flow from the rest of the DC states,
4991 * not depending on the DMC firmware. It's needed by system
4992 * suspend/resume, so allow it unconditionally.
4993 */
4994 mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
4995 DISPLAY_VER(dev_priv) >= 11 ?
4996 DC_STATE_EN_DC9 : 0;
4997
4998 if (!dev_priv->params.disable_power_well)
4999 max_dc = 0;
5000
5001 if (enable_dc >= 0 && enable_dc <= max_dc) {
5002 requested_dc = enable_dc;
5003 } else if (enable_dc == -1) {
5004 requested_dc = max_dc;
5005 } else if (enable_dc > max_dc && enable_dc <= 4) {
5006 drm_dbg_kms(&dev_priv->drm,
5007 "Adjusting requested max DC state (%d->%d)\n",
5008 enable_dc, max_dc);
5009 requested_dc = max_dc;
5010 } else {
5011 drm_err(&dev_priv->drm,
5012 "Unexpected value for enable_dc (%d)\n", enable_dc);
5013 requested_dc = max_dc;
5014 }
5015
5016 switch (requested_dc) {
5017 case 4:
5018 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
5019 break;
5020 case 3:
5021 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
5022 break;
5023 case 2:
5024 mask |= DC_STATE_EN_UPTO_DC6;
5025 break;
5026 case 1:
5027 mask |= DC_STATE_EN_UPTO_DC5;
5028 break;
5029 }
5030
5031 drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
5032
5033 return mask;
5034}
5035
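/*
 * Allocate and populate the power well array from a platform descriptor
 * table, skipping wells whose ID is set in @skip_mask and warning if two
 * wells share the same non-NONE ID.
 */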
5036static int
5037__set_power_wells(struct i915_power_domains *power_domains,
5038 const struct i915_power_well_desc *power_well_descs,
5039 int power_well_descs_sz, u64 skip_mask)
5040{
5041 struct drm_i915_private *i915 = container_of(power_domains,
5042 struct drm_i915_private,
5043 power_domains);
5044 u64 power_well_ids = 0;
5045 int power_well_count = 0;
5046 int i, plt_idx = 0;
5047
5048 for (i = 0; i < power_well_descs_sz; i++)
5049 if (!(BIT_ULL(power_well_descs[i].id) & skip_mask))
5050 power_well_count++;
5051
5052 power_domains->power_well_count = power_well_count;
5053 power_domains->power_wells =
5054 kcalloc(power_well_count,
5055 sizeof(*power_domains->power_wells),
5056 GFP_KERNEL);
5057 if (!power_domains->power_wells)
5058 return -ENOMEM;
5059
5060 for (i = 0; i < power_well_descs_sz; i++) {
5061 enum i915_power_well_id id = power_well_descs[i].id;
5062
5063 if (BIT_ULL(id) & skip_mask)
5064 continue;
5065
5066 power_domains->power_wells[plt_idx++].desc =
5067 &power_well_descs[i];
5068
5069 if (id == DISP_PW_ID_NONE)
5070 continue;
5071
5072 drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
5073 drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
5074 power_well_ids |= BIT_ULL(id);
5075 }
5076
5077 return 0;
5078}
5079
5080#define set_power_wells_mask(power_domains, __power_well_descs, skip_mask) \
5081 __set_power_wells(power_domains, __power_well_descs, \
5082 ARRAY_SIZE(__power_well_descs), skip_mask)
5083
5084#define set_power_wells(power_domains, __power_well_descs) \
5085 set_power_wells_mask(power_domains, __power_well_descs, 0)
5086
5087/**
5088 * intel_power_domains_init - initializes the power domain structures
5089 * @dev_priv: i915 device instance
5090 *
5091 * Initializes the power domain structures for @dev_priv depending upon the
5092 * supported platform.
5093 */
5094int intel_power_domains_init(struct drm_i915_private *dev_priv)
5095{
5096 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5097 int err;
5098
5099 dev_priv->params.disable_power_well =
5100 sanitize_disable_power_well_option(dev_priv,
5101 dev_priv->params.disable_power_well);
5102 dev_priv->dmc.allowed_dc_mask =
5103 get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
5104
5105 dev_priv->dmc.target_dc_state =
5106 sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
5107
5108 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
5109
5110 mutex_init(&power_domains->lock);
5111
5112 INIT_DELAYED_WORK(&power_domains->async_put_work,
5113 intel_display_power_put_async_work);
5114
5115 /*
5116 * The enabling order is from lower to higher indexed wells;
5117 * the disabling order is the reverse.
5118 */
5119 if (!HAS_DISPLAY(dev_priv)) {
5120 power_domains->power_well_count = 0;
5121 err = 0;
5122 } else if (DISPLAY_VER(dev_priv) >= 13) {
5123 err = set_power_wells(power_domains, xelpd_power_wells);
5124 } else if (IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv)) {
5125 err = set_power_wells_mask(power_domains, tgl_power_wells,
5126 BIT_ULL(TGL_DISP_PW_TC_COLD_OFF));
5127 } else if (IS_ROCKETLAKE(dev_priv)) {
5128 err = set_power_wells(power_domains, rkl_power_wells);
5129 } else if (DISPLAY_VER(dev_priv) == 12) {
5130 err = set_power_wells(power_domains, tgl_power_wells);
5131 } else if (DISPLAY_VER(dev_priv) == 11) {
5132 err = set_power_wells(power_domains, icl_power_wells);
5133 } else if (IS_CNL_WITH_PORT_F(dev_priv)) {
5134 err = set_power_wells(power_domains, cnl_power_wells);
5135 } else if (IS_CANNONLAKE(dev_priv)) {
5136 err = set_power_wells_mask(power_domains, cnl_power_wells,
5137 BIT_ULL(CNL_DISP_PW_DDI_F_IO) |
5138 BIT_ULL(CNL_DISP_PW_DDI_F_AUX));
5139 } else if (IS_GEMINILAKE(dev_priv)) {
5140 err = set_power_wells(power_domains, glk_power_wells);
5141 } else if (IS_BROXTON(dev_priv)) {
5142 err = set_power_wells(power_domains, bxt_power_wells);
5143 } else if (DISPLAY_VER(dev_priv) == 9) {
5144 err = set_power_wells(power_domains, skl_power_wells);
5145 } else if (IS_CHERRYVIEW(dev_priv)) {
5146 err = set_power_wells(power_domains, chv_power_wells);
5147 } else if (IS_BROADWELL(dev_priv)) {
5148 err = set_power_wells(power_domains, bdw_power_wells);
5149 } else if (IS_HASWELL(dev_priv)) {
5150 err = set_power_wells(power_domains, hsw_power_wells);
5151 } else if (IS_VALLEYVIEW(dev_priv)) {
5152 err = set_power_wells(power_domains, vlv_power_wells);
5153 } else if (IS_I830(dev_priv)) {
5154 err = set_power_wells(power_domains, i830_power_wells);
5155 } else {
5156 err = set_power_wells(power_domains, i9xx_always_on_power_well);
5157 }
5158
5159 return err;
5160}
5161
5162/**
5163 * intel_power_domains_cleanup - clean up power domains resources
5164 * @dev_priv: i915 device instance
5165 *
5166 * Release any resources acquired by intel_power_domains_init()
5167 */
5168void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
5169{
5170 kfree(dev_priv->power_domains.power_wells);
5171}
5172
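/*
 * Sync each power well with the hardware: call its ->sync_hw() hook and
 * cache the current hardware enabled state.
 */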
5173static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
5174{
5175 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5176 struct i915_power_well *power_well;
5177
5178 mutex_lock(&power_domains->lock);
5179 for_each_power_well(dev_priv, power_well) {
5180 power_well->desc->ops->sync_hw(dev_priv, power_well);
5181 power_well->hw_enabled =
5182 power_well->desc->ops->is_enabled(dev_priv, power_well);
5183 }
5184 mutex_unlock(&power_domains->lock);
5185}
5186
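/*
 * Request power for a single DBuf slice and warn if the hardware does not
 * reach the requested state within a short delay.
 */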
5187static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
5188 enum dbuf_slice slice, bool enable)
5189{
5190 i915_reg_t reg = DBUF_CTL_S(slice);
5191 bool state;
5192
5193 intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
5194 enable ? DBUF_POWER_REQUEST : 0);
5195 intel_de_posting_read(dev_priv, reg);
5196 udelay(10);
5197
5198 state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
5199 drm_WARN(&dev_priv->drm, enable != state,
5200 "DBuf slice %d power %s timeout!\n",
5201 slice, enabledisable(enable));
5202}
5203
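/*
 * Power the DBuf slices so that exactly @req_slices end up enabled, holding
 * the power domains lock to avoid racing with DC-off transitions.
 */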
5204void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
5205 u8 req_slices)
5206{
5207 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5208 u8 slice_mask = INTEL_INFO(dev_priv)->dbuf.slice_mask;
5209 enum dbuf_slice slice;
5210
5211 drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
5212 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
5213 req_slices, slice_mask);
5214
5215 drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
5216 req_slices);
5217
5218 /*
5219 * This might run in parallel with gen9_dc_off_power_well_enable
5220 * being called from intel_dp_detect, for instance, which can trigger
5221 * an assertion due to a race condition: gen9_assert_dbuf_enabled
5222 * might preempt this after the registers have been updated but
5223 * before dev_priv->dbuf.enabled_slices has been.
5224 */
5225 mutex_lock(&power_domains->lock);
5226
5227 for_each_dbuf_slice(dev_priv, slice)
5228 gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
5229
5230 dev_priv->dbuf.enabled_slices = req_slices;
5231
5232 mutex_unlock(&power_domains->lock);
5233}
5234
5235static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
5236{
5237 dev_priv->dbuf.enabled_slices =
5238 intel_enabled_dbuf_slices_mask(dev_priv);
5239
5240 /*
5241 * Just power up at least one slice; we will figure out later
5242 * which slices we have and which ones we actually need.
5243 */
5244 gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
5245 dev_priv->dbuf.enabled_slices);
5246}
5247
5248static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
5249{
5250 gen9_dbuf_slices_update(dev_priv, 0);
5251}
5252
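/* Program the DBuf tracker state service value for each slice; not needed on ADL-P. */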
5253static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
5254{
5255 enum dbuf_slice slice;
5256
5257 if (IS_ALDERLAKE_P(dev_priv))
5258 return;
5259
5260 for_each_dbuf_slice(dev_priv, slice)
5261 intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
5262 DBUF_TRACKER_STATE_SERVICE_MASK,
5263 DBUF_TRACKER_STATE_SERVICE(8));
5264}
5265
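/*
 * Program the MBus ABOX credit values for every ABOX instance in the
 * platform's abox mask; skipped on ADL-P.
 */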
5266static void icl_mbus_init(struct drm_i915_private *dev_priv)
5267{
5268 unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask;
5269 u32 mask, val, i;
5270
5271 if (IS_ALDERLAKE_P(dev_priv))
5272 return;
5273
5274 mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
5275 MBUS_ABOX_BT_CREDIT_POOL2_MASK |
5276 MBUS_ABOX_B_CREDIT_MASK |
5277 MBUS_ABOX_BW_CREDIT_MASK;
5278 val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
5279 MBUS_ABOX_BT_CREDIT_POOL2(16) |
5280 MBUS_ABOX_B_CREDIT(1) |
5281 MBUS_ABOX_BW_CREDIT(1);
5282
5283 /*
5284 * gen12 platforms that use abox1 and abox2 for pixel data reads still
5285 * expect us to program the abox_ctl0 register as well, even though
5286 * we don't have to program other instance-0 registers like BW_BUDDY.
5287 */
5288 if (DISPLAY_VER(dev_priv) == 12)
5289 abox_regs |= BIT(0);
5290
5291 for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
5292 intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
5293}
5294
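/* Sanity check that the BIOS left LCPLL configured as the CDCLK source. */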
5295static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
5296{
5297 u32 val = intel_de_read(dev_priv, LCPLL_CTL);
5298
5299 /*
5300 * The LCPLL register should be turned on by the BIOS. For now
5301 * let's just check its state and print errors in case
5302 * something is wrong. Don't even try to turn it on.
5303 */
5304
5305 if (val & LCPLL_CD_SOURCE_FCLK)
5306 drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
5307
5308 if (val & LCPLL_PLL_DISABLE)
5309 drm_err(&dev_priv->drm, "LCPLL is disabled\n");
5310
5311 if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
5312 drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
5313}
5314
5315static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
5316{
5317 struct drm_device *dev = &dev_priv->drm;
5318 struct intel_crtc *crtc;
5319
5320 for_each_intel_crtc(dev, crtc)
5321 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
5322 pipe_name(crtc->pipe));
5323
5324 I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
5325 "Display power well on\n");
5326 I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
5327 "SPLL enabled\n");
5328 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
5329 "WRPLL1 enabled\n");
5330 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
5331 "WRPLL2 enabled\n");
5332 I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
5333 "Panel power on\n");
5334 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
5335 "CPU PWM1 enabled\n");
5336 if (IS_HASWELL(dev_priv))
5337 I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
5338 "CPU PWM2 enabled\n");
5339 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
5340 "PCH PWM1 enabled\n");
5341 I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
5342 "Utility pin enabled\n");
5343 I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
5344 "PCH GTC enabled\n");
5345
5346 /*
5347 * In theory we can still leave IRQs enabled, as long as only the HPD
5348 * interrupts remain enabled. We used to check for that, but since it's
5349 * gen-specific and since we only disable LCPLL after we fully disable
5350 * the interrupts, the check below should be enough.
5351 */
5352 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
5353}
5354
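/*
 * D_COMP is read via MMIO at different offsets on HSW and BDW, and writes
 * on HSW must go through PCODE.
 */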
5355static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
5356{
5357 if (IS_HASWELL(dev_priv))
5358 return intel_de_read(dev_priv, D_COMP_HSW);
5359 else
5360 return intel_de_read(dev_priv, D_COMP_BDW);
5361}
5362
5363static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
5364{
5365 if (IS_HASWELL(dev_priv)) {
5366 if (sandybridge_pcode_write(dev_priv,
5367 GEN6_PCODE_WRITE_D_COMP, val))
5368 drm_dbg_kms(&dev_priv->drm,
5369 "Failed to write to D_COMP\n");
5370 } else {
5371 intel_de_write(dev_priv, D_COMP_BDW, val);
5372 intel_de_posting_read(dev_priv, D_COMP_BDW);
5373 }
5374}
5375
5376/*
5377 * This function implements pieces of two sequences from BSpec:
5378 * - Sequence for display software to disable LCPLL
5379 * - Sequence for display software to allow package C8+
5380 * The steps implemented here are just the steps that actually touch the LCPLL
5381 * register. Callers should take care of disabling all the display engine
5382 * functions, doing the mode unset, fixing interrupts, etc.
5383 */
5384static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
5385 bool switch_to_fclk, bool allow_power_down)
5386{
5387 u32 val;
5388
5389 assert_can_disable_lcpll(dev_priv);
5390
5391 val = intel_de_read(dev_priv, LCPLL_CTL);
5392
5393 if (switch_to_fclk) {
5394 val |= LCPLL_CD_SOURCE_FCLK;
5395 intel_de_write(dev_priv, LCPLL_CTL, val);
5396
5397 if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
5398 LCPLL_CD_SOURCE_FCLK_DONE, 1))
5399 drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
5400
5401 val = intel_de_read(dev_priv, LCPLL_CTL);
5402 }
5403
5404 val |= LCPLL_PLL_DISABLE;
5405 intel_de_write(dev_priv, LCPLL_CTL, val);
5406 intel_de_posting_read(dev_priv, LCPLL_CTL);
5407
5408 if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
5409 drm_err(&dev_priv->drm, "LCPLL still locked\n");
5410
5411 val = hsw_read_dcomp(dev_priv);
5412 val |= D_COMP_COMP_DISABLE;
5413 hsw_write_dcomp(dev_priv, val);
5414 ndelay(100);
5415
5416 if (wait_for((hsw_read_dcomp(dev_priv) &
5417 D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
5418 drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
5419
5420 if (allow_power_down) {
5421 val = intel_de_read(dev_priv, LCPLL_CTL);
5422 val |= LCPLL_POWER_DOWN_ALLOW;
5423 intel_de_write(dev_priv, LCPLL_CTL, val);
5424 intel_de_posting_read(dev_priv, LCPLL_CTL);
5425 }
5426}
5427
5428/*
5429 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
5430 * source.
5431 */
5432static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
5433{
5434 u32 val;
5435
5436 val = intel_de_read(dev_priv, LCPLL_CTL);
5437
5438 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
5439 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
5440 return;
5441
5442 /*
5443 * Make sure we're not in the PC8 state before disabling PC8, otherwise
5444 * we'll hang the machine. To prevent entering PC8, just enable force_wake.
5445 */
5446 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
5447
5448 if (val & LCPLL_POWER_DOWN_ALLOW) {
5449 val &= ~LCPLL_POWER_DOWN_ALLOW;
5450 intel_de_write(dev_priv, LCPLL_CTL, val);
5451 intel_de_posting_read(dev_priv, LCPLL_CTL);
5452 }
5453
5454 val = hsw_read_dcomp(dev_priv);
5455 val |= D_COMP_COMP_FORCE;
5456 val &= ~D_COMP_COMP_DISABLE;
5457 hsw_write_dcomp(dev_priv, val);
5458
5459 val = intel_de_read(dev_priv, LCPLL_CTL);
5460 val &= ~LCPLL_PLL_DISABLE;
5461 intel_de_write(dev_priv, LCPLL_CTL, val);
5462
5463 if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
5464 drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
5465
5466 if (val & LCPLL_CD_SOURCE_FCLK) {
5467 val = intel_de_read(dev_priv, LCPLL_CTL);
5468 val &= ~LCPLL_CD_SOURCE_FCLK;
5469 intel_de_write(dev_priv, LCPLL_CTL, val);
5470
5471 if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
5472 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
5473 drm_err(&dev_priv->drm,
5474 "Switching back to LCPLL failed\n");
5475 }
5476
5477 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
5478
5479 intel_update_cdclk(dev_priv);
5480 intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
5481}
5482
5483/*
5484 * Package states C8 and deeper are really deep PC states that can only be
5485 * reached when all the devices on the system allow it, so even if the graphics
5486 * device allows PC8+, it doesn't mean the system will actually get to these
5487 * states. Our driver only allows PC8+ when going into runtime PM.
5488 *
5489 * The requirements for PC8+ are that all the outputs are disabled, the power
5490 * well is disabled and most interrupts are disabled, and these are also
5491 * requirements for runtime PM. When these conditions are met, we manually do
5492 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
5493 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
5494 * hang the machine.
5495 *
5496 * When we really reach PC8 or deeper states (not just when we allow it) we lose
5497 * the state of some registers, so when we come back from PC8+ we need to
5498 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
5499 * need to take care of the registers kept by RC6. Notice that this happens even
5500 * if we don't put the device in PCI D3 state (which is what currently happens
5501 * because of the runtime PM support).
5502 *
5503 * For more, read "Display Sequences for Package C8" in the hardware
5504 * documentation.
5505 */
5506static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
5507{
5508 u32 val;
5509
5510 drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
5511
5512 if (HAS_PCH_LPT_LP(dev_priv)) {
5513 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
5514 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
5515 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
5516 }
5517
5518 lpt_disable_clkout_dp(dev_priv);
5519 hsw_disable_lcpll(dev_priv, true, true);
5520}
5521
5522static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
5523{
5524 u32 val;
5525
5526 drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
5527
5528 hsw_restore_lcpll(dev_priv);
5529 intel_init_pch_refclk(dev_priv);
5530
5531 if (HAS_PCH_LPT_LP(dev_priv)) {
5532 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
5533 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
5534 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
5535 }
5536}
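
/*
 * Illustrative sketch only (not part of the driver code): hsw_enable_pc8()
 * and hsw_disable_pc8() are meant to bracket runtime suspend and resume on
 * HSW/BDW, roughly
 *
 *	hsw_enable_pc8(i915);	(suspend: clkout off, LCPLL disabled, FCLK)
 *	...
 *	hsw_disable_pc8(i915);	(resume: LCPLL restored, PCH refclk re-init)
 *
 * The real entry points are intel_display_power_suspend()/resume() and the
 * *_suspend_late()/*_resume_early() helpers later in this file.
 */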
5537
5538static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
5539 bool enable)
5540{
5541 i915_reg_t reg;
5542 u32 reset_bits, val;
5543
5544 if (IS_IVYBRIDGE(dev_priv)) {
5545 reg = GEN7_MSG_CTL;
5546 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
5547 } else {
5548 reg = HSW_NDE_RSTWRN_OPT;
5549 reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
5550 }
5551
5552 val = intel_de_read(dev_priv, reg);
5553
5554 if (enable)
5555 val |= reset_bits;
5556 else
5557 val &= ~reset_bits;
5558
5559 intel_de_write(dev_priv, reg, val);
5560}
5561
5562static void skl_display_core_init(struct drm_i915_private *dev_priv,
5563 bool resume)
5564{
5565 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5566 struct i915_power_well *well;
5567
5568 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5569
5570 /* enable PCH reset handshake */
5571 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5572
5573 if (!HAS_DISPLAY(dev_priv))
5574 return;
5575
5576 /* enable PG1 and Misc I/O */
5577 mutex_lock(&power_domains->lock);
5578
5579 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5580 intel_power_well_enable(dev_priv, well);
5581
5582 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
5583 intel_power_well_enable(dev_priv, well);
5584
5585 mutex_unlock(&power_domains->lock);
5586
5587 intel_cdclk_init_hw(dev_priv);
5588
5589 gen9_dbuf_enable(dev_priv);
5590
5591 if (resume && intel_dmc_has_payload(dev_priv))
5592 intel_dmc_load_program(dev_priv);
5593}
5594
5595static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
5596{
5597 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5598 struct i915_power_well *well;
5599
5600 if (!HAS_DISPLAY(dev_priv))
5601 return;
5602
5603 gen9_disable_dc_states(dev_priv);
5604
5605 gen9_dbuf_disable(dev_priv);
5606
5607 intel_cdclk_uninit_hw(dev_priv);
5608
5609 /* The spec doesn't call for removing the reset handshake flag */
5610 /* disable PG1 and Misc I/O */
5611
5612 mutex_lock(&power_domains->lock);
5613
5614 /*
5615 * BSpec says to keep the MISC IO power well enabled here, only
5616 * remove our request for power well 1.
5617	 * Note that even though the driver's request is removed, power well 1
5618 * may stay enabled after this due to DMC's own request on it.
5619 */
5620 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5621 intel_power_well_disable(dev_priv, well);
5622
5623 mutex_unlock(&power_domains->lock);
5624
5625 usleep_range(10, 30); /* 10 us delay per Bspec */
5626}
5627
5628static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
5629{
5630 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5631 struct i915_power_well *well;
5632
5633 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5634
5635 /*
5636 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
5637 * or else the reset will hang because there is no PCH to respond.
5638 * Move the handshake programming to initialization sequence.
5639 * Previously was left up to BIOS.
5640 */
5641 intel_pch_reset_handshake(dev_priv, false);
5642
5643 if (!HAS_DISPLAY(dev_priv))
5644 return;
5645
5646 /* Enable PG1 */
5647 mutex_lock(&power_domains->lock);
5648
5649 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5650 intel_power_well_enable(dev_priv, well);
5651
5652 mutex_unlock(&power_domains->lock);
5653
5654 intel_cdclk_init_hw(dev_priv);
5655
5656 gen9_dbuf_enable(dev_priv);
5657
5658 if (resume && intel_dmc_has_payload(dev_priv))
5659 intel_dmc_load_program(dev_priv);
5660}
5661
5662static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
5663{
5664 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5665 struct i915_power_well *well;
5666
5667 if (!HAS_DISPLAY(dev_priv))
5668 return;
5669
5670 gen9_disable_dc_states(dev_priv);
5671
5672 gen9_dbuf_disable(dev_priv);
5673
5674 intel_cdclk_uninit_hw(dev_priv);
5675
5676 /* The spec doesn't call for removing the reset handshake flag */
5677
5678 /*
5679 * Disable PW1 (PG1).
5680	 * Note that even though the driver's request is removed, power well 1
5681 * may stay enabled after this due to DMC's own request on it.
5682 */
5683 mutex_lock(&power_domains->lock);
5684
5685 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5686 intel_power_well_disable(dev_priv, well);
5687
5688 mutex_unlock(&power_domains->lock);
5689
5690 usleep_range(10, 30); /* 10 us delay per Bspec */
5691}
5692
5693static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
5694{
5695 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5696 struct i915_power_well *well;
5697
5698 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5699
5700 /* 1. Enable PCH Reset Handshake */
5701 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5702
5703 if (!HAS_DISPLAY(dev_priv))
5704 return;
5705
5706 /* 2-3. */
5707 intel_combo_phy_init(dev_priv);
5708
5709 /*
5710 * 4. Enable Power Well 1 (PG1).
5711 * The AUX IO power wells will be enabled on demand.
5712 */
5713 mutex_lock(&power_domains->lock);
5714 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5715 intel_power_well_enable(dev_priv, well);
5716 mutex_unlock(&power_domains->lock);
5717
5718 /* 5. Enable CD clock */
5719 intel_cdclk_init_hw(dev_priv);
5720
5721 /* 6. Enable DBUF */
5722 gen9_dbuf_enable(dev_priv);
5723
5724 if (resume && intel_dmc_has_payload(dev_priv))
5725 intel_dmc_load_program(dev_priv);
5726}
5727
5728static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
5729{
5730 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5731 struct i915_power_well *well;
5732
5733 if (!HAS_DISPLAY(dev_priv))
5734 return;
5735
5736 gen9_disable_dc_states(dev_priv);
5737
5738 /* 1. Disable all display engine functions -> already done */
5739
5740 /* 2. Disable DBUF */
5741 gen9_dbuf_disable(dev_priv);
5742
5743 /* 3. Disable CD clock */
5744 intel_cdclk_uninit_hw(dev_priv);
5745
5746 /*
5747 * 4. Disable Power Well 1 (PG1).
5748 * The AUX IO power wells are toggled on demand, so they are already
5749 * disabled at this point.
5750 */
5751 mutex_lock(&power_domains->lock);
5752 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5753 intel_power_well_disable(dev_priv, well);
5754 mutex_unlock(&power_domains->lock);
5755
5756 usleep_range(10, 30); /* 10 us delay per Bspec */
5757
5758 /* 5. */
5759 intel_combo_phy_uninit(dev_priv);
5760}
5761
5762struct buddy_page_mask {
5763 u32 page_mask;
5764 u8 type;
5765 u8 num_channels;
5766};
5767
5768static const struct buddy_page_mask tgl_buddy_page_masks[] = {
5769 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
5770 { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0xF },
5771 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
5772 { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
5773 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
5774 { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x1E },
5775 { .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
5776 { .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
5777 {}
5778};
5779
5780static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
5781 { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
5782 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 },
5783 { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0x1 },
5784 { .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
5785 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
5786 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 },
5787 { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x3 },
5788 { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
5789 {}
5790};
5791
5792static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
5793{
5794 enum intel_dram_type type = dev_priv->dram_info.type;
5795 u8 num_channels = dev_priv->dram_info.num_channels;
5796 const struct buddy_page_mask *table;
5797 unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask;
5798 int config, i;
5799
5800 if (IS_ALDERLAKE_S(dev_priv) ||
5801 IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0) ||
5802 IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
5803 /* Wa_1409767108:tgl,dg1,adl-s */
5804 table = wa_1409767108_buddy_page_masks;
5805 else
5806 table = tgl_buddy_page_masks;
5807
5808 for (config = 0; table[config].page_mask != 0; config++)
5809 if (table[config].num_channels == num_channels &&
5810 table[config].type == type)
5811 break;
5812
5813 if (table[config].page_mask == 0) {
5814 drm_dbg(&dev_priv->drm,
5815 "Unknown memory configuration; disabling address buddy logic.\n");
5816 for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
5817 intel_de_write(dev_priv, BW_BUDDY_CTL(i),
5818 BW_BUDDY_DISABLE);
5819 } else {
5820 for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
5821 intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
5822 table[config].page_mask);
5823
5824 /* Wa_22010178259:tgl,rkl */
5825 intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
5826 BW_BUDDY_TLB_REQ_TIMER_MASK,
5827 BW_BUDDY_TLB_REQ_TIMER(0x8));
5828 }
5829 }
5830}
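
/*
 * Worked example of the lookup above (illustrative only, values taken from
 * tgl_buddy_page_masks): a 2-channel DDR4 system on a part not affected by
 * Wa_1409767108 resolves to
 *
 *	table[config] = { .num_channels = 2, .type = INTEL_DRAM_DDR4,
 *			  .page_mask = 0x1F };
 *
 * so each ABOX instance gets BW_BUDDY_PAGE_MASK = 0x1F plus the TLB request
 * timer workaround, while an unmatched configuration stops at the zeroed
 * sentinel entry and programs BW_BUDDY_DISABLE instead.
 */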
5831
5832static void icl_display_core_init(struct drm_i915_private *dev_priv,
5833 bool resume)
5834{
5835 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5836 struct i915_power_well *well;
5837 u32 val;
5838
5839 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5840
5841 /* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
5842 if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP &&
5843 INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
5844 intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
5845 PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
5846
5847 /* 1. Enable PCH reset handshake. */
5848 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5849
5850 if (!HAS_DISPLAY(dev_priv))
5851 return;
5852
5853 /* 2. Initialize all combo phys */
5854 intel_combo_phy_init(dev_priv);
5855
5856 /*
5857 * 3. Enable Power Well 1 (PG1).
5858 * The AUX IO power wells will be enabled on demand.
5859 */
5860 mutex_lock(&power_domains->lock);
5861 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5862 intel_power_well_enable(dev_priv, well);
5863 mutex_unlock(&power_domains->lock);
5864
5865 /* 4. Enable CDCLK. */
5866 intel_cdclk_init_hw(dev_priv);
5867
5868 if (DISPLAY_VER(dev_priv) >= 12)
5869 gen12_dbuf_slices_config(dev_priv);
5870
5871 /* 5. Enable DBUF. */
5872 gen9_dbuf_enable(dev_priv);
5873
5874 /* 6. Setup MBUS. */
5875 icl_mbus_init(dev_priv);
5876
5877 /* 7. Program arbiter BW_BUDDY registers */
5878 if (DISPLAY_VER(dev_priv) >= 12)
5879 tgl_bw_buddy_init(dev_priv);
5880
5881 if (resume && intel_dmc_has_payload(dev_priv))
5882 intel_dmc_load_program(dev_priv);
5883
5884 /* Wa_14011508470 */
5885 if (DISPLAY_VER(dev_priv) == 12) {
5886 val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
5887 DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
5888 intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
5889 }
5890
5891 /* Wa_14011503030:xelpd */
5892 if (DISPLAY_VER(dev_priv) >= 13)
5893 intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
5894}
5895
5896static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
5897{
5898 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5899 struct i915_power_well *well;
5900
5901 if (!HAS_DISPLAY(dev_priv))
5902 return;
5903
5904 gen9_disable_dc_states(dev_priv);
5905
5906 /* 1. Disable all display engine functions -> already done */
5907
5908 /* 2. Disable DBUF */
5909 gen9_dbuf_disable(dev_priv);
5910
5911 /* 3. Disable CD clock */
5912 intel_cdclk_uninit_hw(dev_priv);
5913
5914 /*
5915 * 4. Disable Power Well 1 (PG1).
5916 * The AUX IO power wells are toggled on demand, so they are already
5917 * disabled at this point.
5918 */
5919 mutex_lock(&power_domains->lock);
5920 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5921 intel_power_well_disable(dev_priv, well);
5922 mutex_unlock(&power_domains->lock);
5923
5924 /* 5. */
5925 intel_combo_phy_uninit(dev_priv);
5926}
5927
5928static void chv_phy_control_init(struct drm_i915_private *dev_priv)
5929{
5930 struct i915_power_well *cmn_bc =
5931 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5932 struct i915_power_well *cmn_d =
5933 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
5934
5935 /*
5936 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
5937 * workaround never ever read DISPLAY_PHY_CONTROL, and
5938 * instead maintain a shadow copy ourselves. Use the actual
5939 * power well state and lane status to reconstruct the
5940 * expected initial value.
5941 */
5942 dev_priv->chv_phy_control =
5943 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
5944 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
5945 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
5946 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
5947 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
5948
5949 /*
5950 * If all lanes are disabled we leave the override disabled
5951 * with all power down bits cleared to match the state we
5952 * would use after disabling the port. Otherwise enable the
5953	 * override and set the lane powerdown bits according to the
5954 * current lane status.
5955 */
5956 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
5957 u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
5958 unsigned int mask;
5959
5960 mask = status & DPLL_PORTB_READY_MASK;
5961 if (mask == 0xf)
5962 mask = 0x0;
5963 else
5964 dev_priv->chv_phy_control |=
5965 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
5966
5967 dev_priv->chv_phy_control |=
5968 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
5969
5970 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
5971 if (mask == 0xf)
5972 mask = 0x0;
5973 else
5974 dev_priv->chv_phy_control |=
5975 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
5976
5977 dev_priv->chv_phy_control |=
5978 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
5979
5980 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
5981
5982 dev_priv->chv_phy_assert[DPIO_PHY0] = false;
5983 } else {
5984 dev_priv->chv_phy_assert[DPIO_PHY0] = true;
5985 }
5986
5987 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
5988 u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
5989 unsigned int mask;
5990
5991 mask = status & DPLL_PORTD_READY_MASK;
5992
5993 if (mask == 0xf)
5994 mask = 0x0;
5995 else
5996 dev_priv->chv_phy_control |=
5997 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
5998
5999 dev_priv->chv_phy_control |=
6000 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
6001
6002 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
6003
6004 dev_priv->chv_phy_assert[DPIO_PHY1] = false;
6005 } else {
6006 dev_priv->chv_phy_assert[DPIO_PHY1] = true;
6007 }
6008
6009 drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
6010 dev_priv->chv_phy_control);
6011
6012 /* Defer application of initial phy_control to enabling the powerwell */
6013}
6014
6015static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
6016{
6017 struct i915_power_well *cmn =
6018 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
6019 struct i915_power_well *disp2d =
6020 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
6021
6022 /* If the display might already be active, skip this */
6023 if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
6024 disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
6025 intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
6026 return;
6027
6028 drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
6029
6030 /* cmnlane needs DPLL registers */
6031 disp2d->desc->ops->enable(dev_priv, disp2d);
6032
6033 /*
6034 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
6035 * Need to assert and de-assert PHY SB reset by gating the
6036 * common lane power, then un-gating it.
6037 * Simply ungating isn't enough to reset the PHY enough to get
6038 * ports and lanes running.
6039 */
6040 cmn->desc->ops->disable(dev_priv, cmn);
6041}
6042
6043static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
6044{
6045 bool ret;
6046
6047 vlv_punit_get(dev_priv);
6048 ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
6049 vlv_punit_put(dev_priv);
6050
6051 return ret;
6052}
6053
6054static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
6055{
6056 drm_WARN(&dev_priv->drm,
6057 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
6058 "VED not power gated\n");
6059}
6060
6061static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
6062{
6063 static const struct pci_device_id isp_ids[] = {
6064 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
6065 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
6066 {}
6067 };
6068
6069 drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
6070 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
6071 "ISP not power gated\n");
6072}
6073
6074static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
6075
6076/**
6077 * intel_power_domains_init_hw - initialize hardware power domain state
6078 * @i915: i915 device instance
6079 * @resume: Called from resume code paths or not
6080 *
6081 * This function initializes the hardware power domain state and enables all
6082 * power wells belonging to the INIT power domain. Power wells in other
6083 * domains (and not in the INIT domain) are referenced or disabled by
6084 * intel_modeset_readout_hw_state(). After that the reference count of each
6085 * power well must match its HW enabled state, see
6086 * intel_power_domains_verify_state().
6087 *
6088 * It will return with power domains disabled (to be enabled later by
6089 * intel_power_domains_enable()) and must be paired with
6090 * intel_power_domains_driver_remove().
6091 */
6092void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
6093{
6094 struct i915_power_domains *power_domains = &i915->power_domains;
6095
6096 power_domains->initializing = true;
6097
6098 if (DISPLAY_VER(i915) >= 11) {
6099 icl_display_core_init(i915, resume);
6100 } else if (IS_CANNONLAKE(i915)) {
6101 cnl_display_core_init(i915, resume);
6102 } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
6103 bxt_display_core_init(i915, resume);
6104 } else if (DISPLAY_VER(i915) == 9) {
6105 skl_display_core_init(i915, resume);
6106 } else if (IS_CHERRYVIEW(i915)) {
6107 mutex_lock(&power_domains->lock);
6108 chv_phy_control_init(i915);
6109 mutex_unlock(&power_domains->lock);
6110 assert_isp_power_gated(i915);
6111 } else if (IS_VALLEYVIEW(i915)) {
6112 mutex_lock(&power_domains->lock);
6113 vlv_cmnlane_wa(i915);
6114 mutex_unlock(&power_domains->lock);
6115 assert_ved_power_gated(i915);
6116 assert_isp_power_gated(i915);
6117 } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
6118 hsw_assert_cdclk(i915);
6119 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
6120 } else if (IS_IVYBRIDGE(i915)) {
6121 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
6122 }
6123
6124 /*
6125 * Keep all power wells enabled for any dependent HW access during
6126 * initialization and to make sure we keep BIOS enabled display HW
6127 * resources powered until display HW readout is complete. We drop
6128 * this reference in intel_power_domains_enable().
6129 */
6130 drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
6131 power_domains->init_wakeref =
6132 intel_display_power_get(i915, POWER_DOMAIN_INIT);
6133
6134 /* Disable power well support if the user asked for it. */
6135 if (!i915->params.disable_power_well) {
6136 drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
6137 i915->power_domains.disable_wakeref = intel_display_power_get(i915,
6138 POWER_DOMAIN_INIT);
6139 }
6140 intel_power_domains_sync_hw(i915);
6141
6142 power_domains->initializing = false;
6143}
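
/*
 * Illustrative ordering of the init/enable/disable/remove entry points in
 * this file (a sketch only; the real call sites are in the driver load,
 * modeset readout and unload paths):
 *
 *	intel_power_domains_init_hw(i915, false);
 *	... display HW state readout takes its own power references ...
 *	intel_power_domains_enable(i915);
 *
 *	intel_power_domains_disable(i915);
 *	... modeset cleanup ...
 *	intel_power_domains_driver_remove(i915);
 */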
6144
6145/**
6146 * intel_power_domains_driver_remove - deinitialize hw power domain state
6147 * @i915: i915 device instance
6148 *
6149 * De-initializes the display power domain HW state. It also ensures that the
6150 * device stays powered up so that the driver can be reloaded.
6151 *
6152 * It must be called with power domains already disabled (after a call to
6153 * intel_power_domains_disable()) and must be paired with
6154 * intel_power_domains_init_hw().
6155 */
6156void intel_power_domains_driver_remove(struct drm_i915_private *i915)
6157{
6158 intel_wakeref_t wakeref __maybe_unused =
6159 fetch_and_zero(&i915->power_domains.init_wakeref);
6160
6161 /* Remove the refcount we took to keep power well support disabled. */
6162 if (!i915->params.disable_power_well)
6163 intel_display_power_put(i915, POWER_DOMAIN_INIT,
6164 fetch_and_zero(&i915->power_domains.disable_wakeref));
6165
6166 intel_display_power_flush_work_sync(i915);
6167
6168 intel_power_domains_verify_state(i915);
6169
6170 /* Keep the power well enabled, but cancel its rpm wakeref. */
6171 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
6172}
6173
6174/**
6175 * intel_power_domains_enable - enable toggling of display power wells
6176 * @i915: i915 device instance
6177 *
6178 * Enable the on-demand enabling/disabling of the display power wells. Note that
6179 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
6180 * only at specific points of the display modeset sequence, thus they are not
6181 * affected by the intel_power_domains_enable()/disable() calls. The purpose
6182 * of these functions is to keep the rest of the power wells enabled until the end
6183 * of display HW readout (which will acquire the power references reflecting
6184 * the current HW state).
6185 */
6186void intel_power_domains_enable(struct drm_i915_private *i915)
6187{
6188 intel_wakeref_t wakeref __maybe_unused =
6189 fetch_and_zero(&i915->power_domains.init_wakeref);
6190
6191 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
6192 intel_power_domains_verify_state(i915);
6193}
6194
6195/**
6196 * intel_power_domains_disable - disable toggling of display power wells
6197 * @i915: i915 device instance
6198 *
6199 * Disable the on-demand enabling/disabling of the display power wells. See
6200 * intel_power_domains_enable() for which power wells this call controls.
6201 */
6202void intel_power_domains_disable(struct drm_i915_private *i915)
6203{
6204 struct i915_power_domains *power_domains = &i915->power_domains;
6205
6206 drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
6207 power_domains->init_wakeref =
6208 intel_display_power_get(i915, POWER_DOMAIN_INIT);
6209
6210 intel_power_domains_verify_state(i915);
6211}
6212
6213/**
6214 * intel_power_domains_suspend - suspend power domain state
6215 * @i915: i915 device instance
6216 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
6217 *
6218 * This function prepares the hardware power domain state before entering
6219 * system suspend.
6220 *
6221 * It must be called with power domains already disabled (after a call to
6222 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
6223 */
6224void intel_power_domains_suspend(struct drm_i915_private *i915,
6225 enum i915_drm_suspend_mode suspend_mode)
6226{
6227 struct i915_power_domains *power_domains = &i915->power_domains;
6228 intel_wakeref_t wakeref __maybe_unused =
6229 fetch_and_zero(&power_domains->init_wakeref);
6230
6231 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
6232
6233 /*
6234 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
6235 * support don't manually deinit the power domains. This also means the
6236	 * DMC firmware will stay active; it will power down any HW
6237 * resources as required and also enable deeper system power states
6238 * that would be blocked if the firmware was inactive.
6239 */
6240 if (!(i915->dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
6241 suspend_mode == I915_DRM_SUSPEND_IDLE &&
6242 intel_dmc_has_payload(i915)) {
6243 intel_display_power_flush_work(i915);
6244 intel_power_domains_verify_state(i915);
6245 return;
6246 }
6247
6248 /*
6249 * Even if power well support was disabled we still want to disable
6250 * power wells if power domains must be deinitialized for suspend.
6251 */
6252 if (!i915->params.disable_power_well)
6253 intel_display_power_put(i915, POWER_DOMAIN_INIT,
6254 fetch_and_zero(&i915->power_domains.disable_wakeref));
6255
6256 intel_display_power_flush_work(i915);
6257 intel_power_domains_verify_state(i915);
6258
6259 if (DISPLAY_VER(i915) >= 11)
6260 icl_display_core_uninit(i915);
6261 else if (IS_CANNONLAKE(i915))
6262 cnl_display_core_uninit(i915);
6263 else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
6264 bxt_display_core_uninit(i915);
6265 else if (DISPLAY_VER(i915) == 9)
6266 skl_display_core_uninit(i915);
6267
6268 power_domains->display_core_suspended = true;
6269}
6270
6271/**
6272 * intel_power_domains_resume - resume power domain state
6273 * @i915: i915 device instance
6274 *
6275 * This function resumes the hardware power domain state during system resume.
6276 *
6277 * It will return with power domain support disabled (to be enabled later by
6278 * intel_power_domains_enable()) and must be paired with
6279 * intel_power_domains_suspend().
6280 */
6281void intel_power_domains_resume(struct drm_i915_private *i915)
6282{
6283 struct i915_power_domains *power_domains = &i915->power_domains;
6284
6285 if (power_domains->display_core_suspended) {
6286 intel_power_domains_init_hw(i915, true);
6287 power_domains->display_core_suspended = false;
6288 } else {
6289 drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
6290 power_domains->init_wakeref =
6291 intel_display_power_get(i915, POWER_DOMAIN_INIT);
6292 }
6293
6294 intel_power_domains_verify_state(i915);
6295}
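
/*
 * Sketch of the suspend/resume pairing documented above (illustrative only;
 * the actual calls come from the driver's system PM callbacks):
 *
 *	intel_power_domains_disable(i915);
 *	intel_power_domains_suspend(i915, I915_DRM_SUSPEND_IDLE);
 *	...
 *	intel_power_domains_resume(i915);
 *	intel_power_domains_enable(i915);
 */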
6296
6297#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
6298
6299static void intel_power_domains_dump_info(struct drm_i915_private *i915)
6300{
6301 struct i915_power_domains *power_domains = &i915->power_domains;
6302 struct i915_power_well *power_well;
6303
6304 for_each_power_well(i915, power_well) {
6305 enum intel_display_power_domain domain;
6306
6307 drm_dbg(&i915->drm, "%-25s %d\n",
6308 power_well->desc->name, power_well->count);
6309
6310 for_each_power_domain(domain, power_well->desc->domains)
6311 drm_dbg(&i915->drm, " %-23s %d\n",
6312 intel_display_power_domain_str(domain),
6313 power_domains->domain_use_count[domain]);
6314 }
6315}
6316
6317/**
6318 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
6319 * @i915: i915 device instance
6320 *
6321 * Verify if the reference count of each power well matches its HW enabled
6322 * state and the total refcount of the domains it belongs to. This must be
6323 * called after modeset HW state sanitization, which is responsible for
6324 * acquiring reference counts for any power wells in use and disabling the
6325 * ones left on by BIOS but not required by any active output.
6326 */
6327static void intel_power_domains_verify_state(struct drm_i915_private *i915)
6328{
6329 struct i915_power_domains *power_domains = &i915->power_domains;
6330 struct i915_power_well *power_well;
6331 bool dump_domain_info;
6332
6333 mutex_lock(&power_domains->lock);
6334
6335 verify_async_put_domains_state(power_domains);
6336
6337 dump_domain_info = false;
6338 for_each_power_well(i915, power_well) {
6339 enum intel_display_power_domain domain;
6340 int domains_count;
6341 bool enabled;
6342
6343 enabled = power_well->desc->ops->is_enabled(i915, power_well);
6344 if ((power_well->count || power_well->desc->always_on) !=
6345 enabled)
6346 drm_err(&i915->drm,
6347 "power well %s state mismatch (refcount %d/enabled %d)",
6348 power_well->desc->name,
6349 power_well->count, enabled);
6350
6351 domains_count = 0;
6352 for_each_power_domain(domain, power_well->desc->domains)
6353 domains_count += power_domains->domain_use_count[domain];
6354
6355 if (power_well->count != domains_count) {
6356 drm_err(&i915->drm,
6357 "power well %s refcount/domain refcount mismatch "
6358 "(refcount %d/domains refcount %d)\n",
6359 power_well->desc->name, power_well->count,
6360 domains_count);
6361 dump_domain_info = true;
6362 }
6363 }
6364
6365 if (dump_domain_info) {
6366 static bool dumped;
6367
6368 if (!dumped) {
6369 intel_power_domains_dump_info(i915);
6370 dumped = true;
6371 }
6372 }
6373
6374 mutex_unlock(&power_domains->lock);
6375}
6376
6377#else
6378
6379static void intel_power_domains_verify_state(struct drm_i915_private *i915)
6380{
6381}
6382
6383#endif
6384
6385void intel_display_power_suspend_late(struct drm_i915_private *i915)
6386{
6387 if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
6388 IS_BROXTON(i915)) {
6389 bxt_enable_dc9(i915);
6390 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
6391 hsw_enable_pc8(i915);
6392 }
6393
6394 /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
6395 if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
6396 intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
6397}
6398
6399void intel_display_power_resume_early(struct drm_i915_private *i915)
6400{
6401 if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
6402 IS_BROXTON(i915)) {
6403 gen9_sanitize_dc_state(i915);
6404 bxt_disable_dc9(i915);
6405 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
6406 hsw_disable_pc8(i915);
6407 }
6408
6409 /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
6410 if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
6411 intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
6412}
6413
6414void intel_display_power_suspend(struct drm_i915_private *i915)
6415{
6416 if (DISPLAY_VER(i915) >= 11) {
6417 icl_display_core_uninit(i915);
6418 bxt_enable_dc9(i915);
6419 } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
6420 bxt_display_core_uninit(i915);
6421 bxt_enable_dc9(i915);
6422 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
6423 hsw_enable_pc8(i915);
6424 }
6425}
6426
6427void intel_display_power_resume(struct drm_i915_private *i915)
6428{
6429 if (DISPLAY_VER(i915) >= 11) {
6430 bxt_disable_dc9(i915);
6431 icl_display_core_init(i915, true);
6432 if (intel_dmc_has_payload(i915)) {
6433 if (i915->dmc.allowed_dc_mask &
6434 DC_STATE_EN_UPTO_DC6)
6435 skl_enable_dc6(i915);
6436 else if (i915->dmc.allowed_dc_mask &
6437 DC_STATE_EN_UPTO_DC5)
6438 gen9_enable_dc5(i915);
6439 }
6440 } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
6441 bxt_disable_dc9(i915);
6442 bxt_display_core_init(i915, true);
6443 if (intel_dmc_has_payload(i915) &&
6444 (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
6445 gen9_enable_dc5(i915);
6446 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
6447 hsw_disable_pc8(i915);
6448 }
6449}
1/* SPDX-License-Identifier: MIT */
2/*
3 * Copyright © 2019 Intel Corporation
4 */
5
6#include <linux/string_helpers.h>
7
8#include "i915_drv.h"
9#include "i915_irq.h"
10#include "i915_reg.h"
11#include "intel_backlight_regs.h"
12#include "intel_cdclk.h"
13#include "intel_clock_gating.h"
14#include "intel_combo_phy.h"
15#include "intel_de.h"
16#include "intel_display_power.h"
17#include "intel_display_power_map.h"
18#include "intel_display_power_well.h"
19#include "intel_display_types.h"
20#include "intel_dmc.h"
21#include "intel_mchbar_regs.h"
22#include "intel_pch_refclk.h"
23#include "intel_pcode.h"
24#include "intel_pmdemand.h"
25#include "intel_pps_regs.h"
26#include "intel_snps_phy.h"
27#include "skl_watermark.h"
28#include "skl_watermark_regs.h"
29#include "vlv_sideband.h"
30
31#define for_each_power_domain_well(__dev_priv, __power_well, __domain) \
32 for_each_power_well(__dev_priv, __power_well) \
33 for_each_if(test_bit((__domain), (__power_well)->domains.bits))
34
35#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain) \
36 for_each_power_well_reverse(__dev_priv, __power_well) \
37 for_each_if(test_bit((__domain), (__power_well)->domains.bits))
38
39static const char *
40intel_display_power_domain_str(enum intel_display_power_domain domain)
41{
42 switch (domain) {
43 case POWER_DOMAIN_DISPLAY_CORE:
44 return "DISPLAY_CORE";
45 case POWER_DOMAIN_PIPE_A:
46 return "PIPE_A";
47 case POWER_DOMAIN_PIPE_B:
48 return "PIPE_B";
49 case POWER_DOMAIN_PIPE_C:
50 return "PIPE_C";
51 case POWER_DOMAIN_PIPE_D:
52 return "PIPE_D";
53 case POWER_DOMAIN_PIPE_PANEL_FITTER_A:
54 return "PIPE_PANEL_FITTER_A";
55 case POWER_DOMAIN_PIPE_PANEL_FITTER_B:
56 return "PIPE_PANEL_FITTER_B";
57 case POWER_DOMAIN_PIPE_PANEL_FITTER_C:
58 return "PIPE_PANEL_FITTER_C";
59 case POWER_DOMAIN_PIPE_PANEL_FITTER_D:
60 return "PIPE_PANEL_FITTER_D";
61 case POWER_DOMAIN_TRANSCODER_A:
62 return "TRANSCODER_A";
63 case POWER_DOMAIN_TRANSCODER_B:
64 return "TRANSCODER_B";
65 case POWER_DOMAIN_TRANSCODER_C:
66 return "TRANSCODER_C";
67 case POWER_DOMAIN_TRANSCODER_D:
68 return "TRANSCODER_D";
69 case POWER_DOMAIN_TRANSCODER_EDP:
70 return "TRANSCODER_EDP";
71 case POWER_DOMAIN_TRANSCODER_DSI_A:
72 return "TRANSCODER_DSI_A";
73 case POWER_DOMAIN_TRANSCODER_DSI_C:
74 return "TRANSCODER_DSI_C";
75 case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
76 return "TRANSCODER_VDSC_PW2";
77 case POWER_DOMAIN_PORT_DDI_LANES_A:
78 return "PORT_DDI_LANES_A";
79 case POWER_DOMAIN_PORT_DDI_LANES_B:
80 return "PORT_DDI_LANES_B";
81 case POWER_DOMAIN_PORT_DDI_LANES_C:
82 return "PORT_DDI_LANES_C";
83 case POWER_DOMAIN_PORT_DDI_LANES_D:
84 return "PORT_DDI_LANES_D";
85 case POWER_DOMAIN_PORT_DDI_LANES_E:
86 return "PORT_DDI_LANES_E";
87 case POWER_DOMAIN_PORT_DDI_LANES_F:
88 return "PORT_DDI_LANES_F";
89 case POWER_DOMAIN_PORT_DDI_LANES_TC1:
90 return "PORT_DDI_LANES_TC1";
91 case POWER_DOMAIN_PORT_DDI_LANES_TC2:
92 return "PORT_DDI_LANES_TC2";
93 case POWER_DOMAIN_PORT_DDI_LANES_TC3:
94 return "PORT_DDI_LANES_TC3";
95 case POWER_DOMAIN_PORT_DDI_LANES_TC4:
96 return "PORT_DDI_LANES_TC4";
97 case POWER_DOMAIN_PORT_DDI_LANES_TC5:
98 return "PORT_DDI_LANES_TC5";
99 case POWER_DOMAIN_PORT_DDI_LANES_TC6:
100 return "PORT_DDI_LANES_TC6";
101 case POWER_DOMAIN_PORT_DDI_IO_A:
102 return "PORT_DDI_IO_A";
103 case POWER_DOMAIN_PORT_DDI_IO_B:
104 return "PORT_DDI_IO_B";
105 case POWER_DOMAIN_PORT_DDI_IO_C:
106 return "PORT_DDI_IO_C";
107 case POWER_DOMAIN_PORT_DDI_IO_D:
108 return "PORT_DDI_IO_D";
109 case POWER_DOMAIN_PORT_DDI_IO_E:
110 return "PORT_DDI_IO_E";
111 case POWER_DOMAIN_PORT_DDI_IO_F:
112 return "PORT_DDI_IO_F";
113 case POWER_DOMAIN_PORT_DDI_IO_TC1:
114 return "PORT_DDI_IO_TC1";
115 case POWER_DOMAIN_PORT_DDI_IO_TC2:
116 return "PORT_DDI_IO_TC2";
117 case POWER_DOMAIN_PORT_DDI_IO_TC3:
118 return "PORT_DDI_IO_TC3";
119 case POWER_DOMAIN_PORT_DDI_IO_TC4:
120 return "PORT_DDI_IO_TC4";
121 case POWER_DOMAIN_PORT_DDI_IO_TC5:
122 return "PORT_DDI_IO_TC5";
123 case POWER_DOMAIN_PORT_DDI_IO_TC6:
124 return "PORT_DDI_IO_TC6";
125 case POWER_DOMAIN_PORT_DSI:
126 return "PORT_DSI";
127 case POWER_DOMAIN_PORT_CRT:
128 return "PORT_CRT";
129 case POWER_DOMAIN_PORT_OTHER:
130 return "PORT_OTHER";
131 case POWER_DOMAIN_VGA:
132 return "VGA";
133 case POWER_DOMAIN_AUDIO_MMIO:
134 return "AUDIO_MMIO";
135 case POWER_DOMAIN_AUDIO_PLAYBACK:
136 return "AUDIO_PLAYBACK";
137 case POWER_DOMAIN_AUX_IO_A:
138 return "AUX_IO_A";
139 case POWER_DOMAIN_AUX_IO_B:
140 return "AUX_IO_B";
141 case POWER_DOMAIN_AUX_IO_C:
142 return "AUX_IO_C";
143 case POWER_DOMAIN_AUX_IO_D:
144 return "AUX_IO_D";
145 case POWER_DOMAIN_AUX_IO_E:
146 return "AUX_IO_E";
147 case POWER_DOMAIN_AUX_IO_F:
148 return "AUX_IO_F";
149 case POWER_DOMAIN_AUX_A:
150 return "AUX_A";
151 case POWER_DOMAIN_AUX_B:
152 return "AUX_B";
153 case POWER_DOMAIN_AUX_C:
154 return "AUX_C";
155 case POWER_DOMAIN_AUX_D:
156 return "AUX_D";
157 case POWER_DOMAIN_AUX_E:
158 return "AUX_E";
159 case POWER_DOMAIN_AUX_F:
160 return "AUX_F";
161 case POWER_DOMAIN_AUX_USBC1:
162 return "AUX_USBC1";
163 case POWER_DOMAIN_AUX_USBC2:
164 return "AUX_USBC2";
165 case POWER_DOMAIN_AUX_USBC3:
166 return "AUX_USBC3";
167 case POWER_DOMAIN_AUX_USBC4:
168 return "AUX_USBC4";
169 case POWER_DOMAIN_AUX_USBC5:
170 return "AUX_USBC5";
171 case POWER_DOMAIN_AUX_USBC6:
172 return "AUX_USBC6";
173 case POWER_DOMAIN_AUX_TBT1:
174 return "AUX_TBT1";
175 case POWER_DOMAIN_AUX_TBT2:
176 return "AUX_TBT2";
177 case POWER_DOMAIN_AUX_TBT3:
178 return "AUX_TBT3";
179 case POWER_DOMAIN_AUX_TBT4:
180 return "AUX_TBT4";
181 case POWER_DOMAIN_AUX_TBT5:
182 return "AUX_TBT5";
183 case POWER_DOMAIN_AUX_TBT6:
184 return "AUX_TBT6";
185 case POWER_DOMAIN_GMBUS:
186 return "GMBUS";
187 case POWER_DOMAIN_INIT:
188 return "INIT";
189 case POWER_DOMAIN_GT_IRQ:
190 return "GT_IRQ";
191 case POWER_DOMAIN_DC_OFF:
192 return "DC_OFF";
193 case POWER_DOMAIN_TC_COLD_OFF:
194 return "TC_COLD_OFF";
195 default:
196 MISSING_CASE(domain);
197 return "?";
198 }
199}
200
201static bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
202 enum intel_display_power_domain domain)
203{
204 struct i915_power_well *power_well;
205 bool is_enabled;
206
207 if (pm_runtime_suspended(dev_priv->drm.dev))
208 return false;
209
210 is_enabled = true;
211
212 for_each_power_domain_well_reverse(dev_priv, power_well, domain) {
213 if (intel_power_well_is_always_on(power_well))
214 continue;
215
216 if (!intel_power_well_is_enabled_cached(power_well)) {
217 is_enabled = false;
218 break;
219 }
220 }
221
222 return is_enabled;
223}
224
225/**
226 * intel_display_power_is_enabled - check for a power domain
227 * @dev_priv: i915 device instance
228 * @domain: power domain to check
229 *
230 * This function can be used to check the hw power domain state. It is mostly
231 * used in hardware state readout functions. Everywhere else code should rely
232 * upon explicit power domain reference counting to ensure that the hardware
233 * block is powered up before accessing it.
234 *
235 * Callers must hold the relevant modesetting locks to ensure that concurrent
236 * threads can't disable the power well while the caller tries to read a few
237 * registers.
238 *
239 * Returns:
240 * True when the power domain is enabled, false otherwise.
241 */
242bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
243 enum intel_display_power_domain domain)
244{
245 struct i915_power_domains *power_domains;
246 bool ret;
247
248 power_domains = &dev_priv->display.power.domains;
249
250 mutex_lock(&power_domains->lock);
251 ret = __intel_display_power_is_enabled(dev_priv, domain);
252 mutex_unlock(&power_domains->lock);
253
254 return ret;
255}
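
/*
 * Example (illustrative only): hardware state readout code checks the domain
 * before trusting register contents,
 *
 *	if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
 *		... read out pipe A registers ...
 *
 * while everywhere else the intel_display_power_get()/put() reference
 * counting below must be used instead.
 */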
256
257static u32
258sanitize_target_dc_state(struct drm_i915_private *i915,
259 u32 target_dc_state)
260{
261 struct i915_power_domains *power_domains = &i915->display.power.domains;
262 static const u32 states[] = {
263 DC_STATE_EN_UPTO_DC6,
264 DC_STATE_EN_UPTO_DC5,
265 DC_STATE_EN_DC3CO,
266 DC_STATE_DISABLE,
267 };
268 int i;
269
270 for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
271 if (target_dc_state != states[i])
272 continue;
273
274 if (power_domains->allowed_dc_mask & target_dc_state)
275 break;
276
277 target_dc_state = states[i + 1];
278 }
279
280 return target_dc_state;
281}
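
/*
 * Worked example of the degradation above (illustrative): with
 * allowed_dc_mask containing only DC_STATE_EN_UPTO_DC5, a request for
 * DC_STATE_EN_UPTO_DC6 falls back to DC_STATE_EN_UPTO_DC5, while a request
 * for DC_STATE_EN_DC3CO falls back to DC_STATE_DISABLE, since DC3CO is the
 * last entry before DC_STATE_DISABLE in the states[] array.
 */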
282
283/**
284 * intel_display_power_set_target_dc_state - Set target dc state.
285 * @dev_priv: i915 device
286 * @state: state which needs to be set as target_dc_state.
287 *
288 * This function set the "DC off" power well target_dc_state,
289 * based upon this target_dc_stste, "DC off" power well will
290 * enable desired DC state.
291 */
292void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
293 u32 state)
294{
295 struct i915_power_well *power_well;
296 bool dc_off_enabled;
297 struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
298
299 mutex_lock(&power_domains->lock);
300 power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
301
302 if (drm_WARN_ON(&dev_priv->drm, !power_well))
303 goto unlock;
304
305 state = sanitize_target_dc_state(dev_priv, state);
306
307 if (state == power_domains->target_dc_state)
308 goto unlock;
309
310 dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
311 /*
312 * If DC off power well is disabled, need to enable and disable the
313 * DC off power well to effect target DC state.
314 */
315 if (!dc_off_enabled)
316 intel_power_well_enable(dev_priv, power_well);
317
318 power_domains->target_dc_state = state;
319
320 if (!dc_off_enabled)
321 intel_power_well_disable(dev_priv, power_well);
322
323unlock:
324 mutex_unlock(&power_domains->lock);
325}
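
/*
 * Example call (illustrative): code that wants DC3CO instead of DC5/DC6
 * while a feature is active, and the default back afterwards, might do
 *
 *	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
 *	...
 *	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
 *
 * with unsupported states silently degraded by sanitize_target_dc_state().
 */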
326
327static void __async_put_domains_mask(struct i915_power_domains *power_domains,
328 struct intel_power_domain_mask *mask)
329{
330 bitmap_or(mask->bits,
331 power_domains->async_put_domains[0].bits,
332 power_domains->async_put_domains[1].bits,
333 POWER_DOMAIN_NUM);
334}
335
336#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
337
338static bool
339assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
340{
341 struct drm_i915_private *i915 = container_of(power_domains,
342 struct drm_i915_private,
343 display.power.domains);
344
345 return !drm_WARN_ON(&i915->drm,
346 bitmap_intersects(power_domains->async_put_domains[0].bits,
347 power_domains->async_put_domains[1].bits,
348 POWER_DOMAIN_NUM));
349}
350
351static bool
352__async_put_domains_state_ok(struct i915_power_domains *power_domains)
353{
354 struct drm_i915_private *i915 = container_of(power_domains,
355 struct drm_i915_private,
356 display.power.domains);
357 struct intel_power_domain_mask async_put_mask;
358 enum intel_display_power_domain domain;
359 bool err = false;
360
361 err |= !assert_async_put_domain_masks_disjoint(power_domains);
362 __async_put_domains_mask(power_domains, &async_put_mask);
363 err |= drm_WARN_ON(&i915->drm,
364 !!power_domains->async_put_wakeref !=
365 !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM));
366
367 for_each_power_domain(domain, &async_put_mask)
368 err |= drm_WARN_ON(&i915->drm,
369 power_domains->domain_use_count[domain] != 1);
370
371 return !err;
372}
373
374static void print_power_domains(struct i915_power_domains *power_domains,
375 const char *prefix, struct intel_power_domain_mask *mask)
376{
377 struct drm_i915_private *i915 = container_of(power_domains,
378 struct drm_i915_private,
379 display.power.domains);
380 enum intel_display_power_domain domain;
381
382 drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
383 for_each_power_domain(domain, mask)
384 drm_dbg(&i915->drm, "%s use_count %d\n",
385 intel_display_power_domain_str(domain),
386 power_domains->domain_use_count[domain]);
387}
388
389static void
390print_async_put_domains_state(struct i915_power_domains *power_domains)
391{
392 struct drm_i915_private *i915 = container_of(power_domains,
393 struct drm_i915_private,
394 display.power.domains);
395
396 drm_dbg(&i915->drm, "async_put_wakeref: %s\n",
397 str_yes_no(power_domains->async_put_wakeref));
398
399 print_power_domains(power_domains, "async_put_domains[0]",
400 &power_domains->async_put_domains[0]);
401 print_power_domains(power_domains, "async_put_domains[1]",
402 &power_domains->async_put_domains[1]);
403}
404
405static void
406verify_async_put_domains_state(struct i915_power_domains *power_domains)
407{
408 if (!__async_put_domains_state_ok(power_domains))
409 print_async_put_domains_state(power_domains);
410}
411
412#else
413
414static void
415assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
416{
417}
418
419static void
420verify_async_put_domains_state(struct i915_power_domains *power_domains)
421{
422}
423
424#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
425
426static void async_put_domains_mask(struct i915_power_domains *power_domains,
427 struct intel_power_domain_mask *mask)
428
429{
430 assert_async_put_domain_masks_disjoint(power_domains);
431
432 __async_put_domains_mask(power_domains, mask);
433}
434
435static void
436async_put_domains_clear_domain(struct i915_power_domains *power_domains,
437 enum intel_display_power_domain domain)
438{
439 assert_async_put_domain_masks_disjoint(power_domains);
440
441 clear_bit(domain, power_domains->async_put_domains[0].bits);
442 clear_bit(domain, power_domains->async_put_domains[1].bits);
443}
444
445static void
446cancel_async_put_work(struct i915_power_domains *power_domains, bool sync)
447{
448 if (sync)
449 cancel_delayed_work_sync(&power_domains->async_put_work);
450 else
451 cancel_delayed_work(&power_domains->async_put_work);
452
453 power_domains->async_put_next_delay = 0;
454}
455
456static bool
457intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
458 enum intel_display_power_domain domain)
459{
460 struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
461 struct intel_power_domain_mask async_put_mask;
462 bool ret = false;
463
464 async_put_domains_mask(power_domains, &async_put_mask);
465 if (!test_bit(domain, async_put_mask.bits))
466 goto out_verify;
467
468 async_put_domains_clear_domain(power_domains, domain);
469
470 ret = true;
471
472 async_put_domains_mask(power_domains, &async_put_mask);
473 if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM))
474 goto out_verify;
475
476 cancel_async_put_work(power_domains, false);
477 intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
478 fetch_and_zero(&power_domains->async_put_wakeref));
479out_verify:
480 verify_async_put_domains_state(power_domains);
481
482 return ret;
483}
484
485static void
486__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
487 enum intel_display_power_domain domain)
488{
489 struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
490 struct i915_power_well *power_well;
491
492 if (intel_display_power_grab_async_put_ref(dev_priv, domain))
493 return;
494
495 for_each_power_domain_well(dev_priv, power_well, domain)
496 intel_power_well_get(dev_priv, power_well);
497
498 power_domains->domain_use_count[domain]++;
499}
500
501/**
502 * intel_display_power_get - grab a power domain reference
503 * @dev_priv: i915 device instance
504 * @domain: power domain to reference
505 *
506 * This function grabs a power domain reference for @domain and ensures that the
507 * power domain and all its parents are powered up. Therefore users should only
508 * grab a reference to the innermost power domain they need.
509 *
510 * Any power domain reference obtained by this function must have a symmetric
511 * call to intel_display_power_put() to release the reference again.
512 */
513intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
514 enum intel_display_power_domain domain)
515{
516 struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
517 intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
518
519 mutex_lock(&power_domains->lock);
520 __intel_display_power_get_domain(dev_priv, domain);
521 mutex_unlock(&power_domains->lock);
522
523 return wakeref;
524}
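
/*
 * Typical usage of the reference counting API (sketch only):
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_A);
 *	... access the AUX A hardware ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_A, wakeref);
 */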
525
526/**
527 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
528 * @dev_priv: i915 device instance
529 * @domain: power domain to reference
530 *
531 * This function grabs a power domain reference for @domain and ensures that the
532 * power domain and all its parents are powered up. Therefore users should only
533 * grab a reference to the innermost power domain they need.
534 *
535 * Any power domain reference obtained by this function must have a symmetric
536 * call to intel_display_power_put() to release the reference again.
537 */
538intel_wakeref_t
539intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
540 enum intel_display_power_domain domain)
541{
542 struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
543 intel_wakeref_t wakeref;
544 bool is_enabled;
545
546 wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
547 if (!wakeref)
548 return NULL;
549
550 mutex_lock(&power_domains->lock);
551
552 if (__intel_display_power_is_enabled(dev_priv, domain)) {
553 __intel_display_power_get_domain(dev_priv, domain);
554 is_enabled = true;
555 } else {
556 is_enabled = false;
557 }
558
559 mutex_unlock(&power_domains->lock);
560
561 if (!is_enabled) {
562 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
563 wakeref = NULL;
564 }
565
566 return wakeref;
567}
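
/*
 * Typical pattern for the conditional variant above (sketch only):
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PIPE_A);
 *	if (!wakeref)
 *		return false;
 *	... read out pipe A state ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 */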
568
569static void
570__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
571 enum intel_display_power_domain domain)
572{
573 struct i915_power_domains *power_domains;
574 struct i915_power_well *power_well;
575 const char *name = intel_display_power_domain_str(domain);
576 struct intel_power_domain_mask async_put_mask;
577
578 power_domains = &dev_priv->display.power.domains;
579
580 drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
581 "Use count on domain %s is already zero\n",
582 name);
583 async_put_domains_mask(power_domains, &async_put_mask);
584 drm_WARN(&dev_priv->drm,
585 test_bit(domain, async_put_mask.bits),
586 "Async disabling of domain %s is pending\n",
587 name);
588
589 power_domains->domain_use_count[domain]--;
590
591 for_each_power_domain_well_reverse(dev_priv, power_well, domain)
592 intel_power_well_put(dev_priv, power_well);
593}
594
595static void __intel_display_power_put(struct drm_i915_private *dev_priv,
596 enum intel_display_power_domain domain)
597{
598 struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
599
600 mutex_lock(&power_domains->lock);
601 __intel_display_power_put_domain(dev_priv, domain);
602 mutex_unlock(&power_domains->lock);
603}
604
605static void
606queue_async_put_domains_work(struct i915_power_domains *power_domains,
607 intel_wakeref_t wakeref,
608 int delay_ms)
609{
610 struct drm_i915_private *i915 = container_of(power_domains,
611 struct drm_i915_private,
612 display.power.domains);
613 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
614 power_domains->async_put_wakeref = wakeref;
615 drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
616 &power_domains->async_put_work,
617 msecs_to_jiffies(delay_ms)));
618}
619
620static void
621release_async_put_domains(struct i915_power_domains *power_domains,
622 struct intel_power_domain_mask *mask)
623{
624 struct drm_i915_private *dev_priv =
625 container_of(power_domains, struct drm_i915_private,
626 display.power.domains);
627 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
628 enum intel_display_power_domain domain;
629 intel_wakeref_t wakeref;
630
631 wakeref = intel_runtime_pm_get_noresume(rpm);
632
633 for_each_power_domain(domain, mask) {
634 /* Clear before put, so put's sanity check is happy. */
635 async_put_domains_clear_domain(power_domains, domain);
636 __intel_display_power_put_domain(dev_priv, domain);
637 }
638
639 intel_runtime_pm_put(rpm, wakeref);
640}
641
642static void
643intel_display_power_put_async_work(struct work_struct *work)
644{
645 struct drm_i915_private *dev_priv =
646 container_of(work, struct drm_i915_private,
647 display.power.domains.async_put_work.work);
648 struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
649 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
650 intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
651 intel_wakeref_t old_work_wakeref = NULL;
652
653 mutex_lock(&power_domains->lock);
654
655 /*
656 * Bail out if all the domain refs pending to be released were grabbed
657 * by subsequent gets or a flush_work.
658 */
659 old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
660 if (!old_work_wakeref)
661 goto out_verify;
662
663 release_async_put_domains(power_domains,
664 &power_domains->async_put_domains[0]);
665
666 /*
667 * Cancel the work that got queued after this one got dequeued,
668 * since here we released the corresponding async-put reference.
669 */
670 cancel_async_put_work(power_domains, false);
671
672 /* Requeue the work if more domains were async put meanwhile. */
673 if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) {
674 bitmap_copy(power_domains->async_put_domains[0].bits,
675 power_domains->async_put_domains[1].bits,
676 POWER_DOMAIN_NUM);
677 bitmap_zero(power_domains->async_put_domains[1].bits,
678 POWER_DOMAIN_NUM);
679 queue_async_put_domains_work(power_domains,
680 fetch_and_zero(&new_work_wakeref),
681 power_domains->async_put_next_delay);
682 power_domains->async_put_next_delay = 0;
683 }
684
685out_verify:
686 verify_async_put_domains_state(power_domains);
687
688 mutex_unlock(&power_domains->lock);
689
690 if (old_work_wakeref)
691 intel_runtime_pm_put_raw(rpm, old_work_wakeref);
692 if (new_work_wakeref)
693 intel_runtime_pm_put_raw(rpm, new_work_wakeref);
694}
695
696/**
697 * __intel_display_power_put_async - release a power domain reference asynchronously
698 * @i915: i915 device instance
699 * @domain: power domain to reference
700 * @wakeref: wakeref acquired for the reference that is being released
701 * @delay_ms: delay of powering down the power domain
702 *
703 * This function drops the power domain reference obtained by
704 * intel_display_power_get*() and schedules a work to power down the
705 * corresponding hardware block if this is the last reference.
706 * The power down is delayed by @delay_ms if this is >= 0, or by a default
707 * 100 ms otherwise.
708 */
709void __intel_display_power_put_async(struct drm_i915_private *i915,
710 enum intel_display_power_domain domain,
711 intel_wakeref_t wakeref,
712 int delay_ms)
713{
714 struct i915_power_domains *power_domains = &i915->display.power.domains;
715 struct intel_runtime_pm *rpm = &i915->runtime_pm;
716 intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
717
718 delay_ms = delay_ms >= 0 ? delay_ms : 100;
719
720 mutex_lock(&power_domains->lock);
721
722 if (power_domains->domain_use_count[domain] > 1) {
723 __intel_display_power_put_domain(i915, domain);
724
725 goto out_verify;
726 }
727
728 drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
729
730 /* Let a pending work requeue itself or queue a new one. */
731 if (power_domains->async_put_wakeref) {
732 set_bit(domain, power_domains->async_put_domains[1].bits);
733 power_domains->async_put_next_delay = max(power_domains->async_put_next_delay,
734 delay_ms);
735 } else {
736 set_bit(domain, power_domains->async_put_domains[0].bits);
737 queue_async_put_domains_work(power_domains,
738 fetch_and_zero(&work_wakeref),
739 delay_ms);
740 }
741
742out_verify:
743 verify_async_put_domains_state(power_domains);
744
745 mutex_unlock(&power_domains->lock);
746
747 if (work_wakeref)
748 intel_runtime_pm_put_raw(rpm, work_wakeref);
749
750 intel_runtime_pm_put(rpm, wakeref);
751}
752
753/**
754 * intel_display_power_flush_work - flushes the async display power disabling work
755 * @i915: i915 device instance
756 *
757 * Flushes any pending work that was scheduled by a preceding
758 * intel_display_power_put_async() call, completing the disabling of the
759 * corresponding power domains.
760 *
761 * Note that the work handler function may still be running after this
762 * function returns; to ensure that the work handler isn't running use
763 * intel_display_power_flush_work_sync() instead.
764 */
765void intel_display_power_flush_work(struct drm_i915_private *i915)
766{
767 struct i915_power_domains *power_domains = &i915->display.power.domains;
768 struct intel_power_domain_mask async_put_mask;
769 intel_wakeref_t work_wakeref;
770
771 mutex_lock(&power_domains->lock);
772
773 work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
774 if (!work_wakeref)
775 goto out_verify;
776
777 async_put_domains_mask(power_domains, &async_put_mask);
778 release_async_put_domains(power_domains, &async_put_mask);
779 cancel_async_put_work(power_domains, false);
780
781out_verify:
782 verify_async_put_domains_state(power_domains);
783
784 mutex_unlock(&power_domains->lock);
785
786 if (work_wakeref)
787 intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
788}
789
790/**
791 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
792 * @i915: i915 device instance
793 *
794 * Like intel_display_power_flush_work(), but also ensure that the work
795 * handler function is not running any more when this function returns.
796 */
797static void
798intel_display_power_flush_work_sync(struct drm_i915_private *i915)
799{
800 struct i915_power_domains *power_domains = &i915->display.power.domains;
801
802 intel_display_power_flush_work(i915);
803 cancel_async_put_work(power_domains, true);
804
805 verify_async_put_domains_state(power_domains);
806
807 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
808}
809
810#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
811/**
812 * intel_display_power_put - release a power domain reference
813 * @dev_priv: i915 device instance
814 * @domain: power domain to reference
815 * @wakeref: wakeref acquired for the reference that is being released
816 *
817 * This function drops the power domain reference obtained by
818 * intel_display_power_get() and might power down the corresponding hardware
819 * block right away if this is the last reference.
820 */
821void intel_display_power_put(struct drm_i915_private *dev_priv,
822 enum intel_display_power_domain domain,
823 intel_wakeref_t wakeref)
824{
825 __intel_display_power_put(dev_priv, domain);
826 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
827}
828#else
829/**
830 * intel_display_power_put_unchecked - release an unchecked power domain reference
831 * @dev_priv: i915 device instance
832 * @domain: power domain to reference
833 *
834 * This function drops the power domain reference obtained by
835 * intel_display_power_get() and might power down the corresponding hardware
836 * block right away if this is the last reference.
837 *
838 * This function is only for the power domain code's internal use to suppress wakeref
839 * tracking when the corresponding debug kconfig option is disabled; it should
840 * not be used otherwise.
841 */
842void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
843 enum intel_display_power_domain domain)
844{
845 __intel_display_power_put(dev_priv, domain);
846 intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
847}
848#endif
849
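/*
 * The *_in_set() helpers below acquire/release power domain references on
 * behalf of an intel_display_power_domain_set: each acquired domain is
 * tracked in the set's mask, and its wakeref is recorded only when
 * CONFIG_DRM_I915_DEBUG_RUNTIME_PM is enabled.
 */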
850void
851intel_display_power_get_in_set(struct drm_i915_private *i915,
852 struct intel_display_power_domain_set *power_domain_set,
853 enum intel_display_power_domain domain)
854{
855 intel_wakeref_t __maybe_unused wf;
856
857 drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));
858
859 wf = intel_display_power_get(i915, domain);
860#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
861 power_domain_set->wakerefs[domain] = wf;
862#endif
863 set_bit(domain, power_domain_set->mask.bits);
864}
865
866bool
867intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
868 struct intel_display_power_domain_set *power_domain_set,
869 enum intel_display_power_domain domain)
870{
871 intel_wakeref_t wf;
872
873 drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));
874
875 wf = intel_display_power_get_if_enabled(i915, domain);
876 if (!wf)
877 return false;
878
879#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
880 power_domain_set->wakerefs[domain] = wf;
881#endif
882 set_bit(domain, power_domain_set->mask.bits);
883
884 return true;
885}
886
887void
888intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
889 struct intel_display_power_domain_set *power_domain_set,
890 struct intel_power_domain_mask *mask)
891{
892 enum intel_display_power_domain domain;
893
894 drm_WARN_ON(&i915->drm,
895 !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));
896
897 for_each_power_domain(domain, mask) {
898 intel_wakeref_t __maybe_unused wf = INTEL_WAKEREF_DEF;
899
900#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
901 wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
902#endif
903 intel_display_power_put(i915, domain, wf);
904 clear_bit(domain, power_domain_set->mask.bits);
905 }
906}
907
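/*
 * Normalize the disable_power_well parameter: explicit values are reduced
 * to 0/1, while -1 (auto) defaults to power well support being enabled.
 */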
908static int
909sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
910 int disable_power_well)
911{
912 if (disable_power_well >= 0)
913 return !!disable_power_well;
914
915 return 1;
916}
917
918static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
919 int enable_dc)
920{
921 u32 mask;
922 int requested_dc;
923 int max_dc;
924
925 if (!HAS_DISPLAY(dev_priv))
926 return 0;
927
928 if (DISPLAY_VER(dev_priv) >= 20)
929 max_dc = 2;
930 else if (IS_DG2(dev_priv))
931 max_dc = 1;
932 else if (IS_DG1(dev_priv))
933 max_dc = 3;
934 else if (DISPLAY_VER(dev_priv) >= 12)
935 max_dc = 4;
936 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
937 max_dc = 1;
938 else if (DISPLAY_VER(dev_priv) >= 9)
939 max_dc = 2;
940 else
941 max_dc = 0;
942
943 /*
944 * DC9 has a separate HW flow from the rest of the DC states,
945 * not depending on the DMC firmware. It's needed by system
946 * suspend/resume, so allow it unconditionally.
947 */
948 mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
949 DISPLAY_VER(dev_priv) >= 11 ?
950 DC_STATE_EN_DC9 : 0;
951
952 if (!dev_priv->display.params.disable_power_well)
953 max_dc = 0;
954
955 if (enable_dc >= 0 && enable_dc <= max_dc) {
956 requested_dc = enable_dc;
957 } else if (enable_dc == -1) {
958 requested_dc = max_dc;
959 } else if (enable_dc > max_dc && enable_dc <= 4) {
960 drm_dbg_kms(&dev_priv->drm,
961 "Adjusting requested max DC state (%d->%d)\n",
962 enable_dc, max_dc);
963 requested_dc = max_dc;
964 } else {
965 drm_err(&dev_priv->drm,
966 "Unexpected value for enable_dc (%d)\n", enable_dc);
967 requested_dc = max_dc;
968 }
969
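/*
 * Translate the highest allowed DC state into the corresponding
 * DC_STATE_EN_* enable bits; e.g. requested_dc == 4 adds DC3CO and
 * up-to-DC6 on top of any DC9 bit already set above.
 */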
970 switch (requested_dc) {
971 case 4:
972 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
973 break;
974 case 3:
975 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
976 break;
977 case 2:
978 mask |= DC_STATE_EN_UPTO_DC6;
979 break;
980 case 1:
981 mask |= DC_STATE_EN_UPTO_DC5;
982 break;
983 }
984
985 drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
986
987 return mask;
988}
989
990/**
991 * intel_power_domains_init - initializes the power domain structures
992 * @dev_priv: i915 device instance
993 *
994 * Initializes the power domain structures for @dev_priv depending upon the
995 * supported platform.
996 */
997int intel_power_domains_init(struct drm_i915_private *dev_priv)
998{
999 struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
1000
1001 dev_priv->display.params.disable_power_well =
1002 sanitize_disable_power_well_option(dev_priv,
1003 dev_priv->display.params.disable_power_well);
1004 power_domains->allowed_dc_mask =
1005 get_allowed_dc_mask(dev_priv, dev_priv->display.params.enable_dc);
1006
1007 power_domains->target_dc_state =
1008 sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1009
1010 mutex_init(&power_domains->lock);
1011
1012 INIT_DELAYED_WORK(&power_domains->async_put_work,
1013 intel_display_power_put_async_work);
1014
1015 return intel_display_power_map_init(power_domains);
1016}
1017
1018/**
1019 * intel_power_domains_cleanup - clean up power domains resources
1020 * @dev_priv: i915 device instance
1021 *
1022 * Release any resources acquired by intel_power_domains_init()
1023 */
1024void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
1025{
1026 intel_display_power_map_cleanup(&dev_priv->display.power.domains);
1027}
1028
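/*
 * Synchronize the HW state of every power well with the driver's current
 * SW state, under the power domains lock.
 */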
1029static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
1030{
1031 struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
1032 struct i915_power_well *power_well;
1033
1034 mutex_lock(&power_domains->lock);
1035 for_each_power_well(dev_priv, power_well)
1036 intel_power_well_sync_hw(dev_priv, power_well);
1037 mutex_unlock(&power_domains->lock);
1038}
1039
1040static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
1041 enum dbuf_slice slice, bool enable)
1042{
1043 i915_reg_t reg = DBUF_CTL_S(slice);
1044 bool state;
1045
1046 intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
1047 enable ? DBUF_POWER_REQUEST : 0);
1048 intel_de_posting_read(dev_priv, reg);
1049 udelay(10);
1050
1051 state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
1052 drm_WARN(&dev_priv->drm, enable != state,
1053 "DBuf slice %d power %s timeout!\n",
1054 slice, str_enable_disable(enable));
1055}
1056
1057void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
1058 u8 req_slices)
1059{
1060 struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
1061 u8 slice_mask = DISPLAY_INFO(dev_priv)->dbuf.slice_mask;
1062 enum dbuf_slice slice;
1063
1064 drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
1065 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
1066 req_slices, slice_mask);
1067
1068 drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
1069 req_slices);
1070
1071 /*
1072 * Might be running this in parallel to gen9_dc_off_power_well_enable
1073 * being called from intel_dp_detect for instance, which can trigger an
1074 * assertion due to a race condition: gen9_assert_dbuf_enabled might
1075 * preempt this after the registers were already updated, while dev_priv
1076 * was not.
1077 */
1078 mutex_lock(&power_domains->lock);
1079
1080 for_each_dbuf_slice(dev_priv, slice)
1081 gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
1082
1083 dev_priv->display.dbuf.enabled_slices = req_slices;
1084
1085 mutex_unlock(&power_domains->lock);
1086}
1087
1088static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
1089{
1090 u8 slices_mask;
1091
1092 dev_priv->display.dbuf.enabled_slices =
1093 intel_enabled_dbuf_slices_mask(dev_priv);
1094
1095 slices_mask = BIT(DBUF_S1) | dev_priv->display.dbuf.enabled_slices;
1096
1097 if (DISPLAY_VER(dev_priv) >= 14)
1098 intel_pmdemand_program_dbuf(dev_priv, slices_mask);
1099
1100 /*
1101 * Just power up at least 1 slice; we will figure out later which
1102 * slices we have and what we need.
1103 */
1104 gen9_dbuf_slices_update(dev_priv, slices_mask);
1105}
1106
1107static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
1108{
1109 gen9_dbuf_slices_update(dev_priv, 0);
1110
1111 if (DISPLAY_VER(dev_priv) >= 14)
1112 intel_pmdemand_program_dbuf(dev_priv, 0);
1113}
1114
1115static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
1116{
1117 enum dbuf_slice slice;
1118
1119 if (IS_ALDERLAKE_P(dev_priv))
1120 return;
1121
1122 for_each_dbuf_slice(dev_priv, slice)
1123 intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
1124 DBUF_TRACKER_STATE_SERVICE_MASK,
1125 DBUF_TRACKER_STATE_SERVICE(8));
1126}
1127
1128static void icl_mbus_init(struct drm_i915_private *dev_priv)
1129{
1130 unsigned long abox_regs = DISPLAY_INFO(dev_priv)->abox_mask;
1131 u32 mask, val, i;
1132
1133 if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
1134 return;
1135
1136 mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
1137 MBUS_ABOX_BT_CREDIT_POOL2_MASK |
1138 MBUS_ABOX_B_CREDIT_MASK |
1139 MBUS_ABOX_BW_CREDIT_MASK;
1140 val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
1141 MBUS_ABOX_BT_CREDIT_POOL2(16) |
1142 MBUS_ABOX_B_CREDIT(1) |
1143 MBUS_ABOX_BW_CREDIT(1);
1144
1145 /*
1146 * gen12 platforms that use abox1 and abox2 for pixel data reads still
1147 * expect us to program the abox_ctl0 register as well, even though
1148 * we don't have to program other instance-0 registers like BW_BUDDY.
1149 */
1150 if (DISPLAY_VER(dev_priv) == 12)
1151 abox_regs |= BIT(0);
1152
1153 for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
1154 intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
1155}
1156
1157static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
1158{
1159 u32 val = intel_de_read(dev_priv, LCPLL_CTL);
1160
1161 /*
1162 * The LCPLL register should be turned on by the BIOS. For now
1163 * let's just check its state and print errors in case
1164 * something is wrong. Don't even try to turn it on.
1165 */
1166
1167 if (val & LCPLL_CD_SOURCE_FCLK)
1168 drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
1169
1170 if (val & LCPLL_PLL_DISABLE)
1171 drm_err(&dev_priv->drm, "LCPLL is disabled\n");
1172
1173 if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
1174 drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
1175}
1176
1177static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
1178{
1179 struct intel_display *display = &dev_priv->display;
1180 struct intel_crtc *crtc;
1181
1182 for_each_intel_crtc(display->drm, crtc)
1183 INTEL_DISPLAY_STATE_WARN(display, crtc->active,
1184 "CRTC for pipe %c enabled\n",
1185 pipe_name(crtc->pipe));
1186
1187 INTEL_DISPLAY_STATE_WARN(display, intel_de_read(display, HSW_PWR_WELL_CTL2),
1188 "Display power well on\n");
1189 INTEL_DISPLAY_STATE_WARN(display,
1190 intel_de_read(display, SPLL_CTL) & SPLL_PLL_ENABLE,
1191 "SPLL enabled\n");
1192 INTEL_DISPLAY_STATE_WARN(display,
1193 intel_de_read(display, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
1194 "WRPLL1 enabled\n");
1195 INTEL_DISPLAY_STATE_WARN(display,
1196 intel_de_read(display, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
1197 "WRPLL2 enabled\n");
1198 INTEL_DISPLAY_STATE_WARN(display,
1199 intel_de_read(display, PP_STATUS(display, 0)) & PP_ON,
1200 "Panel power on\n");
1201 INTEL_DISPLAY_STATE_WARN(display,
1202 intel_de_read(display, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
1203 "CPU PWM1 enabled\n");
1204 if (IS_HASWELL(dev_priv))
1205 INTEL_DISPLAY_STATE_WARN(display,
1206 intel_de_read(display, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
1207 "CPU PWM2 enabled\n");
1208 INTEL_DISPLAY_STATE_WARN(display,
1209 intel_de_read(display, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
1210 "PCH PWM1 enabled\n");
1211 INTEL_DISPLAY_STATE_WARN(display,
1212 (intel_de_read(display, UTIL_PIN_CTL) & (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM),
1213 "Utility pin enabled in PWM mode\n");
1214 INTEL_DISPLAY_STATE_WARN(display,
1215 intel_de_read(display, PCH_GTC_CTL) & PCH_GTC_ENABLE,
1216 "PCH GTC enabled\n");
1217
1218 /*
1219 * In theory we can still leave IRQs enabled, as long as only the HPD
1220 * interrupts remain enabled. We used to check for that, but since it's
1221 * gen-specific and since we only disable LCPLL after we fully disable
1222 * the interrupts, the check below should be enough.
1223 */
1224 INTEL_DISPLAY_STATE_WARN(display, intel_irqs_enabled(dev_priv),
1225 "IRQs enabled\n");
1226}
1227
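/*
 * D_COMP is read via MMIO on both HSW and BDW, but on HSW writes have to
 * go through the pcode mailbox (GEN6_PCODE_WRITE_D_COMP), while BDW
 * accepts a plain MMIO write.
 */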
1228static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
1229{
1230 if (IS_HASWELL(dev_priv))
1231 return intel_de_read(dev_priv, D_COMP_HSW);
1232 else
1233 return intel_de_read(dev_priv, D_COMP_BDW);
1234}
1235
1236static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
1237{
1238 if (IS_HASWELL(dev_priv)) {
1239 if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
1240 drm_dbg_kms(&dev_priv->drm,
1241 "Failed to write to D_COMP\n");
1242 } else {
1243 intel_de_write(dev_priv, D_COMP_BDW, val);
1244 intel_de_posting_read(dev_priv, D_COMP_BDW);
1245 }
1246}
1247
1248/*
1249 * This function implements pieces of two sequences from BSpec:
1250 * - Sequence for display software to disable LCPLL
1251 * - Sequence for display software to allow package C8+
1252 * The steps implemented here are just the steps that actually touch the LCPLL
1253 * register. Callers should take care of disabling all the display engine
1254 * functions, doing the mode unset, fixing interrupts, etc.
1255 */
1256static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
1257 bool switch_to_fclk, bool allow_power_down)
1258{
1259 u32 val;
1260
1261 assert_can_disable_lcpll(dev_priv);
1262
1263 val = intel_de_read(dev_priv, LCPLL_CTL);
1264
1265 if (switch_to_fclk) {
1266 val |= LCPLL_CD_SOURCE_FCLK;
1267 intel_de_write(dev_priv, LCPLL_CTL, val);
1268
1269 if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
1270 LCPLL_CD_SOURCE_FCLK_DONE, 1))
1271 drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
1272
1273 val = intel_de_read(dev_priv, LCPLL_CTL);
1274 }
1275
1276 val |= LCPLL_PLL_DISABLE;
1277 intel_de_write(dev_priv, LCPLL_CTL, val);
1278 intel_de_posting_read(dev_priv, LCPLL_CTL);
1279
1280 if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
1281 drm_err(&dev_priv->drm, "LCPLL still locked\n");
1282
1283 val = hsw_read_dcomp(dev_priv);
1284 val |= D_COMP_COMP_DISABLE;
1285 hsw_write_dcomp(dev_priv, val);
1286 ndelay(100);
1287
1288 if (wait_for((hsw_read_dcomp(dev_priv) &
1289 D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
1290 drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
1291
1292 if (allow_power_down) {
1293 intel_de_rmw(dev_priv, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW);
1294 intel_de_posting_read(dev_priv, LCPLL_CTL);
1295 }
1296}
1297
1298/*
1299 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
1300 * source.
1301 */
1302static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
1303{
1304 struct intel_display *display = &dev_priv->display;
1305 u32 val;
1306
1307 val = intel_de_read(dev_priv, LCPLL_CTL);
1308
1309 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
1310 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
1311 return;
1312
1313 /*
1314 * Make sure we're not in PC8 state before disabling PC8, otherwise
1315 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
1316 */
1317 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
1318
1319 if (val & LCPLL_POWER_DOWN_ALLOW) {
1320 val &= ~LCPLL_POWER_DOWN_ALLOW;
1321 intel_de_write(dev_priv, LCPLL_CTL, val);
1322 intel_de_posting_read(dev_priv, LCPLL_CTL);
1323 }
1324
1325 val = hsw_read_dcomp(dev_priv);
1326 val |= D_COMP_COMP_FORCE;
1327 val &= ~D_COMP_COMP_DISABLE;
1328 hsw_write_dcomp(dev_priv, val);
1329
1330 val = intel_de_read(dev_priv, LCPLL_CTL);
1331 val &= ~LCPLL_PLL_DISABLE;
1332 intel_de_write(dev_priv, LCPLL_CTL, val);
1333
1334 if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
1335 drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
1336
1337 if (val & LCPLL_CD_SOURCE_FCLK) {
1338 intel_de_rmw(dev_priv, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);
1339
1340 if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
1341 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
1342 drm_err(&dev_priv->drm,
1343 "Switching back to LCPLL failed\n");
1344 }
1345
1346 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1347
1348 intel_update_cdclk(display);
1349 intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
1350}
1351
1352/*
1353 * Package states C8 and deeper are really deep PC states that can only be
1354 * reached when all the devices on the system allow it, so even if the graphics
1355 * device allows PC8+, it doesn't mean the system will actually get to these
1356 * states. Our driver only allows PC8+ when going into runtime PM.
1357 *
1358 * The requirements for PC8+ are that all the outputs are disabled, the power
1359 * well is disabled and most interrupts are disabled, and these are also
1360 * requirements for runtime PM. When these conditions are met, we manually do
1361 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
1362 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
1363 * hang the machine.
1364 *
1365 * When we really reach PC8 or deeper states (not just when we allow it) we lose
1366 * the state of some registers, so when we come back from PC8+ we need to
1367 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
1368 * need to take care of the registers kept by RC6. Notice that this happens even
1369 * if we don't put the device in PCI D3 state (which is what currently happens
1370 * because of the runtime PM support).
1371 *
1372 * For more, read "Display Sequences for Package C8" in the hardware
1373 * documentation.
1374 */
1375static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
1376{
1377 drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
1378
1379 if (HAS_PCH_LPT_LP(dev_priv))
1380 intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
1381 PCH_LP_PARTITION_LEVEL_DISABLE, 0);
1382
1383 lpt_disable_clkout_dp(dev_priv);
1384 hsw_disable_lcpll(dev_priv, true, true);
1385}
1386
1387static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
1388{
1389 drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
1390
1391 hsw_restore_lcpll(dev_priv);
1392 intel_init_pch_refclk(dev_priv);
1393
1394 /* Many display registers don't survive PC8+ */
1395 intel_clock_gating_init(dev_priv);
1396}
1397
1398static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
1399 bool enable)
1400{
1401 i915_reg_t reg;
1402 u32 reset_bits;
1403
1404 if (IS_IVYBRIDGE(dev_priv)) {
1405 reg = GEN7_MSG_CTL;
1406 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
1407 } else {
1408 reg = HSW_NDE_RSTWRN_OPT;
1409 reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
1410 }
1411
1412 if (DISPLAY_VER(dev_priv) >= 14)
1413 reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;
1414
1415 intel_de_rmw(dev_priv, reg, reset_bits, enable ? reset_bits : 0);
1416}
1417
1418static void skl_display_core_init(struct drm_i915_private *dev_priv,
1419 bool resume)
1420{
1421 struct intel_display *display = &dev_priv->display;
1422 struct i915_power_domains *power_domains = &display->power.domains;
1423 struct i915_power_well *well;
1424
1425 gen9_set_dc_state(display, DC_STATE_DISABLE);
1426
1427 /* enable PCH reset handshake */
1428 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
1429
1430 if (!HAS_DISPLAY(dev_priv))
1431 return;
1432
1433 /* enable PG1 and Misc I/O */
1434 mutex_lock(&power_domains->lock);
1435
1436 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
1437 intel_power_well_enable(dev_priv, well);
1438
1439 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
1440 intel_power_well_enable(dev_priv, well);
1441
1442 mutex_unlock(&power_domains->lock);
1443
1444 intel_cdclk_init_hw(display);
1445
1446 gen9_dbuf_enable(dev_priv);
1447
1448 if (resume)
1449 intel_dmc_load_program(display);
1450}
1451
1452static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
1453{
1454 struct intel_display *display = &dev_priv->display;
1455 struct i915_power_domains *power_domains = &display->power.domains;
1456 struct i915_power_well *well;
1457
1458 if (!HAS_DISPLAY(dev_priv))
1459 return;
1460
1461 gen9_disable_dc_states(display);
1462 /* TODO: disable DMC program */
1463
1464 gen9_dbuf_disable(dev_priv);
1465
1466 intel_cdclk_uninit_hw(display);
1467
1468 /* The spec doesn't call for removing the reset handshake flag */
1469 /* disable PG1 and Misc I/O */
1470
1471 mutex_lock(&power_domains->lock);
1472
1473 /*
1474 * BSpec says to keep the MISC IO power well enabled here, only
1475 * remove our request for power well 1.
1476 * Note that even though the driver's request is removed, power well 1
1477 * may stay enabled after this due to DMC's own request on it.
1478 */
1479 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
1480 intel_power_well_disable(dev_priv, well);
1481
1482 mutex_unlock(&power_domains->lock);
1483
1484 usleep_range(10, 30); /* 10 us delay per Bspec */
1485}
1486
1487static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
1488{
1489 struct intel_display *display = &dev_priv->display;
1490 struct i915_power_domains *power_domains = &display->power.domains;
1491 struct i915_power_well *well;
1492
1493 gen9_set_dc_state(display, DC_STATE_DISABLE);
1494
1495 /*
1496 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
1497 * or else the reset will hang because there is no PCH to respond.
1498 * Move the handshake programming to the initialization sequence.
1499 * Previously this was left up to the BIOS.
1500 */
1501 intel_pch_reset_handshake(dev_priv, false);
1502
1503 if (!HAS_DISPLAY(dev_priv))
1504 return;
1505
1506 /* Enable PG1 */
1507 mutex_lock(&power_domains->lock);
1508
1509 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
1510 intel_power_well_enable(dev_priv, well);
1511
1512 mutex_unlock(&power_domains->lock);
1513
1514 intel_cdclk_init_hw(display);
1515
1516 gen9_dbuf_enable(dev_priv);
1517
1518 if (resume)
1519 intel_dmc_load_program(display);
1520}
1521
1522static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
1523{
1524 struct intel_display *display = &dev_priv->display;
1525 struct i915_power_domains *power_domains = &display->power.domains;
1526 struct i915_power_well *well;
1527
1528 if (!HAS_DISPLAY(dev_priv))
1529 return;
1530
1531 gen9_disable_dc_states(display);
1532 /* TODO: disable DMC program */
1533
1534 gen9_dbuf_disable(dev_priv);
1535
1536 intel_cdclk_uninit_hw(display);
1537
1538 /* The spec doesn't call for removing the reset handshake flag */
1539
1540 /*
1541 * Disable PW1 (PG1).
1542 * Note that even though the driver's request is removed, power well 1
1543 * may stay enabled after this due to DMC's own request on it.
1544 */
1545 mutex_lock(&power_domains->lock);
1546
1547 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
1548 intel_power_well_disable(dev_priv, well);
1549
1550 mutex_unlock(&power_domains->lock);
1551
1552 usleep_range(10, 30); /* 10 us delay per Bspec */
1553}
1554
1555struct buddy_page_mask {
1556 u32 page_mask;
1557 u8 type;
1558 u8 num_channels;
1559};
1560
1561static const struct buddy_page_mask tgl_buddy_page_masks[] = {
1562 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
1563 { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0xF },
1564 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
1565 { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
1566 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
1567 { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x1E },
1568 { .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
1569 { .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
1570 {}
1571};
1572
1573static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
1574 { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
1575 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 },
1576 { .num_channels = 1, .type = INTEL_DRAM_DDR5, .page_mask = 0x1 },
1577 { .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
1578 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
1579 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 },
1580 { .num_channels = 2, .type = INTEL_DRAM_DDR5, .page_mask = 0x3 },
1581 { .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
1582 {}
1583};
1584
1585static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
1586{
1587 enum intel_dram_type type = dev_priv->dram_info.type;
1588 u8 num_channels = dev_priv->dram_info.num_channels;
1589 const struct buddy_page_mask *table;
1590 unsigned long abox_mask = DISPLAY_INFO(dev_priv)->abox_mask;
1591 int config, i;
1592
1593 /* BW_BUDDY registers are not used on dgpu's beyond DG1 */
1594 if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
1595 return;
1596
1597 if (IS_ALDERLAKE_S(dev_priv) ||
1598 (IS_ROCKETLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)))
1599 /* Wa_1409767108 */
1600 table = wa_1409767108_buddy_page_masks;
1601 else
1602 table = tgl_buddy_page_masks;
1603
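/*
 * Pick the first entry matching the detected DRAM type and channel count;
 * the zero page_mask entry terminates the table.
 */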
1604 for (config = 0; table[config].page_mask != 0; config++)
1605 if (table[config].num_channels == num_channels &&
1606 table[config].type == type)
1607 break;
1608
1609 if (table[config].page_mask == 0) {
1610 drm_dbg(&dev_priv->drm,
1611 "Unknown memory configuration; disabling address buddy logic.\n");
1612 for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
1613 intel_de_write(dev_priv, BW_BUDDY_CTL(i),
1614 BW_BUDDY_DISABLE);
1615 } else {
1616 for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
1617 intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
1618 table[config].page_mask);
1619
1620 /* Wa_22010178259:tgl,dg1,rkl,adl-s */
1621 if (DISPLAY_VER(dev_priv) == 12)
1622 intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
1623 BW_BUDDY_TLB_REQ_TIMER_MASK,
1624 BW_BUDDY_TLB_REQ_TIMER(0x8));
1625 }
1626 }
1627}
1628
1629static void icl_display_core_init(struct drm_i915_private *dev_priv,
1630 bool resume)
1631{
1632 struct intel_display *display = &dev_priv->display;
1633 struct i915_power_domains *power_domains = &display->power.domains;
1634 struct i915_power_well *well;
1635
1636 gen9_set_dc_state(display, DC_STATE_DISABLE);
1637
1638 /* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
1639 if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
1640 INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
1641 intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
1642 PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
1643
1644 /* 1. Enable PCH reset handshake. */
1645 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
1646
1647 if (!HAS_DISPLAY(dev_priv))
1648 return;
1649
1650 /* 2. Initialize all combo phys */
1651 intel_combo_phy_init(dev_priv);
1652
1653 /*
1654 * 3. Enable Power Well 1 (PG1).
1655 * The AUX IO power wells will be enabled on demand.
1656 */
1657 mutex_lock(&power_domains->lock);
1658 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
1659 intel_power_well_enable(dev_priv, well);
1660 mutex_unlock(&power_domains->lock);
1661
1662 if (DISPLAY_VER(dev_priv) == 14)
1663 intel_de_rmw(dev_priv, DC_STATE_EN,
1664 HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0);
1665
1666 /* 4. Enable CDCLK. */
1667 intel_cdclk_init_hw(display);
1668
1669 if (DISPLAY_VER(dev_priv) >= 12)
1670 gen12_dbuf_slices_config(dev_priv);
1671
1672 /* 5. Enable DBUF. */
1673 gen9_dbuf_enable(dev_priv);
1674
1675 /* 6. Setup MBUS. */
1676 icl_mbus_init(dev_priv);
1677
1678 /* 7. Program arbiter BW_BUDDY registers */
1679 if (DISPLAY_VER(dev_priv) >= 12)
1680 tgl_bw_buddy_init(dev_priv);
1681
1682 /* 8. Ensure PHYs have completed calibration and adaptation */
1683 if (IS_DG2(dev_priv))
1684 intel_snps_phy_wait_for_calibration(dev_priv);
1685
1686 /* 9. XE2_HPD: Program CHICKEN_MISC_2 before any cursor or planes are enabled */
1687 if (DISPLAY_VERx100(dev_priv) == 1401)
1688 intel_de_rmw(dev_priv, CHICKEN_MISC_2, BMG_DARB_HALF_BLK_END_BURST, 1);
1689
1690 if (resume)
1691 intel_dmc_load_program(display);
1692
1693 /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p,dg2 */
1694 if (IS_DISPLAY_VERx100(dev_priv, 1200, 1300))
1695 intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0,
1696 DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
1697 DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);
1698
1699 /* Wa_14011503030:xelpd */
1700 if (DISPLAY_VER(dev_priv) == 13)
1701 intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
1702
1703 /* Wa_15013987218 */
1704 if (DISPLAY_VER(dev_priv) == 20) {
1705 intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
1706 0, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE);
1707 intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
1708 PCH_GMBUSUNIT_CLOCK_GATE_DISABLE, 0);
1709 }
1710}
1711
1712static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
1713{
1714 struct intel_display *display = &dev_priv->display;
1715 struct i915_power_domains *power_domains = &display->power.domains;
1716 struct i915_power_well *well;
1717
1718 if (!HAS_DISPLAY(dev_priv))
1719 return;
1720
1721 gen9_disable_dc_states(display);
1722 intel_dmc_disable_program(display);
1723
1724 /* 1. Disable all display engine functions -> already done */
1725
1726 /* 2. Disable DBUF */
1727 gen9_dbuf_disable(dev_priv);
1728
1729 /* 3. Disable CD clock */
1730 intel_cdclk_uninit_hw(display);
1731
1732 if (DISPLAY_VER(dev_priv) == 14)
1733 intel_de_rmw(dev_priv, DC_STATE_EN, 0,
1734 HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH);
1735
1736 /*
1737 * 4. Disable Power Well 1 (PG1).
1738 * The AUX IO power wells are toggled on demand, so they are already
1739 * disabled at this point.
1740 */
1741 mutex_lock(&power_domains->lock);
1742 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
1743 intel_power_well_disable(dev_priv, well);
1744 mutex_unlock(&power_domains->lock);
1745
1746 /* 5. Disable Combo PHYs */
1747 intel_combo_phy_uninit(dev_priv);
1748}
1749
1750static void chv_phy_control_init(struct drm_i915_private *dev_priv)
1751{
1752 struct i915_power_well *cmn_bc =
1753 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1754 struct i915_power_well *cmn_d =
1755 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1756
1757 /*
1758 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
1759 * workaround never ever read DISPLAY_PHY_CONTROL, and
1760 * instead maintain a shadow copy ourselves. Use the actual
1761 * power well state and lane status to reconstruct the
1762 * expected initial value.
1763 */
1764 dev_priv->display.power.chv_phy_control =
1765 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
1766 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
1767 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
1768 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
1769 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
1770
1771 /*
1772 * If all lanes are disabled we leave the override disabled
1773 * with all power down bits cleared to match the state we
1774 * would use after disabling the port. Otherwise enable the
1775 * override and set the lane powerdown bits according to the
1776 * current lane status.
1777 */
1778 if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
1779 u32 status = intel_de_read(dev_priv, DPLL(dev_priv, PIPE_A));
1780 unsigned int mask;
1781
1782 mask = status & DPLL_PORTB_READY_MASK;
1783 if (mask == 0xf)
1784 mask = 0x0;
1785 else
1786 dev_priv->display.power.chv_phy_control |=
1787 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
1788
1789 dev_priv->display.power.chv_phy_control |=
1790 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
1791
1792 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
1793 if (mask == 0xf)
1794 mask = 0x0;
1795 else
1796 dev_priv->display.power.chv_phy_control |=
1797 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
1798
1799 dev_priv->display.power.chv_phy_control |=
1800 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
1801
1802 dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
1803
1804 dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = false;
1805 } else {
1806 dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = true;
1807 }
1808
1809 if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
1810 u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
1811 unsigned int mask;
1812
1813 mask = status & DPLL_PORTD_READY_MASK;
1814
1815 if (mask == 0xf)
1816 mask = 0x0;
1817 else
1818 dev_priv->display.power.chv_phy_control |=
1819 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
1820
1821 dev_priv->display.power.chv_phy_control |=
1822 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
1823
1824 dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
1825
1826 dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = false;
1827 } else {
1828 dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = true;
1829 }
1830
1831 drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
1832 dev_priv->display.power.chv_phy_control);
1833
1834 /* Defer application of initial phy_control to enabling the powerwell */
1835}
1836
1837static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
1838{
1839 struct i915_power_well *cmn =
1840 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1841 struct i915_power_well *disp2d =
1842 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
1843
1844 /* If the display might already be active, skip this */
1845 if (intel_power_well_is_enabled(dev_priv, cmn) &&
1846 intel_power_well_is_enabled(dev_priv, disp2d) &&
1847 intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
1848 return;
1849
1850 drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
1851
1852 /* cmnlane needs DPLL registers */
1853 intel_power_well_enable(dev_priv, disp2d);
1854
1855 /*
1856 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
1857 * Need to assert and de-assert PHY SB reset by gating the
1858 * common lane power, then un-gating it.
1859 * Simply ungating isn't enough to reset the PHY enough to get
1860 * ports and lanes running.
1861 */
1862 intel_power_well_disable(dev_priv, cmn);
1863}
1864
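/*
 * Check over the Punit sideband whether the subsystem behind the given
 * SSPM0 register reports itself as power gated.
 */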
1865static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
1866{
1867 bool ret;
1868
1869 vlv_punit_get(dev_priv);
1870 ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
1871 vlv_punit_put(dev_priv);
1872
1873 return ret;
1874}
1875
1876static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
1877{
1878 drm_WARN(&dev_priv->drm,
1879 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
1880 "VED not power gated\n");
1881}
1882
1883static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
1884{
1885 static const struct pci_device_id isp_ids[] = {
1886 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
1887 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
1888 {}
1889 };
1890
1891 drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
1892 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
1893 "ISP not power gated\n");
1894}
1895
1896static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
1897
1898/**
1899 * intel_power_domains_init_hw - initialize hardware power domain state
1900 * @i915: i915 device instance
1901 * @resume: Called from resume code paths or not
1902 *
1903 * This function initializes the hardware power domain state and enables all
1904 * power wells belonging to the INIT power domain. Power wells in other
1905 * domains (and not in the INIT domain) are referenced or disabled by
1906 * intel_modeset_readout_hw_state(). After that the reference count of each
1907 * power well must match its HW enabled state, see
1908 * intel_power_domains_verify_state().
1909 *
1910 * It will return with power domains disabled (to be enabled later by
1911 * intel_power_domains_enable()) and must be paired with
1912 * intel_power_domains_driver_remove().
1913 */
1914void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
1915{
1916 struct i915_power_domains *power_domains = &i915->display.power.domains;
1917
1918 power_domains->initializing = true;
1919
1920 if (DISPLAY_VER(i915) >= 11) {
1921 icl_display_core_init(i915, resume);
1922 } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
1923 bxt_display_core_init(i915, resume);
1924 } else if (DISPLAY_VER(i915) == 9) {
1925 skl_display_core_init(i915, resume);
1926 } else if (IS_CHERRYVIEW(i915)) {
1927 mutex_lock(&power_domains->lock);
1928 chv_phy_control_init(i915);
1929 mutex_unlock(&power_domains->lock);
1930 assert_isp_power_gated(i915);
1931 } else if (IS_VALLEYVIEW(i915)) {
1932 mutex_lock(&power_domains->lock);
1933 vlv_cmnlane_wa(i915);
1934 mutex_unlock(&power_domains->lock);
1935 assert_ved_power_gated(i915);
1936 assert_isp_power_gated(i915);
1937 } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
1938 hsw_assert_cdclk(i915);
1939 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
1940 } else if (IS_IVYBRIDGE(i915)) {
1941 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
1942 }
1943
1944 /*
1945 * Keep all power wells enabled for any dependent HW access during
1946 * initialization and to make sure we keep BIOS enabled display HW
1947 * resources powered until display HW readout is complete. We drop
1948 * this reference in intel_power_domains_enable().
1949 */
1950 drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
1951 power_domains->init_wakeref =
1952 intel_display_power_get(i915, POWER_DOMAIN_INIT);
1953
1954 /* Keep all power wells enabled if the user disabled power well support. */
1955 if (!i915->display.params.disable_power_well) {
1956 drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
1957 i915->display.power.domains.disable_wakeref = intel_display_power_get(i915,
1958 POWER_DOMAIN_INIT);
1959 }
1960 intel_power_domains_sync_hw(i915);
1961
1962 power_domains->initializing = false;
1963}
1964
1965/**
1966 * intel_power_domains_driver_remove - deinitialize hw power domain state
1967 * @i915: i915 device instance
1968 *
1969 * De-initializes the display power domain HW state. It also ensures that the
1970 * device stays powered up so that the driver can be reloaded.
1971 *
1972 * It must be called with power domains already disabled (after a call to
1973 * intel_power_domains_disable()) and must be paired with
1974 * intel_power_domains_init_hw().
1975 */
1976void intel_power_domains_driver_remove(struct drm_i915_private *i915)
1977{
1978 intel_wakeref_t wakeref __maybe_unused =
1979 fetch_and_zero(&i915->display.power.domains.init_wakeref);
1980
1981 /* Remove the refcount we took to keep power well support disabled. */
1982 if (!i915->display.params.disable_power_well)
1983 intel_display_power_put(i915, POWER_DOMAIN_INIT,
1984 fetch_and_zero(&i915->display.power.domains.disable_wakeref));
1985
1986 intel_display_power_flush_work_sync(i915);
1987
1988 intel_power_domains_verify_state(i915);
1989
1990 /* Keep the power well enabled, but cancel its rpm wakeref. */
1991 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1992}
1993
1994/**
1995 * intel_power_domains_sanitize_state - sanitize power domains state
1996 * @i915: i915 device instance
1997 *
1998 * Sanitize the power domains state during driver loading and system resume.
1999 * The function will disable all display power wells that the BIOS has enabled
2000 * without a user for them (any user of a power well has taken a reference
2001 * on it by the time this function is called, after the state of all the
2002 * pipe, encoder, etc. HW resources have been sanitized).
2003 */
2004void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
2005{
2006 struct i915_power_domains *power_domains = &i915->display.power.domains;
2007 struct i915_power_well *power_well;
2008
2009 mutex_lock(&power_domains->lock);
2010
2011 for_each_power_well_reverse(i915, power_well) {
2012 if (power_well->desc->always_on || power_well->count ||
2013 !intel_power_well_is_enabled(i915, power_well))
2014 continue;
2015
2016 drm_dbg_kms(&i915->drm,
2017 "BIOS left unused %s power well enabled, disabling it\n",
2018 intel_power_well_name(power_well));
2019 intel_power_well_disable(i915, power_well);
2020 }
2021
2022 mutex_unlock(&power_domains->lock);
2023}
2024
2025/**
2026 * intel_power_domains_enable - enable toggling of display power wells
2027 * @i915: i915 device instance
2028 *
2029 * Enable the on-demand enabling/disabling of the display power wells. Note that
2030 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
2031 * only at specific points of the display modeset sequence, thus they are not
2032 * affected by the intel_power_domains_enable()/disable() calls. The purpose
2033 * of these functions is to keep the rest of the power wells enabled until the end
2034 * of display HW readout (which will acquire the power references reflecting
2035 * the current HW state).
2036 */
2037void intel_power_domains_enable(struct drm_i915_private *i915)
2038{
2039 intel_wakeref_t wakeref __maybe_unused =
2040 fetch_and_zero(&i915->display.power.domains.init_wakeref);
2041
2042 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
2043 intel_power_domains_verify_state(i915);
2044}
2045
2046/**
2047 * intel_power_domains_disable - disable toggling of display power wells
2048 * @i915: i915 device instance
2049 *
2050 * Disable the on-demand enabling/disabling of the display power wells. See
2051 * intel_power_domains_enable() for which power wells this call controls.
2052 */
2053void intel_power_domains_disable(struct drm_i915_private *i915)
2054{
2055 struct i915_power_domains *power_domains = &i915->display.power.domains;
2056
2057 drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
2058 power_domains->init_wakeref =
2059 intel_display_power_get(i915, POWER_DOMAIN_INIT);
2060
2061 intel_power_domains_verify_state(i915);
2062}
2063
2064/**
2065 * intel_power_domains_suspend - suspend power domain state
2066 * @i915: i915 device instance
2067 * @s2idle: specifies whether we go to idle, or deeper sleep
2068 *
2069 * This function prepares the hardware power domain state before entering
2070 * system suspend.
2071 *
2072 * It must be called with power domains already disabled (after a call to
2073 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
2074 */
2075void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle)
2076{
2077 struct intel_display *display = &i915->display;
2078 struct i915_power_domains *power_domains = &display->power.domains;
2079 intel_wakeref_t wakeref __maybe_unused =
2080 fetch_and_zero(&power_domains->init_wakeref);
2081
2082 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
2083
2084 /*
2085 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
2086 * support, don't manually deinit the power domains. This also means the
2087 * DMC firmware will stay active; it will power down any HW
2088 * resources as required and also enable deeper system power states
2089 * that would otherwise be blocked if the firmware were inactive.
2090 */
2091 if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && s2idle &&
2092 intel_dmc_has_payload(display)) {
2093 intel_display_power_flush_work(i915);
2094 intel_power_domains_verify_state(i915);
2095 return;
2096 }
2097
2098 /*
2099 * Even if power well support was disabled we still want to disable
2100 * power wells if power domains must be deinitialized for suspend.
2101 */
2102 if (!i915->display.params.disable_power_well)
2103 intel_display_power_put(i915, POWER_DOMAIN_INIT,
2104 fetch_and_zero(&i915->display.power.domains.disable_wakeref));
2105
2106 intel_display_power_flush_work(i915);
2107 intel_power_domains_verify_state(i915);
2108
2109 if (DISPLAY_VER(i915) >= 11)
2110 icl_display_core_uninit(i915);
2111 else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
2112 bxt_display_core_uninit(i915);
2113 else if (DISPLAY_VER(i915) == 9)
2114 skl_display_core_uninit(i915);
2115
2116 power_domains->display_core_suspended = true;
2117}
2118
2119/**
2120 * intel_power_domains_resume - resume power domain state
2121 * @i915: i915 device instance
2122 *
2123 * This function resumes the hardware power domain state during system resume.
2124 *
2125 * It will return with power domain support disabled (to be enabled later by
2126 * intel_power_domains_enable()) and must be paired with
2127 * intel_power_domains_suspend().
2128 */
2129void intel_power_domains_resume(struct drm_i915_private *i915)
2130{
2131 struct i915_power_domains *power_domains = &i915->display.power.domains;
2132
2133 if (power_domains->display_core_suspended) {
2134 intel_power_domains_init_hw(i915, true);
2135 power_domains->display_core_suspended = false;
2136 } else {
2137 drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
2138 power_domains->init_wakeref =
2139 intel_display_power_get(i915, POWER_DOMAIN_INIT);
2140 }
2141
2142 intel_power_domains_verify_state(i915);
2143}
2144
2145#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2146
2147static void intel_power_domains_dump_info(struct drm_i915_private *i915)
2148{
2149 struct i915_power_domains *power_domains = &i915->display.power.domains;
2150 struct i915_power_well *power_well;
2151
2152 for_each_power_well(i915, power_well) {
2153 enum intel_display_power_domain domain;
2154
2155 drm_dbg(&i915->drm, "%-25s %d\n",
2156 intel_power_well_name(power_well), intel_power_well_refcount(power_well));
2157
2158 for_each_power_domain(domain, intel_power_well_domains(power_well))
2159 drm_dbg(&i915->drm, " %-23s %d\n",
2160 intel_display_power_domain_str(domain),
2161 power_domains->domain_use_count[domain]);
2162 }
2163}
2164
2165/**
2166 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
2167 * @i915: i915 device instance
2168 *
2169 * Verify if the reference count of each power well matches its HW enabled
2170 * state and the total refcount of the domains it belongs to. This must be
2171 * called after modeset HW state sanitization, which is responsible for
2172 * acquiring reference counts for any power wells in use and disabling the
2173 * ones left on by BIOS but not required by any active output.
2174 */
2175static void intel_power_domains_verify_state(struct drm_i915_private *i915)
2176{
2177 struct i915_power_domains *power_domains = &i915->display.power.domains;
2178 struct i915_power_well *power_well;
2179 bool dump_domain_info;
2180
2181 mutex_lock(&power_domains->lock);
2182
2183 verify_async_put_domains_state(power_domains);
2184
2185 dump_domain_info = false;
2186 for_each_power_well(i915, power_well) {
2187 enum intel_display_power_domain domain;
2188 int domains_count;
2189 bool enabled;
2190
2191 enabled = intel_power_well_is_enabled(i915, power_well);
2192 if ((intel_power_well_refcount(power_well) ||
2193 intel_power_well_is_always_on(power_well)) !=
2194 enabled)
2195 drm_err(&i915->drm,
2196 "power well %s state mismatch (refcount %d/enabled %d)",
2197 intel_power_well_name(power_well),
2198 intel_power_well_refcount(power_well), enabled);
2199
2200 domains_count = 0;
2201 for_each_power_domain(domain, intel_power_well_domains(power_well))
2202 domains_count += power_domains->domain_use_count[domain];
2203
2204 if (intel_power_well_refcount(power_well) != domains_count) {
2205 drm_err(&i915->drm,
2206 "power well %s refcount/domain refcount mismatch "
2207 "(refcount %d/domains refcount %d)\n",
2208 intel_power_well_name(power_well),
2209 intel_power_well_refcount(power_well),
2210 domains_count);
2211 dump_domain_info = true;
2212 }
2213 }
2214
2215 if (dump_domain_info) {
2216 static bool dumped;
2217
2218 if (!dumped) {
2219 intel_power_domains_dump_info(i915);
2220 dumped = true;
2221 }
2222 }
2223
2224 mutex_unlock(&power_domains->lock);
2225}
2226
2227#else
2228
2229static void intel_power_domains_verify_state(struct drm_i915_private *i915)
2230{
2231}
2232
2233#endif
2234
2235void intel_display_power_suspend_late(struct drm_i915_private *i915)
2236{
2237 struct intel_display *display = &i915->display;
2238
2239 if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
2240 IS_BROXTON(i915)) {
2241 bxt_enable_dc9(display);
2242 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2243 hsw_enable_pc8(i915);
2244 }
2245
2246 /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
2247 if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
2248 intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
2249}
2250
2251void intel_display_power_resume_early(struct drm_i915_private *i915)
2252{
2253 struct intel_display *display = &i915->display;
2254
2255 if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
2256 IS_BROXTON(i915)) {
2257 gen9_sanitize_dc_state(display);
2258 bxt_disable_dc9(display);
2259 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2260 hsw_disable_pc8(i915);
2261 }
2262
2263 /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
2264 if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
2265 intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
2266}
2267
2268void intel_display_power_suspend(struct drm_i915_private *i915)
2269{
2270 struct intel_display *display = &i915->display;
2271
2272 if (DISPLAY_VER(i915) >= 11) {
2273 icl_display_core_uninit(i915);
2274 bxt_enable_dc9(display);
2275 } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
2276 bxt_display_core_uninit(i915);
2277 bxt_enable_dc9(display);
2278 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2279 hsw_enable_pc8(i915);
2280 }
2281}
2282
2283void intel_display_power_resume(struct drm_i915_private *i915)
2284{
2285 struct intel_display *display = &i915->display;
2286 struct i915_power_domains *power_domains = &display->power.domains;
2287
2288 if (DISPLAY_VER(i915) >= 11) {
2289 bxt_disable_dc9(display);
2290 icl_display_core_init(i915, true);
2291 if (intel_dmc_has_payload(display)) {
2292 if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
2293 skl_enable_dc6(display);
2294 else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
2295 gen9_enable_dc5(display);
2296 }
2297 } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
2298 bxt_disable_dc9(display);
2299 bxt_display_core_init(i915, true);
2300 if (intel_dmc_has_payload(display) &&
2301 (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
2302 gen9_enable_dc5(display);
2303 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2304 hsw_disable_pc8(i915);
2305 }
2306}
2307
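/*
 * Dump the refcount of every power well and the use count of each of its
 * power domains into the given seq_file (e.g. for a debugfs entry).
 */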
2308void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
2309{
2310 struct i915_power_domains *power_domains = &i915->display.power.domains;
2311 int i;
2312
2313 mutex_lock(&power_domains->lock);
2314
2315 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2316 for (i = 0; i < power_domains->power_well_count; i++) {
2317 struct i915_power_well *power_well;
2318 enum intel_display_power_domain power_domain;
2319
2320 power_well = &power_domains->power_wells[i];
2321 seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well),
2322 intel_power_well_refcount(power_well));
2323
2324 for_each_power_domain(power_domain, intel_power_well_domains(power_well))
2325 seq_printf(m, " %-23s %d\n",
2326 intel_display_power_domain_str(power_domain),
2327 power_domains->domain_use_count[power_domain]);
2328 }
2329
2330 mutex_unlock(&power_domains->lock);
2331}
2332
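/*
 * Each intel_ddi_port_domains entry describes a contiguous range of DDI
 * ports and AUX channels together with the power domains of the first
 * member of that range; the domain for a specific port/AUX channel is
 * derived by adding its offset from port_start/aux_ch_start (see
 * intel_display_power_ddi_io_domain() and friends below).
 */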
2333struct intel_ddi_port_domains {
2334 enum port port_start;
2335 enum port port_end;
2336 enum aux_ch aux_ch_start;
2337 enum aux_ch aux_ch_end;
2338
2339 enum intel_display_power_domain ddi_lanes;
2340 enum intel_display_power_domain ddi_io;
2341 enum intel_display_power_domain aux_io;
2342 enum intel_display_power_domain aux_legacy_usbc;
2343 enum intel_display_power_domain aux_tbt;
2344};
2345
2346static const struct intel_ddi_port_domains
2347i9xx_port_domains[] = {
2348 {
2349 .port_start = PORT_A,
2350 .port_end = PORT_F,
2351 .aux_ch_start = AUX_CH_A,
2352 .aux_ch_end = AUX_CH_F,
2353
2354 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
2355 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
2356 .aux_io = POWER_DOMAIN_AUX_IO_A,
2357 .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
2358 .aux_tbt = POWER_DOMAIN_INVALID,
2359 },
2360};
2361
2362static const struct intel_ddi_port_domains
2363d11_port_domains[] = {
2364 {
2365 .port_start = PORT_A,
2366 .port_end = PORT_B,
2367 .aux_ch_start = AUX_CH_A,
2368 .aux_ch_end = AUX_CH_B,
2369
2370 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
2371 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
2372 .aux_io = POWER_DOMAIN_AUX_IO_A,
2373 .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
2374 .aux_tbt = POWER_DOMAIN_INVALID,
2375 }, {
2376 .port_start = PORT_C,
2377 .port_end = PORT_F,
2378 .aux_ch_start = AUX_CH_C,
2379 .aux_ch_end = AUX_CH_F,
2380
2381 .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C,
2382 .ddi_io = POWER_DOMAIN_PORT_DDI_IO_C,
2383 .aux_io = POWER_DOMAIN_AUX_IO_C,
2384 .aux_legacy_usbc = POWER_DOMAIN_AUX_C,
2385 .aux_tbt = POWER_DOMAIN_AUX_TBT1,
2386 },
2387};
2388
static const struct intel_ddi_port_domains
d12_port_domains[] = {
	{
		.port_start = PORT_A,
		.port_end = PORT_C,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end = AUX_CH_C,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	}, {
		.port_start = PORT_TC1,
		.port_end = PORT_TC6,
		.aux_ch_start = AUX_CH_USBC1,
		.aux_ch_end = AUX_CH_USBC6,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
		.aux_io = POWER_DOMAIN_INVALID,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
	},
};

static const struct intel_ddi_port_domains
d13_port_domains[] = {
	{
		.port_start = PORT_A,
		.port_end = PORT_C,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end = AUX_CH_C,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	}, {
		.port_start = PORT_TC1,
		.port_end = PORT_TC4,
		.aux_ch_start = AUX_CH_USBC1,
		.aux_ch_end = AUX_CH_USBC4,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
		.aux_io = POWER_DOMAIN_INVALID,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
	}, {
		.port_start = PORT_D_XELPD,
		.port_end = PORT_E_XELPD,
		.aux_ch_start = AUX_CH_D_XELPD,
		.aux_ch_end = AUX_CH_E_XELPD,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_D,
		.aux_io = POWER_DOMAIN_AUX_IO_D,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_D,
		.aux_tbt = POWER_DOMAIN_INVALID,
	},
};

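/* Select the port domain table matching the platform's display version. */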
static void
intel_port_domains_for_platform(struct drm_i915_private *i915,
				const struct intel_ddi_port_domains **domains,
				int *domains_size)
{
	if (DISPLAY_VER(i915) >= 13) {
		*domains = d13_port_domains;
		*domains_size = ARRAY_SIZE(d13_port_domains);
	} else if (DISPLAY_VER(i915) >= 12) {
		*domains = d12_port_domains;
		*domains_size = ARRAY_SIZE(d12_port_domains);
	} else if (DISPLAY_VER(i915) >= 11) {
		*domains = d11_port_domains;
		*domains_size = ARRAY_SIZE(d11_port_domains);
	} else {
		*domains = i9xx_port_domains;
		*domains_size = ARRAY_SIZE(i9xx_port_domains);
	}
}

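/* Find the port domain range containing @port, or NULL if there is none. */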
static const struct intel_ddi_port_domains *
intel_port_domains_for_port(struct drm_i915_private *i915, enum port port)
{
	const struct intel_ddi_port_domains *domains;
	int domains_size;
	int i;

	intel_port_domains_for_platform(i915, &domains, &domains_size);
	for (i = 0; i < domains_size; i++)
		if (port >= domains[i].port_start && port <= domains[i].port_end)
			return &domains[i];

	return NULL;
}

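/*
 * Return the DDI IO power domain for @port; warns and falls back to
 * POWER_DOMAIN_PORT_DDI_IO_A if the port has no such domain.
 */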
enum intel_display_power_domain
intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
{
	const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);

	if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID))
		return POWER_DOMAIN_PORT_DDI_IO_A;

	return domains->ddi_io + (int)(port - domains->port_start);
}

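/*
 * Return the DDI lanes power domain for @port; warns and falls back to
 * POWER_DOMAIN_PORT_DDI_LANES_A if the port has no such domain.
 */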
enum intel_display_power_domain
intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port)
{
	const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);

	if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID))
		return POWER_DOMAIN_PORT_DDI_LANES_A;

	return domains->ddi_lanes + (int)(port - domains->port_start);
}

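/* Find the port domain range containing @aux_ch, or NULL if there is none. */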
static const struct intel_ddi_port_domains *
intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
	const struct intel_ddi_port_domains *domains;
	int domains_size;
	int i;

	intel_port_domains_for_platform(i915, &domains, &domains_size);
	for (i = 0; i < domains_size; i++)
		if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end)
			return &domains[i];

	return NULL;
}

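/*
 * Return the AUX IO power domain for @aux_ch; warns and falls back to
 * POWER_DOMAIN_AUX_IO_A if the AUX channel has no such domain.
 */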
enum intel_display_power_domain
intel_display_power_aux_io_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);

	if (drm_WARN_ON(&i915->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID))
		return POWER_DOMAIN_AUX_IO_A;

	return domains->aux_io + (int)(aux_ch - domains->aux_ch_start);
}

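/*
 * Return the legacy/USB-C AUX power domain for @aux_ch; warns and falls back
 * to POWER_DOMAIN_AUX_A if the AUX channel has no such domain.
 */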
enum intel_display_power_domain
intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);

	if (drm_WARN_ON(&i915->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID))
		return POWER_DOMAIN_AUX_A;

	return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start);
}

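/*
 * Return the Thunderbolt AUX power domain for @aux_ch; warns and falls back
 * to POWER_DOMAIN_AUX_TBT1 if the AUX channel has no such domain.
 */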
enum intel_display_power_domain
intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);

	if (drm_WARN_ON(&i915->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
		return POWER_DOMAIN_AUX_TBT1;

	return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
}