v6.13.7
/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_trace.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
 */

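/*
 * Editor's example (illustrative sketch, not part of the upstream file):
 * the canonical pairing of grab and release around hardware access that
 * the DOC above describes. The rpm pointer is &i915->runtime_pm, per
 * rpm_to_i915() below; the with_intel_runtime_pm() macro in
 * intel_runtime_pm.h wraps the same pattern in a scoped for-loop.
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 *	... access registers; the device is guaranteed to be powered up ...
 *	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 */
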
static struct drm_i915_private *rpm_to_i915(struct intel_runtime_pm *rpm)
{
	return container_of(rpm, struct drm_i915_private, runtime_pm);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	ref_tracker_dir_init(&rpm->debug, INTEL_REFTRACK_DEAD_COUNT, dev_name(rpm->kdev));
}

static intel_wakeref_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	if (!rpm->available || rpm->no_wakeref_tracking)
		return INTEL_WAKEREF_DEF;

	return intel_ref_tracker_alloc(&rpm->debug);
}

static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     intel_wakeref_t wakeref)
{
	if (!rpm->available || rpm->no_wakeref_tracking)
		return;

	intel_ref_tracker_free(&rpm->debug, wakeref);
}

static void untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
	ref_tracker_dir_exit(&rpm->debug);
}

static noinline void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	unsigned long flags;

	if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
					 &rpm->debug.lock,
					 flags))
		return;

	ref_tracker_dir_print_locked(&rpm->debug, INTEL_REFTRACK_PRINT_LIMIT);
	spin_unlock_irqrestore(&rpm->debug.lock, flags);
}

void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
				    struct drm_printer *p)
{
	intel_ref_tracker_show(&rpm->debug, p);
}

#else

static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
}

static intel_wakeref_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	return INTEL_WAKEREF_DEF;
}

static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     intel_wakeref_t wakeref)
{
}

static void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	atomic_dec(&rpm->wakeref_count);
}

static void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
}

#endif

static void
intel_runtime_pm_acquire(struct intel_runtime_pm *rpm, bool wakelock)
{
	if (wakelock) {
		atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
		assert_rpm_wakelock_held(rpm);
	} else {
		atomic_inc(&rpm->wakeref_count);
		assert_rpm_raw_wakeref_held(rpm);
	}
}
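
/*
 * Editor's note (derived from the code above; INTEL_RPM_WAKELOCK_BIAS and
 * the intel_rpm_*_count() helpers are assumed to live in
 * intel_runtime_pm.h): wakeref_count packs two counters into one atomic.
 * Every reference counts once in the low bits (the "raw" count), and a
 * wakelock reference additionally adds INTEL_RPM_WAKELOCK_BIAS, so the
 * high bits count wakelocks. That is why the acquire path above adds
 * 1 + BIAS for a wakelock but only 1 for a raw reference, and why the
 * release path below subtracts the BIAS first and then lets the final
 * decrement in __intel_wakeref_dec_and_check_tracking() drop the raw count.
 */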

static void
intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock)
{
	if (wakelock) {
		assert_rpm_wakelock_held(rpm);
		atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
	} else {
		assert_rpm_raw_wakeref_held(rpm);
	}

	__intel_wakeref_dec_and_check_tracking(rpm);
}

static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
					      bool wakelock)
{
	struct drm_i915_private *i915 = rpm_to_i915(rpm);
	int ret;

	ret = pm_runtime_get_sync(rpm->kdev);
	drm_WARN_ONCE(&i915->drm, ret < 0,
		      "pm_runtime_get_sync() failed: %d\n", ret);

	intel_runtime_pm_acquire(rpm, wakelock);

	return track_intel_runtime_pm_wakeref(rpm);
}

/**
 * intel_runtime_pm_get_raw - grab a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for
 * asynchronous PM management from display code) and ensures that the device
 * is powered up. Raw references are not considered during wakelock assert
 * checks, so they should only be used from paths such as error capture and
 * recovery code where deadlocks on the regular wakelock are possible.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put_raw() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get(rpm, false);
}
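
/*
 * Editor's example (illustrative, not upstream code): raw references
 * follow the same acquire/release discipline as regular ones, just with
 * the _raw variants and without participating in the wakelock asserts.
 *
 *	intel_wakeref_t wakeref = intel_runtime_pm_get_raw(rpm);
 *	... error capture work ...
 *	intel_runtime_pm_put_raw(rpm, wakeref);
 */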

/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get(rpm, true);
}

/**
 * __intel_runtime_pm_get_if_active - grab a runtime pm reference if device is active
 * @rpm: the intel_runtime_pm structure
 * @ignore_usecount: get a ref even if dev->power.usage_count is 0
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already active and ensures that it is powered up. It is illegal to access
 * the HW if intel_runtime_pm_get_if_active() reports failure.
 *
 * If @ignore_usecount is true, a reference will be acquired even if there is no
 * user requiring the device to be powered up (dev->power.usage_count == 0).
 * If the function returns false in this case then it's guaranteed that the
 * device's runtime suspend hook has been called already or that it will be
 * called (and hence it's also guaranteed that the device's runtime resume
 * hook will be called eventually).
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm,
							bool ignore_usecount)
{
	if (IS_ENABLED(CONFIG_PM)) {
		/*
		 * In case runtime PM is disabled by the RPM core and we get
		 * an -EINVAL return value, we are not supposed to call this
		 * function, since the power state is undefined. This applies
		 * atm to the late/early system suspend/resume handlers.
		 */
		if ((ignore_usecount &&
		     pm_runtime_get_if_active(rpm->kdev) <= 0) ||
		    (!ignore_usecount &&
		     pm_runtime_get_if_in_use(rpm->kdev) <= 0))
			return NULL;
	}

	intel_runtime_pm_acquire(rpm, true);

	return track_intel_runtime_pm_wakeref(rpm);
}

intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get_if_active(rpm, false);
}

intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get_if_active(rpm, true);
}
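
/*
 * Editor's example (illustrative, not upstream code): a conditional grab,
 * e.g. from a background worker that must not wake a suspended device.
 * Per the kernel-doc above, the cookie evaluates as false when no
 * reference was acquired.
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_runtime_pm_get_if_in_use(rpm);
 *	if (!wakeref)
 *		return;	... device is suspended, nothing to do ...
 *	... touch hardware ...
 *	intel_runtime_pm_put(rpm, wakeref);
 */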

/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference.
 *
 * It will _not_ resume the device but instead only get an extra wakeref.
 * Therefore it is only valid to call this function from contexts where
 * the device is known to be active and another wakeref is already held.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm)
{
	assert_rpm_raw_wakeref_held(rpm);
	pm_runtime_get_noresume(rpm->kdev);

	intel_runtime_pm_acquire(rpm, true);

	return track_intel_runtime_pm_wakeref(rpm);
}
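
/*
 * Editor's example (illustrative, not upstream code; the worker struct is
 * hypothetical): handing a wakeref over to an async context while the
 * caller already holds one, so the device cannot suspend between queuing
 * and the worker running.
 *
 *	worker->wakeref = intel_runtime_pm_get_noresume(rpm);
 *	queue_work(wq, &worker->work);
 *	... the worker calls intel_runtime_pm_put(rpm, worker->wakeref) ...
 */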

static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm,
				   intel_wakeref_t wref,
				   bool wakelock)
{
	struct device *kdev = rpm->kdev;

	untrack_intel_runtime_pm_wakeref(rpm, wref);

	intel_runtime_pm_release(rpm, wakelock);

	pm_runtime_mark_last_busy(kdev);
	pm_runtime_put_autosuspend(kdev);
}

/**
 * intel_runtime_pm_put_raw - release a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get_raw() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void
intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(rpm, wref, false);
}

/**
 * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 *
 * This function exists only for historical reasons and should be avoided in
 * new code, as the correctness of its use cannot be checked. Always use
 * intel_runtime_pm_put() instead.
 */
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm)
{
	__intel_runtime_pm_put(rpm, INTEL_WAKEREF_DEF, true);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(rpm, wref, true);
}
#endif
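
/*
 * Editor's note (assumption, inferred from the #if guard above): when
 * CONFIG_DRM_I915_DEBUG_RUNTIME_PM is disabled, intel_runtime_pm_put() is
 * expected to be provided as a static inline in intel_runtime_pm.h that
 * forwards to intel_runtime_pm_put_unchecked(), which is why the checked
 * definition here is compiled only under the debug config.
 */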

/**
 * intel_runtime_pm_enable - enable runtime pm
 * @rpm: the intel_runtime_pm structure
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does not currently enable runtime pm for the
 * subordinate display power domains. That is done by
 * intel_power_domains_enable().
 */
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = rpm_to_i915(rpm);
	struct device *kdev = rpm->kdev;

	/*
	 * Disable the system suspend direct complete optimization, which can
	 * leave the device suspended, skipping the driver's suspend handlers,
	 * if the device was already runtime suspended. This is needed due to
	 * the difference in our runtime and system suspend sequence and
	 * because the HDA driver may require us to enable the audio power
	 * domain during system suspend.
	 */
	dev_pm_set_driver_flags(kdev, DPM_FLAG_NO_DIRECT_COMPLETE);

	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
	pm_runtime_mark_last_busy(kdev);

	/*
	 * Take a permanent reference to disable the RPM functionality and drop
	 * it only when unloading the driver. Use the low level get/put helpers,
	 * so the driver's own RPM reference tracking asserts also work on
	 * platforms without RPM support.
	 */
	if (!rpm->available) {
		int ret;

		pm_runtime_dont_use_autosuspend(kdev);
		ret = pm_runtime_get_sync(kdev);
		drm_WARN(&i915->drm, ret < 0,
			 "pm_runtime_get_sync() failed: %d\n", ret);
	} else {
		pm_runtime_use_autosuspend(kdev);
	}

	/*
	 * FIXME: Temporary hammer to keep autosuspend disabled on lmem
	 * supported platforms. As per PCIe spec 5.3.1.4.1, all iomem read/write
	 * requests over a PCIe function will be unsupported in case the PCIe
	 * endpoint function is in D3. Keep i915 autosuspend control 'on' till
	 * we fix all known issues with lmem access in D3.
	 */
	if (!IS_DGFX(i915))
		pm_runtime_allow(kdev);

	/*
	 * The core calls the driver load handler with an RPM reference held.
	 * We drop that here and will reacquire it during unloading in
	 * intel_power_domains_fini().
	 */
	pm_runtime_put_autosuspend(kdev);
}

void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = rpm_to_i915(rpm);
	struct device *kdev = rpm->kdev;

	/* Transfer rpm ownership back to core */
	drm_WARN(&i915->drm, pm_runtime_get_sync(kdev) < 0,
		 "Failed to pass rpm ownership back to core\n");

	pm_runtime_dont_use_autosuspend(kdev);

	if (!rpm->available)
		pm_runtime_put(kdev);
}

void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = rpm_to_i915(rpm);
	int count = atomic_read(&rpm->wakeref_count);

	intel_wakeref_auto_fini(&rpm->userfault_wakeref);

	drm_WARN(&i915->drm, count,
		 "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
		 intel_rpm_raw_wakeref_count(count),
		 intel_rpm_wakelock_count(count));
}

void intel_runtime_pm_driver_last_release(struct intel_runtime_pm *rpm)
{
	intel_runtime_pm_driver_release(rpm);
	untrack_all_intel_runtime_pm_wakerefs(rpm);
}

void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = rpm_to_i915(rpm);
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct device *kdev = &pdev->dev;

	rpm->kdev = kdev;
	rpm->available = HAS_RUNTIME_PM(i915);
	atomic_set(&rpm->wakeref_count, 0);

	init_intel_runtime_pm_wakeref(rpm);
	INIT_LIST_HEAD(&rpm->lmem_userfault_list);
	spin_lock_init(&rpm->lmem_userfault_lock);
	intel_wakeref_auto_init(&rpm->userfault_wakeref, i915);
}
v4.17
   1/*
   2 * Copyright © 2012-2014 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
  25 *    Daniel Vetter <daniel.vetter@ffwll.ch>
  26 *
  27 */
  28
  29#include <linux/pm_runtime.h>
  30#include <linux/vgaarb.h>
 
  31
  32#include "i915_drv.h"
  33#include "intel_drv.h"
  34
  35/**
  36 * DOC: runtime pm
  37 *
  38 * The i915 driver supports dynamic enabling and disabling of entire hardware
  39 * blocks at runtime. This is especially important on the display side where
  40 * software is supposed to control many power gates manually on recent hardware,
  41 * since on the GT side a lot of the power management is done by the hardware.
  42 * But even there some manual control at the device level is required.
  43 *
  44 * Since i915 supports a diverse set of platforms with a unified codebase and
  45 * hardware engineers just love to shuffle functionality around between power
  46 * domains there's a sizeable amount of indirection required. This file provides
  47 * generic functions to the driver for grabbing and releasing references for
  48 * abstract power domains. It then maps those to the actual power wells
  49 * present for a given platform.
  50 */
  51
  52bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
  53					 enum i915_power_well_id power_well_id);
  54
  55static struct i915_power_well *
  56lookup_power_well(struct drm_i915_private *dev_priv,
  57		  enum i915_power_well_id power_well_id);
  58
  59const char *
  60intel_display_power_domain_str(enum intel_display_power_domain domain)
  61{
  62	switch (domain) {
  63	case POWER_DOMAIN_PIPE_A:
  64		return "PIPE_A";
  65	case POWER_DOMAIN_PIPE_B:
  66		return "PIPE_B";
  67	case POWER_DOMAIN_PIPE_C:
  68		return "PIPE_C";
  69	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
  70		return "PIPE_A_PANEL_FITTER";
  71	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
  72		return "PIPE_B_PANEL_FITTER";
  73	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
  74		return "PIPE_C_PANEL_FITTER";
  75	case POWER_DOMAIN_TRANSCODER_A:
  76		return "TRANSCODER_A";
  77	case POWER_DOMAIN_TRANSCODER_B:
  78		return "TRANSCODER_B";
  79	case POWER_DOMAIN_TRANSCODER_C:
  80		return "TRANSCODER_C";
  81	case POWER_DOMAIN_TRANSCODER_EDP:
  82		return "TRANSCODER_EDP";
  83	case POWER_DOMAIN_TRANSCODER_DSI_A:
  84		return "TRANSCODER_DSI_A";
  85	case POWER_DOMAIN_TRANSCODER_DSI_C:
  86		return "TRANSCODER_DSI_C";
  87	case POWER_DOMAIN_PORT_DDI_A_LANES:
  88		return "PORT_DDI_A_LANES";
  89	case POWER_DOMAIN_PORT_DDI_B_LANES:
  90		return "PORT_DDI_B_LANES";
  91	case POWER_DOMAIN_PORT_DDI_C_LANES:
  92		return "PORT_DDI_C_LANES";
  93	case POWER_DOMAIN_PORT_DDI_D_LANES:
  94		return "PORT_DDI_D_LANES";
  95	case POWER_DOMAIN_PORT_DDI_E_LANES:
  96		return "PORT_DDI_E_LANES";
  97	case POWER_DOMAIN_PORT_DDI_F_LANES:
  98		return "PORT_DDI_F_LANES";
  99	case POWER_DOMAIN_PORT_DDI_A_IO:
 100		return "PORT_DDI_A_IO";
 101	case POWER_DOMAIN_PORT_DDI_B_IO:
 102		return "PORT_DDI_B_IO";
 103	case POWER_DOMAIN_PORT_DDI_C_IO:
 104		return "PORT_DDI_C_IO";
 105	case POWER_DOMAIN_PORT_DDI_D_IO:
 106		return "PORT_DDI_D_IO";
 107	case POWER_DOMAIN_PORT_DDI_E_IO:
 108		return "PORT_DDI_E_IO";
 109	case POWER_DOMAIN_PORT_DDI_F_IO:
 110		return "PORT_DDI_F_IO";
 111	case POWER_DOMAIN_PORT_DSI:
 112		return "PORT_DSI";
 113	case POWER_DOMAIN_PORT_CRT:
 114		return "PORT_CRT";
 115	case POWER_DOMAIN_PORT_OTHER:
 116		return "PORT_OTHER";
 117	case POWER_DOMAIN_VGA:
 118		return "VGA";
 119	case POWER_DOMAIN_AUDIO:
 120		return "AUDIO";
 121	case POWER_DOMAIN_PLLS:
 122		return "PLLS";
 123	case POWER_DOMAIN_AUX_A:
 124		return "AUX_A";
 125	case POWER_DOMAIN_AUX_B:
 126		return "AUX_B";
 127	case POWER_DOMAIN_AUX_C:
 128		return "AUX_C";
 129	case POWER_DOMAIN_AUX_D:
 130		return "AUX_D";
 131	case POWER_DOMAIN_AUX_F:
 132		return "AUX_F";
 133	case POWER_DOMAIN_AUX_IO_A:
 134		return "AUX_IO_A";
 135	case POWER_DOMAIN_GMBUS:
 136		return "GMBUS";
 137	case POWER_DOMAIN_INIT:
 138		return "INIT";
 139	case POWER_DOMAIN_MODESET:
 140		return "MODESET";
 141	case POWER_DOMAIN_GT_IRQ:
 142		return "GT_IRQ";
 143	default:
 144		MISSING_CASE(domain);
 145		return "?";
 146	}
 147}
 148
 149static void intel_power_well_enable(struct drm_i915_private *dev_priv,
 150				    struct i915_power_well *power_well)
 151{
 152	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
 153	power_well->ops->enable(dev_priv, power_well);
 154	power_well->hw_enabled = true;
 155}
 156
 157static void intel_power_well_disable(struct drm_i915_private *dev_priv,
 158				     struct i915_power_well *power_well)
 159{
 160	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
 161	power_well->hw_enabled = false;
 162	power_well->ops->disable(dev_priv, power_well);
 163}
 164
 165static void intel_power_well_get(struct drm_i915_private *dev_priv,
 166				 struct i915_power_well *power_well)
 167{
 168	if (!power_well->count++)
 169		intel_power_well_enable(dev_priv, power_well);
 170}
 171
 172static void intel_power_well_put(struct drm_i915_private *dev_priv,
 173				 struct i915_power_well *power_well)
 174{
 175	WARN(!power_well->count, "Use count on power well %s is already zero",
 176	     power_well->name);
 177
 178	if (!--power_well->count)
 179		intel_power_well_disable(dev_priv, power_well);
 180}
 181
 182/**
 183 * __intel_display_power_is_enabled - unlocked check for a power domain
 184 * @dev_priv: i915 device instance
 185 * @domain: power domain to check
 186 *
 187 * This is the unlocked version of intel_display_power_is_enabled() and should
 188 * only be used from error capture and recovery code where deadlocks are
 189 * possible.
 190 *
 191 * Returns:
 192 * True when the power domain is enabled, false otherwise.
 193 */
 194bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
 195				      enum intel_display_power_domain domain)
 196{
 197	struct i915_power_well *power_well;
 198	bool is_enabled;
 199
 200	if (dev_priv->runtime_pm.suspended)
 201		return false;
 202
 203	is_enabled = true;
 204
 205	for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
 206		if (power_well->always_on)
 207			continue;
 208
 209		if (!power_well->hw_enabled) {
 210			is_enabled = false;
 211			break;
 212		}
 213	}
 214
 215	return is_enabled;
 216}
 217
 218/**
 219 * intel_display_power_is_enabled - check for a power domain
 220 * @dev_priv: i915 device instance
 221 * @domain: power domain to check
 222 *
 223 * This function can be used to check the hw power domain state. It is mostly
 224 * used in hardware state readout functions. Everywhere else code should rely
 225 * upon explicit power domain reference counting to ensure that the hardware
 226 * block is powered up before accessing it.
 227 *
 228 * Callers must hold the relevant modesetting locks to ensure that concurrent
 229 * threads can't disable the power well while the caller tries to read a few
 230 * registers.
 231 *
 232 * Returns:
 233 * True when the power domain is enabled, false otherwise.
 234 */
 235bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
 236				    enum intel_display_power_domain domain)
 237{
 238	struct i915_power_domains *power_domains;
 239	bool ret;
 240
 241	power_domains = &dev_priv->power_domains;
 242
 243	mutex_lock(&power_domains->lock);
 244	ret = __intel_display_power_is_enabled(dev_priv, domain);
 245	mutex_unlock(&power_domains->lock);
 246
 247	return ret;
 248}
 249
 250/**
 251 * intel_display_set_init_power - set the initial power domain state
 252 * @dev_priv: i915 device instance
 253 * @enable: whether to enable or disable the initial power domain state
 254 *
 255 * For simplicity our driver load/unload and system suspend/resume code assumes
 256 * that all power domains are always enabled. This functions controls the state
 257 * of this little hack. While the initial power domain state is enabled runtime
 258 * pm is effectively disabled.
 259 */
 260void intel_display_set_init_power(struct drm_i915_private *dev_priv,
 261				  bool enable)
 262{
 263	if (dev_priv->power_domains.init_power_on == enable)
 264		return;
 265
 266	if (enable)
 267		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 268	else
 269		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
 270
 271	dev_priv->power_domains.init_power_on = enable;
 272}
 273
 274/*
 275 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 276 * when not needed anymore. We have 4 registers that can request the power well
 277 * to be enabled, and it will only be disabled if none of the registers is
 278 * requesting it to be enabled.
 279 */
 280static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
 281				       u8 irq_pipe_mask, bool has_vga)
 282{
 283	struct pci_dev *pdev = dev_priv->drm.pdev;
 284
 285	/*
 286	 * After we re-enable the power well, if we touch VGA register 0x3d5
 287	 * we'll get unclaimed register interrupts. This stops after we write
 288	 * anything to the VGA MSR register. The vgacon module uses this
 289	 * register all the time, so if we unbind our driver and, as a
 290	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
 291	 * console_unlock(). So make here we touch the VGA MSR register, making
 292	 * sure vgacon can keep working normally without triggering interrupts
 293	 * and error messages.
 294	 */
 295	if (has_vga) {
 296		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
 297		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
 298		vga_put(pdev, VGA_RSRC_LEGACY_IO);
 299	}
 300
 301	if (irq_pipe_mask)
 302		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
 303}
 304
 305static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
 306				       u8 irq_pipe_mask)
 307{
 308	if (irq_pipe_mask)
 309		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
 310}
 311
 312
 313static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
 314					   struct i915_power_well *power_well)
 315{
 316	enum i915_power_well_id id = power_well->id;
 317
 318	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
 319	WARN_ON(intel_wait_for_register(dev_priv,
 320					HSW_PWR_WELL_CTL_DRIVER(id),
 321					HSW_PWR_WELL_CTL_STATE(id),
 322					HSW_PWR_WELL_CTL_STATE(id),
 323					1));
 324}
 325
 326static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
 327				     enum i915_power_well_id id)
 328{
 329	u32 req_mask = HSW_PWR_WELL_CTL_REQ(id);
 330	u32 ret;
 331
 332	ret = I915_READ(HSW_PWR_WELL_CTL_BIOS(id)) & req_mask ? 1 : 0;
 333	ret |= I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & req_mask ? 2 : 0;
 334	ret |= I915_READ(HSW_PWR_WELL_CTL_KVMR) & req_mask ? 4 : 0;
 335	ret |= I915_READ(HSW_PWR_WELL_CTL_DEBUG(id)) & req_mask ? 8 : 0;
 336
 337	return ret;
 338}
 339
 340static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
 341					    struct i915_power_well *power_well)
 342{
 343	enum i915_power_well_id id = power_well->id;
 344	bool disabled;
 345	u32 reqs;
 346
 347	/*
 348	 * Bspec doesn't require waiting for PWs to get disabled, but still do
 349	 * this for paranoia. The known cases where a PW will be forced on:
 350	 * - a KVMR request on any power well via the KVMR request register
 351	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
 352	 *   DEBUG request registers
 353	 * Skip the wait in case any of the request bits are set and print a
 354	 * diagnostic message.
 355	 */
 356	wait_for((disabled = !(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) &
 357			       HSW_PWR_WELL_CTL_STATE(id))) ||
 358		 (reqs = hsw_power_well_requesters(dev_priv, id)), 1);
 359	if (disabled)
 360		return;
 361
 362	DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
 363		      power_well->name,
 364		      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
 365}
 366
 367static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
 368					   enum skl_power_gate pg)
 369{
 370	/* Timeout 5us for PG#0, for other PGs 1us */
 371	WARN_ON(intel_wait_for_register(dev_priv, SKL_FUSE_STATUS,
 372					SKL_FUSE_PG_DIST_STATUS(pg),
 373					SKL_FUSE_PG_DIST_STATUS(pg), 1));
 374}
 375
 376static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
 377				  struct i915_power_well *power_well)
 378{
 379	enum i915_power_well_id id = power_well->id;
 380	bool wait_fuses = power_well->hsw.has_fuses;
 381	enum skl_power_gate uninitialized_var(pg);
 382	u32 val;
 383
 384	if (wait_fuses) {
 385		pg = SKL_PW_TO_PG(id);
 386		/*
 387		 * For PW1 we have to wait both for the PW0/PG0 fuse state
 388		 * before enabling the power well and PW1/PG1's own fuse
 389		 * state after the enabling. For all other power wells with
 390		 * fuses we only have to wait for that PW/PG's fuse state
 391		 * after the enabling.
 392		 */
 393		if (pg == SKL_PG1)
 394			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
 395	}
 396
 397	val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
 398	I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id));
 399	hsw_wait_for_power_well_enable(dev_priv, power_well);
 400
 401	/* Display WA #1178: cnl */
 402	if (IS_CANNONLAKE(dev_priv) &&
 403	    (id == CNL_DISP_PW_AUX_B || id == CNL_DISP_PW_AUX_C ||
 404	     id == CNL_DISP_PW_AUX_D || id == CNL_DISP_PW_AUX_F)) {
 405		val = I915_READ(CNL_AUX_ANAOVRD1(id));
 406		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
 407		I915_WRITE(CNL_AUX_ANAOVRD1(id), val);
 408	}
 409
 410	if (wait_fuses)
 411		gen9_wait_for_power_well_fuses(dev_priv, pg);
 412
 413	hsw_power_well_post_enable(dev_priv, power_well->hsw.irq_pipe_mask,
 414				   power_well->hsw.has_vga);
 415}
 416
 417static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
 418				   struct i915_power_well *power_well)
 419{
 420	enum i915_power_well_id id = power_well->id;
 421	u32 val;
 422
 423	hsw_power_well_pre_disable(dev_priv, power_well->hsw.irq_pipe_mask);
 424
 425	val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
 426	I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id),
 427		   val & ~HSW_PWR_WELL_CTL_REQ(id));
 428	hsw_wait_for_power_well_disable(dev_priv, power_well);
 429}
 430
 431/*
 432 * We should only use the power well if we explicitly asked the hardware to
 433 * enable it, so check if it's enabled and also check if we've requested it to
 434 * be enabled.
 435 */
 436static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
 437				   struct i915_power_well *power_well)
 438{
 439	enum i915_power_well_id id = power_well->id;
 440	u32 mask = HSW_PWR_WELL_CTL_REQ(id) | HSW_PWR_WELL_CTL_STATE(id);
 441
 442	return (I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & mask) == mask;
 443}
 444
 445static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
 
 446{
 447	enum i915_power_well_id id = SKL_DISP_PW_2;
 448
 449	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
 450		  "DC9 already programmed to be enabled.\n");
 451	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
 452		  "DC5 still not disabled to enable DC9.\n");
 453	WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) &
 454		  HSW_PWR_WELL_CTL_REQ(id),
 455		  "Power well 2 on.\n");
 456	WARN_ONCE(intel_irqs_enabled(dev_priv),
 457		  "Interrupts not disabled yet.\n");
 458
 459	 /*
 460	  * TODO: check for the following to verify the conditions to enter DC9
 461	  * state are satisfied:
 462	  * 1] Check relevant display engine registers to verify if mode set
 463	  * disable sequence was followed.
 464	  * 2] Check if display uninitialize sequence is initialized.
 465	  */
 466}
 467
 468static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
 
 469{
 470	WARN_ONCE(intel_irqs_enabled(dev_priv),
 471		  "Interrupts not disabled yet.\n");
 472	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
 473		  "DC5 still not disabled.\n");
 474
 475	 /*
 476	  * TODO: check for the following to verify DC9 state was indeed
 477	  * entered before programming to disable it:
 478	  * 1] Check relevant display engine registers to verify if mode
 479	  *  set disable sequence was followed.
 480	  * 2] Check if display uninitialize sequence is initialized.
 481	  */
 482}
 483
 484static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
 485				u32 state)
 486{
 487	int rewrites = 0;
 488	int rereads = 0;
 489	u32 v;
 490
 491	I915_WRITE(DC_STATE_EN, state);
 492
 493	/* It has been observed that disabling the dc6 state sometimes
 494	 * doesn't stick and dmc keeps returning old value. Make sure
 495	 * the write really sticks enough times and also force rewrite until
 496	 * we are confident that state is exactly what we want.
 497	 */
 498	do  {
 499		v = I915_READ(DC_STATE_EN);
 500
 501		if (v != state) {
 502			I915_WRITE(DC_STATE_EN, state);
 503			rewrites++;
 504			rereads = 0;
 505		} else if (rereads++ > 5) {
 506			break;
 507		}
 508
 509	} while (rewrites < 100);
 510
 511	if (v != state)
 512		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
 513			  state, v);
 514
 515	/* Most of the times we need one retry, avoid spam */
 516	if (rewrites > 1)
 517		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
 518			      state, rewrites);
 519}
 520
 521static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
 522{
 523	u32 mask;
 524
 525	mask = DC_STATE_EN_UPTO_DC5;
 526	if (IS_GEN9_LP(dev_priv))
 527		mask |= DC_STATE_EN_DC9;
 528	else
 529		mask |= DC_STATE_EN_UPTO_DC6;
 530
 531	return mask;
 532}
 533
 534void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
 535{
 536	u32 val;
 537
 538	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
 539
 540	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
 541		      dev_priv->csr.dc_state, val);
 542	dev_priv->csr.dc_state = val;
 543}
 544
 545static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
 546{
 547	uint32_t val;
 548	uint32_t mask;
 549
 550	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
 551		state &= dev_priv->csr.allowed_dc_mask;
 552
 553	val = I915_READ(DC_STATE_EN);
 554	mask = gen9_dc_mask(dev_priv);
 555	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
 556		      val & mask, state);
 557
 558	/* Check if DMC is ignoring our DC state requests */
 559	if ((val & mask) != dev_priv->csr.dc_state)
 560		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
 561			  dev_priv->csr.dc_state, val & mask);
 562
 563	val &= ~mask;
 564	val |= state;
 565
 566	gen9_write_dc_state(dev_priv, val);
 567
 568	dev_priv->csr.dc_state = val & mask;
 569}
 570
 571void bxt_enable_dc9(struct drm_i915_private *dev_priv)
 572{
 573	assert_can_enable_dc9(dev_priv);
 574
 575	DRM_DEBUG_KMS("Enabling DC9\n");
 576
 577	intel_power_sequencer_reset(dev_priv);
 578	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
 579}
 580
 581void bxt_disable_dc9(struct drm_i915_private *dev_priv)
 582{
 583	assert_can_disable_dc9(dev_priv);
 584
 585	DRM_DEBUG_KMS("Disabling DC9\n");
 586
 587	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 588
 589	intel_pps_unlock_regs_wa(dev_priv);
 590}
 591
 592static void assert_csr_loaded(struct drm_i915_private *dev_priv)
 593{
 594	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
 595		  "CSR program storage start is NULL\n");
 596	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
 597	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
 598}
 599
 600static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
 601{
 602	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
 603					SKL_DISP_PW_2);
 604
 605	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
 606
 607	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
 608		  "DC5 already programmed to be enabled.\n");
 609	assert_rpm_wakelock_held(dev_priv);
 610
 611	assert_csr_loaded(dev_priv);
 612}
 613
 614void gen9_enable_dc5(struct drm_i915_private *dev_priv)
 615{
 616	assert_can_enable_dc5(dev_priv);
 617
 618	DRM_DEBUG_KMS("Enabling DC5\n");
 619
 620	/* Wa Display #1183: skl,kbl,cfl */
 621	if (IS_GEN9_BC(dev_priv))
 622		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
 623			   SKL_SELECT_ALTERNATE_DC_EXIT);
 624
 625	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
 626}
 627
 628static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
 629{
 630	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
 631		  "Backlight is not disabled.\n");
 632	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
 633		  "DC6 already programmed to be enabled.\n");
 634
 635	assert_csr_loaded(dev_priv);
 636}
 637
 638void skl_enable_dc6(struct drm_i915_private *dev_priv)
 639{
 640	assert_can_enable_dc6(dev_priv);
 641
 642	DRM_DEBUG_KMS("Enabling DC6\n");
 643
 644	/* Wa Display #1183: skl,kbl,cfl */
 645	if (IS_GEN9_BC(dev_priv))
 646		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
 647			   SKL_SELECT_ALTERNATE_DC_EXIT);
 648
 649	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
 650}
 651
 652void skl_disable_dc6(struct drm_i915_private *dev_priv)
 653{
 654	DRM_DEBUG_KMS("Disabling DC6\n");
 655
 656	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 657}
 658
 659static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
 660				   struct i915_power_well *power_well)
 661{
 662	enum i915_power_well_id id = power_well->id;
 663	u32 mask = HSW_PWR_WELL_CTL_REQ(id);
 664	u32 bios_req = I915_READ(HSW_PWR_WELL_CTL_BIOS(id));
 665
 666	/* Take over the request bit if set by BIOS. */
 667	if (bios_req & mask) {
 668		u32 drv_req = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
 669
 670		if (!(drv_req & mask))
 671			I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), drv_req | mask);
 672		I915_WRITE(HSW_PWR_WELL_CTL_BIOS(id), bios_req & ~mask);
 673	}
 674}
 675
 676static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
 677					   struct i915_power_well *power_well)
 678{
 679	bxt_ddi_phy_init(dev_priv, power_well->bxt.phy);
 680}
 681
 682static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
 683					    struct i915_power_well *power_well)
 684{
 685	bxt_ddi_phy_uninit(dev_priv, power_well->bxt.phy);
 686}
 687
 688static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
 689					    struct i915_power_well *power_well)
 690{
 691	return bxt_ddi_phy_is_enabled(dev_priv, power_well->bxt.phy);
 692}
 693
 694static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
 695{
 696	struct i915_power_well *power_well;
 697
 698	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
 699	if (power_well->count > 0)
 700		bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
 701
 702	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
 703	if (power_well->count > 0)
 704		bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
 705
 706	if (IS_GEMINILAKE(dev_priv)) {
 707		power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C);
 708		if (power_well->count > 0)
 709			bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
 710	}
 711}
 712
 713static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
 714					   struct i915_power_well *power_well)
 715{
 716	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
 717}
 718
 719static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
 720{
 721	u32 tmp = I915_READ(DBUF_CTL);
 722
 723	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
 724	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
 725	     "Unexpected DBuf power power state (0x%08x)\n", tmp);
 726}
 727
 728static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
 729					  struct i915_power_well *power_well)
 730{
 731	struct intel_cdclk_state cdclk_state = {};
 732
 733	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 734
 735	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
 736	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
 737	WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));
 738
 739	gen9_assert_dbuf_enabled(dev_priv);
 740
 741	if (IS_GEN9_LP(dev_priv))
 742		bxt_verify_ddi_phy_power_wells(dev_priv);
 743}
 744
 745static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
 746					   struct i915_power_well *power_well)
 747{
 748	if (!dev_priv->csr.dmc_payload)
 749		return;
 750
 751	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
 752		skl_enable_dc6(dev_priv);
 753	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
 754		gen9_enable_dc5(dev_priv);
 755}
 756
 757static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
 758					 struct i915_power_well *power_well)
 759{
 760}
 761
 762static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
 763					   struct i915_power_well *power_well)
 764{
 765}
 766
 767static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
 768					     struct i915_power_well *power_well)
 769{
 770	return true;
 771}
 772
 773static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
 774					 struct i915_power_well *power_well)
 775{
 776	if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
 777		i830_enable_pipe(dev_priv, PIPE_A);
 778	if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
 779		i830_enable_pipe(dev_priv, PIPE_B);
 780}
 781
 782static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
 783					  struct i915_power_well *power_well)
 784{
 785	i830_disable_pipe(dev_priv, PIPE_B);
 786	i830_disable_pipe(dev_priv, PIPE_A);
 787}
 788
 789static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
 790					  struct i915_power_well *power_well)
 791{
 792	return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
 793		I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
 794}
 795
 796static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
 797					  struct i915_power_well *power_well)
 798{
 799	if (power_well->count > 0)
 800		i830_pipes_power_well_enable(dev_priv, power_well);
 801	else
 802		i830_pipes_power_well_disable(dev_priv, power_well);
 803}
 804
 805static void vlv_set_power_well(struct drm_i915_private *dev_priv,
 806			       struct i915_power_well *power_well, bool enable)
 807{
 808	enum i915_power_well_id power_well_id = power_well->id;
 809	u32 mask;
 810	u32 state;
 811	u32 ctrl;
 812
 813	mask = PUNIT_PWRGT_MASK(power_well_id);
 814	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
 815			 PUNIT_PWRGT_PWR_GATE(power_well_id);
 816
 817	mutex_lock(&dev_priv->pcu_lock);
 818
 819#define COND \
 820	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
 821
 822	if (COND)
 823		goto out;
 824
 825	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
 826	ctrl &= ~mask;
 827	ctrl |= state;
 828	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
 829
 830	if (wait_for(COND, 100))
 831		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
 832			  state,
 833			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
 834
 835#undef COND
 836
 837out:
 838	mutex_unlock(&dev_priv->pcu_lock);
 839}
 840
 841static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
 842				  struct i915_power_well *power_well)
 843{
 844	vlv_set_power_well(dev_priv, power_well, true);
 845}
 846
 847static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
 848				   struct i915_power_well *power_well)
 849{
 850	vlv_set_power_well(dev_priv, power_well, false);
 851}
 852
 853static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
 854				   struct i915_power_well *power_well)
 855{
 856	enum i915_power_well_id power_well_id = power_well->id;
 857	bool enabled = false;
 858	u32 mask;
 859	u32 state;
 860	u32 ctrl;
 861
 862	mask = PUNIT_PWRGT_MASK(power_well_id);
 863	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
 864
 865	mutex_lock(&dev_priv->pcu_lock);
 866
 867	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
 868	/*
 869	 * We only ever set the power-on and power-gate states, anything
 870	 * else is unexpected.
 871	 */
 872	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
 873		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
 874	if (state == ctrl)
 875		enabled = true;
 876
 877	/*
 878	 * A transient state at this point would mean some unexpected party
 879	 * is poking at the power controls too.
 880	 */
 881	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
 882	WARN_ON(ctrl != state);
 883
 884	mutex_unlock(&dev_priv->pcu_lock);
 885
 886	return enabled;
 887}
 888
 889static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
 890{
 891	u32 val;
 892
 893	/*
 894	 * On driver load, a pipe may be active and driving a DSI display.
 895	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
 896	 * (and never recovering) in this case. intel_dsi_post_disable() will
 897	 * clear it when we turn off the display.
 898	 */
 899	val = I915_READ(DSPCLK_GATE_D);
 900	val &= DPOUNIT_CLOCK_GATE_DISABLE;
 901	val |= VRHUNIT_CLOCK_GATE_DISABLE;
 902	I915_WRITE(DSPCLK_GATE_D, val);
 903
 904	/*
 905	 * Disable trickle feed and enable pnd deadline calculation
 906	 */
 907	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
 908	I915_WRITE(CBR1_VLV, 0);
 909
 910	WARN_ON(dev_priv->rawclk_freq == 0);
 911
 912	I915_WRITE(RAWCLK_FREQ_VLV,
 913		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
 914}
 915
 916static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
 917{
 918	struct intel_encoder *encoder;
 919	enum pipe pipe;
 920
 921	/*
 922	 * Enable the CRI clock source so we can get at the
 923	 * display and the reference clock for VGA
 924	 * hotplug / manual detection. Supposedly DSI also
 925	 * needs the ref clock up and running.
 926	 *
 927	 * CHV DPLL B/C have some issues if VGA mode is enabled.
 928	 */
 929	for_each_pipe(dev_priv, pipe) {
 930		u32 val = I915_READ(DPLL(pipe));
 931
 932		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
 933		if (pipe != PIPE_A)
 934			val |= DPLL_INTEGRATED_CRI_CLK_VLV;
 935
 936		I915_WRITE(DPLL(pipe), val);
 937	}
 938
 939	vlv_init_display_clock_gating(dev_priv);
 940
 941	spin_lock_irq(&dev_priv->irq_lock);
 942	valleyview_enable_display_irqs(dev_priv);
 943	spin_unlock_irq(&dev_priv->irq_lock);
 944
 945	/*
 946	 * During driver initialization/resume we can avoid restoring the
 947	 * part of the HW/SW state that will be inited anyway explicitly.
 948	 */
 949	if (dev_priv->power_domains.initializing)
 950		return;
 951
 952	intel_hpd_init(dev_priv);
 953
 954	/* Re-enable the ADPA, if we have one */
 955	for_each_intel_encoder(&dev_priv->drm, encoder) {
 956		if (encoder->type == INTEL_OUTPUT_ANALOG)
 957			intel_crt_reset(&encoder->base);
 958	}
 959
 960	i915_redisable_vga_power_on(dev_priv);
 961
 962	intel_pps_unlock_regs_wa(dev_priv);
 963}
 964
 965static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
 966{
 967	spin_lock_irq(&dev_priv->irq_lock);
 968	valleyview_disable_display_irqs(dev_priv);
 969	spin_unlock_irq(&dev_priv->irq_lock);
 970
 971	/* make sure we're done processing display irqs */
 972	synchronize_irq(dev_priv->drm.irq);
 973
 974	intel_power_sequencer_reset(dev_priv);
 975
 976	/* Prevent us from re-enabling polling on accident in late suspend */
 977	if (!dev_priv->drm.dev->power.is_suspended)
 978		intel_hpd_poll_init(dev_priv);
 979}
 980
 981static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
 982					  struct i915_power_well *power_well)
 983{
 984	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
 985
 986	vlv_set_power_well(dev_priv, power_well, true);
 987
 988	vlv_display_power_well_init(dev_priv);
 989}
 990
 991static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
 992					   struct i915_power_well *power_well)
 993{
 994	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
 995
 996	vlv_display_power_well_deinit(dev_priv);
 997
 998	vlv_set_power_well(dev_priv, power_well, false);
 999}
1000
1001static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1002					   struct i915_power_well *power_well)
1003{
1004	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
1005
1006	/* since ref/cri clock was enabled */
1007	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1008
1009	vlv_set_power_well(dev_priv, power_well, true);
1010
1011	/*
1012	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1013	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
1014	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
1015	 *   b.	The other bits such as sfr settings / modesel may all
1016	 *	be set to 0.
1017	 *
1018	 * This should only be done on init and resume from S3 with
1019	 * both PLLs disabled, or we risk losing DPIO and PLL
1020	 * synchronization.
1021	 */
1022	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
1023}
1024
1025static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1026					    struct i915_power_well *power_well)
1027{
1028	enum pipe pipe;
1029
1030	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
1031
1032	for_each_pipe(dev_priv, pipe)
1033		assert_pll_disabled(dev_priv, pipe);
1034
1035	/* Assert common reset */
1036	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
1037
1038	vlv_set_power_well(dev_priv, power_well, false);
1039}
1040
1041#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
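
/*
 * Illustrative expansion (assuming, say, POWER_DOMAIN_NUM == 62):
 * POWER_DOMAIN_MASK would be GENMASK_ULL(61, 0), i.e. a u64 with the low
 * 62 bits set, one bit per power domain.
 */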
1042
1043static struct i915_power_well *
1044lookup_power_well(struct drm_i915_private *dev_priv,
1045		  enum i915_power_well_id power_well_id)
1046{
1047	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1048	int i;
1049
1050	for (i = 0; i < power_domains->power_well_count; i++) {
1051		struct i915_power_well *power_well;
1052
1053		power_well = &power_domains->power_wells[i];
1054		if (power_well->id == power_well_id)
1055			return power_well;
1056	}
1057
1058	return NULL;
1059}
1060
1061#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
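
/*
 * Example (illustrative): BITS_SET(0x7, 0x3) is true since both requested
 * bits are set, whereas BITS_SET(0x5, 0x3) is false because bit 1 is clear.
 */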
1062
1063static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1064{
1065	struct i915_power_well *cmn_bc =
1066		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
1067	struct i915_power_well *cmn_d =
1068		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
1069	u32 phy_control = dev_priv->chv_phy_control;
1070	u32 phy_status = 0;
1071	u32 phy_status_mask = 0xffffffff;
1072
1073	/*
 1074	 * The BIOS can leave the PHY in some weird state
 1075	 * where it doesn't fully power down some parts.
 1076	 * Disable the asserts until the PHY has been fully
 1077	 * reset (i.e. the power well has been disabled at
1078	 * least once).
1079	 */
1080	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1081		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1082				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1083				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1084				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1085				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1086				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1087
1088	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1089		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1090				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1091				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1092
1093	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
1094		phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1095
1096		/* this assumes override is only used to enable lanes */
1097		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1098			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1099
1100		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1101			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1102
1103		/* CL1 is on whenever anything is on in either channel */
1104		if (BITS_SET(phy_control,
1105			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1106			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1107			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1108
1109		/*
1110		 * The DPLLB check accounts for the pipe B + port A usage
1111		 * with CL2 powered up but all the lanes in the second channel
1112		 * powered down.
1113		 */
1114		if (BITS_SET(phy_control,
1115			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1116		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1117			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
1118
1119		if (BITS_SET(phy_control,
1120			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1121			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1122		if (BITS_SET(phy_control,
1123			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1124			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1125
1126		if (BITS_SET(phy_control,
1127			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1128			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1129		if (BITS_SET(phy_control,
1130			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1131			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1132	}
1133
1134	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
1135		phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1136
1137		/* this assumes override is only used to enable lanes */
1138		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1139			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1140
1141		if (BITS_SET(phy_control,
1142			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1143			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1144
1145		if (BITS_SET(phy_control,
1146			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1147			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1148		if (BITS_SET(phy_control,
1149			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1150			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1151	}
1152
1153	phy_status &= phy_status_mask;
1154
1155	/*
1156	 * The PHY may be busy with some initial calibration and whatnot,
1157	 * so the power state can take a while to actually change.
1158	 */
1159	if (intel_wait_for_register(dev_priv,
1160				    DISPLAY_PHY_STATUS,
1161				    phy_status_mask,
1162				    phy_status,
1163				    10))
1164		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1165			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
 1166			  phy_status, dev_priv->chv_phy_control);
1167}
1168
1169#undef BITS_SET
1170
1171static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1172					   struct i915_power_well *power_well)
1173{
1174	enum dpio_phy phy;
1175	enum pipe pipe;
1176	uint32_t tmp;
1177
1178	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
1179		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
1180
1181	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
1182		pipe = PIPE_A;
1183		phy = DPIO_PHY0;
1184	} else {
1185		pipe = PIPE_C;
1186		phy = DPIO_PHY1;
1187	}
1188
1189	/* since ref/cri clock was enabled */
1190	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1191	vlv_set_power_well(dev_priv, power_well, true);
1192
1193	/* Poll for phypwrgood signal */
1194	if (intel_wait_for_register(dev_priv,
1195				    DISPLAY_PHY_STATUS,
1196				    PHY_POWERGOOD(phy),
1197				    PHY_POWERGOOD(phy),
1198				    1))
 1199		DRM_ERROR("Display PHY %d did not power up\n", phy);
1200
1201	mutex_lock(&dev_priv->sb_lock);
1202
1203	/* Enable dynamic power down */
1204	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1205	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1206		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1207	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1208
1209	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
1210		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1211		tmp |= DPIO_DYNPWRDOWNEN_CH1;
1212		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1213	} else {
1214		/*
 1215		 * Force the non-existent CL2 off. BXT does this
1216		 * too, so maybe it saves some power even though
1217		 * CL2 doesn't exist?
1218		 */
1219		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1220		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1221		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1222	}
1223
1224	mutex_unlock(&dev_priv->sb_lock);
1225
1226	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1227	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1228
1229	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1230		      phy, dev_priv->chv_phy_control);
1231
1232	assert_chv_phy_status(dev_priv);
1233}
1234
1235static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1236					    struct i915_power_well *power_well)
1237{
1238	enum dpio_phy phy;
1239
1240	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
1241		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
1242
1243	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
1244		phy = DPIO_PHY0;
1245		assert_pll_disabled(dev_priv, PIPE_A);
1246		assert_pll_disabled(dev_priv, PIPE_B);
1247	} else {
1248		phy = DPIO_PHY1;
1249		assert_pll_disabled(dev_priv, PIPE_C);
1250	}
1251
1252	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1253	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1254
1255	vlv_set_power_well(dev_priv, power_well, false);
1256
1257	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1258		      phy, dev_priv->chv_phy_control);
1259
1260	/* PHY is fully reset now, so we can enable the PHY state asserts */
1261	dev_priv->chv_phy_assert[phy] = true;
1262
1263	assert_chv_phy_status(dev_priv);
1264}
1265
1266static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1267				     enum dpio_channel ch, bool override, unsigned int mask)
1268{
1269	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1270	u32 reg, val, expected, actual;
1271
1272	/*
 1273	 * The BIOS can leave the PHY in some weird state
 1274	 * where it doesn't fully power down some parts.
 1275	 * Disable the asserts until the PHY has been fully
 1276	 * reset (i.e. the power well has been disabled at
1277	 * least once).
1278	 */
1279	if (!dev_priv->chv_phy_assert[phy])
1280		return;
1281
1282	if (ch == DPIO_CH0)
1283		reg = _CHV_CMN_DW0_CH0;
1284	else
1285		reg = _CHV_CMN_DW6_CH1;
1286
1287	mutex_lock(&dev_priv->sb_lock);
1288	val = vlv_dpio_read(dev_priv, pipe, reg);
1289	mutex_unlock(&dev_priv->sb_lock);
1290
1291	/*
1292	 * This assumes !override is only used when the port is disabled.
1293	 * All lanes should power down even without the override when
1294	 * the port is disabled.
1295	 */
1296	if (!override || mask == 0xf) {
1297		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1298		/*
1299		 * If CH1 common lane is not active anymore
 1300		 * (e.g. for pipe B DPLL) the entire channel will
1301		 * shut down, which causes the common lane registers
1302		 * to read as 0. That means we can't actually check
1303		 * the lane power down status bits, but as the entire
1304		 * register reads as 0 it's a good indication that the
1305		 * channel is indeed entirely powered down.
1306		 */
1307		if (ch == DPIO_CH1 && val == 0)
1308			expected = 0;
1309	} else if (mask != 0x0) {
1310		expected = DPIO_ANYDL_POWERDOWN;
1311	} else {
1312		expected = 0;
1313	}
1314
1315	if (ch == DPIO_CH0)
1316		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1317	else
1318		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1319	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1320
1321	WARN(actual != expected,
1322	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1323	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
1324	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
1325	     reg, val);
1326}
1327
1328bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1329			  enum dpio_channel ch, bool override)
1330{
1331	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1332	bool was_override;
1333
1334	mutex_lock(&power_domains->lock);
1335
1336	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1337
1338	if (override == was_override)
1339		goto out;
1340
1341	if (override)
1342		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1343	else
1344		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1345
1346	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1347
1348	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1349		      phy, ch, dev_priv->chv_phy_control);
1350
1351	assert_chv_phy_status(dev_priv);
1352
1353out:
1354	mutex_unlock(&power_domains->lock);
1355
1356	return was_override;
1357}
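
/*
 * A minimal usage sketch (illustrative, not from the original code): the
 * return value lets callers save the previous override state and restore
 * it once they are done poking at the channel:
 *
 *	bool was = chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH0, true);
 *	...program the channel...
 *	chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH0, was);
 */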
1358
1359void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1360			     bool override, unsigned int mask)
1361{
1362	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1363	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1364	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
1365	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1366
1367	mutex_lock(&power_domains->lock);
1368
1369	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1370	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1371
1372	if (override)
1373		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1374	else
1375		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1376
1377	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1378
1379	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1380		      phy, ch, mask, dev_priv->chv_phy_control);
1381
1382	assert_chv_phy_status(dev_priv);
1383
1384	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1385
1386	mutex_unlock(&power_domains->lock);
1387}
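
/*
 * Sketch of the mask semantics (illustrative): each bit in @mask keeps the
 * corresponding lane powered, so an encoder driving only lanes 0-1 might
 * pass 0x3, while 0x0 lets all four lanes of the channel power down:
 *
 *	chv_phy_powergate_lanes(encoder, true, 0x3);
 */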
1388
1389static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1390					struct i915_power_well *power_well)
1391{
1392	enum pipe pipe = PIPE_A;
1393	bool enabled;
1394	u32 state, ctrl;
1395
1396	mutex_lock(&dev_priv->pcu_lock);
1397
1398	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
1399	/*
 1400	 * We only ever set the power-on and power-gate states; anything
 1401	 * else is unexpected.
1402	 */
1403	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
1404	enabled = state == DP_SSS_PWR_ON(pipe);
1405
1406	/*
1407	 * A transient state at this point would mean some unexpected party
1408	 * is poking at the power controls too.
1409	 */
1410	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
1411	WARN_ON(ctrl << 16 != state);
1412
1413	mutex_unlock(&dev_priv->pcu_lock);
1414
1415	return enabled;
1416}
1417
1418static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1419				    struct i915_power_well *power_well,
1420				    bool enable)
1421{
1422	enum pipe pipe = PIPE_A;
1423	u32 state;
1424	u32 ctrl;
1425
1426	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1427
1428	mutex_lock(&dev_priv->pcu_lock);
1429
1430#define COND \
1431	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
1432
1433	if (COND)
1434		goto out;
1435
1436	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
1437	ctrl &= ~DP_SSC_MASK(pipe);
1438	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1439	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
1440
1441	if (wait_for(COND, 100))
1442		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1443			  state,
1444			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
1445
1446#undef COND
1447
1448out:
1449	mutex_unlock(&dev_priv->pcu_lock);
1450}
1451
1452static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1453				       struct i915_power_well *power_well)
1454{
1455	WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A);
1456
1457	chv_set_pipe_power_well(dev_priv, power_well, true);
1458
1459	vlv_display_power_well_init(dev_priv);
1460}
1461
1462static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1463					struct i915_power_well *power_well)
1464{
1465	WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A);
1466
1467	vlv_display_power_well_deinit(dev_priv);
1468
1469	chv_set_pipe_power_well(dev_priv, power_well, false);
1470}
1471
1472static void
1473__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1474				 enum intel_display_power_domain domain)
1475{
1476	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1477	struct i915_power_well *power_well;
1478
1479	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
1480		intel_power_well_get(dev_priv, power_well);
1481
1482	power_domains->domain_use_count[domain]++;
1483}
1484
1485/**
1486 * intel_display_power_get - grab a power domain reference
1487 * @dev_priv: i915 device instance
1488 * @domain: power domain to reference
1489 *
1490 * This function grabs a power domain reference for @domain and ensures that the
1491 * power domain and all its parents are powered up. Therefore users should only
1492 * grab a reference to the innermost power domain they need.
1493 *
1494 * Any power domain reference obtained by this function must have a symmetric
1495 * call to intel_display_power_put() to release the reference again.
1496 */
1497void intel_display_power_get(struct drm_i915_private *dev_priv,
1498			     enum intel_display_power_domain domain)
1499{
1500	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1501
1502	intel_runtime_pm_get(dev_priv);
1503
1504	mutex_lock(&power_domains->lock);
1505
1506	__intel_display_power_get_domain(dev_priv, domain);
1507
1508	mutex_unlock(&power_domains->lock);
1509}
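
/*
 * A minimal usage sketch (with an illustrative domain): every successful
 * get must be balanced by a put once the hardware access is done:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_B);
 *	...touch AUX B registers...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_B);
 */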
1510
1511/**
1512 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1513 * @dev_priv: i915 device instance
1514 * @domain: power domain to reference
1515 *
 1516 * This function grabs a power domain reference for @domain if the domain is
 1517 * already enabled, without powering it up itself, and returns whether the
 1518 * reference was obtained. Callers must only touch the hardware if it was.
1519 *
1520 * Any power domain reference obtained by this function must have a symmetric
1521 * call to intel_display_power_put() to release the reference again.
1522 */
1523bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1524					enum intel_display_power_domain domain)
1525{
1526	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1527	bool is_enabled;
1528
1529	if (!intel_runtime_pm_get_if_in_use(dev_priv))
1530		return false;
1531
1532	mutex_lock(&power_domains->lock);
1533
1534	if (__intel_display_power_is_enabled(dev_priv, domain)) {
1535		__intel_display_power_get_domain(dev_priv, domain);
1536		is_enabled = true;
1537	} else {
1538		is_enabled = false;
1539	}
1540
1541	mutex_unlock(&power_domains->lock);
1542
1543	if (!is_enabled)
1544		intel_runtime_pm_put(dev_priv);
1545
1546	return is_enabled;
1547}
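
/*
 * A minimal sketch (illustrative domain): this variant suits state readout
 * paths, where powering a domain up just to inspect it would be wasteful:
 *
 *	if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PIPE_A)) {
 *		...read out pipe A state...
 *		intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 *	}
 */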
1548
1549/**
1550 * intel_display_power_put - release a power domain reference
1551 * @dev_priv: i915 device instance
 1552 * @domain: power domain to put the reference for
1553 *
1554 * This function drops the power domain reference obtained by
1555 * intel_display_power_get() and might power down the corresponding hardware
1556 * block right away if this is the last reference.
1557 */
1558void intel_display_power_put(struct drm_i915_private *dev_priv,
1559			     enum intel_display_power_domain domain)
1560{
1561	struct i915_power_domains *power_domains;
1562	struct i915_power_well *power_well;
1563
1564	power_domains = &dev_priv->power_domains;
1565
1566	mutex_lock(&power_domains->lock);
1567
1568	WARN(!power_domains->domain_use_count[domain],
1569	     "Use count on domain %s is already zero\n",
1570	     intel_display_power_domain_str(domain));
1571	power_domains->domain_use_count[domain]--;
1572
1573	for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
1574		intel_power_well_put(dev_priv, power_well);
1575
1576	mutex_unlock(&power_domains->lock);
1577
1578	intel_runtime_pm_put(dev_priv);
1579}
1580
1581#define I830_PIPES_POWER_DOMAINS (		\
1582	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
1583	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
1584	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
1585	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
1586	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
1587	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
1588	BIT_ULL(POWER_DOMAIN_INIT))
1589
1590#define VLV_DISPLAY_POWER_DOMAINS (		\
1591	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
1592	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
1593	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
1594	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
1595	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
1596	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
1597	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1598	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1599	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
1600	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
1601	BIT_ULL(POWER_DOMAIN_VGA) |			\
1602	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
1603	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1604	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1605	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
1606	BIT_ULL(POWER_DOMAIN_INIT))
1607
1608#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
1609	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1610	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1611	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
1612	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1613	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1614	BIT_ULL(POWER_DOMAIN_INIT))
1615
1616#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
1617	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1618	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1619	BIT_ULL(POWER_DOMAIN_INIT))
1620
1621#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
1622	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1623	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1624	BIT_ULL(POWER_DOMAIN_INIT))
1625
1626#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
1627	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1628	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1629	BIT_ULL(POWER_DOMAIN_INIT))
1630
1631#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
1632	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1633	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1634	BIT_ULL(POWER_DOMAIN_INIT))
1635
1636#define CHV_DISPLAY_POWER_DOMAINS (		\
1637	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
1638	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
1639	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
1640	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
1641	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
1642	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
1643	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
1644	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
1645	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
1646	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1647	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1648	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
1649	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
1650	BIT_ULL(POWER_DOMAIN_VGA) |			\
1651	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
1652	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1653	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1654	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
1655	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
1656	BIT_ULL(POWER_DOMAIN_INIT))
1657
1658#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
1659	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1660	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1661	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1662	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1663	BIT_ULL(POWER_DOMAIN_INIT))
1664
1665#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
1666	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
1667	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
1668	BIT_ULL(POWER_DOMAIN_INIT))
1669
1670#define HSW_DISPLAY_POWER_DOMAINS (			\
1671	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
1672	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
1673	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
1674	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
1675	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
1676	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
1677	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
1678	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
1679	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
1680	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
1681	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
1682	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
1683	BIT_ULL(POWER_DOMAIN_VGA) |				\
1684	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
1685	BIT_ULL(POWER_DOMAIN_INIT))
1686
1687#define BDW_DISPLAY_POWER_DOMAINS (			\
1688	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
1689	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
1690	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
1691	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
1692	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
1693	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
1694	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
1695	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
1696	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
1697	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
1698	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
1699	BIT_ULL(POWER_DOMAIN_VGA) |				\
1700	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
1701	BIT_ULL(POWER_DOMAIN_INIT))
1702
1703#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
1704	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
1705	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
1706	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
1707	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
1708	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
1709	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
1710	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
1711	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
1712	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
1713	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
1714	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
1715	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
1716	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
1717	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
1718	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
1719	BIT_ULL(POWER_DOMAIN_VGA) |				\
1720	BIT_ULL(POWER_DOMAIN_INIT))
1721#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
1722	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
1723	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
1724	BIT_ULL(POWER_DOMAIN_INIT))
1725#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
1726	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
1727	BIT_ULL(POWER_DOMAIN_INIT))
1728#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
1729	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
1730	BIT_ULL(POWER_DOMAIN_INIT))
1731#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
1732	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
1733	BIT_ULL(POWER_DOMAIN_INIT))
1734#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
1735	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
1736	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
1737	BIT_ULL(POWER_DOMAIN_MODESET) |			\
1738	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
1739	BIT_ULL(POWER_DOMAIN_INIT))
1740
1741#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
1742	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
1743	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
1744	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
1745	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
1746	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
1747	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
1748	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
1749	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
1750	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
1751	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
1752	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
1753	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
1754	BIT_ULL(POWER_DOMAIN_VGA) |				\
1755	BIT_ULL(POWER_DOMAIN_INIT))
1756#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
1757	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
1758	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
1759	BIT_ULL(POWER_DOMAIN_MODESET) |			\
1760	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
1761	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
1762	BIT_ULL(POWER_DOMAIN_INIT))
1763#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
1764	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
1765	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
1766	BIT_ULL(POWER_DOMAIN_INIT))
1767#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
1768	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
1769	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
1770	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
1771	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
1772	BIT_ULL(POWER_DOMAIN_INIT))
1773
1774#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
1775	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
1776	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
1777	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
1778	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
1779	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
1780	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
1781	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
1782	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
1783	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
1784	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
1785	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
1786	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
1787	BIT_ULL(POWER_DOMAIN_VGA) |				\
1788	BIT_ULL(POWER_DOMAIN_INIT))
1789#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
1790	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
1791#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
1792	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
1793#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
1794	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
1795#define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
1796	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
1797	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
1798	BIT_ULL(POWER_DOMAIN_INIT))
1799#define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
1800	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
1801	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
1802	BIT_ULL(POWER_DOMAIN_INIT))
1803#define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
1804	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
1805	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
1806	BIT_ULL(POWER_DOMAIN_INIT))
1807#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
1808	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
1809	BIT_ULL(POWER_DOMAIN_INIT))
1810#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
1811	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1812	BIT_ULL(POWER_DOMAIN_INIT))
1813#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
1814	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1815	BIT_ULL(POWER_DOMAIN_INIT))
1816#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
1817	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
1818	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
1819	BIT_ULL(POWER_DOMAIN_MODESET) |			\
1820	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
1821	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
1822	BIT_ULL(POWER_DOMAIN_INIT))
1823
1824#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
1825	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
1826	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
1827	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
1828	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
1829	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
1830	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
1831	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
1832	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
1833	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
1834	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
1835	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |		\
1836	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
1837	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
1838	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
1839	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
1840	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
1841	BIT_ULL(POWER_DOMAIN_VGA) |				\
1842	BIT_ULL(POWER_DOMAIN_INIT))
1843#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
1844	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
1845	BIT_ULL(POWER_DOMAIN_INIT))
1846#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
1847	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
1848	BIT_ULL(POWER_DOMAIN_INIT))
1849#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
1850	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
1851	BIT_ULL(POWER_DOMAIN_INIT))
1852#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
1853	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
1854	BIT_ULL(POWER_DOMAIN_INIT))
1855#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
1856	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
1857	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
1858	BIT_ULL(POWER_DOMAIN_INIT))
1859#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
1860	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
1861	BIT_ULL(POWER_DOMAIN_INIT))
1862#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
1863	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
1864	BIT_ULL(POWER_DOMAIN_INIT))
1865#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
1866	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
1867	BIT_ULL(POWER_DOMAIN_INIT))
1868#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (		\
1869	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
1870	BIT_ULL(POWER_DOMAIN_INIT))
1871#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (		\
1872	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
1873	BIT_ULL(POWER_DOMAIN_INIT))
1874#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
1875	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
1876	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
1877	BIT_ULL(POWER_DOMAIN_MODESET) |			\
1878	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
1879	BIT_ULL(POWER_DOMAIN_INIT))
1880
1881static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
1882	.sync_hw = i9xx_power_well_sync_hw_noop,
1883	.enable = i9xx_always_on_power_well_noop,
1884	.disable = i9xx_always_on_power_well_noop,
1885	.is_enabled = i9xx_always_on_power_well_enabled,
1886};
1887
1888static const struct i915_power_well_ops chv_pipe_power_well_ops = {
1889	.sync_hw = i9xx_power_well_sync_hw_noop,
1890	.enable = chv_pipe_power_well_enable,
1891	.disable = chv_pipe_power_well_disable,
1892	.is_enabled = chv_pipe_power_well_enabled,
1893};
1894
1895static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
1896	.sync_hw = i9xx_power_well_sync_hw_noop,
1897	.enable = chv_dpio_cmn_power_well_enable,
1898	.disable = chv_dpio_cmn_power_well_disable,
1899	.is_enabled = vlv_power_well_enabled,
1900};
1901
1902static struct i915_power_well i9xx_always_on_power_well[] = {
1903	{
1904		.name = "always-on",
1905		.always_on = 1,
1906		.domains = POWER_DOMAIN_MASK,
1907		.ops = &i9xx_always_on_power_well_ops,
1908		.id = I915_DISP_PW_ALWAYS_ON,
1909	},
1910};
1911
1912static const struct i915_power_well_ops i830_pipes_power_well_ops = {
1913	.sync_hw = i830_pipes_power_well_sync_hw,
1914	.enable = i830_pipes_power_well_enable,
1915	.disable = i830_pipes_power_well_disable,
1916	.is_enabled = i830_pipes_power_well_enabled,
1917};
1918
1919static struct i915_power_well i830_power_wells[] = {
1920	{
1921		.name = "always-on",
1922		.always_on = 1,
1923		.domains = POWER_DOMAIN_MASK,
1924		.ops = &i9xx_always_on_power_well_ops,
1925		.id = I915_DISP_PW_ALWAYS_ON,
1926	},
1927	{
1928		.name = "pipes",
1929		.domains = I830_PIPES_POWER_DOMAINS,
1930		.ops = &i830_pipes_power_well_ops,
1931		.id = I830_DISP_PW_PIPES,
1932	},
1933};
1934
1935static const struct i915_power_well_ops hsw_power_well_ops = {
1936	.sync_hw = hsw_power_well_sync_hw,
1937	.enable = hsw_power_well_enable,
1938	.disable = hsw_power_well_disable,
1939	.is_enabled = hsw_power_well_enabled,
1940};
1941
1942static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
1943	.sync_hw = i9xx_power_well_sync_hw_noop,
1944	.enable = gen9_dc_off_power_well_enable,
1945	.disable = gen9_dc_off_power_well_disable,
1946	.is_enabled = gen9_dc_off_power_well_enabled,
1947};
1948
1949static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
1950	.sync_hw = i9xx_power_well_sync_hw_noop,
1951	.enable = bxt_dpio_cmn_power_well_enable,
1952	.disable = bxt_dpio_cmn_power_well_disable,
1953	.is_enabled = bxt_dpio_cmn_power_well_enabled,
1954};
1955
1956static struct i915_power_well hsw_power_wells[] = {
1957	{
1958		.name = "always-on",
1959		.always_on = 1,
1960		.domains = POWER_DOMAIN_MASK,
1961		.ops = &i9xx_always_on_power_well_ops,
1962		.id = I915_DISP_PW_ALWAYS_ON,
1963	},
1964	{
1965		.name = "display",
1966		.domains = HSW_DISPLAY_POWER_DOMAINS,
1967		.ops = &hsw_power_well_ops,
1968		.id = HSW_DISP_PW_GLOBAL,
1969		{
1970			.hsw.has_vga = true,
1971		},
1972	},
1973};
1974
1975static struct i915_power_well bdw_power_wells[] = {
1976	{
1977		.name = "always-on",
1978		.always_on = 1,
1979		.domains = POWER_DOMAIN_MASK,
1980		.ops = &i9xx_always_on_power_well_ops,
1981		.id = I915_DISP_PW_ALWAYS_ON,
1982	},
1983	{
1984		.name = "display",
1985		.domains = BDW_DISPLAY_POWER_DOMAINS,
1986		.ops = &hsw_power_well_ops,
1987		.id = HSW_DISP_PW_GLOBAL,
1988		{
1989			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
1990			.hsw.has_vga = true,
1991		},
1992	},
1993};
1994
1995static const struct i915_power_well_ops vlv_display_power_well_ops = {
1996	.sync_hw = i9xx_power_well_sync_hw_noop,
1997	.enable = vlv_display_power_well_enable,
1998	.disable = vlv_display_power_well_disable,
1999	.is_enabled = vlv_power_well_enabled,
2000};
2001
2002static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
2003	.sync_hw = i9xx_power_well_sync_hw_noop,
2004	.enable = vlv_dpio_cmn_power_well_enable,
2005	.disable = vlv_dpio_cmn_power_well_disable,
2006	.is_enabled = vlv_power_well_enabled,
2007};
2008
2009static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
2010	.sync_hw = i9xx_power_well_sync_hw_noop,
2011	.enable = vlv_power_well_enable,
2012	.disable = vlv_power_well_disable,
2013	.is_enabled = vlv_power_well_enabled,
2014};
2015
2016static struct i915_power_well vlv_power_wells[] = {
2017	{
2018		.name = "always-on",
2019		.always_on = 1,
2020		.domains = POWER_DOMAIN_MASK,
2021		.ops = &i9xx_always_on_power_well_ops,
2022		.id = I915_DISP_PW_ALWAYS_ON,
2023	},
2024	{
2025		.name = "display",
2026		.domains = VLV_DISPLAY_POWER_DOMAINS,
2027		.id = PUNIT_POWER_WELL_DISP2D,
2028		.ops = &vlv_display_power_well_ops,
2029	},
2030	{
2031		.name = "dpio-tx-b-01",
2032		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2033			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2034			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2035			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2036		.ops = &vlv_dpio_power_well_ops,
2037		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
2038	},
2039	{
2040		.name = "dpio-tx-b-23",
2041		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2042			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2043			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2044			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2045		.ops = &vlv_dpio_power_well_ops,
2046		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
2047	},
2048	{
2049		.name = "dpio-tx-c-01",
2050		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2051			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2052			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2053			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2054		.ops = &vlv_dpio_power_well_ops,
2055		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
2056	},
2057	{
2058		.name = "dpio-tx-c-23",
2059		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2060			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2061			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2062			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2063		.ops = &vlv_dpio_power_well_ops,
2064		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
2065	},
2066	{
2067		.name = "dpio-common",
2068		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
2069		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
2070		.ops = &vlv_dpio_cmn_power_well_ops,
2071	},
2072};
2073
2074static struct i915_power_well chv_power_wells[] = {
2075	{
2076		.name = "always-on",
2077		.always_on = 1,
2078		.domains = POWER_DOMAIN_MASK,
2079		.ops = &i9xx_always_on_power_well_ops,
2080		.id = I915_DISP_PW_ALWAYS_ON,
2081	},
2082	{
2083		.name = "display",
2084		/*
2085		 * Pipe A power well is the new disp2d well. Pipe B and C
2086		 * power wells don't actually exist. Pipe A power well is
2087		 * required for any pipe to work.
2088		 */
2089		.domains = CHV_DISPLAY_POWER_DOMAINS,
2090		.id = CHV_DISP_PW_PIPE_A,
2091		.ops = &chv_pipe_power_well_ops,
2092	},
2093	{
2094		.name = "dpio-common-bc",
2095		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
2096		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
2097		.ops = &chv_dpio_cmn_power_well_ops,
2098	},
2099	{
2100		.name = "dpio-common-d",
2101		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
2102		.id = PUNIT_POWER_WELL_DPIO_CMN_D,
2103		.ops = &chv_dpio_cmn_power_well_ops,
2104	},
2105};
2106
2107bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2108					 enum i915_power_well_id power_well_id)
2109{
2110	struct i915_power_well *power_well;
2111	bool ret;
2112
2113	power_well = lookup_power_well(dev_priv, power_well_id);
2114	ret = power_well->ops->is_enabled(dev_priv, power_well);
2115
2116	return ret;
2117}
2118
2119static struct i915_power_well skl_power_wells[] = {
2120	{
2121		.name = "always-on",
2122		.always_on = 1,
2123		.domains = POWER_DOMAIN_MASK,
2124		.ops = &i9xx_always_on_power_well_ops,
2125		.id = I915_DISP_PW_ALWAYS_ON,
2126	},
2127	{
2128		.name = "power well 1",
2129		/* Handled by the DMC firmware */
2130		.domains = 0,
2131		.ops = &hsw_power_well_ops,
2132		.id = SKL_DISP_PW_1,
2133		{
2134			.hsw.has_fuses = true,
2135		},
2136	},
2137	{
2138		.name = "MISC IO power well",
2139		/* Handled by the DMC firmware */
2140		.domains = 0,
2141		.ops = &hsw_power_well_ops,
2142		.id = SKL_DISP_PW_MISC_IO,
2143	},
2144	{
2145		.name = "DC off",
2146		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
2147		.ops = &gen9_dc_off_power_well_ops,
2148		.id = SKL_DISP_PW_DC_OFF,
2149	},
2150	{
2151		.name = "power well 2",
2152		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2153		.ops = &hsw_power_well_ops,
2154		.id = SKL_DISP_PW_2,
2155		{
2156			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2157			.hsw.has_vga = true,
2158			.hsw.has_fuses = true,
2159		},
2160	},
2161	{
2162		.name = "DDI A/E IO power well",
2163		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
2164		.ops = &hsw_power_well_ops,
2165		.id = SKL_DISP_PW_DDI_A_E,
2166	},
2167	{
2168		.name = "DDI B IO power well",
2169		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
2170		.ops = &hsw_power_well_ops,
2171		.id = SKL_DISP_PW_DDI_B,
2172	},
2173	{
2174		.name = "DDI C IO power well",
2175		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
2176		.ops = &hsw_power_well_ops,
2177		.id = SKL_DISP_PW_DDI_C,
2178	},
2179	{
2180		.name = "DDI D IO power well",
2181		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
2182		.ops = &hsw_power_well_ops,
2183		.id = SKL_DISP_PW_DDI_D,
2184	},
2185};
2186
2187static struct i915_power_well bxt_power_wells[] = {
2188	{
2189		.name = "always-on",
2190		.always_on = 1,
2191		.domains = POWER_DOMAIN_MASK,
2192		.ops = &i9xx_always_on_power_well_ops,
2193		.id = I915_DISP_PW_ALWAYS_ON,
2194	},
2195	{
2196		.name = "power well 1",
2197		.domains = 0,
2198		.ops = &hsw_power_well_ops,
2199		.id = SKL_DISP_PW_1,
2200		{
2201			.hsw.has_fuses = true,
2202		},
2203	},
2204	{
2205		.name = "DC off",
2206		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
2207		.ops = &gen9_dc_off_power_well_ops,
2208		.id = SKL_DISP_PW_DC_OFF,
2209	},
2210	{
2211		.name = "power well 2",
2212		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2213		.ops = &hsw_power_well_ops,
2214		.id = SKL_DISP_PW_2,
2215		{
2216			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2217			.hsw.has_vga = true,
2218			.hsw.has_fuses = true,
2219		},
2220	},
2221	{
2222		.name = "dpio-common-a",
2223		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
2224		.ops = &bxt_dpio_cmn_power_well_ops,
2225		.id = BXT_DPIO_CMN_A,
2226		{
2227			.bxt.phy = DPIO_PHY1,
2228		},
2229	},
2230	{
2231		.name = "dpio-common-bc",
2232		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
2233		.ops = &bxt_dpio_cmn_power_well_ops,
2234		.id = BXT_DPIO_CMN_BC,
2235		{
2236			.bxt.phy = DPIO_PHY0,
2237		},
2238	},
2239};
2240
2241static struct i915_power_well glk_power_wells[] = {
2242	{
2243		.name = "always-on",
2244		.always_on = 1,
2245		.domains = POWER_DOMAIN_MASK,
2246		.ops = &i9xx_always_on_power_well_ops,
2247		.id = I915_DISP_PW_ALWAYS_ON,
2248	},
2249	{
2250		.name = "power well 1",
2251		/* Handled by the DMC firmware */
2252		.domains = 0,
2253		.ops = &hsw_power_well_ops,
2254		.id = SKL_DISP_PW_1,
2255		{
2256			.hsw.has_fuses = true,
2257		},
2258	},
2259	{
2260		.name = "DC off",
2261		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
2262		.ops = &gen9_dc_off_power_well_ops,
2263		.id = SKL_DISP_PW_DC_OFF,
2264	},
2265	{
2266		.name = "power well 2",
2267		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2268		.ops = &hsw_power_well_ops,
2269		.id = SKL_DISP_PW_2,
2270		{
2271			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2272			.hsw.has_vga = true,
2273			.hsw.has_fuses = true,
2274		},
2275	},
2276	{
2277		.name = "dpio-common-a",
2278		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
2279		.ops = &bxt_dpio_cmn_power_well_ops,
2280		.id = BXT_DPIO_CMN_A,
2281		{
2282			.bxt.phy = DPIO_PHY1,
2283		},
2284	},
2285	{
2286		.name = "dpio-common-b",
2287		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
2288		.ops = &bxt_dpio_cmn_power_well_ops,
2289		.id = BXT_DPIO_CMN_BC,
2290		{
2291			.bxt.phy = DPIO_PHY0,
2292		},
2293	},
2294	{
2295		.name = "dpio-common-c",
2296		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
2297		.ops = &bxt_dpio_cmn_power_well_ops,
2298		.id = GLK_DPIO_CMN_C,
2299		{
2300			.bxt.phy = DPIO_PHY2,
2301		},
2302	},
2303	{
2304		.name = "AUX A",
2305		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
2306		.ops = &hsw_power_well_ops,
2307		.id = GLK_DISP_PW_AUX_A,
2308	},
2309	{
2310		.name = "AUX B",
2311		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
2312		.ops = &hsw_power_well_ops,
2313		.id = GLK_DISP_PW_AUX_B,
2314	},
2315	{
2316		.name = "AUX C",
2317		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
2318		.ops = &hsw_power_well_ops,
2319		.id = GLK_DISP_PW_AUX_C,
2320	},
2321	{
2322		.name = "DDI A IO power well",
2323		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
2324		.ops = &hsw_power_well_ops,
2325		.id = GLK_DISP_PW_DDI_A,
2326	},
2327	{
2328		.name = "DDI B IO power well",
2329		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
2330		.ops = &hsw_power_well_ops,
2331		.id = SKL_DISP_PW_DDI_B,
2332	},
2333	{
2334		.name = "DDI C IO power well",
2335		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
2336		.ops = &hsw_power_well_ops,
2337		.id = SKL_DISP_PW_DDI_C,
2338	},
2339};
2340
2341static struct i915_power_well cnl_power_wells[] = {
2342	{
2343		.name = "always-on",
2344		.always_on = 1,
2345		.domains = POWER_DOMAIN_MASK,
2346		.ops = &i9xx_always_on_power_well_ops,
2347		.id = I915_DISP_PW_ALWAYS_ON,
2348	},
2349	{
2350		.name = "power well 1",
2351		/* Handled by the DMC firmware */
2352		.domains = 0,
2353		.ops = &hsw_power_well_ops,
2354		.id = SKL_DISP_PW_1,
2355		{
2356			.hsw.has_fuses = true,
2357		},
2358	},
2359	{
2360		.name = "AUX A",
2361		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
2362		.ops = &hsw_power_well_ops,
2363		.id = CNL_DISP_PW_AUX_A,
2364	},
2365	{
2366		.name = "AUX B",
2367		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
2368		.ops = &hsw_power_well_ops,
2369		.id = CNL_DISP_PW_AUX_B,
2370	},
2371	{
2372		.name = "AUX C",
2373		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
2374		.ops = &hsw_power_well_ops,
2375		.id = CNL_DISP_PW_AUX_C,
2376	},
2377	{
2378		.name = "AUX D",
2379		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
2380		.ops = &hsw_power_well_ops,
2381		.id = CNL_DISP_PW_AUX_D,
2382	},
2383	{
2384		.name = "DC off",
2385		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
2386		.ops = &gen9_dc_off_power_well_ops,
2387		.id = SKL_DISP_PW_DC_OFF,
2388	},
2389	{
2390		.name = "power well 2",
2391		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2392		.ops = &hsw_power_well_ops,
2393		.id = SKL_DISP_PW_2,
2394		{
2395			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2396			.hsw.has_vga = true,
2397			.hsw.has_fuses = true,
2398		},
2399	},
2400	{
2401		.name = "DDI A IO power well",
2402		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
2403		.ops = &hsw_power_well_ops,
2404		.id = CNL_DISP_PW_DDI_A,
2405	},
2406	{
2407		.name = "DDI B IO power well",
2408		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
2409		.ops = &hsw_power_well_ops,
2410		.id = SKL_DISP_PW_DDI_B,
2411	},
2412	{
2413		.name = "DDI C IO power well",
2414		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
2415		.ops = &hsw_power_well_ops,
2416		.id = SKL_DISP_PW_DDI_C,
2417	},
2418	{
2419		.name = "DDI D IO power well",
2420		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
2421		.ops = &hsw_power_well_ops,
2422		.id = SKL_DISP_PW_DDI_D,
2423	},
2424	{
2425		.name = "DDI F IO power well",
2426		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
2427		.ops = &hsw_power_well_ops,
2428		.id = CNL_DISP_PW_DDI_F,
2429	},
2430	{
2431		.name = "AUX F",
2432		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
2433		.ops = &hsw_power_well_ops,
2434		.id = CNL_DISP_PW_AUX_F,
2435	},
2436};
2437
2438static int
2439sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
2440				   int disable_power_well)
2441{
2442	if (disable_power_well >= 0)
2443		return !!disable_power_well;
2444
2445	return 1;
2446}
2447
2448static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
2449				    int enable_dc)
2450{
2451	uint32_t mask;
2452	int requested_dc;
2453	int max_dc;
2454
2455	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
2456		max_dc = 2;
2457		mask = 0;
2458	} else if (IS_GEN9_LP(dev_priv)) {
2459		max_dc = 1;
2460		/*
2461		 * DC9 has a separate HW flow from the rest of the DC states,
2462		 * not depending on the DMC firmware. It's needed by system
2463		 * suspend/resume, so allow it unconditionally.
2464		 */
2465		mask = DC_STATE_EN_DC9;
2466	} else {
2467		max_dc = 0;
2468		mask = 0;
2469	}
2470
2471	if (!i915_modparams.disable_power_well)
2472		max_dc = 0;
2473
2474	if (enable_dc >= 0 && enable_dc <= max_dc) {
2475		requested_dc = enable_dc;
2476	} else if (enable_dc == -1) {
2477		requested_dc = max_dc;
2478	} else if (enable_dc > max_dc && enable_dc <= 2) {
2479		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
2480			      enable_dc, max_dc);
2481		requested_dc = max_dc;
2482	} else {
2483		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
2484		requested_dc = max_dc;
2485	}
2486
2487	if (requested_dc > 1)
2488		mask |= DC_STATE_EN_UPTO_DC6;
2489	if (requested_dc > 0)
2490		mask |= DC_STATE_EN_UPTO_DC5;
2491
2492	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
2493
2494	return mask;
2495}
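
/*
 * Worked example (illustrative): on a GEN9_BC part with the default
 * enable_dc=-1 and disable_power_well left enabled, max_dc is 2, so
 * requested_dc becomes 2 and the returned mask is
 * DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_UPTO_DC6.
 */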
2496
2497static void assert_power_well_ids_unique(struct drm_i915_private *dev_priv)
2498{
2499	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2500	u64 power_well_ids;
2501	int i;
2502
2503	power_well_ids = 0;
2504	for (i = 0; i < power_domains->power_well_count; i++) {
2505		enum i915_power_well_id id = power_domains->power_wells[i].id;
2506
2507		WARN_ON(id >= sizeof(power_well_ids) * 8);
2508		WARN_ON(power_well_ids & BIT_ULL(id));
2509		power_well_ids |= BIT_ULL(id);
2510	}
2511}
2512
2513#define set_power_wells(power_domains, __power_wells) ({		\
2514	(power_domains)->power_wells = (__power_wells);			\
2515	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
2516})
2517
2518/**
2519 * intel_power_domains_init - initializes the power domain structures
2520 * @dev_priv: i915 device instance
2521 *
2522 * Initializes the power domain structures for @dev_priv depending upon the
2523 * supported platform.
2524 */
2525int intel_power_domains_init(struct drm_i915_private *dev_priv)
2526{
2527	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2528
2529	i915_modparams.disable_power_well =
2530		sanitize_disable_power_well_option(dev_priv,
2531						   i915_modparams.disable_power_well);
2532	dev_priv->csr.allowed_dc_mask =
2533		get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
2534
2535	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
2536
2537	mutex_init(&power_domains->lock);
2538
2539	/*
2540	 * The enabling order will be from lower to higher indexed wells,
2541	 * the disabling order is reversed.
2542	 */
2543	if (IS_HASWELL(dev_priv)) {
2544		set_power_wells(power_domains, hsw_power_wells);
2545	} else if (IS_BROADWELL(dev_priv)) {
2546		set_power_wells(power_domains, bdw_power_wells);
2547	} else if (IS_GEN9_BC(dev_priv)) {
2548		set_power_wells(power_domains, skl_power_wells);
2549	} else if (IS_CANNONLAKE(dev_priv)) {
2550		set_power_wells(power_domains, cnl_power_wells);
2551
2552		/*
 2553		 * DDI and AUX IO power wells get enabled for all ports
 2554		 * regardless of their presence or use. So, in order to
 2555		 * avoid timeouts, let's remove them from the list for
 2556		 * the SKUs without port F.
2557		 */
2558		if (!IS_CNL_WITH_PORT_F(dev_priv))
2559			power_domains->power_well_count -= 2;
2560
2561	} else if (IS_BROXTON(dev_priv)) {
2562		set_power_wells(power_domains, bxt_power_wells);
2563	} else if (IS_GEMINILAKE(dev_priv)) {
2564		set_power_wells(power_domains, glk_power_wells);
2565	} else if (IS_CHERRYVIEW(dev_priv)) {
2566		set_power_wells(power_domains, chv_power_wells);
2567	} else if (IS_VALLEYVIEW(dev_priv)) {
2568		set_power_wells(power_domains, vlv_power_wells);
2569	} else if (IS_I830(dev_priv)) {
2570		set_power_wells(power_domains, i830_power_wells);
2571	} else {
2572		set_power_wells(power_domains, i9xx_always_on_power_well);
2573	}
2574
2575	assert_power_well_ids_unique(dev_priv);
2576
2577	return 0;
2578}
2579
2580/**
2581 * intel_power_domains_fini - finalizes the power domain structures
2582 * @dev_priv: i915 device instance
2583 *
2584 * Finalizes the power domain structures for @dev_priv depending upon the
2585 * supported platform. This function also disables runtime pm and ensures that
2586 * the device stays powered up so that the driver can be reloaded.
2587 */
2588void intel_power_domains_fini(struct drm_i915_private *dev_priv)
2589{
2590	struct device *kdev = &dev_priv->drm.pdev->dev;
2591
2592	/*
2593	 * The i915.ko module is still not prepared to be loaded when
2594	 * the power well is not enabled, so just enable it in case
2595	 * we're going to unload/reload.
2596	 * The following also reacquires the RPM reference the core passed
2597	 * to the driver during loading, which is dropped in
2598	 * intel_runtime_pm_enable(). We have to hand back the control of the
2599	 * device to the core with this reference held.
2600	 */
2601	intel_display_set_init_power(dev_priv, true);
2602
2603	/* Remove the refcount we took to keep power well support disabled. */
2604	if (!i915_modparams.disable_power_well)
2605		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2606
2607	/*
2608	 * Remove the refcount we took in intel_runtime_pm_enable() in case
2609	 * the platform doesn't support runtime PM.
2610	 */
2611	if (!HAS_RUNTIME_PM(dev_priv))
2612		pm_runtime_put(kdev);
2613}
2614
2615static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
2616{
2617	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2618	struct i915_power_well *power_well;
2619
2620	mutex_lock(&power_domains->lock);
2621	for_each_power_well(dev_priv, power_well) {
2622		power_well->ops->sync_hw(dev_priv, power_well);
2623		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
2624								     power_well);
2625	}
2626	mutex_unlock(&power_domains->lock);
2627}
2628
2629static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
2630{
2631	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
2632	POSTING_READ(DBUF_CTL);
2633
2634	udelay(10);
2635
2636	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
2637		DRM_ERROR("DBuf power enable timeout\n");
2638}
2639
2640static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
2641{
2642	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
2643	POSTING_READ(DBUF_CTL);
2644
2645	udelay(10);
2646
2647	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
 2648		DRM_ERROR("DBuf power disable timeout\n");
2649}
2650
2651/*
2652 * TODO: we shouldn't always enable DBUF_CTL_S2, we should only enable it when
2653 * needed and keep it disabled as much as possible.
2654 */
2655static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
2656{
2657	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
2658	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
2659	POSTING_READ(DBUF_CTL_S2);
2660
2661	udelay(10);
2662
2663	if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
2664	    !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
2665		DRM_ERROR("DBuf power enable timeout\n");
2666}
2667
2668static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
2669{
2670	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
2671	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
2672	POSTING_READ(DBUF_CTL_S2);
2673
2674	udelay(10);
2675
2676	if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
2677	    (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
 2678		DRM_ERROR("DBuf power disable timeout\n");
2679}
2680
2681static void icl_mbus_init(struct drm_i915_private *dev_priv)
2682{
2683	uint32_t val;
2684
2685	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
2686	      MBUS_ABOX_BT_CREDIT_POOL2(16) |
2687	      MBUS_ABOX_B_CREDIT(1) |
2688	      MBUS_ABOX_BW_CREDIT(1);
2689
2690	I915_WRITE(MBUS_ABOX_CTL, val);
2691}
2692
2693static void skl_display_core_init(struct drm_i915_private *dev_priv,
2694				   bool resume)
2695{
2696	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2697	struct i915_power_well *well;
2698	uint32_t val;
2699
2700	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2701
2702	/* enable PCH reset handshake */
2703	val = I915_READ(HSW_NDE_RSTWRN_OPT);
2704	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
2705
2706	/* enable PG1 and Misc I/O */
2707	mutex_lock(&power_domains->lock);
2708
2709	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2710	intel_power_well_enable(dev_priv, well);
2711
2712	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
2713	intel_power_well_enable(dev_priv, well);
2714
2715	mutex_unlock(&power_domains->lock);
2716
2717	skl_init_cdclk(dev_priv);
2718
2719	gen9_dbuf_enable(dev_priv);
2720
2721	if (resume && dev_priv->csr.dmc_payload)
2722		intel_csr_load_program(dev_priv);
2723}
2724
2725static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
2726{
2727	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2728	struct i915_power_well *well;
2729
2730	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2731
2732	gen9_dbuf_disable(dev_priv);
2733
2734	skl_uninit_cdclk(dev_priv);
2735
2736	/* The spec doesn't call for removing the reset handshake flag */
2737	/* disable PG1 and Misc I/O */
2738
2739	mutex_lock(&power_domains->lock);
2740
2741	/*
2742	 * BSpec says to keep the MISC IO power well enabled here, only
2743	 * remove our request for power well 1.
2744	 * Note that even though the driver's request is removed, power well 1
2745	 * may stay enabled after this due to DMC's own request on it.
2746	 */
2747	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2748	intel_power_well_disable(dev_priv, well);
2749
2750	mutex_unlock(&power_domains->lock);
2751
2752	usleep_range(10, 30);		/* 10 us delay per Bspec */
2753}
2754
2755void bxt_display_core_init(struct drm_i915_private *dev_priv,
2756			   bool resume)
2757{
2758	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2759	struct i915_power_well *well;
2760	uint32_t val;
2761
2762	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2763
2764	/*
2765	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
2766	 * or else the reset will hang because there is no PCH to respond.
2767	 * Move the handshake programming to the initialization sequence;
2768	 * previously it was left up to the BIOS.
2769	 */
2770	val = I915_READ(HSW_NDE_RSTWRN_OPT);
2771	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
2772	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
2773
2774	/* Enable PG1 */
2775	mutex_lock(&power_domains->lock);
2776
2777	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2778	intel_power_well_enable(dev_priv, well);
2779
2780	mutex_unlock(&power_domains->lock);
2781
2782	bxt_init_cdclk(dev_priv);
2783
2784	gen9_dbuf_enable(dev_priv);
2785
2786	if (resume && dev_priv->csr.dmc_payload)
2787		intel_csr_load_program(dev_priv);
2788}
2789
2790void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
2791{
2792	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2793	struct i915_power_well *well;
2794
2795	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2796
2797	gen9_dbuf_disable(dev_priv);
2798
2799	bxt_uninit_cdclk(dev_priv);
2800
2801	/* The spec doesn't call for removing the reset handshake flag */
2802
2803	/*
2804	 * Disable PW1 (PG1).
2805	 * Note that even though the driver's request is removed, power well 1
2806	 * may stay enabled after this due to DMC's own request on it.
2807	 */
2808	mutex_lock(&power_domains->lock);
2809
2810	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2811	intel_power_well_disable(dev_priv, well);
2812
2813	mutex_unlock(&power_domains->lock);
2814
2815	usleep_range(10, 30);		/* 10 us delay per Bspec */
2816}
2817
2818enum {
2819	PROCMON_0_85V_DOT_0,
2820	PROCMON_0_95V_DOT_0,
2821	PROCMON_0_95V_DOT_1,
2822	PROCMON_1_05V_DOT_0,
2823	PROCMON_1_05V_DOT_1,
2824};
2825
2826static const struct cnl_procmon {
2827	u32 dw1, dw9, dw10;
2828} cnl_procmon_values[] = {
2829	[PROCMON_0_85V_DOT_0] =
2830		{ .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
2831	[PROCMON_0_95V_DOT_0] =
2832		{ .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
2833	[PROCMON_0_95V_DOT_1] =
2834		{ .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
2835	[PROCMON_1_05V_DOT_0] =
2836		{ .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
2837	[PROCMON_1_05V_DOT_1] =
2838		{ .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
2839};
2840
2841/*
2842 * CNL has just one set of registers, while ICL has two sets: one for port A and
2843 * the other for port B. The CNL registers are equivalent to the ICL port A
2844 * registers, which is why we call the ICL macros even though the function has
2845 * CNL in its name.
2846 */
2847static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
2848				       enum port port)
2849{
2850	const struct cnl_procmon *procmon;
2851	u32 val;
2852
2853	val = I915_READ(ICL_PORT_COMP_DW3(port));
2854	switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
2855	default:
2856		MISSING_CASE(val);	/* fall through */
2857	case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
2858		procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
2859		break;
2860	case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
2861		procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
2862		break;
2863	case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
2864		procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
2865		break;
2866	case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
2867		procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
2868		break;
2869	case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
2870		procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
2871		break;
2872	}
2873
2874	val = I915_READ(ICL_PORT_COMP_DW1(port));
2875	val &= ~((0xff << 16) | 0xff);
2876	val |= procmon->dw1;
2877	I915_WRITE(ICL_PORT_COMP_DW1(port), val);
2878
2879	I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
2880	I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
2881}
2882
2883static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
2884{
2885	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2886	struct i915_power_well *well;
2887	u32 val;
2888
2889	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2890
2891	/* 1. Enable PCH Reset Handshake */
2892	val = I915_READ(HSW_NDE_RSTWRN_OPT);
2893	val |= RESET_PCH_HANDSHAKE_ENABLE;
2894	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
2895
2896	/* 2. Enable Comp */
2897	val = I915_READ(CHICKEN_MISC_2);
2898	val &= ~CNL_COMP_PWR_DOWN;
2899	I915_WRITE(CHICKEN_MISC_2, val);
2900
2901	/* Dummy PORT_A to get the correct CNL register from the ICL macro */
2902	cnl_set_procmon_ref_values(dev_priv, PORT_A);
2903
2904	val = I915_READ(CNL_PORT_COMP_DW0);
2905	val |= COMP_INIT;
2906	I915_WRITE(CNL_PORT_COMP_DW0, val);
2907
2908	/* 3. Set power down enable. */
2909	val = I915_READ(CNL_PORT_CL1CM_DW5);
2910	val |= CL_POWER_DOWN_ENABLE;
2911	I915_WRITE(CNL_PORT_CL1CM_DW5, val);
2912
2913	/*
2914	 * 4. Enable Power Well 1 (PG1).
2915	 *    The AUX IO power wells will be enabled on demand.
2916	 */
2917	mutex_lock(&power_domains->lock);
2918	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2919	intel_power_well_enable(dev_priv, well);
2920	mutex_unlock(&power_domains->lock);
2921
2922	/* 5. Enable CD clock */
2923	cnl_init_cdclk(dev_priv);
2924
2925	/* 6. Enable DBUF */
2926	gen9_dbuf_enable(dev_priv);
2927
2928	if (resume && dev_priv->csr.dmc_payload)
2929		intel_csr_load_program(dev_priv);
2930}
2931
2932static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
2933{
2934	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2935	struct i915_power_well *well;
2936	u32 val;
2937
2938	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2939
2940	/* 1. Disable all display engine functions -> already done */
2941
2942	/* 2. Disable DBUF */
2943	gen9_dbuf_disable(dev_priv);
2944
2945	/* 3. Disable CD clock */
2946	cnl_uninit_cdclk(dev_priv);
2947
2948	/*
2949	 * 4. Disable Power Well 1 (PG1).
2950	 *    The AUX IO power wells are toggled on demand, so they are already
2951	 *    disabled at this point.
2952	 */
2953	mutex_lock(&power_domains->lock);
2954	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
2955	intel_power_well_disable(dev_priv, well);
2956	mutex_unlock(&power_domains->lock);
2957
2958	usleep_range(10, 30);		/* 10 us delay per Bspec */
2959
2960	/* 5. Disable Comp */
2961	val = I915_READ(CHICKEN_MISC_2);
2962	val |= CNL_COMP_PWR_DOWN;
2963	I915_WRITE(CHICKEN_MISC_2, val);
2964}
2965
2966static void icl_display_core_init(struct drm_i915_private *dev_priv,
2967				  bool resume)
2968{
2969	enum port port;
2970	u32 val;
2971
2972	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2973
2974	/* 1. Enable PCH reset handshake. */
2975	val = I915_READ(HSW_NDE_RSTWRN_OPT);
2976	val |= RESET_PCH_HANDSHAKE_ENABLE;
2977	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
2978
2979	for (port = PORT_A; port <= PORT_B; port++) {
2980		/* 2. Enable DDI combo PHY comp. */
2981		val = I915_READ(ICL_PHY_MISC(port));
2982		val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
2983		I915_WRITE(ICL_PHY_MISC(port), val);
2984
2985		cnl_set_procmon_ref_values(dev_priv, port);
2986
2987		val = I915_READ(ICL_PORT_COMP_DW0(port));
2988		val |= COMP_INIT;
2989		I915_WRITE(ICL_PORT_COMP_DW0(port), val);
2990
2991		/* 3. Set power down enable. */
2992		val = I915_READ(ICL_PORT_CL_DW5(port));
2993		val |= CL_POWER_DOWN_ENABLE;
2994		I915_WRITE(ICL_PORT_CL_DW5(port), val);
2995	}
2996
2997	/* 4. Enable power well 1 (PG1) and aux IO power. */
2998	/* FIXME: ICL power wells code not here yet. */
2999
3000	/* 5. Enable CDCLK. */
3001	icl_init_cdclk(dev_priv);
3002
3003	/* 6. Enable DBUF. */
3004	icl_dbuf_enable(dev_priv);
3005
3006	/* 7. Setup MBUS. */
3007	icl_mbus_init(dev_priv);
3008
3009	/* 8. CHICKEN_DCPR_1 */
3010	I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
3011					CNL_DDI_CLOCK_REG_ACCESS_ON);
3012}
3013
3014static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
3015{
3016	enum port port;
3017	u32 val;
3018
3019	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
3020
3021	/* 1. Disable all display engine functions -> already done */
3022
3023	/* 2. Disable DBUF */
3024	icl_dbuf_disable(dev_priv);
3025
3026	/* 3. Disable CD clock */
3027	icl_uninit_cdclk(dev_priv);
3028
3029	/* 4. Disable Power Well 1 (PG1) and Aux IO Power */
3030	/* FIXME: ICL power wells code not here yet. */
3031
3032	/* 5. Disable Comp */
3033	for (port = PORT_A; port <= PORT_B; port++) {
3034		val = I915_READ(ICL_PHY_MISC(port));
3035		val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
3036		I915_WRITE(ICL_PHY_MISC(port), val);
3037	}
3038}
3039
3040static void chv_phy_control_init(struct drm_i915_private *dev_priv)
3041{
3042	struct i915_power_well *cmn_bc =
3043		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
3044	struct i915_power_well *cmn_d =
3045		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
3046
3047	/*
3048	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
3049	 * workaround never ever read DISPLAY_PHY_CONTROL, and
3050	 * instead maintain a shadow copy ourselves. Use the actual
3051	 * power well state and lane status to reconstruct the
3052	 * expected initial value.
3053	 */
3054	dev_priv->chv_phy_control =
3055		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
3056		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
3057		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
3058		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
3059		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
3060
3061	/*
3062	 * If all lanes are disabled we leave the override disabled
3063	 * with all power down bits cleared to match the state we
3064	 * would use after disabling the port. Otherwise enable the
3065	 * override and set the lane powerdown bits according to the
3066	 * current lane status.
3067	 */
3068	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
3069		uint32_t status = I915_READ(DPLL(PIPE_A));
3070		unsigned int mask;
3071
3072		mask = status & DPLL_PORTB_READY_MASK;
3073		if (mask == 0xf)
3074			mask = 0x0;
3075		else
3076			dev_priv->chv_phy_control |=
3077				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
3078
3079		dev_priv->chv_phy_control |=
3080			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
3081
3082		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
3083		if (mask == 0xf)
3084			mask = 0x0;
3085		else
3086			dev_priv->chv_phy_control |=
3087				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
3088
3089		dev_priv->chv_phy_control |=
3090			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
3091
3092		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
3093
3094		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
3095	} else {
3096		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
3097	}
3098
3099	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
3100		uint32_t status = I915_READ(DPIO_PHY_STATUS);
3101		unsigned int mask;
3102
3103		mask = status & DPLL_PORTD_READY_MASK;
3104
3105		if (mask == 0xf)
3106			mask = 0x0;
3107		else
3108			dev_priv->chv_phy_control |=
3109				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
3110
3111		dev_priv->chv_phy_control |=
3112			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
3113
3114		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
3115
3116		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
3117	} else {
3118		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
3119	}
3120
3121	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
3122
3123	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
3124		      dev_priv->chv_phy_control);
3125}
3126
3127static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
3128{
3129	struct i915_power_well *cmn =
3130		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
3131	struct i915_power_well *disp2d =
3132		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
3133
3134	/* If the display might already be active, skip this */
3135	if (cmn->ops->is_enabled(dev_priv, cmn) &&
3136	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
3137	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
3138		return;
3139
3140	DRM_DEBUG_KMS("toggling display PHY side reset\n");
3141
3142	/* cmnlane needs DPLL registers */
3143	disp2d->ops->enable(dev_priv, disp2d);
3144
3145	/*
3146	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
3147	 * Need to assert and de-assert PHY SB reset by gating the
3148	 * common lane power, then un-gating it.
3149	 * Simply ungating isn't enough to reset the PHY sufficiently to
3150	 * get the ports and lanes running.
3151	 */
3152	cmn->ops->disable(dev_priv, cmn);
3153}
3154
3155/**
3156 * intel_power_domains_init_hw - initialize hardware power domain state
3157 * @dev_priv: i915 device instance
3158 * @resume: whether we are called from a resume code path
3159 *
3160 * This function initializes the hardware power domain state and enables all
3161 * power wells belonging to the INIT power domain. Power wells in other
3162 * domains (and not in the INIT domain) are referenced or disabled during the
3163 * modeset state HW readout. After that the reference count of each power well
3164 * must match its HW enabled state, see intel_power_domains_verify_state().
3165 */
3166void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
3167{
3168	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3169
3170	power_domains->initializing = true;
3171
3172	if (IS_ICELAKE(dev_priv)) {
3173		icl_display_core_init(dev_priv, resume);
3174	} else if (IS_CANNONLAKE(dev_priv)) {
3175		cnl_display_core_init(dev_priv, resume);
3176	} else if (IS_GEN9_BC(dev_priv)) {
3177		skl_display_core_init(dev_priv, resume);
3178	} else if (IS_GEN9_LP(dev_priv)) {
3179		bxt_display_core_init(dev_priv, resume);
3180	} else if (IS_CHERRYVIEW(dev_priv)) {
3181		mutex_lock(&power_domains->lock);
3182		chv_phy_control_init(dev_priv);
3183		mutex_unlock(&power_domains->lock);
3184	} else if (IS_VALLEYVIEW(dev_priv)) {
3185		mutex_lock(&power_domains->lock);
3186		vlv_cmnlane_wa(dev_priv);
3187		mutex_unlock(&power_domains->lock);
3188	}
3189
3190	/* For now, we need the power well to be always enabled. */
3191	intel_display_set_init_power(dev_priv, true);
3192	/* Disable power well support if the user asked for it. */
3193	if (!i915_modparams.disable_power_well)
3194		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
3195	intel_power_domains_sync_hw(dev_priv);
3196	power_domains->initializing = false;
3197}
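
/*
 * Illustrative sketch, not part of the driver: a hypothetical bring-up
 * helper showing the ordering implied by the kernel-doc above. The HW
 * power domain state is initialized first, the modeset HW readout then
 * takes the references it needs, and only afterwards can the SW and HW
 * state be cross-checked.
 */
static void __maybe_unused example_display_power_bringup(struct drm_i915_private *dev_priv)
{
	intel_power_domains_init_hw(dev_priv, false);
	/* ... modeset HW state readout runs here ... */
	intel_power_domains_verify_state(dev_priv);
}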
3198
3199/**
3200 * intel_power_domains_suspend - suspend power domain state
3201 * @dev_priv: i915 device instance
3202 *
3203 * This function prepares the hardware power domain state before entering
3204 * system suspend. It must be paired with intel_power_domains_init_hw().
3205 */
3206void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
3207{
3208	/*
3209	 * Even if power well support was disabled we still want to disable
3210	 * power wells while we are system suspended.
3211	 */
3212	if (!i915_modparams.disable_power_well)
3213		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
3214
3215	if (IS_ICELAKE(dev_priv))
3216		icl_display_core_uninit(dev_priv);
3217	else if (IS_CANNONLAKE(dev_priv))
3218		cnl_display_core_uninit(dev_priv);
3219	else if (IS_GEN9_BC(dev_priv))
3220		skl_display_core_uninit(dev_priv);
3221	else if (IS_GEN9_LP(dev_priv))
3222		bxt_display_core_uninit(dev_priv);
3223}
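
/*
 * Illustrative sketch, not part of the driver: intel_power_domains_suspend()
 * is paired with an intel_power_domains_init_hw() call on resume, with
 * @resume set so that a cached DMC firmware payload gets reloaded. The
 * helper name here is hypothetical.
 */
static void __maybe_unused example_display_power_suspend_resume(struct drm_i915_private *dev_priv)
{
	intel_power_domains_suspend(dev_priv);
	/* ... system is suspended and later woken up ... */
	intel_power_domains_init_hw(dev_priv, true);
}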
3224
3225static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
3226{
3227	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3228	struct i915_power_well *power_well;
3229
3230	for_each_power_well(dev_priv, power_well) {
3231		enum intel_display_power_domain domain;
3232
3233		DRM_DEBUG_DRIVER("%-25s %d\n",
3234				 power_well->name, power_well->count);
3235
3236		for_each_power_domain(domain, power_well->domains)
3237			DRM_DEBUG_DRIVER("  %-23s %d\n",
3238					 intel_display_power_domain_str(domain),
3239					 power_domains->domain_use_count[domain]);
3240	}
3241}
3242
3243/**
3244 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
3245 * @dev_priv: i915 device instance
3246 *
3247 * Verify if the reference count of each power well matches its HW enabled
3248 * state and the total refcount of the domains it belongs to. This must be
3249 * called after modeset HW state sanitization, which is responsible for
3250 * acquiring reference counts for any power wells in use and disabling the
3251 * ones left on by BIOS but not required by any active output.
3252 */
3253void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
3254{
3255	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3256	struct i915_power_well *power_well;
3257	bool dump_domain_info;
3258
3259	mutex_lock(&power_domains->lock);
3260
3261	dump_domain_info = false;
3262	for_each_power_well(dev_priv, power_well) {
3263		enum intel_display_power_domain domain;
3264		int domains_count;
3265		bool enabled;
3266
3267		/*
3268		 * Power wells not belonging to any domain (like the MISC_IO
3269		 * and PW1 power wells) are under FW control, so ignore them,
3270		 * since their state can change asynchronously.
3271		 */
3272		if (!power_well->domains)
3273			continue;
3274
3275		enabled = power_well->ops->is_enabled(dev_priv, power_well);
3276		if ((power_well->count || power_well->always_on) != enabled)
3277			DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)\n",
3278				  power_well->name, power_well->count, enabled);
3279
3280		domains_count = 0;
3281		for_each_power_domain(domain, power_well->domains)
3282			domains_count += power_domains->domain_use_count[domain];
3283
3284		if (power_well->count != domains_count) {
3285			DRM_ERROR("power well %s refcount/domain refcount mismatch "
3286				  "(refcount %d/domains refcount %d)\n",
3287				  power_well->name, power_well->count,
3288				  domains_count);
3289			dump_domain_info = true;
3290		}
3291	}
3292
3293	if (dump_domain_info) {
3294		static bool dumped;
3295
3296		if (!dumped) {
3297			intel_power_domains_dump_info(dev_priv);
3298			dumped = true;
3299		}
3300	}
3301
3302	mutex_unlock(&power_domains->lock);
3303}
3304
3305/**
3306 * intel_runtime_pm_get - grab a runtime pm reference
3307 * @dev_priv: i915 device instance
3308 *
3309 * This function grabs a device-level runtime pm reference (mostly used for GEM
3310 * code to ensure the GTT or GT is on) and ensures that it is powered up.
3311 *
3312 * Any runtime pm reference obtained by this function must have a symmetric
3313 * call to intel_runtime_pm_put() to release the reference again.
3314 */
3315void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
3316{
3317	struct pci_dev *pdev = dev_priv->drm.pdev;
3318	struct device *kdev = &pdev->dev;
3319	int ret;
3320
3321	ret = pm_runtime_get_sync(kdev);
3322	WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
3323
3324	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
3325	assert_rpm_wakelock_held(dev_priv);
3326}
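
/*
 * Illustrative sketch, not part of the driver: a hypothetical helper
 * bracketing a register read with a get/put pair, which is the minimal
 * pattern for HW access from a context that doesn't already hold a
 * wakeref.
 */
static u32 __maybe_unused example_read_reg_powered(struct drm_i915_private *dev_priv,
						   i915_reg_t reg)
{
	u32 val;

	intel_runtime_pm_get(dev_priv);		/* wakes the device if needed */
	val = I915_READ(reg);
	intel_runtime_pm_put(dev_priv);		/* may re-arm autosuspend */

	return val;
}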
3327
3328/**
3329 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
3330 * @dev_priv: i915 device instance
3331 *
3332 * This function grabs a device-level runtime pm reference if the device is
3333 * already in use and ensures that it is powered up. It is illegal to
3334 * access the HW if intel_runtime_pm_get_if_in_use() reports failure.
3335 *
3336 * Any runtime pm reference obtained by this function must have a symmetric
3337 * call to intel_runtime_pm_put() to release the reference again.
3338 *
3339 * Returns: True if the wakeref was acquired, or False otherwise.
3340 */
3341bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
3342{
3343	if (IS_ENABLED(CONFIG_PM)) {
3344		struct pci_dev *pdev = dev_priv->drm.pdev;
3345		struct device *kdev = &pdev->dev;
3346
3347		/*
3348		 * In case runtime PM is disabled by the RPM core and we get
3349		 * an -EINVAL return value, we are not supposed to call this
3350		 * function, since the power state is undefined. At the moment
3351		 * this applies to the late/early system suspend/resume handlers.
3352		 */
3353		if (pm_runtime_get_if_in_use(kdev) <= 0)
3354			return false;
3355	}
3356
3357	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
3358	assert_rpm_wakelock_held(dev_priv);
3359
3360	return true;
3361}
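
/*
 * Illustrative sketch, not part of the driver: a hypothetical helper for
 * opportunistic HW access that simply skips the work when the device is
 * runtime suspended, instead of waking it up.
 */
static bool __maybe_unused example_poke_hw_if_awake(struct drm_i915_private *dev_priv)
{
	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return false;	/* asleep: accessing the HW would be illegal */

	/* ... HW access that is only worthwhile on an awake device ... */

	intel_runtime_pm_put(dev_priv);
	return true;
}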
3362
3363/**
3364 * intel_runtime_pm_get_noresume - grab a runtime pm reference
3365 * @dev_priv: i915 device instance
3366 *
3367 * This function grabs a device-level runtime pm reference (mostly used for GEM
3368 * code to ensure the GTT or GT is on).
3369 *
3370 * It will _not_ power up the device but instead only check that it's powered
3371 * on.  Therefore it is only valid to call this function from contexts where
3372 * the device is known to be powered up and where trying to power it up would
3373 * result in hilarity and deadlocks. That pretty much means only the system
3374 * suspend/resume code where this is used to grab runtime pm references for
3375 * delayed setup down in work items.
3376 *
3377 * Any runtime pm reference obtained by this function must have a symmetric
3378 * call to intel_runtime_pm_put() to release the reference again.
3379 */
3380void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
3381{
3382	struct pci_dev *pdev = dev_priv->drm.pdev;
3383	struct device *kdev = &pdev->dev;
3384
3385	assert_rpm_wakelock_held(dev_priv);
3386	pm_runtime_get_noresume(kdev);
3387
3388	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
3389}
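
/*
 * Illustrative sketch, not part of the driver: from a context known to
 * hold a wakeref already (e.g. system resume), a reference for deferred
 * setup can be taken without triggering a resume; the (here hypothetical)
 * work item is then responsible for the matching intel_runtime_pm_put().
 */
static void __maybe_unused example_queue_delayed_setup(struct drm_i915_private *dev_priv,
						       struct work_struct *work)
{
	intel_runtime_pm_get_noresume(dev_priv);
	schedule_work(work);
}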
3390
3391/**
3392 * intel_runtime_pm_put - release a runtime pm reference
3393 * @dev_priv: i915 device instance
3394 *
3395 * This function drops the device-level runtime pm reference obtained by
3396 * intel_runtime_pm_get() and might power down the corresponding
3397 * hardware block right away if this is the last reference.
3398 */
3399void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
3400{
3401	struct pci_dev *pdev = dev_priv->drm.pdev;
3402	struct device *kdev = &pdev->dev;
3403
3404	assert_rpm_wakelock_held(dev_priv);
3405	atomic_dec(&dev_priv->runtime_pm.wakeref_count);
3406
3407	pm_runtime_mark_last_busy(kdev);
3408	pm_runtime_put_autosuspend(kdev);
3409}
3410
3411/**
3412 * intel_runtime_pm_enable - enable runtime pm
3413 * @dev_priv: i915 device instance
3414 *
3415 * This function enables runtime pm at the end of the driver load sequence.
3416 *
3417 * Note that this function does not currently enable runtime pm for the
3418 * subordinate display power domains. That is only done on the first modeset
3419 * using intel_display_set_init_power().
3420 */
3421void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
3422{
3423	struct pci_dev *pdev = dev_priv->drm.pdev;
3424	struct device *kdev = &pdev->dev;
3425
3426	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
3427	pm_runtime_mark_last_busy(kdev);
3428
3429	/*
3430	 * Take a permanent reference to disable the RPM functionality and drop
3431	 * it only when unloading the driver. Use the low level get/put helpers,
3432	 * so the driver's own RPM reference tracking asserts also work on
3433	 * platforms without RPM support.
3434	 */
3435	if (!HAS_RUNTIME_PM(dev_priv)) {
3436		int ret;
3437
3438		pm_runtime_dont_use_autosuspend(kdev);
3439		ret = pm_runtime_get_sync(kdev);
3440		WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
3441	} else {
3442		pm_runtime_use_autosuspend(kdev);
3443	}
3444
3445	/*
3446	 * The core calls the driver load handler with an RPM reference held.
3447	 * We drop that here and will reacquire it during unloading in
3448	 * intel_power_domains_fini().
3449	 */
3450	pm_runtime_put_autosuspend(kdev);
3451}
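
/*
 * Illustrative sketch, not part of the driver: per the kernel-doc above,
 * runtime PM is switched on once, at the tail of a (hypothetical) driver
 * load function, after all power domain state has been initialized.
 */
static void __maybe_unused example_driver_load_tail(struct drm_i915_private *dev_priv)
{
	intel_power_domains_init_hw(dev_priv, false);
	intel_runtime_pm_enable(dev_priv);
}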