// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2024 Intel Corporation
 */

#include <linux/kernel.h>

#include "intel_de.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
#include "intel_dmc_wl.h"
/**
 * DOC: DMC wakelock support
 *
 * Wake lock is the mechanism that causes the display engine to exit
 * DC states, allowing registers that are powered down in those states
 * to be programmed. Previous hardware generations exited DC states
 * automatically when they detected register programming. Now software
 * controls the exit by programming the wake lock. This improves
 * system performance and system interactions, and better fits the
 * flip queue style of programming. The wake lock is only required
 * when DC5, DC6, or DC6v have been enabled in DC_STATE_EN and the
 * wake lock mode of operation has been enabled.
 *
 * The wakelock mechanism in DMC allows the display engine to exit DC
 * states explicitly before programming registers that may be powered
 * down.  In earlier hardware, this was done automatically and
 * implicitly when the display engine accessed a register.  With the
 * wakelock implementation, the driver asserts a wakelock in DMC,
 * which forces it to exit the DC state until the wakelock is
 * deasserted.
 *
 * The mechanism can be enabled and disabled by writing to the
 * DMC_WAKELOCK_CFG register.  There are also 13 control registers
 * that can be used to hold and release different wakelocks.  In the
 * current implementation, we only need one wakelock, so only
 * DMC_WAKELOCK1_CTL is used.  The other definitions are here for
 * potential future use.
 */
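
/*
 * Illustrative usage sketch (not part of this file): a caller that
 * needs to program a register which may be powered down in DC states
 * would bracket the access with a get/put pair, e.g.:
 *
 *	intel_dmc_wl_get(display, reg);
 *	intel_de_write(display, reg, val);
 *	intel_dmc_wl_put(display, reg);
 *
 * In practice, the intel_de register I/O helpers are expected to do
 * this on behalf of their callers.
 */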

#define DMC_WAKELOCK_CTL_TIMEOUT 5
#define DMC_WAKELOCK_HOLD_TIME 50

struct intel_dmc_wl_range {
	u32 start;
	u32 end;
};

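/*
 * Register ranges that require the wakelock to be held while they are
 * programmed.  The "lnl" prefix refers to Lunar Lake (display version
 * 20).
 */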
static struct intel_dmc_wl_range lnl_wl_range[] = {
	{ .start = 0x60000, .end = 0x7ffff },
};

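/*
 * Don't deassert the wakelock right away: schedule the release work
 * instead, so that accesses happening within DMC_WAKELOCK_HOLD_TIME
 * ms can reuse the still-held wakelock.
 */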
static void __intel_dmc_wl_release(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct intel_dmc_wl *wl = &display->wl;

	WARN_ON(refcount_read(&wl->refcount));

	queue_delayed_work(i915->unordered_wq, &wl->work,
			   msecs_to_jiffies(DMC_WAKELOCK_HOLD_TIME));
}

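/*
 * Delayed work performing the actual release: deassert the request
 * bit in DMC_WAKELOCK1_CTL and wait for the hardware to acknowledge.
 */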
static void intel_dmc_wl_work(struct work_struct *work)
{
	struct intel_dmc_wl *wl =
		container_of(work, struct intel_dmc_wl, work.work);
	struct intel_display *display =
		container_of(wl, struct intel_display, wl);
	unsigned long flags;

	spin_lock_irqsave(&wl->lock, flags);

	/*
	 * Bail out if refcount became non-zero while waiting for the
	 * spinlock, meaning that the wakelock was taken again and this
	 * release must be skipped.
	 */
	if (refcount_read(&wl->refcount))
		goto out_unlock;

	__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);

	if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
					      DMC_WAKELOCK_CTL_ACK, 0,
					      DMC_WAKELOCK_CTL_TIMEOUT)) {
		WARN_RATELIMIT(1, "DMC wakelock release timed out");
		goto out_unlock;
	}

	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

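/*
 * Check whether a register offset falls within an address range that
 * requires the wakelock.
 */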
static bool intel_dmc_wl_check_range(u32 address)
{
	int i;
	bool wl_needed = false;

	for (i = 0; i < ARRAY_SIZE(lnl_wl_range); i++) {
		if (address >= lnl_wl_range[i].start &&
		    address <= lnl_wl_range[i].end) {
			wl_needed = true;
			break;
		}
	}

	return wl_needed;
}

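/*
 * The wakelock mechanism is used only on display version 20+, with a
 * DMC firmware payload loaded and the enable_dmc_wl display parameter
 * set.
 */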
static bool __intel_dmc_wl_supported(struct intel_display *display)
{
	if (DISPLAY_VER(display) < 20 ||
	    !intel_dmc_has_payload(display) ||
	    !display->params.enable_dmc_wl)
		return false;

	return true;
}

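/*
 * Initialize the wakelock software state.  This runs before the DMC
 * firmware is loaded, so only the checks that don't depend on the DMC
 * payload are done here.
 */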
void intel_dmc_wl_init(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	/* don't call __intel_dmc_wl_supported(), DMC is not loaded yet */
	if (DISPLAY_VER(display) < 20 || !display->params.enable_dmc_wl)
		return;

	INIT_DELAYED_WORK(&wl->work, intel_dmc_wl_work);
	spin_lock_init(&wl->lock);
	refcount_set(&wl->refcount, 0);
}

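/*
 * Enable the wakelock mechanism by setting the enable bit in
 * DMC_WAKELOCK_CFG.  No wakelock is taken at this point.
 */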
void intel_dmc_wl_enable(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	if (wl->enabled)
		goto out_unlock;

	/*
	 * Enable wakelock in DMC.  We shouldn't try to take the
	 * wakelock, because we're just enabling it, so call the
	 * non-locking version directly here.
	 */
	__intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE);

	wl->enabled = true;
	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

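/*
 * Disable the wakelock mechanism: flush any pending release work,
 * clear the enable bit in DMC_WAKELOCK_CFG and reset the software
 * state.
 */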
void intel_dmc_wl_disable(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	flush_delayed_work(&wl->work);

	spin_lock_irqsave(&wl->lock, flags);

	if (!wl->enabled)
		goto out_unlock;

	/* Disable wakelock in DMC */
	__intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0);

	refcount_set(&wl->refcount, 0);
	wl->enabled = false;
	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

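/*
 * Take a reference on the wakelock for the given register.  On the
 * 0 -> 1 refcount transition, assert the request bit in
 * DMC_WAKELOCK1_CTL and wait for the hardware acknowledgment, unless
 * the wakelock is still held from a previous, not yet released,
 * reference.
 */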
void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	if (!intel_dmc_wl_check_range(reg.reg))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	if (!wl->enabled)
		goto out_unlock;

	cancel_delayed_work(&wl->work);

	if (refcount_inc_not_zero(&wl->refcount))
		goto out_unlock;

	refcount_set(&wl->refcount, 1);

	/*
	 * Only try to take the wakelock if it's not marked as taken
	 * yet.  It may be already taken at this point if we have
	 * already released the last reference, but the work has not
	 * run yet.
	 */
	if (!wl->taken) {
		__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
				    DMC_WAKELOCK_CTL_REQ);

		if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
						      DMC_WAKELOCK_CTL_ACK,
						      DMC_WAKELOCK_CTL_ACK,
						      DMC_WAKELOCK_CTL_TIMEOUT)) {
			WARN_RATELIMIT(1, "DMC wakelock ack timed out");
			goto out_unlock;
		}

		wl->taken = true;
	}

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

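/*
 * Drop a reference on the wakelock.  When the last reference is
 * dropped, schedule the delayed release work instead of deasserting
 * the wakelock immediately, so closely spaced accesses can reuse it.
 */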
void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	if (!intel_dmc_wl_check_range(reg.reg))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	if (!wl->enabled)
		goto out_unlock;

	if (WARN_RATELIMIT(!refcount_read(&wl->refcount),
			   "Tried to put wakelock with refcount zero\n"))
		goto out_unlock;

	if (refcount_dec_and_test(&wl->refcount)) {
		__intel_dmc_wl_release(display);

		goto out_unlock;
	}

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}