// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_force_wake.h"

#include <drm/drm_util.h>

#include "regs/xe_gt_regs.h"
#include "regs/xe_reg_defs.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_mmio.h"
#include "xe_sriov.h"

#define XE_FORCE_WAKE_ACK_TIMEOUT_MS	50

static const char *str_wake_sleep(bool wake)
{
	return wake ? "wake" : "sleep";
}

static void mark_domain_initialized(struct xe_force_wake *fw,
				    enum xe_force_wake_domain_id id)
{
	fw->initialized_domains |= BIT(id);
}

static void init_domain(struct xe_force_wake *fw,
			enum xe_force_wake_domain_id id,
			struct xe_reg reg, struct xe_reg ack)
{
	struct xe_force_wake_domain *domain = &fw->domains[id];

	domain->id = id;
	domain->reg_ctl = reg;
	domain->reg_ack = ack;
	domain->val = FORCEWAKE_MT(FORCEWAKE_KERNEL);
	domain->mask = FORCEWAKE_MT_MASK(FORCEWAKE_KERNEL);

	mark_domain_initialized(fw, id);
}
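
/*
 * Note: the FORCEWAKE_MT registers use a masked-write format: the upper
 * 16 bits of the written value select which of the lower 16 bits take
 * effect. That is why domain->mask is written on every control access,
 * with or without domain->val, in __domain_ctl() below.
 */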

void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw)
{
	struct xe_device *xe = gt_to_xe(gt);

	fw->gt = gt;
	spin_lock_init(&fw->lock);

	/* Assuming gen11+ so assert this assumption is correct */
	xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);

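	/*
	 * Graphics IP version 12.70+ (Xe_LPG, e.g. Meteor Lake) moved the
	 * GT forcewake ack to a new offset, hence the dedicated register.
	 */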
	if (xe->info.graphics_verx100 >= 1270) {
		init_domain(fw, XE_FW_DOMAIN_ID_GT,
			    FORCEWAKE_GT,
			    FORCEWAKE_ACK_GT_MTL);
	} else {
		init_domain(fw, XE_FW_DOMAIN_ID_GT,
			    FORCEWAKE_GT,
			    FORCEWAKE_ACK_GT);
	}
}

void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
{
	int i, j;

	/* Assuming gen11+ so assert this assumption is correct */
	xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);

	if (!xe_gt_is_media_type(gt))
		init_domain(fw, XE_FW_DOMAIN_ID_RENDER,
			    FORCEWAKE_RENDER,
			    FORCEWAKE_ACK_RENDER);

	for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		init_domain(fw, XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j,
			    FORCEWAKE_MEDIA_VDBOX(j),
			    FORCEWAKE_ACK_MEDIA_VDBOX(j));
	}

	for (i = XE_HW_ENGINE_VECS0, j = 0; i <= XE_HW_ENGINE_VECS3; ++i, ++j) {
		if (!(gt->info.engine_mask & BIT(i)))
			continue;

		init_domain(fw, XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j,
			    FORCEWAKE_MEDIA_VEBOX(j),
			    FORCEWAKE_ACK_MEDIA_VEBOX(j));
	}

	if (gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0))
		init_domain(fw, XE_FW_DOMAIN_ID_GSC,
			    FORCEWAKE_GSC,
			    FORCEWAKE_ACK_GSC);
}

static void __domain_ctl(struct xe_gt *gt, struct xe_force_wake_domain *domain, bool wake)
{
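	/*
	 * SR-IOV VFs have no access to the forcewake registers: power
	 * management, forcewake included, is the PF's job, so both the
	 * control write and the ack wait are no-ops on a VF.
	 */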
	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return;

	xe_mmio_write32(&gt->mmio, domain->reg_ctl, domain->mask | (wake ? domain->val : 0));
}

static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain, bool wake)
{
	u32 value;
	int ret;

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return 0;

	ret = xe_mmio_wait32(&gt->mmio, domain->reg_ack, domain->val, wake ? domain->val : 0,
			     XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
			     &value, true);
	if (ret)
		xe_gt_err(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
			  domain->id, str_wake_sleep(wake), ERR_PTR(ret),
			  domain->reg_ack.addr, value);
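	/*
	 * An all-ones readback is the usual signature of MMIO gone bad
	 * (e.g. the device dropped off the bus), so report it explicitly
	 * instead of trusting the ack value.
	 */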
	if (value == ~0) {
		xe_gt_err(gt,
			  "Force wake domain %d: %s. MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n",
			  domain->id, str_wake_sleep(wake));
		ret = -EIO;
	}

	return ret;
}

static void domain_wake(struct xe_gt *gt, struct xe_force_wake_domain *domain)
{
	__domain_ctl(gt, domain, true);
}

static int domain_wake_wait(struct xe_gt *gt,
			    struct xe_force_wake_domain *domain)
{
	return __domain_wait(gt, domain, true);
}

static void domain_sleep(struct xe_gt *gt, struct xe_force_wake_domain *domain)
{
	__domain_ctl(gt, domain, false);
}

static int domain_sleep_wait(struct xe_gt *gt,
			     struct xe_force_wake_domain *domain)
{
	return __domain_wait(gt, domain, false);
}

#define for_each_fw_domain_masked(domain__, mask__, fw__, tmp__) \
	for (tmp__ = (mask__); tmp__; tmp__ &= ~BIT(ffs(tmp__) - 1)) \
		for_each_if((domain__ = ((fw__)->domains + \
					 (ffs(tmp__) - 1))) && \
					 domain__->reg_ctl.addr)
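
/*
 * The iterator above walks the set bits of mask__ from lowest to highest
 * (ffs() is 1-based), clearing each visited bit from the scratch copy in
 * tmp__, and skips any slot whose domain was never initialized (its
 * reg_ctl address is still zero).
 */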

/**
 * xe_force_wake_get() - Increase the domain refcount
 * @fw: pointer to the force wake structure
 * @domains: forcewake domains to get a refcount on
 *
 * This function wakes up @domains if they are asleep and takes references.
 * If the requested domain is XE_FORCEWAKE_ALL, only the applicable/initialized
 * domains are considered for refcounting, and it is the caller's
 * responsibility to check whether the returned reference includes a specific
 * domain using xe_force_wake_ref_has_domain(). The caller must call
 * xe_force_wake_put() to drop the references taken here (see the usage
 * sketch at the end of this file).
 *
 * Return: opaque reference to the woken domains, or zero if none of the
 * requested domains could be woken.
 */
unsigned int __must_check xe_force_wake_get(struct xe_force_wake *fw,
					    enum xe_force_wake_domains domains)
{
	struct xe_gt *gt = fw->gt;
	struct xe_force_wake_domain *domain;
	unsigned int ref_incr = 0, awake_rqst = 0, awake_failed = 0;
	unsigned int tmp, ref_rqst;
	unsigned long flags;

	xe_gt_assert(gt, is_power_of_2(domains));
	xe_gt_assert(gt, domains <= XE_FORCEWAKE_ALL);
	xe_gt_assert(gt, domains == XE_FORCEWAKE_ALL || fw->initialized_domains & domains);

	ref_rqst = (domains == XE_FORCEWAKE_ALL) ? fw->initialized_domains : domains;
	spin_lock_irqsave(&fw->lock, flags);
	for_each_fw_domain_masked(domain, ref_rqst, fw, tmp) {
		if (!domain->ref++) {
			awake_rqst |= BIT(domain->id);
			domain_wake(gt, domain);
		}
		ref_incr |= BIT(domain->id);
	}
	for_each_fw_domain_masked(domain, awake_rqst, fw, tmp) {
		if (domain_wake_wait(gt, domain) == 0) {
			fw->awake_domains |= BIT(domain->id);
		} else {
			awake_failed |= BIT(domain->id);
			--domain->ref;
		}
	}
	ref_incr &= ~awake_failed;
	spin_unlock_irqrestore(&fw->lock, flags);

	xe_gt_WARN(gt, awake_failed, "Forcewake domain%s %#x failed to acknowledge awake request\n",
		   str_plural(hweight_long(awake_failed)), awake_failed);

	if (domains == XE_FORCEWAKE_ALL && ref_incr == fw->initialized_domains)
		ref_incr |= XE_FORCEWAKE_ALL;

	return ref_incr;
}

/**
 * xe_force_wake_put() - Decrement the refcount and put domains to sleep when it reaches 0
 * @fw: pointer to the force wake structure
 * @fw_ref: reference returned by xe_force_wake_get()
 *
 * This function decrements the reference counts of the domains in @fw_ref.
 * If the refcount of any of those domains reaches 0, the domain is put to
 * sleep and its sleep acknowledgment is awaited within a 50 millisecond
 * timeout. Warns if a domain fails to acknowledge the sleep request in time.
 */
void xe_force_wake_put(struct xe_force_wake *fw, unsigned int fw_ref)
{
	struct xe_gt *gt = fw->gt;
	struct xe_force_wake_domain *domain;
	unsigned int tmp, sleep = 0;
	unsigned long flags;
	int ack_fail = 0;

	/*
	 * Avoid an unnecessary lock and unlock when the function is called
	 * in the error path of individual domains.
	 */
	if (!fw_ref)
		return;

	if (xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
		fw_ref = fw->initialized_domains;

	spin_lock_irqsave(&fw->lock, flags);
	for_each_fw_domain_masked(domain, fw_ref, fw, tmp) {
		xe_gt_assert(gt, domain->ref);

		if (!--domain->ref) {
			sleep |= BIT(domain->id);
			domain_sleep(gt, domain);
		}
	}
	for_each_fw_domain_masked(domain, sleep, fw, tmp) {
		if (domain_sleep_wait(gt, domain) == 0)
			fw->awake_domains &= ~BIT(domain->id);
		else
			ack_fail |= BIT(domain->id);
	}
	spin_unlock_irqrestore(&fw->lock, flags);

	xe_gt_WARN(gt, ack_fail, "Forcewake domain%s %#x failed to acknowledge sleep request\n",
		   str_plural(hweight_long(ack_fail)), ack_fail);
}
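
/*
 * Usage sketch (illustrative only, not part of the driver): the expected
 * caller pattern is take a reference, verify it covers the domains you
 * need, do the register work, then drop exactly what was returned. The
 * helper below is hypothetical and exists purely to demonstrate the
 * get/check/put contract documented above.
 */
static int __maybe_unused example_forcewake_usage(struct xe_force_wake *fw)
{
	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(fw, XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
		/* Some domain failed to wake; drop whatever we did get. */
		xe_force_wake_put(fw, fw_ref);
		return -ETIMEDOUT;
	}

	/* ... MMIO accesses that require the GT to be awake go here ... */

	xe_force_wake_put(fw, fw_ref);
	return 0;
}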