v6.13.7
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pcode.h"

#include <linux/delay.h>
#include <linux/errno.h>

#include <drm/drm_managed.h>

#include "xe_assert.h"
#include "xe_device.h"
#include "xe_mmio.h"
#include "xe_pcode_api.h"

/**
 * DOC: PCODE
 *
 * Xe PCODE is the component responsible for interfacing with the PCODE
 * firmware.
 * It shall provide a very simple ABI to other Xe components, but be the
 * single and consolidated place that will communicate with PCODE. All read
 * and write operations to PCODE will be internal and private to this component.
 *
 * What's next:
 * - PCODE hw metrics
 * - PCODE for display operations
 */
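
/*
 * Illustrative example (editorial addition, not from the original file):
 * another Xe component using this ABI to read a PCODE mailbox might do
 * something along these lines, with MBOX_ID standing in for one of the
 * mailbox IDs defined in xe_pcode_api.h:
 *
 *	u32 val = 0;
 *	int err = xe_pcode_read(tile, MBOX_ID, &val, NULL);
 *	if (err)
 *		return err;
 */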

static int pcode_mailbox_status(struct xe_tile *tile)
{
	u32 err;
	static const struct pcode_err_decode err_decode[] = {
		[PCODE_ILLEGAL_CMD] = {-ENXIO, "Illegal Command"},
		[PCODE_TIMEOUT] = {-ETIMEDOUT, "Timed out"},
		[PCODE_ILLEGAL_DATA] = {-EINVAL, "Illegal Data"},
		[PCODE_ILLEGAL_SUBCOMMAND] = {-ENXIO, "Illegal Subcommand"},
		[PCODE_LOCKED] = {-EBUSY, "PCODE Locked"},
		[PCODE_GT_RATIO_OUT_OF_RANGE] = {-EOVERFLOW,
			"GT ratio out of range"},
		[PCODE_REJECTED] = {-EACCES, "PCODE Rejected"},
		[PCODE_ERROR_MASK] = {-EPROTO, "Unknown"},
	};

	err = xe_mmio_read32(&tile->mmio, PCODE_MAILBOX) & PCODE_ERROR_MASK;
	if (err) {
		drm_err(&tile_to_xe(tile)->drm, "PCODE Mailbox failed: %d %s", err,
			err_decode[err].str ?: "Unknown");
		return err_decode[err].errno ?: -EPROTO;
	}

	return 0;
}

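/*
 * Editorial note on the handshake implemented below: a mailbox transaction
 * writes DATA0/DATA1, triggers the command by writing PCODE_READY together
 * with the mailbox ID, waits for the firmware to clear PCODE_READY,
 * optionally reads the results back, and finally decodes the error field
 * via pcode_mailbox_status().
 */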
static int __pcode_mailbox_rw(struct xe_tile *tile, u32 mbox, u32 *data0, u32 *data1,
			      unsigned int timeout_ms, bool return_data,
			      bool atomic)
{
	struct xe_mmio *mmio = &tile->mmio;
	int err;

	if (tile_to_xe(tile)->info.skip_pcode)
		return 0;

	if ((xe_mmio_read32(mmio, PCODE_MAILBOX) & PCODE_READY) != 0)
		return -EAGAIN;

	xe_mmio_write32(mmio, PCODE_DATA0, *data0);
	xe_mmio_write32(mmio, PCODE_DATA1, data1 ? *data1 : 0);
	xe_mmio_write32(mmio, PCODE_MAILBOX, PCODE_READY | mbox);

	err = xe_mmio_wait32(mmio, PCODE_MAILBOX, PCODE_READY, 0,
			     timeout_ms * USEC_PER_MSEC, NULL, atomic);
	if (err)
		return err;

	if (return_data) {
		*data0 = xe_mmio_read32(mmio, PCODE_DATA0);
		if (data1)
			*data1 = xe_mmio_read32(mmio, PCODE_DATA1);
	}

	return pcode_mailbox_status(tile);
}

static int pcode_mailbox_rw(struct xe_tile *tile, u32 mbox, u32 *data0, u32 *data1,
			    unsigned int timeout_ms, bool return_data,
			    bool atomic)
{
	if (tile_to_xe(tile)->info.skip_pcode)
		return 0;

	lockdep_assert_held(&tile->pcode.lock);

	return __pcode_mailbox_rw(tile, mbox, data0, data1, timeout_ms, return_data, atomic);
}

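/**
 * xe_pcode_write_timeout - write a value to a PCODE mailbox
 * @tile: tile instance
 * @mbox: PCODE mailbox ID the write is targeted for
 * @data: value written to PCODE_DATA0
 * @timeout: time, in ms, to wait for the mailbox to become idle
 *
 * Editorial addition: this kernel-doc block describes the behaviour visible
 * in the function below and is not part of the original file.
 *
 * Returns 0 on success, negative error code on failure.
 */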
int xe_pcode_write_timeout(struct xe_tile *tile, u32 mbox, u32 data, int timeout)
{
	int err;

	mutex_lock(&tile->pcode.lock);
	err = pcode_mailbox_rw(tile, mbox, &data, NULL, timeout, false, false);
	mutex_unlock(&tile->pcode.lock);

	return err;
}

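/**
 * xe_pcode_read - read one or two values from a PCODE mailbox
 * @tile: tile instance
 * @mbox: PCODE mailbox ID the read is targeted for
 * @val: returned PCODE_DATA0 value
 * @val1: optional returned PCODE_DATA1 value, may be NULL
 *
 * Editorial addition: this kernel-doc block describes the behaviour visible
 * in the function below and is not part of the original file.
 *
 * Returns 0 on success, negative error code on failure.
 */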
int xe_pcode_read(struct xe_tile *tile, u32 mbox, u32 *val, u32 *val1)
{
	int err;

	mutex_lock(&tile->pcode.lock);
	err = pcode_mailbox_rw(tile, mbox, val, val1, 1, true, false);
	mutex_unlock(&tile->pcode.lock);

	return err;
}

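/*
 * Editorial note: pcode_try_request() below polls the mailbox with an
 * exponential backoff, starting at 10 us and doubling the wait on every
 * retry, until the masked reply matches or @timeout_us expires. With
 * @atomic it busy-waits via udelay(); otherwise it sleeps.
 */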
static int pcode_try_request(struct xe_tile *tile, u32 mbox,
			     u32 request, u32 reply_mask, u32 reply,
			     u32 *status, bool atomic, int timeout_us, bool locked)
{
	int slept, wait = 10;

	xe_tile_assert(tile, timeout_us > 0);

	for (slept = 0; slept < timeout_us; slept += wait) {
		if (locked)
			*status = pcode_mailbox_rw(tile, mbox, &request, NULL, 1, true,
						   atomic);
		else
			*status = __pcode_mailbox_rw(tile, mbox, &request, NULL, 1, true,
						     atomic);
		if ((*status == 0) && ((request & reply_mask) == reply))
			return 0;

		if (atomic)
			udelay(wait);
		else
			usleep_range(wait, wait << 1);
		wait <<= 1;
	}

	return -ETIMEDOUT;
}

/**
 * xe_pcode_request - send PCODE request until acknowledgment
 * @tile: tile
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error, or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms and, if that times out, for another 50 ms with
 * preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
 * other error as reported by PCODE.
 */
int xe_pcode_request(struct xe_tile *tile, u32 mbox, u32 request,
		     u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	xe_tile_assert(tile, timeout_base_ms <= 3);

	mutex_lock(&tile->pcode.lock);

	ret = pcode_try_request(tile, mbox, request, reply_mask, reply, &status,
				false, timeout_base_ms * 1000, true);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
	 * account for interrupts that could reduce the number of these
	 * requests, and for any quirks of the PCODE firmware that delay
	 * the request completion.
	 */
	drm_err(&tile_to_xe(tile)->drm,
		"PCODE timeout, retrying with preemption disabled\n");
	preempt_disable();
	ret = pcode_try_request(tile, mbox, request, reply_mask, reply, &status,
				true, 50 * 1000, true);
	preempt_enable();

out:
	mutex_unlock(&tile->pcode.lock);
	return status ? status : ret;
}
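
/*
 * Hypothetical usage sketch (editorial addition): a caller waiting for a
 * firmware acknowledgment bit could look like
 *
 *	err = xe_pcode_request(tile, MBOX_ID, REQ_VAL, ACK_MASK, ACK_MASK, 1);
 *
 * where MBOX_ID, REQ_VAL and ACK_MASK stand in for mailbox-specific
 * definitions; see xe_pcode_ready() below for a real polling pattern built
 * on pcode_try_request().
 */
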
/**
 * xe_pcode_init_min_freq_table - Initialize PCODE's QOS frequency table
 * @tile: tile instance
 * @min_gt_freq: Minimal (RPn) GT frequency in units of 50MHz.
 * @max_gt_freq: Maximal (RP0) GT frequency in units of 50MHz.
 *
 * This function initializes PCODE's QOS frequency table for proper minimal
 * frequency/power steering decisions, depending on the currently requested GT
 * frequency. On older platforms this was a more complete table that also
 * included the IA frequency; on the latest platforms it has become a simple
 * 1:1 Ring vs GT frequency mapping. Even so, without setting it, PCODE might
 * not make the right decisions for some memory frequencies, affecting latency.
 *
 * Returns 0 on success, -EINVAL if the maximum frequency is not higher than
 * the minimum, and otherwise a negative error translated directly from the
 * PCODE error returns:
 * - -ENXIO: "Illegal Command"
 * - -ETIMEDOUT: "Timed out"
 * - -EINVAL: "Illegal Data"
 * - -ENXIO: "Illegal Subcommand"
 * - -EBUSY: "PCODE Locked"
 * - -EOVERFLOW: "GT ratio out of range"
 * - -EACCES: "PCODE Rejected"
 * - -EPROTO: "Unknown"
 */
int xe_pcode_init_min_freq_table(struct xe_tile *tile, u32 min_gt_freq,
				 u32 max_gt_freq)
{
	int ret;
	u32 freq;

	if (!tile_to_xe(tile)->info.has_llc)
		return 0;

	if (max_gt_freq <= min_gt_freq)
		return -EINVAL;

	mutex_lock(&tile->pcode.lock);
	for (freq = min_gt_freq; freq <= max_gt_freq; freq++) {
		u32 data = freq << PCODE_FREQ_RING_RATIO_SHIFT | freq;

		ret = pcode_mailbox_rw(tile, PCODE_WRITE_MIN_FREQ_TABLE,
				       &data, NULL, 1, false, false);
		if (ret)
			goto unlock;
	}

unlock:
	mutex_unlock(&tile->pcode.lock);
	return ret;
}

/**
 * xe_pcode_ready - Ensure PCODE is initialized
 * @xe: xe instance
 * @locked: true if lock held, false otherwise
 *
 * The PCODE init mailbox is polled only on the root GT of the root tile,
 * since the root tile reports that initialization is complete only after
 * all tiles have finished initializing.
 * Called without locks during early probe, and with locks held in the
 * resume path.
 *
 * Returns 0 on success, and -error number on failure.
 */
int xe_pcode_ready(struct xe_device *xe, bool locked)
{
	u32 status, request = DGFX_GET_INIT_STATUS;
	struct xe_tile *tile = xe_device_get_root_tile(xe);
	int timeout_us = 180000000; /* 3 min */
	int ret;

	if (xe->info.skip_pcode)
		return 0;

	if (!IS_DGFX(xe))
		return 0;

	if (locked)
		mutex_lock(&tile->pcode.lock);

	ret = pcode_try_request(tile, DGFX_PCODE_STATUS, request,
				DGFX_INIT_STATUS_COMPLETE,
				DGFX_INIT_STATUS_COMPLETE,
				&status, false, timeout_us, locked);

	if (locked)
		mutex_unlock(&tile->pcode.lock);

	if (ret)
		drm_err(&xe->drm,
			"PCODE initialization timed out after 3 min\n");

	return ret;
}

/**
 * xe_pcode_init - initialize components of PCODE
 * @tile: tile instance
 *
 * This function initializes the xe_pcode component.
 * To be called once only during probe.
 */
void xe_pcode_init(struct xe_tile *tile)
{
	drmm_mutex_init(&tile_to_xe(tile)->drm, &tile->pcode.lock);
}

/**
 * xe_pcode_probe_early - check PCODE initialization status
 * @xe: xe instance
 *
 * This function checks the initialization status of PCODE.
 * To be called once only during early probe, without locks.
 *
 * Returns 0 on success, error code otherwise
 */
int xe_pcode_probe_early(struct xe_device *xe)
{
	return xe_pcode_ready(xe, false);
}
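
/*
 * Editorial note (not part of the original file): per the kernel-doc above,
 * the expected call order is xe_pcode_probe_early() once during early probe
 * (no locks taken), xe_pcode_init() once per tile during probe to set up the
 * mailbox lock, and xe_pcode_ready(xe, true) again in the resume path.
 */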