// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gsc_proxy.h"

#include <linux/component.h>
#include <linux/delay.h>

#include <drm/drm_managed.h>
#include <drm/intel/i915_component.h>
#include <drm/intel/i915_gsc_proxy_mei_interface.h>

#include "abi/gsc_proxy_commands_abi.h"
#include "regs/xe_gsc_regs.h"
#include "xe_bo.h"
#include "xe_force_wake.h"
#include "xe_gsc.h"
#include "xe_gsc_submit.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pm.h"

/*
 * GSC proxy:
 * The GSC uC needs to communicate with the CSME to perform certain operations.
 * Since the GSC can't perform this communication directly on platforms where it
 * is integrated in the GT, the graphics driver needs to transfer the messages
 * from GSC to CSME and back. The proxy flow must be manually started after the
 * GSC is loaded to signal to GSC that we're ready to handle its messages and
 * allow it to query its init data from CSME; GSC will then trigger an HECI2
 * interrupt if it needs to send messages to CSME again.
 * The proxy flow is as follows:
 * 1 - Xe submits a request to GSC asking for the message to CSME
 * 2 - GSC replies with the proxy header + payload for CSME
 * 3 - Xe sends the reply from GSC as-is to CSME via the mei proxy component
 * 4 - CSME replies with the proxy header + payload for GSC
 * 5 - Xe submits a request to GSC with the reply from CSME
 * 6 - GSC replies either with a new header + payload (same as step 2, so we
 *     restart from there) or with an end message.
 */

/*
 * The component should load quite quickly in most cases, but it could take
 * a bit longer. Use a very large timeout to cover the worst-case scenario.
 */
#define GSC_PROXY_INIT_TIMEOUT_MS 20000

/* shorthand define for code compactness */
#define PROXY_HDR_SIZE (sizeof(struct xe_gsc_proxy_header))

/* the protocol supports up to 32K in each direction */
#define GSC_PROXY_BUFFER_SIZE SZ_32K
#define GSC_PROXY_CHANNEL_SIZE (GSC_PROXY_BUFFER_SIZE * 2)

static struct xe_gt *
gsc_to_gt(struct xe_gsc *gsc)
{
	return container_of(gsc, struct xe_gt, uc.gsc);
}

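/*
 * The GSC FW reports its current state in the HECI1 FWSTS1 register;
 * PROXY_STATE_NORMAL indicates that the proxy initialization flow has
 * completed.
 */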
bool xe_gsc_proxy_init_done(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	u32 fwsts1 = xe_mmio_read32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE));

	return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE, fwsts1) ==
	       HECI1_FWSTS1_PROXY_STATE_NORMAL;
}

static void __gsc_proxy_irq_rmw(struct xe_gsc *gsc, u32 clr, u32 set)
{
	struct xe_gt *gt = gsc_to_gt(gsc);

	/* make sure we never accidentally write the RST bit */
	clr |= HECI_H_CSR_RST;

	xe_mmio_rmw32(&gt->mmio, HECI_H_CSR(MTL_GSC_HECI2_BASE), clr, set);
}

static void gsc_proxy_irq_clear(struct xe_gsc *gsc)
{
	/* The status bit is cleared by writing to it */
	__gsc_proxy_irq_rmw(gsc, 0, HECI_H_CSR_IS);
}

static void gsc_proxy_irq_toggle(struct xe_gsc *gsc, bool enabled)
{
	u32 set = enabled ? HECI_H_CSR_IE : 0;
	u32 clr = enabled ? 0 : HECI_H_CSR_IE;

	__gsc_proxy_irq_rmw(gsc, clr, set);
}

static int proxy_send_to_csme(struct xe_gsc *gsc, u32 size)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct i915_gsc_proxy_component *comp = gsc->proxy.component;
	int ret;

	ret = comp->ops->send(comp->mei_dev, gsc->proxy.to_csme, size);
	if (ret < 0) {
		xe_gt_err(gt, "Failed to send CSME proxy message\n");
		return ret;
	}

	ret = comp->ops->recv(comp->mei_dev, gsc->proxy.from_csme, GSC_PROXY_BUFFER_SIZE);
	if (ret < 0) {
		xe_gt_err(gt, "Failed to receive CSME proxy message\n");
		return ret;
	}

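	/* on success, ret is the number of bytes received back from the CSME */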
	return ret;
}

static int proxy_send_to_gsc(struct xe_gsc *gsc, u32 size)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	u64 addr_in = xe_bo_ggtt_addr(gsc->proxy.bo);
	u64 addr_out = addr_in + GSC_PROXY_BUFFER_SIZE;
	int err;

	/* the message, including the gsc and proxy headers, must fit in the buffer */
	if (size > GSC_PROXY_BUFFER_SIZE) {
		xe_gt_err(gt, "Invalid GSC proxy message size: %u\n", size);
		return -EINVAL;
	}

	err = xe_gsc_pkt_submit_kernel(gsc, addr_in, size,
				       addr_out, GSC_PROXY_BUFFER_SIZE);
	if (err) {
		xe_gt_err(gt, "Failed to submit gsc proxy rq (%pe)\n", ERR_PTR(err));
		return err;
	}

	return 0;
}

static int validate_proxy_header(struct xe_gsc_proxy_header *header,
				 u32 source, u32 dest, u32 max_size)
{
	u32 type = FIELD_GET(GSC_PROXY_TYPE, header->hdr);
	u32 length = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, header->hdr);

	if (header->destination != dest || header->source != source)
		return -ENOEXEC;

	if (length + PROXY_HDR_SIZE > max_size)
		return -E2BIG;

	switch (type) {
	case GSC_PROXY_MSG_TYPE_PROXY_PAYLOAD:
		if (length > 0)
			break;
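		/* a PAYLOAD message with an empty payload is invalid */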
		fallthrough;
	case GSC_PROXY_MSG_TYPE_PROXY_INVALID:
		return -EIO;
	default:
		break;
	}

	return 0;
}

#define proxy_header_wr(xe_, map_, offset_, field_, val_) \
	xe_map_wr_field(xe_, map_, offset_, struct xe_gsc_proxy_header, field_, val_)

#define proxy_header_rd(xe_, map_, offset_, field_) \
	xe_map_rd_field(xe_, map_, offset_, struct xe_gsc_proxy_header, field_)

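/*
 * Write a PROXY_QUERY proxy header (KMD to GSC, no payload) at the given
 * offset and return the offset just past it.
 */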
static u32 emit_proxy_header(struct xe_device *xe, struct iosys_map *map, u32 offset)
{
	xe_map_memset(xe, map, offset, 0, PROXY_HDR_SIZE);

	proxy_header_wr(xe, map, offset, hdr,
			FIELD_PREP(GSC_PROXY_TYPE, GSC_PROXY_MSG_TYPE_PROXY_QUERY) |
			FIELD_PREP(GSC_PROXY_PAYLOAD_LENGTH, 0));

	proxy_header_wr(xe, map, offset, source, GSC_PROXY_ADDRESSING_KMD);
	proxy_header_wr(xe, map, offset, destination, GSC_PROXY_ADDRESSING_GSC);
	proxy_header_wr(xe, map, offset, status, 0);

	return offset + PROXY_HDR_SIZE;
}

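/*
 * Run one full proxy exchange: send the initial query to the GSC and then keep
 * relaying messages between GSC and CSME (steps 1-6 of the flow described at
 * the top of this file) until the GSC sends a PROXY_END message.
 */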
static int proxy_query(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gsc_proxy_header *to_csme_hdr = gsc->proxy.to_csme;
	void *to_csme_payload = gsc->proxy.to_csme + PROXY_HDR_SIZE;
	u32 wr_offset;
	u32 reply_offset;
	u32 size;
	int ret;

	wr_offset = xe_gsc_emit_header(xe, &gsc->proxy.to_gsc, 0,
				       HECI_MEADDRESS_PROXY, 0, PROXY_HDR_SIZE);
	wr_offset = emit_proxy_header(xe, &gsc->proxy.to_gsc, wr_offset);

	size = wr_offset;

	while (1) {
		/*
		 * Poison the GSC response header space to make sure we don't
		 * read a stale reply.
		 */
		xe_gsc_poison_header(xe, &gsc->proxy.from_gsc, 0);

		/* send proxy message to GSC */
		ret = proxy_send_to_gsc(gsc, size);
		if (ret)
			goto proxy_error;

		/* check the reply from GSC */
		ret = xe_gsc_read_out_header(xe, &gsc->proxy.from_gsc, 0,
					     PROXY_HDR_SIZE, &reply_offset);
		if (ret) {
			xe_gt_err(gt, "Invalid gsc header in proxy reply (%pe)\n",
				  ERR_PTR(ret));
			goto proxy_error;
		}

		/* copy the proxy header reply from GSC */
		xe_map_memcpy_from(xe, to_csme_hdr, &gsc->proxy.from_gsc,
				   reply_offset, PROXY_HDR_SIZE);

		/* stop if this was the last message */
		if (FIELD_GET(GSC_PROXY_TYPE, to_csme_hdr->hdr) == GSC_PROXY_MSG_TYPE_PROXY_END)
			break;

		/* make sure the GSC-to-CSME proxy header is sane */
		ret = validate_proxy_header(to_csme_hdr,
					    GSC_PROXY_ADDRESSING_GSC,
					    GSC_PROXY_ADDRESSING_CSME,
					    GSC_PROXY_BUFFER_SIZE - reply_offset);
		if (ret) {
			xe_gt_err(gt, "invalid GSC to CSME proxy header! (%pe)\n",
				  ERR_PTR(ret));
			goto proxy_error;
		}

		/* copy the rest of the message */
		size = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, to_csme_hdr->hdr);
		xe_map_memcpy_from(xe, to_csme_payload, &gsc->proxy.from_gsc,
				   reply_offset + PROXY_HDR_SIZE, size);

		/* send the GSC message to the CSME */
		ret = proxy_send_to_csme(gsc, size + PROXY_HDR_SIZE);
		if (ret < 0)
			goto proxy_error;

		/* reply size from CSME, including the proxy header */
		size = ret;
		if (size < PROXY_HDR_SIZE) {
			xe_gt_err(gt, "CSME to GSC proxy msg too small: 0x%x\n", size);
			ret = -EPROTO;
			goto proxy_error;
		}

		/* make sure the CSME-to-GSC proxy header is sane */
		ret = validate_proxy_header(gsc->proxy.from_csme,
					    GSC_PROXY_ADDRESSING_CSME,
					    GSC_PROXY_ADDRESSING_GSC,
					    GSC_PROXY_BUFFER_SIZE - reply_offset);
		if (ret) {
			xe_gt_err(gt, "invalid CSME to GSC proxy header! %d\n", ret);
			goto proxy_error;
		}

		/* Emit a new header for sending the reply to the GSC */
		wr_offset = xe_gsc_emit_header(xe, &gsc->proxy.to_gsc, 0,
					       HECI_MEADDRESS_PROXY, 0, size);

		/* copy the CSME reply and update the total msg size to include the GSC header */
		xe_map_memcpy_to(xe, &gsc->proxy.to_gsc, wr_offset, gsc->proxy.from_csme, size);

		size += wr_offset;
	}

proxy_error:
	return ret < 0 ? ret : 0;
}

int xe_gsc_proxy_request_handler(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	int slept;
	int err;

	if (!gsc->proxy.component_added)
		return -ENODEV;

	/* when GSC is loaded, we can queue this before the component is bound */
	for (slept = 0; slept < GSC_PROXY_INIT_TIMEOUT_MS; slept += 100) {
		if (gsc->proxy.component)
			break;

		msleep(100);
	}

	mutex_lock(&gsc->proxy.mutex);
	if (!gsc->proxy.component) {
		xe_gt_err(gt, "GSC proxy component not bound!\n");
		err = -EIO;
	} else {
		/*
		 * clear the pending interrupt and allow new proxy requests to
		 * be generated while we handle the current one
		 */
		gsc_proxy_irq_clear(gsc);
		err = proxy_query(gsc);
	}
	mutex_unlock(&gsc->proxy.mutex);
	return err;
}

void xe_gsc_proxy_irq_handler(struct xe_gsc *gsc, u32 iir)
{
	struct xe_gt *gt = gsc_to_gt(gsc);

	if (unlikely(!iir))
		return;

	if (!gsc->proxy.component) {
		xe_gt_err(gt, "GSC proxy irq received without the component being bound!\n");
		return;
	}

	spin_lock(&gsc->lock);
	gsc->work_actions |= GSC_ACTION_SW_PROXY;
	spin_unlock(&gsc->lock);

	queue_work(gsc->wq, &gsc->work);
}

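/*
 * Bind/unbind callbacks for the GSC proxy component; at bind time the mei
 * driver provides the component data, including the send/recv ops used to
 * reach the CSME.
 */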
static int xe_gsc_proxy_component_bind(struct device *xe_kdev,
				       struct device *mei_kdev, void *data)
{
	struct xe_device *xe = kdev_to_xe_device(xe_kdev);
	struct xe_gt *gt = xe->tiles[0].media_gt;
	struct xe_gsc *gsc = &gt->uc.gsc;

	mutex_lock(&gsc->proxy.mutex);
	gsc->proxy.component = data;
	gsc->proxy.component->mei_dev = mei_kdev;
	mutex_unlock(&gsc->proxy.mutex);

	return 0;
}

static void xe_gsc_proxy_component_unbind(struct device *xe_kdev,
					  struct device *mei_kdev, void *data)
{
	struct xe_device *xe = kdev_to_xe_device(xe_kdev);
	struct xe_gt *gt = xe->tiles[0].media_gt;
	struct xe_gsc *gsc = &gt->uc.gsc;

	xe_gsc_wait_for_worker_completion(gsc);

	mutex_lock(&gsc->proxy.mutex);
	gsc->proxy.component = NULL;
	mutex_unlock(&gsc->proxy.mutex);
}

static const struct component_ops xe_gsc_proxy_component_ops = {
	.bind   = xe_gsc_proxy_component_bind,
	.unbind = xe_gsc_proxy_component_unbind,
};

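/*
 * Each side of the proxy channel gets a GSC_PROXY_CHANNEL_SIZE allocation
 * split into an "in" and an "out" buffer: the GSC-facing buffers live in a
 * GGTT-mapped BO, while the CSME-facing buffers are a regular kernel
 * allocation used with the mei component.
 */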
static int proxy_channel_alloc(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;
	void *csme;

	csme = drmm_kzalloc(&xe->drm, GSC_PROXY_CHANNEL_SIZE, GFP_KERNEL);
	if (!csme)
		return -ENOMEM;

	bo = xe_managed_bo_create_pin_map(xe, tile, GSC_PROXY_CHANNEL_SIZE,
					  XE_BO_FLAG_SYSTEM |
					  XE_BO_FLAG_GGTT);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	gsc->proxy.bo = bo;
	gsc->proxy.to_gsc = IOSYS_MAP_INIT_OFFSET(&bo->vmap, 0);
	gsc->proxy.from_gsc = IOSYS_MAP_INIT_OFFSET(&bo->vmap, GSC_PROXY_BUFFER_SIZE);
	gsc->proxy.to_csme = csme;
	gsc->proxy.from_csme = csme + GSC_PROXY_BUFFER_SIZE;

	return 0;
}

/**
 * xe_gsc_proxy_init() - init objects and MEI component required by GSC proxy
 * @gsc: the GSC uC
 *
 * Return: 0 if the initialization was successful, a negative errno otherwise.
 */
int xe_gsc_proxy_init(struct xe_gsc *gsc)
{
	int err;
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);

	mutex_init(&gsc->proxy.mutex);

	if (!IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY)) {
		xe_gt_info(gt, "can't init GSC proxy due to missing mei component\n");
		return -ENODEV;
	}

	/* no multi-tile devices with this feature yet */
	if (tile->id > 0) {
		xe_gt_err(gt, "unexpected GSC proxy init on tile %u\n", tile->id);
		return -EINVAL;
	}

	err = proxy_channel_alloc(gsc);
	if (err)
		return err;

	err = component_add_typed(xe->drm.dev, &xe_gsc_proxy_component_ops,
				  I915_COMPONENT_GSC_PROXY);
	if (err < 0) {
		xe_gt_err(gt, "Failed to add GSC_PROXY component (%pe)\n", ERR_PTR(err));
		return err;
	}

	gsc->proxy.component_added = true;

	/* the component must be removed before unload, so can't use drmm for cleanup */

	return 0;
}

/**
 * xe_gsc_proxy_remove() - remove the GSC proxy MEI component
 * @gsc: the GSC uC
 */
void xe_gsc_proxy_remove(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int fw_ref = 0;

	if (!gsc->proxy.component_added)
		return;

	/* disable HECI2 IRQs */
	xe_pm_runtime_get(xe);
	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
	if (!fw_ref)
		xe_gt_err(gt, "failed to get forcewake to disable GSC interrupts\n");

	/* try to disable the irq even if forcewake failed */
	gsc_proxy_irq_toggle(gsc, false);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	xe_pm_runtime_put(xe);

	xe_gsc_wait_for_worker_completion(gsc);

	component_del(xe->drm.dev, &xe_gsc_proxy_component_ops);
	gsc->proxy.component_added = false;
}

/**
 * xe_gsc_proxy_start() - start the proxy by submitting the first request
 * @gsc: the GSC uC
 *
 * Return: 0 if the proxy is now enabled, a negative errno otherwise.
 */
int xe_gsc_proxy_start(struct xe_gsc *gsc)
{
	int err;

	/* enable the proxy interrupt in the GSC shim layer */
	gsc_proxy_irq_toggle(gsc, true);

	/*
	 * The handling of the first proxy request must be manually triggered to
	 * notify the GSC that we're ready to support the proxy flow.
	 */
	err = xe_gsc_proxy_request_handler(gsc);
	if (err)
		return err;

	if (!xe_gsc_proxy_init_done(gsc)) {
		xe_gt_err(gsc_to_gt(gsc), "GSC FW reports proxy init not completed\n");
		return -EIO;
	}

	return 0;
}