/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Vinit Azad <vinit.azad@intel.com>
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Dave Gordon <david.s.gordon@intel.com>
 *    Alex Dai <yu.dai@intel.com>
 */
#include <linux/firmware.h>
#include "i915_drv.h"
#include "intel_guc.h"

/**
 * DOC: GuC-specific firmware loader
 *
 * intel_guc:
 * Top level structure of the GuC. It handles firmware loading and manages the
 * client pool and doorbells. intel_guc owns an i915_guc_client, which replaces
 * the legacy ExecList submission.
 *
 * Firmware versioning:
 * The firmware build process generates a version header file with the major
 * and minor version defined. The versions are built into the CSS header of the
 * firmware. The i915 kernel driver sets the minimum firmware version required
 * per platform. The firmware installation package installs (via symbolic link)
 * the proper version of the firmware.
 *
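 * For example, the generic name requested below (i915/skl_guc_ver4.bin, see
 * MODULE_FIRMWARE) would typically be a symbolic link to the specific
 * firmware release shipped by the installation package.
 *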
 * GuC address space:
 * The GuC does not allow any gfx GGTT address that falls into the range
 * [0, WOPCM_TOP), which is reserved for the Boot ROM, SRAM and WOPCM.
 * Currently this top address is 512K. In order to exclude the 0-512K address
 * space from the GGTT, all gfx objects used by the GuC are pinned with
 * PIN_OFFSET_BIAS along with the size of the WOPCM.
 *
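 * As a rough sketch (the GUC_WOPCM_TOP macro name is an assumption here; the
 * pin helper is the one used elsewhere in this file), a GuC-visible object
 * would be pinned along these lines:
 *
 *   i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
 *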
 * Firmware log:
 * The firmware log is enabled by setting i915.guc_log_level to a non-negative
 * level. Log data can be dumped by reading the debugfs file i915_guc_log_dump.
 * Reading from i915_guc_load_status prints out the firmware loading status and
 * the scratch register values.
 *
 */
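
/*
 * Example of exercising the log facilities described above (a sketch only;
 * the debugfs mount point and card index are assumptions and may differ):
 *
 *   modprobe i915 enable_guc_submission=1 guc_log_level=1
 *   cat /sys/kernel/debug/dri/0/i915_guc_load_status
 *   cat /sys/kernel/debug/dri/0/i915_guc_log_dump
 */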

#define I915_SKL_GUC_UCODE "i915/skl_guc_ver4.bin"
MODULE_FIRMWARE(I915_SKL_GUC_UCODE);

/* User-friendly representation of an enum */
const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
{
	switch (status) {
	case GUC_FIRMWARE_FAIL:
		return "FAIL";
	case GUC_FIRMWARE_NONE:
		return "NONE";
	case GUC_FIRMWARE_PENDING:
		return "PENDING";
	case GUC_FIRMWARE_SUCCESS:
		return "SUCCESS";
	default:
		return "UNKNOWN!";
	}
}

static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i, irqs;

	/* tell all command streamers NOT to forward interrupts and vblank to GuC */
	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MODE_GEN7(ring), irqs);

	/* route all GT interrupts to the host */
	I915_WRITE(GUC_BCS_RCS_IER, 0);
	I915_WRITE(GUC_VCS2_VCS1_IER, 0);
	I915_WRITE(GUC_WD_VECS_IER, 0);
}

static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i, irqs;

	/* tell all command streamers to forward interrupts and vblank to GuC */
	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
	irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MODE_GEN7(ring), irqs);

	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
	irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
	       GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
	/* These three registers have the same bit definitions */
	I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
	I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
	I915_WRITE(GUC_WD_VECS_IER, ~irqs);
}

static u32 get_gttype(struct drm_i915_private *dev_priv)
{
	/* XXX: GT type based on PCI device ID? field seems unused by fw */
	return 0;
}

static u32 get_core_family(struct drm_i915_private *dev_priv)
{
	switch (INTEL_INFO(dev_priv)->gen) {
	case 9:
		return GFXCORE_FAMILY_GEN9;

	default:
		DRM_ERROR("GUC: unsupported core family\n");
		return GFXCORE_FAMILY_UNKNOWN;
	}
}

static void set_guc_init_params(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	u32 params[GUC_CTL_MAX_DWORDS];
	int i;

	memset(&params, 0, sizeof(params));

	params[GUC_CTL_DEVICE_INFO] |=
		(get_gttype(dev_priv) << GUC_CTL_GTTYPE_SHIFT) |
		(get_core_family(dev_priv) << GUC_CTL_COREFAMILY_SHIFT);

	/*
	 * The GuC ARAT increment is 10 ns. The GuC default scheduler quantum
	 * is one second, so the quantum expressed in ARAT units is:
	 * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
	 *                                                = 100000000
	 */
	params[GUC_CTL_ARAT_HIGH] = 0;
	params[GUC_CTL_ARAT_LOW] = 100000000;

	params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;

	params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
			GUC_CTL_VCS2_ENABLED;

	if (i915.guc_log_level >= 0) {
		params[GUC_CTL_LOG_PARAMS] = guc->log_flags;
		params[GUC_CTL_DEBUG] =
			i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
	}

	if (guc->ads_obj) {
		u32 ads = (u32)i915_gem_obj_ggtt_offset(guc->ads_obj)
				>> PAGE_SHIFT;
		params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
		params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
	}

	/* If GuC submission is enabled, set up additional parameters here */
	if (i915.enable_guc_submission) {
		u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj);
		u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;

		pgs >>= PAGE_SHIFT;
		params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
			(ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);

		params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;

		/* Unmask this bit to enable the GuC's internal scheduler */
		params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
	}

	I915_WRITE(SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
}

/*
 * Read the GuC status register (GUC_STATUS) and store it in the
 * specified location; then return a boolean indicating whether
 * the value matches either of two values representing completion
 * of the GuC boot process.
 *
 * This is used for polling the GuC status in a wait_for()
 * loop below.
 */
static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
				      u32 *status)
{
	u32 val = I915_READ(GUC_STATUS);
	u32 uk_val = val & GS_UKERNEL_MASK;
	*status = val;
	return (uk_val == GS_UKERNEL_READY ||
		((val & GS_MIA_CORE_STATE) && uk_val == GS_UKERNEL_LAPIC_DONE));
}

/*
 * Transfer the firmware image to RAM for execution by the microcontroller.
 *
 * Architecturally, the DMA engine is bidirectional, and can potentially even
 * transfer between GTT locations. This functionality is left out of the API
 * for now as there is no need for it.
 *
 * Note that GuC needs the CSS header plus uKernel code to be copied by the
 * DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
 */
static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
{
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj;
	unsigned long offset;
	struct sg_table *sg = fw_obj->pages;
	u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
	int i, ret = 0;

	/* where RSA signature starts */
	offset = guc_fw->rsa_offset;

	/* Copy RSA signature from the fw image to HW for verification */
	sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa), offset);
	for (i = 0; i < UOS_RSA_SCRATCH_MAX_COUNT; i++)
		I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);

	/* The header plus uCode will be copied to WOPCM via DMA, excluding any
	 * other components */
	I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);

	/* Set the source address for the new blob */
	offset = i915_gem_obj_ggtt_offset(fw_obj) + guc_fw->header_offset;
	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
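	/* presumably only the low 16 bits of the upper dword are meaningful,
	 * hence the mask below */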
	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);

	/*
	 * Set the DMA destination. Current uCode expects the code to be
	 * loaded at 8k; locations below this are used for the stack.
	 */
	I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	/* Finally start the DMA */
	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));

	/*
	 * Wait for the DMA to complete & the GuC to start up.
	 * NB: Docs recommend not using the interrupt for completion.
	 * Measurements indicate this should take no more than 20ms, so a
	 * timeout here indicates that the GuC has failed and is unusable.
	 * (Higher levels of the driver will attempt to fall back to
	 * execlist mode if this happens.)
	 */
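	/* (the second argument to wait_for() is the timeout in milliseconds) */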
	ret = wait_for(guc_ucode_response(dev_priv, &status), 100);

	DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
			I915_READ(DMA_CTRL), status);

	if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
		DRM_ERROR("GuC firmware signature verification failed\n");
		ret = -ENOEXEC;
	}

	DRM_DEBUG_DRIVER("returning %d\n", ret);

	return ret;
}

/*
 * Load the GuC firmware blob into the MinuteIA.
 */
static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
{
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	struct drm_device *dev = dev_priv->dev;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
	if (ret) {
		DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
		return ret;
	}

	ret = i915_gem_obj_ggtt_pin(guc_fw->guc_fw_obj, 0, 0);
	if (ret) {
		DRM_DEBUG_DRIVER("pin failed %d\n", ret);
		return ret;
	}

	/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* init WOPCM */
	I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE);
	I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);

	/* Enable MIA caching. GuC clock gating is disabled. */
	I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);

	/* WaDisableMinuteIaClockGating:skl,bxt */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
		I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
					      ~GUC_ENABLE_MIA_CLOCK_GATING));
	}

	/* WaC6DisallowByGfxPause */
	I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);

	if (IS_BROXTON(dev))
		I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
	else
		I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);

	if (IS_GEN9(dev)) {
		/* DOP Clock Gating Enable for GuC clocks */
		I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
					    I915_READ(GEN7_MISCCPCTL)));

		/* allows for 5us before GT can go to RC6 */
		I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
	}

	set_guc_init_params(dev_priv);

	ret = guc_ucode_xfer_dma(dev_priv);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	/*
	 * We keep the object pages for reuse during resume, but we can unpin
	 * the object now that the DMA has completed, so that it doesn't
	 * continue to take up GGTT space.
	 */
	i915_gem_object_ggtt_unpin(guc_fw->guc_fw_obj);

	return ret;
}

/**
 * intel_guc_ucode_load() - load GuC uCode into the device
 * @dev:	drm device
 *
 * Called from gem_init_hw() during driver loading and also after a GPU reset.
 *
 * The firmware image should have already been fetched into memory by the
 * earlier call to intel_guc_ucode_init(), so here we need only check that
 * the fetch succeeded, and then transfer the image to the h/w.
 *
 * Return:	non-zero code on error
 */
int intel_guc_ucode_load(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	int err = 0;

	if (!i915.enable_guc_submission)
		return 0;

	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));

	direct_interrupts_to_host(dev_priv);

	if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE)
		return 0;

	if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_SUCCESS &&
	    guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL)
		return -ENOEXEC;

	guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;

	DRM_DEBUG_DRIVER("GuC fw fetch status %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));

	switch (guc_fw->guc_fw_fetch_status) {
	case GUC_FIRMWARE_FAIL:
		/* something went wrong :( */
		err = -EIO;
		goto fail;

	case GUC_FIRMWARE_NONE:
	case GUC_FIRMWARE_PENDING:
	default:
		/* "can't happen" */
		WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n",
			guc_fw->guc_fw_path,
			intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
			guc_fw->guc_fw_fetch_status);
		err = -ENXIO;
		goto fail;

	case GUC_FIRMWARE_SUCCESS:
		break;
	}

	err = i915_guc_submission_init(dev);
	if (err)
		goto fail;

	err = guc_ucode_xfer(dev_priv);
	if (err)
		goto fail;

	guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;

	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));

	if (i915.enable_guc_submission) {
		/* The execbuf_client will be recreated. Release it first. */
		i915_guc_submission_disable(dev);

		err = i915_guc_submission_enable(dev);
		if (err)
			goto fail;
		direct_interrupts_to_guc(dev_priv);
	}

	return 0;

fail:
	if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
		guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;

	direct_interrupts_to_host(dev_priv);
	i915_guc_submission_disable(dev);
	i915_guc_submission_fini(dev);

	return err;
}

static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
{
	struct drm_i915_gem_object *obj;
	const struct firmware *fw;
	struct guc_css_header *css;
	size_t size;
	int err;

	DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));

	err = request_firmware(&fw, guc_fw->guc_fw_path, &dev->pdev->dev);
	if (err)
		goto fail;
	if (!fw)
		goto fail;

	DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n",
		guc_fw->guc_fw_path, fw);

	/* Check the size of the blob before examining buffer contents */
	if (fw->size < sizeof(struct guc_css_header)) {
		DRM_ERROR("Firmware header is missing\n");
		goto fail;
	}

	css = (struct guc_css_header *)fw->data;

	/* Firmware bits always start from header */
	guc_fw->header_offset = 0;
	guc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
		css->key_size_dw - css->exponent_size_dw) * sizeof(u32);

	if (guc_fw->header_size != sizeof(struct guc_css_header)) {
		DRM_ERROR("CSS header definition mismatch\n");
		goto fail;
	}

	/* then, uCode */
	guc_fw->ucode_offset = guc_fw->header_offset + guc_fw->header_size;
	guc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);

	/* now RSA */
	if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
		DRM_ERROR("RSA key size is bad\n");
		goto fail;
	}
	guc_fw->rsa_offset = guc_fw->ucode_offset + guc_fw->ucode_size;
	guc_fw->rsa_size = css->key_size_dw * sizeof(u32);

	/* The blob must contain at least the header, uCode and RSA signature;
	 * check against the combined size of all three. */
	size = guc_fw->header_size + guc_fw->ucode_size + guc_fw->rsa_size;
	if (fw->size < size) {
		DRM_ERROR("Missing firmware components\n");
		goto fail;
	}

	/* Only the header and uCode will be loaded into WOPCM; take the
	 * combined size of the two. */
	size = guc_fw->header_size + guc_fw->ucode_size;

	/* The top 32K of WOPCM is reserved (8K stack + 24K RC6 context). */
	if (size > GUC_WOPCM_SIZE_VALUE - 0x8000) {
		DRM_ERROR("Firmware is too large to fit in WOPCM\n");
		goto fail;
	}

	/*
	 * The GuC firmware image has the version number embedded at a
	 * well-known offset within the firmware blob; note that the major and
	 * minor version numbers are TWO bytes each (i.e. u16), although all
	 * pointers and offsets are defined in terms of bytes (u8).
	 */
	guc_fw->guc_fw_major_found = css->guc_sw_version >> 16;
	guc_fw->guc_fw_minor_found = css->guc_sw_version & 0xFFFF;

	if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
	    guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
		DRM_ERROR("GuC firmware version %d.%d, required %d.%d\n",
			guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
			guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
		err = -ENOEXEC;
		goto fail;
	}

	DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
			guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
			guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);

	mutex_lock(&dev->struct_mutex);
	obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR_OR_NULL(obj)) {
		err = obj ? PTR_ERR(obj) : -ENOMEM;
		goto fail;
	}

	guc_fw->guc_fw_obj = obj;
	guc_fw->guc_fw_size = fw->size;

	DRM_DEBUG_DRIVER("GuC fw fetch status SUCCESS, obj %p\n",
			guc_fw->guc_fw_obj);

	release_firmware(fw);
	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_SUCCESS;
	return;

fail:
	DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n",
		err, fw, guc_fw->guc_fw_obj);
	DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
		  guc_fw->guc_fw_path, err);

	mutex_lock(&dev->struct_mutex);
	obj = guc_fw->guc_fw_obj;
	if (obj)
		drm_gem_object_unreference(&obj->base);
	guc_fw->guc_fw_obj = NULL;
	mutex_unlock(&dev->struct_mutex);

	release_firmware(fw);		/* OK even if fw is NULL */
	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
}

/**
 * intel_guc_ucode_init() - define parameters and fetch firmware
 * @dev:	drm device
 *
 * Called early during driver load, but after GEM is initialised.
 *
 * The firmware will be transferred to the GuC's memory later,
 * when intel_guc_ucode_load() is called.
 */
void intel_guc_ucode_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
	const char *fw_path;

	if (!HAS_GUC_SCHED(dev))
		i915.enable_guc_submission = false;

	if (!HAS_GUC_UCODE(dev)) {
		fw_path = NULL;
	} else if (IS_SKYLAKE(dev)) {
		fw_path = I915_SKL_GUC_UCODE;
		guc_fw->guc_fw_major_wanted = 4;
		guc_fw->guc_fw_minor_wanted = 3;
	} else {
		i915.enable_guc_submission = false;
		fw_path = "";	/* unknown device */
	}

	if (!i915.enable_guc_submission)
		return;

	guc_fw->guc_dev = dev;
	guc_fw->guc_fw_path = fw_path;
	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
	guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;

	if (fw_path == NULL)
		return;

	if (*fw_path == '\0') {
		DRM_ERROR("No GuC firmware known for this platform\n");
		guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
		return;
	}

	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
	DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
	guc_fw_fetch(dev, guc_fw);
	/* status must now be FAIL or SUCCESS */
}

/**
 * intel_guc_ucode_fini() - clean up all allocated resources
 * @dev:	drm device
 */
void intel_guc_ucode_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;

	mutex_lock(&dev->struct_mutex);
	direct_interrupts_to_host(dev_priv);
	i915_guc_submission_disable(dev);
	i915_guc_submission_fini(dev);

	if (guc_fw->guc_fw_obj)
		drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
	guc_fw->guc_fw_obj = NULL;
	mutex_unlock(&dev->struct_mutex);

	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
}