v4.6
  1/*
  2 * Copyright © 2014 Intel Corporation
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice (including the next
 12 * paragraph) shall be included in all copies or substantial portions of the
 13 * Software.
 14 *
 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 21 * IN THE SOFTWARE.
 22 *
 23 * Authors:
 24 *    Vinit Azad <vinit.azad@intel.com>
 25 *    Ben Widawsky <ben@bwidawsk.net>
 26 *    Dave Gordon <david.s.gordon@intel.com>
 27 *    Alex Dai <yu.dai@intel.com>
 28 */
 29#include <linux/firmware.h>
 30#include "i915_drv.h"
 31#include "intel_guc.h"
 32
 33/**
 34 * DOC: GuC-specific firmware loader
 35 *
 36 * intel_guc:
 37 * Top-level structure of the GuC. It handles firmware loading and manages the
 38 * client pool and doorbells. intel_guc owns an i915_guc_client to replace the
 39 * legacy ExecList submission.
 40 *
 41 * Firmware versioning:
 42 * The firmware build process generates a version header file with the major
 43 * and minor versions defined. The versions are built into the CSS header of
 44 * the firmware. The i915 kernel driver sets the minimum firmware version
 45 * required per platform. The firmware installation package will install
 46 * (symbolically link) the proper version of the firmware.
 47 *
 48 * GuC address space:
 49 * The GuC does not allow any gfx GGTT address in the range [0, WOPCM_TOP),
 50 * which is reserved for the Boot ROM, SRAM and WOPCM. Currently this top
 51 * address is 512K. To exclude that range from the GGTT, all gfx objects used
 52 * by the GuC are pinned with a PIN_OFFSET_BIAS of the WOPCM size (sketch below).
 53 *
 54 * Firmware log:
 55 * The firmware log is enabled by setting i915.guc_log_level to a non-negative
 56 * level. Log data is printed out by reading the debugfs file i915_guc_log_dump.
 57 * Reading from i915_guc_load_status will print out the firmware loading status
 58 * and scratch register values.
 59 *
 60 */
 61
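/*
 * Illustrative sketch (not taken verbatim from this file) of the pinning rule
 * described under "GuC address space" above: objects shared with the GuC are
 * pinned with an offset bias so that they never land in [0, WOPCM_TOP). The
 * submission code does roughly the following, assuming the GUC_WOPCM_TOP
 * define used elsewhere in the driver:
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
 *				    PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
 *
 * i.e. the bias keeps every GuC-visible GGTT address at or above WOPCM_TOP.
 */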
 62#define I915_SKL_GUC_UCODE "i915/skl_guc_ver4.bin"
 63MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
 64
 65/* User-friendly representation of an enum */
 66const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
 67{
 68	switch (status) {
 69	case GUC_FIRMWARE_FAIL:
 70		return "FAIL";
 71	case GUC_FIRMWARE_NONE:
 72		return "NONE";
 73	case GUC_FIRMWARE_PENDING:
 74		return "PENDING";
 75	case GUC_FIRMWARE_SUCCESS:
 76		return "SUCCESS";
 77	default:
 78		return "UNKNOWN!";
 79	}
 80};
 81
 82static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
 83{
 84	struct intel_engine_cs *ring;
 85	int i, irqs;
 86
 87	/* tell all command streamers NOT to forward interrupts and vblank to GuC */
 88	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
 89	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
 90	for_each_ring(ring, dev_priv, i)
 91		I915_WRITE(RING_MODE_GEN7(ring), irqs);
 92
 93	/* route all GT interrupts to the host */
 94	I915_WRITE(GUC_BCS_RCS_IER, 0);
 95	I915_WRITE(GUC_VCS2_VCS1_IER, 0);
 96	I915_WRITE(GUC_WD_VECS_IER, 0);
 97}
 98
 99static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
100{
101	struct intel_engine_cs *ring;
102	int i, irqs;
103
104	/* tell all command streamers to forward interrupts and vblank to GuC */
105	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
106	irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
107	for_each_ring(ring, dev_priv, i)
108		I915_WRITE(RING_MODE_GEN7(ring), irqs);
109
110	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
111	irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
112	       GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
113	/* These three registers have the same bit definitions */
114	I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
115	I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
116	I915_WRITE(GUC_WD_VECS_IER, ~irqs);
117}
118
119static u32 get_gttype(struct drm_i915_private *dev_priv)
120{
121	/* XXX: GT type based on PCI device ID? field seems unused by fw */
122	return 0;
123}
124
125static u32 get_core_family(struct drm_i915_private *dev_priv)
126{
127	switch (INTEL_INFO(dev_priv)->gen) {
128	case 9:
129		return GFXCORE_FAMILY_GEN9;
130
131	default:
132		DRM_ERROR("GUC: unsupported core family\n");
133		return GFXCORE_FAMILY_UNKNOWN;
134	}
135}
136
137static void set_guc_init_params(struct drm_i915_private *dev_priv)
138{
139	struct intel_guc *guc = &dev_priv->guc;
140	u32 params[GUC_CTL_MAX_DWORDS];
141	int i;
142
143	memset(&params, 0, sizeof(params));
144
145	params[GUC_CTL_DEVICE_INFO] |=
146		(get_gttype(dev_priv) << GUC_CTL_GTTYPE_SHIFT) |
147		(get_core_family(dev_priv) << GUC_CTL_COREFAMILY_SHIFT);
148
149	/*
150	 * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
151	 * second. This ARAT value is calculated by:
152	 * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
153	 */
154	params[GUC_CTL_ARAT_HIGH] = 0;
155	params[GUC_CTL_ARAT_LOW] = 100000000;
156
157	params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
158
159	params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
160			GUC_CTL_VCS2_ENABLED;
161
162	if (i915.guc_log_level >= 0) {
163		params[GUC_CTL_LOG_PARAMS] = guc->log_flags;
164		params[GUC_CTL_DEBUG] =
165			i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
166	}
167
168	if (guc->ads_obj) {
169		u32 ads = (u32)i915_gem_obj_ggtt_offset(guc->ads_obj)
170				>> PAGE_SHIFT;
171		params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
172		params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
173	}
174
175	/* If GuC submission is enabled, set up additional parameters here */
176	if (i915.enable_guc_submission) {
177		u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj);
178		u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;
179
180		pgs >>= PAGE_SHIFT;
181		params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
182			(ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
183
184		params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
185
186		/* Unmask this bit to enable the GuC's internal scheduler */
187		params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
188	}
189
190	I915_WRITE(SOFT_SCRATCH(0), 0);
191
192	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
193		I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
194}
195
196/*
197 * Read the GuC status register (GUC_STATUS) and store it in the
198 * specified location; then return a boolean indicating whether
199 * the value matches either of two values representing completion
200 * of the GuC boot process.
201 *
202 * This is used for polling the GuC status in a wait_for()
203 * loop below.
204 */
205static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
206				      u32 *status)
207{
208	u32 val = I915_READ(GUC_STATUS);
209	u32 uk_val = val & GS_UKERNEL_MASK;
210	*status = val;
211	return (uk_val == GS_UKERNEL_READY ||
212		((val & GS_MIA_CORE_STATE) && uk_val == GS_UKERNEL_LAPIC_DONE));
213}
214
215/*
216 * Transfer the firmware image to RAM for execution by the microcontroller.
217 *
218 * Architecturally, the DMA engine is bidirectional, and can potentially even
219 * transfer between GTT locations. This functionality is left out of the API
220 * for now as there is no need for it.
221 *
222 * Note that GuC needs the CSS header plus uKernel code to be copied by the
223 * DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
224 */
225static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
226{
227	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
228	struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj;
229	unsigned long offset;
230	struct sg_table *sg = fw_obj->pages;
231	u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
232	int i, ret = 0;
233
234	/* where RSA signature starts */
235	offset = guc_fw->rsa_offset;
236
237	/* Copy RSA signature from the fw image to HW for verification */
238	sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa), offset);
239	for (i = 0; i < UOS_RSA_SCRATCH_MAX_COUNT; i++)
240		I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
241
242	/* The header plus uCode will be copied to WOPCM via DMA, excluding any
243	 * other components */
244	I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
245
246	/* Set the source address for the new blob */
247	offset = i915_gem_obj_ggtt_offset(fw_obj) + guc_fw->header_offset;
248	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
249	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
250
251	/*
252	 * Set the DMA destination. Current uCode expects the code to be
253	 * loaded at 8k; locations below this are used for the stack.
254	 */
255	I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
256	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
257
258	/* Finally start the DMA */
259	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
260
261	/*
262	 * Wait for the DMA to complete & the GuC to start up.
263	 * NB: Docs recommend not using the interrupt for completion.
264	 * Measurements indicate this should take no more than 20ms, so a
265	 * timeout here indicates that the GuC has failed and is unusable.
266	 * (Higher levels of the driver will attempt to fall back to
267	 * execlist mode if this happens.)
268	 */
269	ret = wait_for(guc_ucode_response(dev_priv, &status), 100);
270
271	DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
272			I915_READ(DMA_CTRL), status);
273
274	if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
275		DRM_ERROR("GuC firmware signature verification failed\n");
276		ret = -ENOEXEC;
277	}
278
279	DRM_DEBUG_DRIVER("returning %d\n", ret);
280
281	return ret;
282}
283
284/*
285 * Load the GuC firmware blob into the MinuteIA.
286 */
287static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
288{
289	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
290	struct drm_device *dev = dev_priv->dev;
291	int ret;
292
293	ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
294	if (ret) {
295		DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
296		return ret;
297	}
298
299	ret = i915_gem_obj_ggtt_pin(guc_fw->guc_fw_obj, 0, 0);
300	if (ret) {
301		DRM_DEBUG_DRIVER("pin failed %d\n", ret);
302		return ret;
303	}
304
305	/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
306	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
307
308	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
309
310	/* init WOPCM */
311	I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE);
312	I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);
313
314	/* Enable MIA caching. GuC clock gating is disabled. */
315	I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
316
317	/* WaDisableMinuteIaClockGating:skl,bxt */
318	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
319	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
320		I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
321					      ~GUC_ENABLE_MIA_CLOCK_GATING));
322	}
323
324	/* WaC6DisallowByGfxPause */
325	I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
326
327	if (IS_BROXTON(dev))
328		I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
329	else
330		I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
331
332	if (IS_GEN9(dev)) {
333		/* DOP Clock Gating Enable for GuC clocks */
334		I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
335					    I915_READ(GEN7_MISCCPCTL)));
336
337		/* allows for 5us (in 10ns units) before GT can go to RC6 */
338		I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
339	}
340
341	set_guc_init_params(dev_priv);
342
343	ret = guc_ucode_xfer_dma(dev_priv);
344
345	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
346
347	/*
348	 * We keep the object pages for reuse during resume. But we can unpin it
349	 * now that DMA has completed, so it doesn't continue to take up space.
350	 */
351	i915_gem_object_ggtt_unpin(guc_fw->guc_fw_obj);
352
353	return ret;
354}
355
356/**
357 * intel_guc_ucode_load() - load GuC uCode into the device
358 * @dev:	drm device
359 *
360 * Called from gem_init_hw() during driver loading and also after a GPU reset.
361 *
362 * The firmware image should have already been fetched into memory by the
363 * earlier call to intel_guc_ucode_init(), so here we need only check that
364 * it succeeded, and then transfer the image to the h/w.
365 *
366 * Return:	non-zero code on error
367 */
368int intel_guc_ucode_load(struct drm_device *dev)
369{
370	struct drm_i915_private *dev_priv = dev->dev_private;
371	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
372	int err = 0;
373
374	if (!i915.enable_guc_submission)
375		return 0;
376
377	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
378		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
379		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
380
381	direct_interrupts_to_host(dev_priv);
382
383	if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE)
384		return 0;
385
386	if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_SUCCESS &&
387	    guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL)
388		return -ENOEXEC;
389
390	guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
391
392	DRM_DEBUG_DRIVER("GuC fw fetch status %s\n",
393		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
394
395	switch (guc_fw->guc_fw_fetch_status) {
396	case GUC_FIRMWARE_FAIL:
397		/* something went wrong :( */
398		err = -EIO;
399		goto fail;
400
401	case GUC_FIRMWARE_NONE:
402	case GUC_FIRMWARE_PENDING:
403	default:
404		/* "can't happen" */
405		WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n",
406			guc_fw->guc_fw_path,
407			intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
408			guc_fw->guc_fw_fetch_status);
409		err = -ENXIO;
410		goto fail;
411
412	case GUC_FIRMWARE_SUCCESS:
413		break;
414	}
415
416	err = i915_guc_submission_init(dev);
417	if (err)
418		goto fail;
419
420	err = guc_ucode_xfer(dev_priv);
421	if (err)
422		goto fail;
423
424	guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
425
426	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
427		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
428		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
429
430	if (i915.enable_guc_submission) {
431		/* The execbuf_client will be recreated. Release it first. */
432		i915_guc_submission_disable(dev);
433
434		err = i915_guc_submission_enable(dev);
435		if (err)
436			goto fail;
437		direct_interrupts_to_guc(dev_priv);
438	}
439
440	return 0;
441
442fail:
443	if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
444		guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
445
446	direct_interrupts_to_host(dev_priv);
447	i915_guc_submission_disable(dev);
448	i915_guc_submission_fini(dev);
449
450	return err;
451}
452
453static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
454{
455	struct drm_i915_gem_object *obj;
456	const struct firmware *fw;
457	struct guc_css_header *css;
458	size_t size;
459	int err;
460
461	DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
462		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
463
464	err = request_firmware(&fw, guc_fw->guc_fw_path, &dev->pdev->dev);
465	if (err)
466		goto fail;
467	if (!fw)
468		goto fail;
469
470	DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n",
471		guc_fw->guc_fw_path, fw);
472
473	/* Check the size of the blob before examining buffer contents */
474	if (fw->size < sizeof(struct guc_css_header)) {
475		DRM_ERROR("Firmware header is missing\n");
476		goto fail;
477	}
478
479	css = (struct guc_css_header *)fw->data;
480
481	/* Firmware bits always start from header */
482	guc_fw->header_offset = 0;
483	guc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
484		css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
485
486	if (guc_fw->header_size != sizeof(struct guc_css_header)) {
487		DRM_ERROR("CSS header definition mismatch\n");
488		goto fail;
489	}
490
491	/* then, uCode */
492	guc_fw->ucode_offset = guc_fw->header_offset + guc_fw->header_size;
493	guc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
494
495	/* now RSA */
496	if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
497		DRM_ERROR("RSA key size is bad\n");
498		goto fail;
499	}
500	guc_fw->rsa_offset = guc_fw->ucode_offset + guc_fw->ucode_size;
501	guc_fw->rsa_size = css->key_size_dw * sizeof(u32);
502
503	/* At least, it should have header, uCode and RSA. Size of all three. */
504	size = guc_fw->header_size + guc_fw->ucode_size + guc_fw->rsa_size;
505	if (fw->size < size) {
506		DRM_ERROR("Missing firmware components\n");
507		goto fail;
508	}
509
510	/* Header and uCode will be loaded to WOPCM. Size of the two. */
511	size = guc_fw->header_size + guc_fw->ucode_size;
512
513	/* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
514	if (size > GUC_WOPCM_SIZE_VALUE - 0x8000) {
515		DRM_ERROR("Firmware is too large to fit in WOPCM\n");
516		goto fail;
517	}
518
519	/*
520	 * The GuC firmware image has the version number embedded at a well-known
521	 * offset within the firmware blob; note that major / minor version are
522	 * TWO bytes each (i.e. u16), although all pointers and offsets are defined
523	 * in terms of bytes (u8).
524	 */
525	guc_fw->guc_fw_major_found = css->guc_sw_version >> 16;
526	guc_fw->guc_fw_minor_found = css->guc_sw_version & 0xFFFF;
527
528	if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
529	    guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
530		DRM_ERROR("GuC firmware version %d.%d, required %d.%d\n",
531			guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
532			guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
533		err = -ENOEXEC;
534		goto fail;
535	}
536
537	DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
538			guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
539			guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
540
541	mutex_lock(&dev->struct_mutex);
542	obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
543	mutex_unlock(&dev->struct_mutex);
544	if (IS_ERR_OR_NULL(obj)) {
545		err = obj ? PTR_ERR(obj) : -ENOMEM;
546		goto fail;
547	}
548
549	guc_fw->guc_fw_obj = obj;
550	guc_fw->guc_fw_size = fw->size;
551
552	DRM_DEBUG_DRIVER("GuC fw fetch status SUCCESS, obj %p\n",
553			guc_fw->guc_fw_obj);
554
555	release_firmware(fw);
556	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_SUCCESS;
557	return;
558
559fail:
560	DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n",
561		err, fw, guc_fw->guc_fw_obj);
562	DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
563		  guc_fw->guc_fw_path, err);
564
565	mutex_lock(&dev->struct_mutex);
566	obj = guc_fw->guc_fw_obj;
567	if (obj)
568		drm_gem_object_unreference(&obj->base);
569	guc_fw->guc_fw_obj = NULL;
570	mutex_unlock(&dev->struct_mutex);
571
572	release_firmware(fw);		/* OK even if fw is NULL */
573	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
574}
575
576/**
577 * intel_guc_ucode_init() - define parameters and fetch firmware
578 * @dev:	drm device
579 *
580 * Called early during driver load, but after GEM is initialised.
581 *
582 * The firmware will be transferred to the GuC's memory later,
583 * when intel_guc_ucode_load() is called.
584 */
585void intel_guc_ucode_init(struct drm_device *dev)
586{
587	struct drm_i915_private *dev_priv = dev->dev_private;
588	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
589	const char *fw_path;
590
591	if (!HAS_GUC_SCHED(dev))
592		i915.enable_guc_submission = false;
593
594	if (!HAS_GUC_UCODE(dev)) {
595		fw_path = NULL;
596	} else if (IS_SKYLAKE(dev)) {
597		fw_path = I915_SKL_GUC_UCODE;
598		guc_fw->guc_fw_major_wanted = 4;
599		guc_fw->guc_fw_minor_wanted = 3;
600	} else {
601		i915.enable_guc_submission = false;
602		fw_path = "";	/* unknown device */
603	}
604
605	if (!i915.enable_guc_submission)
606		return;
607
608	guc_fw->guc_dev = dev;
609	guc_fw->guc_fw_path = fw_path;
610	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
611	guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;
612
613	if (fw_path == NULL)
614		return;
615
616	if (*fw_path == '\0') {
617		DRM_ERROR("No GuC firmware known for this platform\n");
618		guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
619		return;
620	}
621
622	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
623	DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
624	guc_fw_fetch(dev, guc_fw);
625	/* status must now be FAIL or SUCCESS */
626}
627
628/**
629 * intel_guc_ucode_fini() - clean up all allocated resources
630 * @dev:	drm device
631 */
632void intel_guc_ucode_fini(struct drm_device *dev)
633{
634	struct drm_i915_private *dev_priv = dev->dev_private;
635	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
636
637	mutex_lock(&dev->struct_mutex);
638	direct_interrupts_to_host(dev_priv);
639	i915_guc_submission_disable(dev);
640	i915_guc_submission_fini(dev);
641
642	if (guc_fw->guc_fw_obj)
643		drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
644	guc_fw->guc_fw_obj = NULL;
645	mutex_unlock(&dev->struct_mutex);
646
647	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
648}

v4.10.11
  1/*
  2 * Copyright © 2014 Intel Corporation
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice (including the next
 12 * paragraph) shall be included in all copies or substantial portions of the
 13 * Software.
 14 *
 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 21 * IN THE SOFTWARE.
 22 *
 23 * Authors:
 24 *    Vinit Azad <vinit.azad@intel.com>
 25 *    Ben Widawsky <ben@bwidawsk.net>
 26 *    Dave Gordon <david.s.gordon@intel.com>
 27 *    Alex Dai <yu.dai@intel.com>
 28 */
 29#include <linux/firmware.h>
 30#include "i915_drv.h"
 31#include "intel_guc.h"
 32
 33/**
 34 * DOC: GuC-specific firmware loader
 35 *
 36 * intel_guc:
 37 * Top-level structure of the GuC. It handles firmware loading and manages the
 38 * client pool and doorbells. intel_guc owns an i915_guc_client to replace the
 39 * legacy ExecList submission.
 40 *
 41 * Firmware versioning:
 42 * The firmware build process generates a version header file with the major
 43 * and minor versions defined. The versions are built into the CSS header of
 44 * the firmware. The i915 kernel driver sets the minimum firmware version
 45 * required per platform (see the version-check note below). The firmware
 46 * installation package will install (symbolically link) the proper version.
 47 *
 48 * GuC address space:
 49 * The GuC does not allow any gfx GGTT address in the range [0, WOPCM_TOP),
 50 * which is reserved for the Boot ROM, SRAM and WOPCM. Currently this top
 51 * address is 512K. To exclude that range from the GGTT, all gfx objects used
 52 * by the GuC are pinned with a PIN_OFFSET_BIAS of the WOPCM size.
 53 *
 54 * Firmware log:
 55 * The firmware log is enabled by setting i915.guc_log_level to a non-negative
 56 * level. Log data is printed out by reading the debugfs file i915_guc_log_dump.
 57 * Reading from i915_guc_load_status will print out the firmware loading status
 58 * and scratch register values.
 59 *
 60 */
 61
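/*
 * Version-check note (a summary of the logic in guc_fw_fetch() below): the
 * major version found in the blob's CSS header must match the major version
 * wanted for the platform exactly, while the minor version found only needs
 * to be greater than or equal to the minor version wanted.
 */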
 62#define SKL_FW_MAJOR 6
 63#define SKL_FW_MINOR 1
 64
 65#define BXT_FW_MAJOR 8
 66#define BXT_FW_MINOR 7
 67
 68#define KBL_FW_MAJOR 9
 69#define KBL_FW_MINOR 14
 70
 71#define GUC_FW_PATH(platform, major, minor) \
 72       "i915/" __stringify(platform) "_guc_ver" __stringify(major) "_" __stringify(minor) ".bin"
 73
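/*
 * For example, GUC_FW_PATH(skl, SKL_FW_MAJOR, SKL_FW_MINOR) expands to
 * "i915/skl_guc_ver6_1.bin" with the values defined above.
 */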
 74#define I915_SKL_GUC_UCODE GUC_FW_PATH(skl, SKL_FW_MAJOR, SKL_FW_MINOR)
 75MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
 76
 77#define I915_BXT_GUC_UCODE GUC_FW_PATH(bxt, BXT_FW_MAJOR, BXT_FW_MINOR)
 78MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
 79
 80#define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR)
 81MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
 82
 83/* User-friendly representation of an enum */
 84const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
 85{
 86	switch (status) {
 87	case GUC_FIRMWARE_FAIL:
 88		return "FAIL";
 89	case GUC_FIRMWARE_NONE:
 90		return "NONE";
 91	case GUC_FIRMWARE_PENDING:
 92		return "PENDING";
 93	case GUC_FIRMWARE_SUCCESS:
 94		return "SUCCESS";
 95	default:
 96		return "UNKNOWN!";
 97	}
 98};
 99
100static void guc_interrupts_release(struct drm_i915_private *dev_priv)
101{
102	struct intel_engine_cs *engine;
103	enum intel_engine_id id;
104	int irqs;
105
106	/* tell all command streamers NOT to forward interrupts or vblank to GuC */
107	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
108	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
109	for_each_engine(engine, dev_priv, id)
110		I915_WRITE(RING_MODE_GEN7(engine), irqs);
111
112	/* route all GT interrupts to the host */
113	I915_WRITE(GUC_BCS_RCS_IER, 0);
114	I915_WRITE(GUC_VCS2_VCS1_IER, 0);
115	I915_WRITE(GUC_WD_VECS_IER, 0);
116}
117
118static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
119{
120	struct intel_engine_cs *engine;
121	enum intel_engine_id id;
122	int irqs;
123	u32 tmp;
124
125	/* tell all command streamers to forward interrupts (but not vblank) to GuC */
126	irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
127	for_each_engine(engine, dev_priv, id)
128		I915_WRITE(RING_MODE_GEN7(engine), irqs);
129
130	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
131	irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
132	       GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
133	/* These three registers have the same bit definitions */
134	I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
135	I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
136	I915_WRITE(GUC_WD_VECS_IER, ~irqs);
137
138	/*
139	 * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
140	 * (unmasked) PM interrupts to the GuC. All other bits of this
141	 * register *disable* generation of a specific interrupt.
142	 *
143	 * 'pm_intr_keep' indicates bits that are NOT to be set when
144	 * writing to the PM interrupt mask register, i.e. interrupts
145	 * that must not be disabled.
146	 *
147	 * If the GuC is handling these interrupts, then we must not let
148	 * the PM code disable ANY interrupt that the GuC is expecting.
149	 * So for each ENABLED (0) bit in this register, we must SET the
150	 * bit in pm_intr_keep so that it's left enabled for the GuC.
151	 *
152	 * OTOH the REDIRECT_TO_GUC bit is initially SET in pm_intr_keep
153	 * (so interrupts go to the DISPLAY unit at first); but here we
154	 * need to CLEAR that bit, which will result in the register bit
155	 * being left SET!
156	 */
157	tmp = I915_READ(GEN6_PMINTRMSK);
158	if (tmp & GEN8_PMINTR_REDIRECT_TO_GUC) {
159		dev_priv->rps.pm_intr_keep |= ~tmp;
160		dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_GUC;
161	}
162}
163
164static u32 get_gttype(struct drm_i915_private *dev_priv)
165{
166	/* XXX: GT type based on PCI device ID? field seems unused by fw */
167	return 0;
168}
169
170static u32 get_core_family(struct drm_i915_private *dev_priv)
171{
172	u32 gen = INTEL_GEN(dev_priv);
173
174	switch (gen) {
175	case 9:
176		return GFXCORE_FAMILY_GEN9;
177
178	default:
179		WARN(1, "GEN%d does not support GuC operation!\n", gen);
180		return GFXCORE_FAMILY_UNKNOWN;
181	}
182}
183
184/*
185 * Initialise the GuC parameter block before starting the firmware
186 * transfer. These parameters are read by the firmware on startup
187 * and cannot be changed thereafter.
188 */
189static void guc_params_init(struct drm_i915_private *dev_priv)
190{
191	struct intel_guc *guc = &dev_priv->guc;
192	u32 params[GUC_CTL_MAX_DWORDS];
193	int i;
194
195	memset(&params, 0, sizeof(params));
196
197	params[GUC_CTL_DEVICE_INFO] |=
198		(get_gttype(dev_priv) << GUC_CTL_GTTYPE_SHIFT) |
199		(get_core_family(dev_priv) << GUC_CTL_COREFAMILY_SHIFT);
200
201	/*
202	 * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
203	 * second. This ARAT value is calculated by:
204	 * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
205	 */
206	params[GUC_CTL_ARAT_HIGH] = 0;
207	params[GUC_CTL_ARAT_LOW] = 100000000;
208
209	params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
210
211	params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
212			GUC_CTL_VCS2_ENABLED;
213
214	params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
215
216	if (i915.guc_log_level >= 0) {
217		params[GUC_CTL_DEBUG] =
218			i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
219	} else
220		params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
221
222	if (guc->ads_vma) {
223		u32 ads = i915_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
224		params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
225		params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
226	}
227
228	/* If GuC submission is enabled, set up additional parameters here */
229	if (i915.enable_guc_submission) {
230		u32 pgs = i915_ggtt_offset(dev_priv->guc.ctx_pool_vma);
231		u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;
232
233		pgs >>= PAGE_SHIFT;
234		params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
235			(ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
236
237		params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
238
239		/* Unmask this bit to enable the GuC's internal scheduler */
240		params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
241	}
242
243	I915_WRITE(SOFT_SCRATCH(0), 0);
244
245	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
246		I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
247}
248
249/*
250 * Read the GuC status register (GUC_STATUS) and store it in the
251 * specified location; then return a boolean indicating whether
252 * the value matches either of two values representing completion
253 * of the GuC boot process.
254 *
255 * This is used for polling the GuC status in a wait_for()
256 * loop below.
257 */
258static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
259				      u32 *status)
260{
261	u32 val = I915_READ(GUC_STATUS);
262	u32 uk_val = val & GS_UKERNEL_MASK;
263	*status = val;
264	return (uk_val == GS_UKERNEL_READY ||
265		((val & GS_MIA_CORE_STATE) && uk_val == GS_UKERNEL_LAPIC_DONE));
266}
267
268/*
269 * Transfer the firmware image to RAM for execution by the microcontroller.
270 *
271 * Architecturally, the DMA engine is bidirectional, and can potentially even
272 * transfer between GTT locations. This functionality is left out of the API
273 * for now as there is no need for it.
274 *
275 * Note that GuC needs the CSS header plus uKernel code to be copied by the
276 * DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
277 */
278static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
279			      struct i915_vma *vma)
280{
281	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
282	unsigned long offset;
283	struct sg_table *sg = vma->pages;
284	u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
285	int i, ret = 0;
286
287	/* where RSA signature starts */
288	offset = guc_fw->rsa_offset;
289
290	/* Copy RSA signature from the fw image to HW for verification */
291	sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa), offset);
292	for (i = 0; i < UOS_RSA_SCRATCH_MAX_COUNT; i++)
293		I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
294
295	/* The header plus uCode will be copied to WOPCM via DMA, excluding any
296	 * other components */
297	I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
298
299	/* Set the source address for the new blob */
300	offset = i915_ggtt_offset(vma) + guc_fw->header_offset;
301	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
302	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
303
304	/*
305	 * Set the DMA destination. Current uCode expects the code to be
306	 * loaded at 8k; locations below this are used for the stack.
307	 */
308	I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
309	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
310
311	/* Finally start the DMA */
312	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
313
314	/*
315	 * Wait for the DMA to complete & the GuC to start up.
316	 * NB: Docs recommend not using the interrupt for completion.
317	 * Measurements indicate this should take no more than 20ms, so a
318	 * timeout here indicates that the GuC has failed and is unusable.
319	 * (Higher levels of the driver will attempt to fall back to
320	 * execlist mode if this happens.)
321	 */
322	ret = wait_for(guc_ucode_response(dev_priv, &status), 100);
323
324	DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
325			I915_READ(DMA_CTRL), status);
326
327	if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
328		DRM_ERROR("GuC firmware signature verification failed\n");
329		ret = -ENOEXEC;
330	}
331
332	DRM_DEBUG_DRIVER("returning %d\n", ret);
333
334	return ret;
335}
336
337static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
338{
339	u32 wopcm_size = GUC_WOPCM_TOP;
340
341	/* On BXT, the top of WOPCM is reserved for RC6 context */
342	if (IS_BROXTON(dev_priv))
343		wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
344
345	return wopcm_size;
346}
347
348/*
349 * Load the GuC firmware blob into the MinuteIA.
350 */
351static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
352{
353	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
354	struct i915_vma *vma;
355	int ret;
356
357	ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
358	if (ret) {
359		DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
360		return ret;
361	}
362
363	vma = i915_gem_object_ggtt_pin(guc_fw->guc_fw_obj, NULL, 0, 0, 0);
364	if (IS_ERR(vma)) {
365		DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
366		return PTR_ERR(vma);
367	}
368
369	/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
370	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
371
372	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
373
374	/* init WOPCM */
375	I915_WRITE(GUC_WOPCM_SIZE, guc_wopcm_size(dev_priv));
376	I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);
377
378	/* Enable MIA caching. GuC clock gating is disabled. */
379	I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
380
381	/* WaDisableMinuteIaClockGating:bxt */
382	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
383		I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
384					      ~GUC_ENABLE_MIA_CLOCK_GATING));
385	}
386
387	/* WaC6DisallowByGfxPause:bxt */
388	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
389		I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
390
391	if (IS_BROXTON(dev_priv))
392		I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
393	else
394		I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
395
396	if (IS_GEN9(dev_priv)) {
397		/* DOP Clock Gating Enable for GuC clocks */
398		I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
399					    I915_READ(GEN7_MISCCPCTL)));
400
401		/* allows for 5us (in 10ns units) before GT can go to RC6 */
402		I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
403	}
404
405	guc_params_init(dev_priv);
406
407	ret = guc_ucode_xfer_dma(dev_priv, vma);
408
409	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
410
411	/*
412	 * We keep the object pages for reuse during resume. But we can unpin it
413	 * now that DMA has completed, so it doesn't continue to take up space.
414	 */
415	i915_vma_unpin(vma);
416
417	return ret;
418}
419
420static int guc_hw_reset(struct drm_i915_private *dev_priv)
421{
422	int ret;
423	u32 guc_status;
424
425	ret = intel_guc_reset(dev_priv);
426	if (ret) {
427		DRM_ERROR("GuC reset failed, ret = %d\n", ret);
428		return ret;
429	}
430
431	guc_status = I915_READ(GUC_STATUS);
432	WARN(!(guc_status & GS_MIA_IN_RESET),
433	     "GuC status: 0x%x, MIA core expected to be in reset\n", guc_status);
434
435	return ret;
436}
437
438/**
439 * intel_guc_setup() - finish preparing the GuC for activity
440 * @dev:	drm device
441 *
442 * Called from gem_init_hw() during driver loading and also after a GPU reset.
443 *
444 * The main action required here is to load the GuC uCode into the device.
445 * The firmware image should have already been fetched into memory by the
446 * earlier call to intel_guc_init(), so here we need only check that worked,
447 * and then transfer the image to the h/w.
448 *
449 * Return:	non-zero code on error
450 */
451int intel_guc_setup(struct drm_device *dev)
452{
453	struct drm_i915_private *dev_priv = to_i915(dev);
454	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
455	const char *fw_path = guc_fw->guc_fw_path;
456	int retries, ret, err;
457
458	DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
459		fw_path,
460		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
461		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
462
463	/* Loading forbidden, or no firmware to load? */
464	if (!i915.enable_guc_loading) {
465		err = 0;
466		goto fail;
467	} else if (fw_path == NULL) {
468		/* Device is known to have no uCode (e.g. no GuC) */
469		err = -ENXIO;
470		goto fail;
471	} else if (*fw_path == '\0') {
472		/* Device has a GuC but we don't know what f/w to load? */
473		WARN(1, "No GuC firmware known for this platform!\n");
474		err = -ENODEV;
475		goto fail;
476	}
477
478	/* Fetch failed, or already fetched but failed to load? */
479	if (guc_fw->guc_fw_fetch_status != GUC_FIRMWARE_SUCCESS) {
480		err = -EIO;
481		goto fail;
482	} else if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) {
483		err = -ENOEXEC;
484		goto fail;
485	}
486
487	guc_interrupts_release(dev_priv);
488	gen9_reset_guc_interrupts(dev_priv);
489
490	guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
491
492	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
493		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
494		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
495
496	err = i915_guc_submission_init(dev_priv);
497	if (err)
498		goto fail;
499
500	/*
501	 * WaEnableuKernelHeaderValidFix:skl,bxt
502	 * For BXT, this is only needed up to B0, but the WA below is required
503	 * for later steppings as well, so it is extended to cover them too.
504	 */
505	/* WaEnableGuCBootHashCheckNotSet:skl,bxt */
506	for (retries = 3; ; ) {
507		/*
508		 * Always reset the GuC just before (re)loading, so
509		 * that the state and timing are fairly predictable
510		 */
511		err = guc_hw_reset(dev_priv);
512		if (err)
513			goto fail;
514
515		err = guc_ucode_xfer(dev_priv);
516		if (!err)
517			break;
518
519		if (--retries == 0)
520			goto fail;
521
522		DRM_INFO("GuC fw load failed: %d; will reset and "
523			 "retry %d more time(s)\n", err, retries);
524	}
525
526	guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
527
528	DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
529		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
530		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
531
532	if (i915.enable_guc_submission) {
533		if (i915.guc_log_level >= 0)
534			gen9_enable_guc_interrupts(dev_priv);
535
536		err = i915_guc_submission_enable(dev_priv);
537		if (err)
538			goto fail;
539		guc_interrupts_capture(dev_priv);
540	}
541
542	return 0;
543
544fail:
545	if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
546		guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
547
548	guc_interrupts_release(dev_priv);
549	i915_guc_submission_disable(dev_priv);
550	i915_guc_submission_fini(dev_priv);
551
552	/*
553	 * We've failed to load the firmware :(
554	 *
555	 * Decide whether to disable GuC submission and fall back to
556	 * execlist mode, and whether to hide the error by returning
557	 * zero or to return -EIO, which the caller will treat as a
558	 * nonfatal error (i.e. it doesn't prevent driver load, but
559	 * marks the GPU as wedged until reset).
560	 */
561	if (i915.enable_guc_loading > 1) {
562		ret = -EIO;
563	} else if (i915.enable_guc_submission > 1) {
564		ret = -EIO;
565	} else {
566		ret = 0;
567	}
568
569	if (err == 0 && !HAS_GUC_UCODE(dev_priv))
570		;	/* Don't mention the GuC! */
571	else if (err == 0)
572		DRM_INFO("GuC firmware load skipped\n");
573	else if (ret != -EIO)
574		DRM_NOTE("GuC firmware load failed: %d\n", err);
575	else
576		DRM_WARN("GuC firmware load failed: %d\n", err);
577
578	if (i915.enable_guc_submission) {
579		if (fw_path == NULL)
580			DRM_INFO("GuC submission without firmware not supported\n");
581		if (ret == 0)
582			DRM_NOTE("Falling back from GuC submission to execlist mode\n");
583		else
584			DRM_ERROR("GuC init failed: %d\n", ret);
585	}
586	i915.enable_guc_submission = 0;
587
588	return ret;
589}
590
591static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
592{
593	struct pci_dev *pdev = dev->pdev;
594	struct drm_i915_gem_object *obj;
595	const struct firmware *fw;
596	struct guc_css_header *css;
597	size_t size;
598	int err;
599
600	DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
601		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
602
603	err = request_firmware(&fw, guc_fw->guc_fw_path, &pdev->dev);
604	if (err)
605		goto fail;
606	if (!fw)
607		goto fail;
608
609	DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n",
610		guc_fw->guc_fw_path, fw);
611
612	/* Check the size of the blob before examining buffer contents */
613	if (fw->size < sizeof(struct guc_css_header)) {
614		DRM_NOTE("Firmware header is missing\n");
615		goto fail;
616	}
617
618	css = (struct guc_css_header *)fw->data;
619
620	/* Firmware bits always start from header */
621	guc_fw->header_offset = 0;
622	guc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
623		css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
624
625	if (guc_fw->header_size != sizeof(struct guc_css_header)) {
626		DRM_NOTE("CSS header definition mismatch\n");
627		goto fail;
628	}
629
630	/* then, uCode */
631	guc_fw->ucode_offset = guc_fw->header_offset + guc_fw->header_size;
632	guc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
633
634	/* now RSA */
635	if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
636		DRM_NOTE("RSA key size is bad\n");
637		goto fail;
638	}
639	guc_fw->rsa_offset = guc_fw->ucode_offset + guc_fw->ucode_size;
640	guc_fw->rsa_size = css->key_size_dw * sizeof(u32);
641
642	/* At least, it should have header, uCode and RSA. Size of all three. */
643	size = guc_fw->header_size + guc_fw->ucode_size + guc_fw->rsa_size;
644	if (fw->size < size) {
645		DRM_NOTE("Missing firmware components\n");
646		goto fail;
647	}
648
649	/* Header and uCode will be loaded to WOPCM. Size of the two. */
650	size = guc_fw->header_size + guc_fw->ucode_size;
651	if (size > guc_wopcm_size(to_i915(dev))) {
652		DRM_NOTE("Firmware is too large to fit in WOPCM\n");
653		goto fail;
654	}
655
656	/*
657	 * The GuC firmware image has the version number embedded at a well-known
658	 * offset within the firmware blob; note that major / minor version are
659	 * TWO bytes each (i.e. u16), although all pointers and offsets are defined
660	 * in terms of bytes (u8).
661	 */
662	guc_fw->guc_fw_major_found = css->guc_sw_version >> 16;
663	guc_fw->guc_fw_minor_found = css->guc_sw_version & 0xFFFF;
664
665	if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
666	    guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
667		DRM_NOTE("GuC firmware version %d.%d, required %d.%d\n",
668			guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
669			guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
670		err = -ENOEXEC;
671		goto fail;
672	}
673
674	DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
675			guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
676			guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
677
678	mutex_lock(&dev->struct_mutex);
679	obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
680	mutex_unlock(&dev->struct_mutex);
681	if (IS_ERR_OR_NULL(obj)) {
682		err = obj ? PTR_ERR(obj) : -ENOMEM;
683		goto fail;
684	}
685
686	guc_fw->guc_fw_obj = obj;
687	guc_fw->guc_fw_size = fw->size;
688
689	DRM_DEBUG_DRIVER("GuC fw fetch status SUCCESS, obj %p\n",
690			guc_fw->guc_fw_obj);
691
692	release_firmware(fw);
693	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_SUCCESS;
694	return;
695
696fail:
697	DRM_WARN("Failed to fetch valid GuC firmware from %s (error %d)\n",
698		 guc_fw->guc_fw_path, err);
699	DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n",
700		err, fw, guc_fw->guc_fw_obj);
701
702	mutex_lock(&dev->struct_mutex);
703	obj = guc_fw->guc_fw_obj;
704	if (obj)
705		i915_gem_object_put(obj);
706	guc_fw->guc_fw_obj = NULL;
707	mutex_unlock(&dev->struct_mutex);
708
709	release_firmware(fw);		/* OK even if fw is NULL */
710	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
711}
712
713/**
714 * intel_guc_init() - define parameters and fetch firmware
715 * @dev:	drm device
716 *
717 * Called early during driver load, but after GEM is initialised.
718 *
719 * The firmware will be transferred to the GuC's memory later,
720 * when intel_guc_setup() is called.
721 */
722void intel_guc_init(struct drm_device *dev)
723{
724	struct drm_i915_private *dev_priv = to_i915(dev);
725	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
726	const char *fw_path;
727
728	if (!HAS_GUC(dev_priv)) {
729		i915.enable_guc_loading = 0;
730		i915.enable_guc_submission = 0;
731	} else {
732		/* A negative value means "use platform default" */
733		if (i915.enable_guc_loading < 0)
734			i915.enable_guc_loading = HAS_GUC_UCODE(dev_priv);
735		if (i915.enable_guc_submission < 0)
736			i915.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
737	}
738
739	if (!HAS_GUC_UCODE(dev_priv)) {
740		fw_path = NULL;
741	} else if (IS_SKYLAKE(dev_priv)) {
742		fw_path = I915_SKL_GUC_UCODE;
743		guc_fw->guc_fw_major_wanted = SKL_FW_MAJOR;
744		guc_fw->guc_fw_minor_wanted = SKL_FW_MINOR;
745	} else if (IS_BROXTON(dev_priv)) {
746		fw_path = I915_BXT_GUC_UCODE;
747		guc_fw->guc_fw_major_wanted = BXT_FW_MAJOR;
748		guc_fw->guc_fw_minor_wanted = BXT_FW_MINOR;
749	} else if (IS_KABYLAKE(dev_priv)) {
750		fw_path = I915_KBL_GUC_UCODE;
751		guc_fw->guc_fw_major_wanted = KBL_FW_MAJOR;
752		guc_fw->guc_fw_minor_wanted = KBL_FW_MINOR;
753	} else {
754		fw_path = "";	/* unknown device */
755	}
756
757	guc_fw->guc_dev = dev;
758	guc_fw->guc_fw_path = fw_path;
759	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
760	guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;
761
762	/* Early (and silent) return if GuC loading is disabled */
763	if (!i915.enable_guc_loading)
764		return;
765	if (fw_path == NULL)
766		return;
767	if (*fw_path == '\0')
768		return;
769
770	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
771	DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
772	guc_fw_fetch(dev, guc_fw);
773	/* status must now be FAIL or SUCCESS */
774}
775
776/**
777 * intel_guc_fini() - clean up all allocated resources
778 * @dev:	drm device
779 */
780void intel_guc_fini(struct drm_device *dev)
781{
782	struct drm_i915_private *dev_priv = to_i915(dev);
783	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
784
785	mutex_lock(&dev->struct_mutex);
786	guc_interrupts_release(dev_priv);
787	i915_guc_submission_disable(dev_priv);
788	i915_guc_submission_fini(dev_priv);
789
790	if (guc_fw->guc_fw_obj)
791		i915_gem_object_put(guc_fw->guc_fw_obj);
792	guc_fw->guc_fw_obj = NULL;
793	mutex_unlock(&dev->struct_mutex);
794
795	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
796}