// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include <linux/types.h>

#include "gt/intel_gt.h"
#include "intel_guc_reg.h"
#include "intel_huc.h"
#include "intel_huc_print.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "pxp/intel_pxp_cmd_interface_43.h"

#include <linux/device/bus.h>
#include <linux/mei_aux.h>

/**
 * DOC: HuC
 *
 * The HuC is a dedicated microcontroller for usage in media HEVC (High
 * Efficiency Video Coding) operations. Userspace can directly use the firmware
 * capabilities by adding HuC specific commands to batch buffers.
 *
 * The kernel driver is only responsible for loading the HuC firmware and
 * triggering its security authentication. This is done differently depending
 * on the platform:
 *
 * - older platforms (from Gen9 to most Gen12s): the load is performed via DMA
 *   and the authentication via GuC
 * - DG2: load and authentication are both performed via GSC.
 * - MTL and newer platforms: the load is performed via DMA (same as on
 *   non-DG2 older platforms), while the authentication is done in two steps,
 *   a first auth for clear-media workloads via GuC and a second one for all
 *   workloads via GSC.
 *
 * On platforms where the GuC does the authentication, to correctly do so the
 * HuC binary must be loaded before the GuC one.
 * Loading the HuC is optional; however, not using the HuC might negatively
 * impact power usage and/or performance of media workloads, depending on the
 * use-cases.
 * HuC must be reloaded on events that cause the WOPCM to lose its contents
 * (S3/S4, FLR); on older platforms the HuC must also be reloaded on GuC/GT
 * reset, while on newer ones it will survive that.
 *
 * See https://github.com/intel/media-driver for the latest details on HuC
 * functionality.
 */
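
/*
 * For orientation, a simplified sketch of the driver-side flow, pieced
 * together from the functions in this file (the firmware blob itself is
 * fetched by the common intel_uc_fw code, and the actual load steps live
 * in intel_huc_fw.c):
 *
 *   intel_huc_init_early()             - pick auth status registers, init
 *                                        the delayed-load fence
 *   intel_huc_init()                   - validate load mode, alloc objects
 *   load via DMA or GSC                - see intel_huc_fw.c
 *   intel_huc_auth()                   - trigger auth via GuC and/or GSC
 *   intel_huc_wait_for_auth_complete() - poll the selected status register
 *   intel_huc_check_status()           - report the result to userspace
 */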

/**
 * DOC: HuC Memory Management
 *
 * Similarly to the GuC, the HuC can't do any memory allocations on its own,
 * with the difference being that the allocations for HuC usage are handled by
 * the userspace driver instead of the kernel one. The HuC accesses the memory
 * via the PPGTT belonging to the context loaded on the VCS executing the
 * HuC-specific commands.
 */

/*
 * MEI-GSC load is an async process. The probing of the exposed aux device
 * (see intel_gsc.c) usually happens a few seconds after i915 probe, depending
 * on when the kernel schedules it. Unless something goes terribly wrong, we're
 * guaranteed for this to happen during boot, so the big timeout is a safety net
 * that we never expect to need.
 * MEI-PXP + HuC load usually takes ~300ms, but if the GSC needs to be resumed
 * and/or reset, this can take longer. Note that the kernel might schedule
 * other work between the i915 init/resume and the MEI one, which can add to
 * the delay.
 */
#define GSC_INIT_TIMEOUT_MS 10000
#define PXP_INIT_TIMEOUT_MS 5000

static int sw_fence_dummy_notify(struct i915_sw_fence *sf,
				 enum i915_sw_fence_notify state)
{
	return NOTIFY_DONE;
}

static void __delayed_huc_load_complete(struct intel_huc *huc)
{
	if (!i915_sw_fence_done(&huc->delayed_load.fence))
		i915_sw_fence_complete(&huc->delayed_load.fence);
}

static void delayed_huc_load_complete(struct intel_huc *huc)
{
	hrtimer_cancel(&huc->delayed_load.timer);
	__delayed_huc_load_complete(huc);
}

static void __gsc_init_error(struct intel_huc *huc)
{
	huc->delayed_load.status = INTEL_HUC_DELAYED_LOAD_ERROR;
	__delayed_huc_load_complete(huc);
}

static void gsc_init_error(struct intel_huc *huc)
{
	hrtimer_cancel(&huc->delayed_load.timer);
	__gsc_init_error(huc);
}

static void gsc_init_done(struct intel_huc *huc)
{
	hrtimer_cancel(&huc->delayed_load.timer);

	/* MEI-GSC init is done, now we wait for MEI-PXP to bind */
	huc->delayed_load.status = INTEL_HUC_WAITING_ON_PXP;
	if (!i915_sw_fence_done(&huc->delayed_load.fence))
		hrtimer_start(&huc->delayed_load.timer,
			      ms_to_ktime(PXP_INIT_TIMEOUT_MS),
			      HRTIMER_MODE_REL);
}

static enum hrtimer_restart huc_delayed_load_timer_callback(struct hrtimer *hrtimer)
{
	struct intel_huc *huc = container_of(hrtimer, struct intel_huc, delayed_load.timer);

	if (!intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC)) {
		if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_GSC)
			huc_notice(huc, "timed out waiting for MEI GSC\n");
		else if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_PXP)
			huc_notice(huc, "timed out waiting for MEI PXP\n");
		else
			MISSING_CASE(huc->delayed_load.status);

		__gsc_init_error(huc);
	}

	return HRTIMER_NORESTART;
}

static void huc_delayed_load_start(struct intel_huc *huc)
{
	ktime_t delay;

	GEM_BUG_ON(intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC));

	/*
	 * On resume we don't have to wait for MEI-GSC to be re-probed, but we
	 * do need to wait for MEI-PXP to reset & re-bind
	 */
	switch (huc->delayed_load.status) {
	case INTEL_HUC_WAITING_ON_GSC:
		delay = ms_to_ktime(GSC_INIT_TIMEOUT_MS);
		break;
	case INTEL_HUC_WAITING_ON_PXP:
		delay = ms_to_ktime(PXP_INIT_TIMEOUT_MS);
		break;
	default:
		gsc_init_error(huc);
		return;
	}

	/*
	 * This fence is always complete unless we're waiting for the
	 * GSC device to come up to load the HuC. We arm the fence here
	 * and complete it when we confirm that the HuC is loaded from
	 * the PXP bind callback.
	 */
	GEM_BUG_ON(!i915_sw_fence_done(&huc->delayed_load.fence));
	i915_sw_fence_fini(&huc->delayed_load.fence);
	i915_sw_fence_reinit(&huc->delayed_load.fence);
	i915_sw_fence_await(&huc->delayed_load.fence);
	i915_sw_fence_commit(&huc->delayed_load.fence);

	hrtimer_start(&huc->delayed_load.timer, delay, HRTIMER_MODE_REL);
}

static int gsc_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct intel_huc *huc = container_of(nb, struct intel_huc, delayed_load.nb);
	struct intel_gsc_intf *intf = &huc_to_gt(huc)->gsc.intf[0];

	if (!intf->adev || &intf->adev->aux_dev.dev != dev)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BOUND_DRIVER: /* mei driver bound to aux device */
		gsc_init_done(huc);
		break;

	case BUS_NOTIFY_DRIVER_NOT_BOUND: /* mei driver fails to be bound */
	case BUS_NOTIFY_UNBIND_DRIVER: /* mei driver about to be unbound */
		huc_info(huc, "MEI driver not bound, disabling HuC load\n");
		gsc_init_error(huc);
		break;
	}

	return 0;
}

void intel_huc_register_gsc_notifier(struct intel_huc *huc, const struct bus_type *bus)
{
	int ret;

	if (!intel_huc_is_loaded_by_gsc(huc))
		return;

	huc->delayed_load.nb.notifier_call = gsc_notifier;
	ret = bus_register_notifier(bus, &huc->delayed_load.nb);
	if (ret) {
		huc_err(huc, "failed to register GSC notifier %pe\n", ERR_PTR(ret));
		huc->delayed_load.nb.notifier_call = NULL;
		gsc_init_error(huc);
	}
}

void intel_huc_unregister_gsc_notifier(struct intel_huc *huc, const struct bus_type *bus)
{
	if (!huc->delayed_load.nb.notifier_call)
		return;

	delayed_huc_load_complete(huc);

	bus_unregister_notifier(bus, &huc->delayed_load.nb);
	huc->delayed_load.nb.notifier_call = NULL;
}

static void delayed_huc_load_init(struct intel_huc *huc)
{
	/*
	 * Initialize fence to be complete as this is expected to be complete
	 * unless there is a delayed HuC load in progress.
	 */
	i915_sw_fence_init(&huc->delayed_load.fence,
			   sw_fence_dummy_notify);
	i915_sw_fence_commit(&huc->delayed_load.fence);

	hrtimer_init(&huc->delayed_load.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	huc->delayed_load.timer.function = huc_delayed_load_timer_callback;
}
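
/*
 * For reference, the lifecycle of the delayed-load fence as implemented by
 * the helpers above:
 *
 *   delayed_huc_load_init()     - fence initialized and immediately
 *                                 committed, so it starts out signaled
 *   huc_delayed_load_start()    - fence re-armed (fini/reinit/await/commit)
 *                                 while we wait for MEI-GSC and MEI-PXP to
 *                                 come up, with an hrtimer as safety net
 *   delayed_huc_load_complete() - fence signaled again on auth completion,
 *                                 init error/timeout, suspend or teardown
 */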

static void delayed_huc_load_fini(struct intel_huc *huc)
{
	/*
	 * the fence is initialized in init_early, so we need to clean it up
	 * even if HuC loading is off.
	 */
	delayed_huc_load_complete(huc);
	i915_sw_fence_fini(&huc->delayed_load.fence);
}

int intel_huc_sanitize(struct intel_huc *huc)
{
	delayed_huc_load_complete(huc);
	intel_uc_fw_sanitize(&huc->fw);
	return 0;
}

static bool vcs_supported(struct intel_gt *gt)
{
	intel_engine_mask_t mask;

	/*
	 * We reach here from i915_driver_early_probe for the primary GT before
	 * its engine mask is set, so we use the device info engine mask for it;
	 * this means we're not taking VCS fusing into account, but if the
	 * primary GT supports VCS engines we expect at least one of them to
	 * remain unfused so we're fine.
	 * For other GTs we expect the GT-specific mask to be set before we
	 * call this function.
	 */
	GEM_BUG_ON(!gt_is_root(gt) && !gt->info.engine_mask);

	if (gt_is_root(gt))
		mask = INTEL_INFO(gt->i915)->platform_engine_mask;
	else
		mask = gt->info.engine_mask;

	return __ENGINE_INSTANCES_MASK(mask, VCS0, I915_MAX_VCS);
}

void intel_huc_init_early(struct intel_huc *huc)
{
	struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
	struct intel_gt *gt = huc_to_gt(huc);

	intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC, true);

	/*
	 * We always init the fence as already completed, even if HuC is not
	 * supported. This way we don't have to distinguish between HuC not
	 * supported/disabled or already loaded, and can focus on whether the
	 * load is currently in progress (fence not complete) or not, which is
	 * what we care about for stalling userspace submissions.
	 */
	delayed_huc_load_init(huc);

	if (!vcs_supported(gt)) {
		intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED);
		return;
	}

	if (GRAPHICS_VER(i915) >= 11) {
		huc->status[INTEL_HUC_AUTH_BY_GUC].reg = GEN11_HUC_KERNEL_LOAD_INFO;
		huc->status[INTEL_HUC_AUTH_BY_GUC].mask = HUC_LOAD_SUCCESSFUL;
		huc->status[INTEL_HUC_AUTH_BY_GUC].value = HUC_LOAD_SUCCESSFUL;
	} else {
		huc->status[INTEL_HUC_AUTH_BY_GUC].reg = HUC_STATUS2;
		huc->status[INTEL_HUC_AUTH_BY_GUC].mask = HUC_FW_VERIFIED;
		huc->status[INTEL_HUC_AUTH_BY_GUC].value = HUC_FW_VERIFIED;
	}

	if (IS_DG2(i915)) {
		huc->status[INTEL_HUC_AUTH_BY_GSC].reg = GEN11_HUC_KERNEL_LOAD_INFO;
		huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HUC_LOAD_SUCCESSFUL;
		huc->status[INTEL_HUC_AUTH_BY_GSC].value = HUC_LOAD_SUCCESSFUL;
	} else {
		huc->status[INTEL_HUC_AUTH_BY_GSC].reg = HECI_FWSTS(MTL_GSC_HECI1_BASE, 5);
		huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HECI1_FWSTS5_HUC_AUTH_DONE;
		huc->status[INTEL_HUC_AUTH_BY_GSC].value = HECI1_FWSTS5_HUC_AUTH_DONE;
	}
}

static int check_huc_loading_mode(struct intel_huc *huc)
{
	struct intel_gt *gt = huc_to_gt(huc);
	bool gsc_enabled = huc->fw.has_gsc_headers;

	/*
	 * The fuse for HuC load via GSC is only valid on platforms that have
	 * GuC deprivilege.
	 */
	if (HAS_GUC_DEPRIVILEGE(gt->i915))
		huc->loaded_via_gsc = intel_uncore_read(gt->uncore, GUC_SHIM_CONTROL2) &
				      GSC_LOADS_HUC;

	if (huc->loaded_via_gsc && !gsc_enabled) {
		huc_err(huc, "HW requires a GSC-enabled blob, but we found a legacy one\n");
		return -ENOEXEC;
	}

	/*
	 * On newer platforms we have GSC-enabled binaries but we load the HuC
	 * via DMA. To do so we need to find the location of the legacy-style
	 * binary inside the GSC-enabled one, which we do at fetch time. Make
	 * sure that we were able to do so if the fuse says we need to load via
	 * DMA and the binary is GSC-enabled.
	 */
	if (!huc->loaded_via_gsc && gsc_enabled && !huc->fw.dma_start_offset) {
		huc_err(huc, "HW in DMA mode, but we have an incompatible GSC-enabled blob\n");
		return -ENOEXEC;
	}

	/*
	 * If the HuC is loaded via GSC, we need to be able to access the GSC.
	 * On DG2 this is done via the mei components, while on newer platforms
	 * it is done via the GSCCS.
	 */
	if (huc->loaded_via_gsc) {
		if (IS_DG2(gt->i915)) {
			if (!IS_ENABLED(CONFIG_INTEL_MEI_PXP) ||
			    !IS_ENABLED(CONFIG_INTEL_MEI_GSC)) {
				huc_info(huc, "can't load due to missing mei modules\n");
				return -EIO;
			}
		} else {
			if (!HAS_ENGINE(gt, GSC0)) {
				huc_info(huc, "can't load due to missing GSCCS\n");
				return -EIO;
			}
		}
	}

	huc_dbg(huc, "loaded by GSC = %s\n", str_yes_no(huc->loaded_via_gsc));

	return 0;
}
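
/*
 * For reference, the combinations accepted above:
 *
 *   fuse = DMA, legacy blob                 -> OK, legacy DMA load
 *   fuse = DMA, GSC blob with DMA offset    -> OK, DMA load of the embedded
 *                                              legacy-style binary
 *   fuse = DMA, GSC blob without DMA offset -> -ENOEXEC
 *   fuse = GSC, legacy blob                 -> -ENOEXEC
 *   fuse = GSC, GSC blob                    -> OK if the mei modules (DG2)
 *                                              or the GSCCS are available,
 *                                              -EIO otherwise
 */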

int intel_huc_init(struct intel_huc *huc)
{
	struct intel_gt *gt = huc_to_gt(huc);
	int err;

	err = check_huc_loading_mode(huc);
	if (err)
		goto out;

	if (HAS_ENGINE(gt, GSC0)) {
		struct i915_vma *vma;

		vma = intel_guc_allocate_vma(&gt->uc.guc, PXP43_HUC_AUTH_INOUT_SIZE * 2);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			huc_info(huc, "Failed to allocate heci pkt\n");
			goto out;
		}

		huc->heci_pkt = vma;
	}

	err = intel_uc_fw_init(&huc->fw);
	if (err)
		goto out_pkt;

	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOADABLE);

	return 0;

out_pkt:
	if (huc->heci_pkt)
		i915_vma_unpin_and_release(&huc->heci_pkt, 0);
out:
	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_INIT_FAIL);
	huc_info(huc, "initialization failed %pe\n", ERR_PTR(err));
	return err;
}

void intel_huc_fini(struct intel_huc *huc)
{
	/*
	 * the fence is initialized in init_early, so we need to clean it up
	 * even if HuC loading is off.
	 */
	delayed_huc_load_fini(huc);

	if (huc->heci_pkt)
		i915_vma_unpin_and_release(&huc->heci_pkt, 0);

	if (intel_uc_fw_is_loadable(&huc->fw))
		intel_uc_fw_fini(&huc->fw);
}

void intel_huc_suspend(struct intel_huc *huc)
{
	if (!intel_uc_fw_is_loadable(&huc->fw))
		return;

	/*
	 * in the unlikely case that we're suspending before the GSC has
	 * completed its loading sequence, just stop waiting. We'll restart
	 * on resume.
	 */
	delayed_huc_load_complete(huc);
}

static const char *auth_mode_string(struct intel_huc *huc,
				    enum intel_huc_authentication_type type)
{
	bool partial = huc->fw.has_gsc_headers && type == INTEL_HUC_AUTH_BY_GUC;

	return partial ? "clear media" : "all workloads";
}

int intel_huc_wait_for_auth_complete(struct intel_huc *huc,
				     enum intel_huc_authentication_type type)
{
	struct intel_gt *gt = huc_to_gt(huc);
	int ret;

	ret = __intel_wait_for_register(gt->uncore,
					huc->status[type].reg,
					huc->status[type].mask,
					huc->status[type].value,
					2, 50, NULL);

	/* mark the load process as complete even if the wait failed */
	delayed_huc_load_complete(huc);

	if (ret) {
		huc_err(huc, "firmware not verified for %s: %pe\n",
			auth_mode_string(huc, type), ERR_PTR(ret));
		intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
		return ret;
	}

	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
	huc_info(huc, "authenticated for %s\n", auth_mode_string(huc, type));
	return 0;
}

/**
 * intel_huc_auth() - Authenticate HuC uCode
 * @huc: intel_huc structure
 * @type: authentication type (via GuC or via GSC)
 *
 * Called after HuC and GuC firmware loading during intel_uc_init_hw().
 *
 * This function invokes the GuC action to authenticate the HuC firmware,
 * passing the offset of the RSA signature to intel_guc_auth_huc(). It then
 * waits for up to 50ms for firmware verification ACK.
 */
int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type)
{
	struct intel_gt *gt = huc_to_gt(huc);
	struct intel_guc *guc = &gt->uc.guc;
	int ret;

	if (!intel_uc_fw_is_loaded(&huc->fw))
		return -ENOEXEC;

	/* GSC will do the auth with the load */
	if (intel_huc_is_loaded_by_gsc(huc))
		return -ENODEV;

	if (intel_huc_is_authenticated(huc, type))
		return -EEXIST;

	ret = i915_inject_probe_error(gt->i915, -ENXIO);
	if (ret)
		goto fail;

	switch (type) {
	case INTEL_HUC_AUTH_BY_GUC:
		ret = intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->fw.rsa_data));
		break;
	case INTEL_HUC_AUTH_BY_GSC:
		ret = intel_huc_fw_auth_via_gsccs(huc);
		break;
	default:
		MISSING_CASE(type);
		ret = -EINVAL;
	}
	if (ret)
		goto fail;

	/* Check authentication status, it should be done by now */
	ret = intel_huc_wait_for_auth_complete(huc, type);
	if (ret)
		goto fail;

	return 0;

fail:
	huc_probe_error(huc, "%s authentication failed %pe\n",
			auth_mode_string(huc, type), ERR_PTR(ret));
	return ret;
}

bool intel_huc_is_authenticated(struct intel_huc *huc,
				enum intel_huc_authentication_type type)
{
	struct intel_gt *gt = huc_to_gt(huc);
	intel_wakeref_t wakeref;
	u32 status = 0;

	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		status = intel_uncore_read(gt->uncore, huc->status[type].reg);

	return (status & huc->status[type].mask) == huc->status[type].value;
}

static bool huc_is_fully_authenticated(struct intel_huc *huc)
{
	struct intel_uc_fw *huc_fw = &huc->fw;

	if (!huc_fw->has_gsc_headers)
		return intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GUC);
	else if (intel_huc_is_loaded_by_gsc(huc) || HAS_ENGINE(huc_to_gt(huc), GSC0))
		return intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC);
	else
		return false;
}

/**
 * intel_huc_check_status() - check HuC status
 * @huc: intel_huc structure
 *
 * This function reads the status register to verify whether the HuC
 * firmware was successfully loaded.
 *
 * The return values match what is expected for the I915_PARAM_HUC_STATUS
 * getparam.
 */
int intel_huc_check_status(struct intel_huc *huc)
{
	struct intel_uc_fw *huc_fw = &huc->fw;

	switch (__intel_uc_fw_status(huc_fw)) {
	case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
		return -ENODEV;
	case INTEL_UC_FIRMWARE_DISABLED:
		return -EOPNOTSUPP;
	case INTEL_UC_FIRMWARE_MISSING:
		return -ENOPKG;
	case INTEL_UC_FIRMWARE_ERROR:
		return -ENOEXEC;
	case INTEL_UC_FIRMWARE_INIT_FAIL:
		return -ENOMEM;
	case INTEL_UC_FIRMWARE_LOAD_FAIL:
		return -EIO;
	default:
		break;
	}

	/*
	 * GSC-enabled binaries loaded via DMA are first partially
	 * authenticated by GuC and then fully authenticated by GSC
	 */
	if (huc_is_fully_authenticated(huc))
		return 1; /* full auth */
	else if (huc_fw->has_gsc_headers && !intel_huc_is_loaded_by_gsc(huc) &&
		 intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GUC))
		return 2; /* clear media only */
	else
		return 0;
}
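
/*
 * Illustrative only: a minimal userspace sketch (not part of this file) of
 * how the return values above are consumed through the I915_PARAM_HUC_STATUS
 * getparam; the negative codes are returned as ioctl errors instead of being
 * written to the value pointer:
 *
 *	int huc_status = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_HUC_STATUS,
 *		.value = &huc_status,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		use_huc = huc_status == 1 || huc_status == 2;
 */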

static bool huc_has_delayed_load(struct intel_huc *huc)
{
	return intel_huc_is_loaded_by_gsc(huc) &&
	       (huc->delayed_load.status != INTEL_HUC_DELAYED_LOAD_ERROR);
}

void intel_huc_update_auth_status(struct intel_huc *huc)
{
	if (!intel_uc_fw_is_loadable(&huc->fw))
		return;

	if (!huc->fw.has_gsc_headers)
		return;

	if (huc_is_fully_authenticated(huc))
		intel_uc_fw_change_status(&huc->fw,
					  INTEL_UC_FIRMWARE_RUNNING);
	else if (huc_has_delayed_load(huc))
		huc_delayed_load_start(huc);
}

/**
 * intel_huc_load_status - dump information about HuC load status
 * @huc: the HuC
 * @p: the &drm_printer
 *
 * Pretty printer for HuC load status.
 */
void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p)
{
	struct intel_gt *gt = huc_to_gt(huc);
	intel_wakeref_t wakeref;

	if (!intel_huc_is_supported(huc)) {
		drm_printf(p, "HuC not supported\n");
		return;
	}

	if (!intel_huc_is_wanted(huc)) {
		drm_printf(p, "HuC disabled\n");
		return;
	}

	intel_uc_fw_dump(&huc->fw, p);

	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		drm_printf(p, "HuC status: 0x%08x\n",
			   intel_uncore_read(gt->uncore, huc->status[INTEL_HUC_AUTH_BY_GUC].reg));
}