// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include <linux/string_helpers.h>

#include "gt/intel_gt.h"
#include "gt/intel_reset.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "gt/intel_rps.h"
#include "intel_uc.h"

#include "i915_drv.h"

static const struct intel_uc_ops uc_ops_off;
static const struct intel_uc_ops uc_ops_on;

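/*
 * Expand enable_guc=-1 ("auto") into an explicit per-platform default:
 * no GuC/HuC on pre-Gen12 and early Gen12 platforms, HuC loading only on
 * ADL-S (excluding RPL-S), and HuC loading plus GuC submission everywhere
 * else (minus HuC on platforms that don't use it).
 */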
static void uc_expand_default_options(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;

	if (i915->params.enable_guc != -1)
		return;

	/* Don't enable GuC/HuC on pre-Gen12 */
	if (GRAPHICS_VER(i915) < 12) {
		i915->params.enable_guc = 0;
		return;
	}

	/* Don't enable GuC/HuC on older Gen12 platforms */
	if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) {
		i915->params.enable_guc = 0;
		return;
	}

	/* Intermediate platforms are HuC authentication only */
	if (IS_ALDERLAKE_S(i915) && !IS_ADLS_RPLS(i915)) {
		i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
		return;
	}

	/* Default: enable HuC authentication and GuC submission */
	i915->params.enable_guc = ENABLE_GUC_LOAD_HUC | ENABLE_GUC_SUBMISSION;

	/* XEHPSDV and PVC do not use HuC */
	if (IS_XEHPSDV(i915) || IS_PONTEVECCHIO(i915))
		i915->params.enable_guc &= ~ENABLE_GUC_LOAD_HUC;
}

/*
 * Reset GuC providing us with fresh state for both GuC and HuC.
 */
static int __intel_uc_reset_hw(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	int ret;
	u32 guc_status;

	ret = i915_inject_probe_error(gt->i915, -ENXIO);
	if (ret)
		return ret;

	ret = intel_reset_guc(gt);
	if (ret) {
		DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
		return ret;
	}

	guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
	WARN(!(guc_status & GS_MIA_IN_RESET),
	     "GuC status: 0x%x, MIA core expected to be in reset\n",
	     guc_status);

	return ret;
}

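/*
 * Report the resolved enable_guc value and flag any requested features
 * (GuC, HuC, GuC submission) that this platform doesn't actually support,
 * as well as any undocumented flag bits.
 */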
static void __confirm_options(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;

	drm_dbg(&i915->drm,
		"enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n",
		i915->params.enable_guc,
		str_yes_no(intel_uc_wants_guc(uc)),
		str_yes_no(intel_uc_wants_guc_submission(uc)),
		str_yes_no(intel_uc_wants_huc(uc)),
		str_yes_no(intel_uc_wants_guc_slpc(uc)));

	if (i915->params.enable_guc == 0) {
		GEM_BUG_ON(intel_uc_wants_guc(uc));
		GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
		GEM_BUG_ON(intel_uc_wants_huc(uc));
		GEM_BUG_ON(intel_uc_wants_guc_slpc(uc));
		return;
	}

	if (!intel_uc_supports_guc(uc))
		drm_info(&i915->drm,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915->params.enable_guc, "GuC is not supported!");

	if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC &&
	    !intel_uc_supports_huc(uc))
		drm_info(&i915->drm,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915->params.enable_guc, "HuC is not supported!");

	if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
	    !intel_uc_supports_guc_submission(uc))
		drm_info(&i915->drm,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915->params.enable_guc, "GuC submission is N/A");

	if (i915->params.enable_guc & ~ENABLE_GUC_MASK)
		drm_info(&i915->drm,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915->params.enable_guc, "undocumented flag");
}

void intel_uc_init_early(struct intel_uc *uc)
{
	uc_expand_default_options(uc);

	intel_guc_init_early(&uc->guc);
	intel_huc_init_early(&uc->huc);

	__confirm_options(uc);

	if (intel_uc_wants_guc(uc))
		uc->ops = &uc_ops_on;
	else
		uc->ops = &uc_ops_off;
}

void intel_uc_init_late(struct intel_uc *uc)
{
	intel_guc_init_late(&uc->guc);
}

void intel_uc_driver_late_release(struct intel_uc *uc)
{
}

/**
 * intel_uc_init_mmio - setup uC MMIO access
 * @uc: the intel_uc structure
 *
 * Setup minimal state necessary for MMIO accesses later in the
 * initialization sequence.
 */
void intel_uc_init_mmio(struct intel_uc *uc)
{
	intel_guc_init_send_regs(&uc->guc);
}

static void __uc_capture_load_err_log(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (guc->log.vma && !uc->load_err_log)
		uc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
}

static void __uc_free_load_err_log(struct intel_uc *uc)
{
	struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log);

	if (log)
		i915_gem_object_put(log);
}

void intel_uc_driver_remove(struct intel_uc *uc)
{
	intel_uc_fini_hw(uc);
	intel_uc_fini(uc);
	__uc_free_load_err_log(uc);
}

/*
 * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
 * register using the same bits used in the CT message payload. Since our
 * communication channel with GuC is turned off at this point, we can save the
 * message and handle it after we turn it back on.
 */
static void guc_clear_mmio_msg(struct intel_guc *guc)
{
	intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0);
}

static void guc_get_mmio_msg(struct intel_guc *guc)
{
	u32 val;

	spin_lock_irq(&guc->irq_lock);

	val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15));
	guc->mmio_msg |= val & guc->msg_enabled_mask;

	/*
	 * Clear all events, including the ones we're not currently servicing,
	 * to make sure we don't try to process a stale message if we enable
	 * handling of more events later.
	 */
	guc_clear_mmio_msg(guc);

	spin_unlock_irq(&guc->irq_lock);
}

static void guc_handle_mmio_msg(struct intel_guc *guc)
{
	/* we need communication to be enabled to reply to GuC */
	GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct));

	spin_lock_irq(&guc->irq_lock);
	if (guc->mmio_msg) {
		intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
		guc->mmio_msg = 0;
	}
	spin_unlock_irq(&guc->irq_lock);
}

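/*
 * Bring up the CT (command transport) channel to the GuC: enable the CT
 * buffers, drain any events logged via SOFT_SCRATCH(15) while CTs were down,
 * then enable interrupts and process any CT messages that arrived before
 * interrupt delivery was switched on.
 */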
static int guc_enable_communication(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;
	int ret;

	GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct));

	ret = i915_inject_probe_error(i915, -ENXIO);
	if (ret)
		return ret;

	ret = intel_guc_ct_enable(&guc->ct);
	if (ret)
		return ret;

	/* check for mmio messages received before/during the CT enable */
	guc_get_mmio_msg(guc);
	guc_handle_mmio_msg(guc);

	intel_guc_enable_interrupts(guc);

	/* check for CT messages received before we enabled interrupts */
	spin_lock_irq(gt->irq_lock);
	intel_guc_ct_event_handler(&guc->ct);
	spin_unlock_irq(gt->irq_lock);

	drm_dbg(&i915->drm, "GuC communication enabled\n");

	return 0;
}

static void guc_disable_communication(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	/*
	 * Events generated during or after CT disable are logged by GuC via
	 * mmio. Make sure the register is clear before disabling CT since
	 * all events we cared about have already been processed via CT.
	 */
	guc_clear_mmio_msg(guc);

	intel_guc_disable_interrupts(guc);

	intel_guc_ct_disable(&guc->ct);

	/*
	 * Check for messages received during/after the CT disable. We do not
	 * expect any messages to have arrived via CT between the interrupt
	 * disable and the CT disable because GuC should've been idle until we
	 * triggered the CT disable protocol.
	 */
	guc_get_mmio_msg(guc);

	drm_dbg(&i915->drm, "GuC communication disabled\n");
}

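/*
 * Fetch the GuC firmware and, if HuC usage is wanted, the HuC firmware too.
 * On a GuC fetch error the HuC firmware is marked as errored as well, so
 * that it does not remain in the transient "SELECTED" state.
 */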
static void __uc_fetch_firmwares(struct intel_uc *uc)
{
	int err;

	GEM_BUG_ON(!intel_uc_wants_guc(uc));

	err = intel_uc_fw_fetch(&uc->guc.fw);
	if (err) {
		/* Make sure we transition out of transient "SELECTED" state */
		if (intel_uc_wants_huc(uc)) {
			drm_dbg(&uc_to_gt(uc)->i915->drm,
				"Failed to fetch GuC: %d, disabling HuC\n", err);
			intel_uc_fw_change_status(&uc->huc.fw,
						  INTEL_UC_FIRMWARE_ERROR);
		}

		return;
	}

	if (intel_uc_wants_huc(uc))
		intel_uc_fw_fetch(&uc->huc.fw);
}

static void __uc_cleanup_firmwares(struct intel_uc *uc)
{
	intel_uc_fw_cleanup_fetch(&uc->huc.fw);
	intel_uc_fw_cleanup_fetch(&uc->guc.fw);
}

static int __uc_init(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret;

	GEM_BUG_ON(!intel_uc_wants_guc(uc));

	if (!intel_uc_uses_guc(uc))
		return 0;

	if (i915_inject_probe_failure(uc_to_gt(uc)->i915))
		return -ENOMEM;

	ret = intel_guc_init(guc);
	if (ret)
		return ret;

	if (intel_uc_uses_huc(uc))
		intel_huc_init(huc);

	return 0;
}

static void __uc_fini(struct intel_uc *uc)
{
	intel_huc_fini(&uc->huc);
	intel_guc_fini(&uc->guc);
}

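/*
 * Sanitize the software state of both uCs and reset the GuC hardware, giving
 * any subsequent (re)load a clean starting point.
 */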
static int __uc_sanitize(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;

	GEM_BUG_ON(!intel_uc_supports_guc(uc));

	intel_huc_sanitize(huc);
	intel_guc_sanitize(guc);

	return __intel_uc_reset_hw(uc);
}

/* Initialize and verify the uC regs related to uC positioning in WOPCM */
static int uc_init_wopcm(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct intel_uncore *uncore = gt->uncore;
	u32 base = intel_wopcm_guc_base(&gt->wopcm);
	u32 size = intel_wopcm_guc_size(&gt->wopcm);
	u32 huc_agent = intel_uc_uses_huc(uc) ? HUC_LOADING_AGENT_GUC : 0;
	u32 mask;
	int err;

	if (unlikely(!base || !size)) {
		i915_probe_error(gt->i915, "Unsuccessful WOPCM partitioning\n");
		return -E2BIG;
	}

	GEM_BUG_ON(!intel_uc_supports_guc(uc));
	GEM_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK));
	GEM_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK);
	GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK));
	GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK);

	err = i915_inject_probe_error(gt->i915, -ENXIO);
	if (err)
		return err;

	mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED;
	err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE, size, mask,
					    size | GUC_WOPCM_SIZE_LOCKED);
	if (err)
		goto err_out;

	mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
	err = intel_uncore_write_and_verify(uncore, DMA_GUC_WOPCM_OFFSET,
					    base | huc_agent, mask,
					    base | huc_agent |
					    GUC_WOPCM_OFFSET_VALID);
	if (err)
		goto err_out;

	return 0;

err_out:
	i915_probe_error(gt->i915, "Failed to init uC WOPCM registers!\n");
	i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
			 i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET),
			 intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
	i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
			 i915_mmio_reg_offset(GUC_WOPCM_SIZE),
			 intel_uncore_read(uncore, GUC_WOPCM_SIZE));

	return err;
}

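/*
 * Check whether the WOPCM registers have already been locked/marked valid,
 * which indicates that a GuC has been set up before (e.g. by a previous
 * driver instance) and the WOPCM layout can no longer be changed.
 */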
static bool uc_is_wopcm_locked(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct intel_uncore *uncore = gt->uncore;

	return (intel_uncore_read(uncore, GUC_WOPCM_SIZE) & GUC_WOPCM_SIZE_LOCKED) ||
	       (intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID);
}

static int __uc_check_hw(struct intel_uc *uc)
{
	if (!intel_uc_supports_guc(uc))
		return 0;

	/*
	 * We can silently continue without GuC only if it was never enabled
	 * before on this system after reboot, otherwise we risk GPU hangs.
	 * To check if GuC was loaded before, we look at the WOPCM registers.
	 */
	if (uc_is_wopcm_locked(uc))
		return -EIO;

	return 0;
}

static void print_fw_ver(struct intel_uc *uc, struct intel_uc_fw *fw)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;

	drm_info(&i915->drm, "%s firmware %s version %u.%u.%u\n",
		 intel_uc_fw_type_repr(fw->type), fw->file_selected.path,
		 fw->file_selected.major_ver,
		 fw->file_selected.minor_ver,
		 fw->file_selected.patch_ver);
}

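/*
 * Full uC bring-up: partition the WOPCM, upload the HuC and GuC firmwares
 * (with retries on Gen9, see the Wa comments below), enable GuC
 * communication, authenticate the HuC, and finally enable GuC submission
 * and SLPC when they are in use. On failure we either continue without GuC
 * or return -EIO, depending on whether GuC operation was strictly required.
 */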
static int __uc_init_hw(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret, attempts;

	GEM_BUG_ON(!intel_uc_supports_guc(uc));
	GEM_BUG_ON(!intel_uc_wants_guc(uc));

	print_fw_ver(uc, &guc->fw);

	if (intel_uc_uses_huc(uc))
		print_fw_ver(uc, &huc->fw);

	if (!intel_uc_fw_is_loadable(&guc->fw)) {
		ret = __uc_check_hw(uc) ||
		      intel_uc_fw_is_overridden(&guc->fw) ||
		      intel_uc_wants_guc_submission(uc) ?
		      intel_uc_fw_status_to_error(guc->fw.status) : 0;
		goto err_out;
	}

	ret = uc_init_wopcm(uc);
	if (ret)
		goto err_out;

	intel_guc_reset_interrupts(guc);

	/* WaEnableuKernelHeaderValidFix:skl */
	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
	if (GRAPHICS_VER(i915) == 9)
		attempts = 3;
	else
		attempts = 1;

	intel_rps_raise_unslice(&uc_to_gt(uc)->rps);

	while (attempts--) {
		/*
		 * Always reset the GuC just before (re)loading, so
		 * that the state and timing are fairly predictable
		 */
		ret = __uc_sanitize(uc);
		if (ret)
			goto err_out;

		intel_huc_fw_upload(huc);
		intel_guc_ads_reset(guc);
		intel_guc_write_params(guc);
		ret = intel_guc_fw_upload(guc);
		if (ret == 0)
			break;

		DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
				 "retry %d more time(s)\n", ret, attempts);
	}

	/* Did we succeed or run out of retries? */
	if (ret)
		goto err_log_capture;

	ret = guc_enable_communication(guc);
	if (ret)
		goto err_log_capture;

	/*
	 * A GSC-loaded HuC is authenticated by the GSC, so we don't need to
	 * trigger the auth here. However, given that a HuC loaded this way
	 * survives GT reset, we still need to update our SW bookkeeping to
	 * make sure it reflects the correct HW status.
	 */
	if (intel_huc_is_loaded_by_gsc(huc))
		intel_huc_update_auth_status(huc);
	else
		intel_huc_auth(huc);

	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_enable(guc);

	if (intel_uc_uses_guc_slpc(uc)) {
		ret = intel_guc_slpc_enable(&guc->slpc);
		if (ret)
			goto err_submission;
	} else {
		/* Restore GT back to RPn for non-SLPC path */
		intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
	}

	drm_info(&i915->drm, "GuC submission %s\n",
		 str_enabled_disabled(intel_uc_uses_guc_submission(uc)));
	drm_info(&i915->drm, "GuC SLPC %s\n",
		 str_enabled_disabled(intel_uc_uses_guc_slpc(uc)));

	return 0;

	/*
	 * We've failed to load the firmware :(
	 */
err_submission:
	intel_guc_submission_disable(guc);
err_log_capture:
	__uc_capture_load_err_log(uc);
err_out:
	/* Return GT back to RPn */
	intel_rps_lower_unslice(&uc_to_gt(uc)->rps);

	__uc_sanitize(uc);

	if (!ret) {
		drm_notice(&i915->drm, "GuC is uninitialized\n");
		/* We want to run without GuC submission */
		return 0;
	}

	i915_probe_error(i915, "GuC initialization failed %d\n", ret);

	/* We want to keep KMS alive */
	return -EIO;
}

static void __uc_fini_hw(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_guc_is_fw_running(guc))
		return;

	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_disable(guc);

	__uc_sanitize(uc);
}

/**
 * intel_uc_reset_prepare - Prepare for reset
 * @uc: the intel_uc structure
 *
 * Preparing for full GPU reset.
 */
void intel_uc_reset_prepare(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	uc->reset_in_progress = true;

	/* Nothing to do if GuC isn't supported */
	if (!intel_uc_supports_guc(uc))
		return;

	/* Firmware expected to be running when this function is called */
	if (!intel_guc_is_ready(guc))
		goto sanitize;

	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_reset_prepare(guc);

sanitize:
	__uc_sanitize(uc);
}

void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled)
{
	struct intel_guc *guc = &uc->guc;

	/* Firmware cannot be running when this function is called */
	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_reset(guc, stalled);
}

void intel_uc_reset_finish(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	uc->reset_in_progress = false;

	/* Firmware expected to be running when this function is called */
	if (intel_guc_is_fw_running(guc) && intel_uc_uses_guc_submission(uc))
		intel_guc_submission_reset_finish(guc);
}

void intel_uc_cancel_requests(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	/* Firmware cannot be running when this function is called */
	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_cancel_requests(guc);
}

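/*
 * Tear down GuC communication for runtime suspend, first giving any
 * outstanding G2H CTB messages a short window to complete, since they can no
 * longer be processed once communication is disabled.
 */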
void intel_uc_runtime_suspend(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_guc_is_ready(guc)) {
		guc->interrupts.enabled = false;
		return;
	}

	/*
	 * Wait for any outstanding CTB before tearing down communication with
	 * the GuC.
	 */
#define OUTSTANDING_CTB_TIMEOUT_PERIOD	(HZ / 5)
	intel_guc_wait_for_pending_msg(guc, &guc->outstanding_submission_g2h,
				       false, OUTSTANDING_CTB_TIMEOUT_PERIOD);
	GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h));

	guc_disable_communication(guc);
}

void intel_uc_suspend(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	intel_wakeref_t wakeref;
	int err;

	if (!intel_guc_is_ready(guc)) {
		guc->interrupts.enabled = false;
		return;
	}

	with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) {
		err = intel_guc_suspend(guc);
		if (err)
			DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err);
	}
}

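/*
 * Common resume path. Communication is only re-enabled for runtime resume;
 * a full S3/S4 resume reloads the firmware and re-enables communication as
 * part of that. The ARAT timer interrupt used by SLPC is restored here for
 * the runtime case, then the GuC itself is asked to resume.
 */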
static int __uc_resume(struct intel_uc *uc, bool enable_communication)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_gt *gt = guc_to_gt(guc);
	int err;

	if (!intel_guc_is_fw_running(guc))
		return 0;

	/* Make sure we enable communication if and only if it's disabled */
	GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct));

	if (enable_communication)
		guc_enable_communication(guc);

	/*
	 * If we are only resuming GuC communication but not reloading
	 * GuC, we need to ensure the ARAT timer interrupt is enabled
	 * again. In case of GuC reload, it is enabled during SLPC enable.
	 */
	if (enable_communication && intel_uc_uses_guc_slpc(uc))
		intel_guc_pm_intrmsk_enable(gt);

	err = intel_guc_resume(guc);
	if (err) {
		DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err);
		return err;
	}

	return 0;
}

int intel_uc_resume(struct intel_uc *uc)
{
	/*
	 * When coming out of S3/S4 we sanitize and re-init the HW, so
	 * communication is already re-enabled at this point.
	 */
	return __uc_resume(uc, false);
}

int intel_uc_runtime_resume(struct intel_uc *uc)
{
	/*
	 * During runtime resume we don't sanitize, so we need to re-init
	 * communication as well.
	 */
	return __uc_resume(uc, true);
}

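/*
 * intel_uc_init_early() installs one of these two ops tables depending on
 * whether GuC is wanted: uc_ops_off merely checks the hardware for stale GuC
 * state and cleans up the early init, while uc_ops_on implements the full
 * firmware fetch/init/load flow above.
 */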
static const struct intel_uc_ops uc_ops_off = {
	.init_hw = __uc_check_hw,
	.fini = __uc_fini, /* to clean up the init_early initialization */
};

static const struct intel_uc_ops uc_ops_on = {
	.sanitize = __uc_sanitize,

	.init_fw = __uc_fetch_firmwares,
	.fini_fw = __uc_cleanup_firmwares,

	.init = __uc_init,
	.fini = __uc_fini,

	.init_hw = __uc_init_hw,
	.fini_hw = __uc_fini_hw,
};