   1// SPDX-License-Identifier: MIT
   2/*
   3 * Copyright © 2021-2022 Intel Corporation
   4 */
   5
   6#include <linux/types.h>
   7
   8#include <drm/drm_print.h>
   9
  10#include "gt/intel_engine_regs.h"
  11#include "gt/intel_gt.h"
  12#include "gt/intel_gt_mcr.h"
  13#include "gt/intel_gt_regs.h"
  14#include "gt/intel_lrc.h"
  15#include "guc_capture_fwif.h"
  16#include "intel_guc_capture.h"
  17#include "intel_guc_fwif.h"
  18#include "i915_drv.h"
  19#include "i915_gpu_error.h"
  20#include "i915_irq.h"
  21#include "i915_memcpy.h"
  22#include "i915_reg.h"
  23
  24/*
  25 * Define all device tables of GuC error capture register lists
  26 * NOTE: For engine-registers, GuC only needs the register offsets
  27 *       from the engine-mmio-base
  28 */
  29#define COMMON_BASE_GLOBAL \
  30	{ FORCEWAKE_MT,             0,      0, "FORCEWAKE" }
  31
  32#define COMMON_GEN9BASE_GLOBAL \
  33	{ GEN8_FAULT_TLB_DATA0,     0,      0, "GEN8_FAULT_TLB_DATA0" }, \
  34	{ GEN8_FAULT_TLB_DATA1,     0,      0, "GEN8_FAULT_TLB_DATA1" }, \
  35	{ ERROR_GEN6,               0,      0, "ERROR_GEN6" }, \
  36	{ DONE_REG,                 0,      0, "DONE_REG" }, \
  37	{ HSW_GTT_CACHE_EN,         0,      0, "HSW_GTT_CACHE_EN" }
  38
  39#define COMMON_GEN12BASE_GLOBAL \
  40	{ GEN12_FAULT_TLB_DATA0,    0,      0, "GEN12_FAULT_TLB_DATA0" }, \
  41	{ GEN12_FAULT_TLB_DATA1,    0,      0, "GEN12_FAULT_TLB_DATA1" }, \
  42	{ GEN12_AUX_ERR_DBG,        0,      0, "AUX_ERR_DBG" }, \
  43	{ GEN12_GAM_DONE,           0,      0, "GAM_DONE" }, \
  44	{ GEN12_RING_FAULT_REG,     0,      0, "FAULT_REG" }
  45
  46#define COMMON_BASE_ENGINE_INSTANCE \
  47	{ RING_PSMI_CTL(0),         0,      0, "RC PSMI" }, \
  48	{ RING_ESR(0),              0,      0, "ESR" }, \
  49	{ RING_DMA_FADD(0),         0,      0, "RING_DMA_FADD_LDW" }, \
  50	{ RING_DMA_FADD_UDW(0),     0,      0, "RING_DMA_FADD_UDW" }, \
  51	{ RING_IPEIR(0),            0,      0, "IPEIR" }, \
  52	{ RING_IPEHR(0),            0,      0, "IPEHR" }, \
  53	{ RING_INSTPS(0),           0,      0, "INSTPS" }, \
  54	{ RING_BBADDR(0),           0,      0, "RING_BBADDR_LOW32" }, \
  55	{ RING_BBADDR_UDW(0),       0,      0, "RING_BBADDR_UP32" }, \
  56	{ RING_BBSTATE(0),          0,      0, "BB_STATE" }, \
  57	{ CCID(0),                  0,      0, "CCID" }, \
  58	{ RING_ACTHD(0),            0,      0, "ACTHD_LDW" }, \
  59	{ RING_ACTHD_UDW(0),        0,      0, "ACTHD_UDW" }, \
  60	{ RING_INSTPM(0),           0,      0, "INSTPM" }, \
  61	{ RING_INSTDONE(0),         0,      0, "INSTDONE" }, \
  62	{ RING_NOPID(0),            0,      0, "RING_NOPID" }, \
  63	{ RING_START(0),            0,      0, "START" }, \
  64	{ RING_HEAD(0),             0,      0, "HEAD" }, \
  65	{ RING_TAIL(0),             0,      0, "TAIL" }, \
  66	{ RING_CTL(0),              0,      0, "CTL" }, \
  67	{ RING_MI_MODE(0),          0,      0, "MODE" }, \
  68	{ RING_CONTEXT_CONTROL(0),  0,      0, "RING_CONTEXT_CONTROL" }, \
  69	{ RING_HWS_PGA(0),          0,      0, "HWS" }, \
  70	{ RING_MODE_GEN7(0),        0,      0, "GFX_MODE" }, \
  71	{ GEN8_RING_PDP_LDW(0, 0),  0,      0, "PDP0_LDW" }, \
  72	{ GEN8_RING_PDP_UDW(0, 0),  0,      0, "PDP0_UDW" }, \
  73	{ GEN8_RING_PDP_LDW(0, 1),  0,      0, "PDP1_LDW" }, \
  74	{ GEN8_RING_PDP_UDW(0, 1),  0,      0, "PDP1_UDW" }, \
  75	{ GEN8_RING_PDP_LDW(0, 2),  0,      0, "PDP2_LDW" }, \
  76	{ GEN8_RING_PDP_UDW(0, 2),  0,      0, "PDP2_UDW" }, \
  77	{ GEN8_RING_PDP_LDW(0, 3),  0,      0, "PDP3_LDW" }, \
  78	{ GEN8_RING_PDP_UDW(0, 3),  0,      0, "PDP3_UDW" }
  79
  80#define COMMON_BASE_HAS_EU \
  81	{ EIR,                      0,      0, "EIR" }
  82
  83#define COMMON_BASE_RENDER \
  84	{ GEN7_SC_INSTDONE,         0,      0, "GEN7_SC_INSTDONE" }
  85
  86#define COMMON_GEN12BASE_RENDER \
  87	{ GEN12_SC_INSTDONE_EXTRA,  0,      0, "GEN12_SC_INSTDONE_EXTRA" }, \
  88	{ GEN12_SC_INSTDONE_EXTRA2, 0,      0, "GEN12_SC_INSTDONE_EXTRA2" }
  89
  90#define COMMON_GEN12BASE_VEC \
  91	{ GEN12_SFC_DONE(0),        0,      0, "SFC_DONE[0]" }, \
  92	{ GEN12_SFC_DONE(1),        0,      0, "SFC_DONE[1]" }, \
  93	{ GEN12_SFC_DONE(2),        0,      0, "SFC_DONE[2]" }, \
  94	{ GEN12_SFC_DONE(3),        0,      0, "SFC_DONE[3]" }
  95
  96/* XE_LPD - Global */
  97static const struct __guc_mmio_reg_descr xe_lpd_global_regs[] = {
  98	COMMON_BASE_GLOBAL,
  99	COMMON_GEN9BASE_GLOBAL,
 100	COMMON_GEN12BASE_GLOBAL,
 101};
 102
 103/* XE_LPD - Render / Compute Per-Class */
 104static const struct __guc_mmio_reg_descr xe_lpd_rc_class_regs[] = {
 105	COMMON_BASE_HAS_EU,
 106	COMMON_BASE_RENDER,
 107	COMMON_GEN12BASE_RENDER,
 108};
 109
 110/* GEN9/XE_LPD - Render / Compute Per-Engine-Instance */
 111static const struct __guc_mmio_reg_descr xe_lpd_rc_inst_regs[] = {
 112	COMMON_BASE_ENGINE_INSTANCE,
 113};
 114
 115/* GEN9/XE_LPD - Media Decode/Encode Per-Engine-Instance */
 116static const struct __guc_mmio_reg_descr xe_lpd_vd_inst_regs[] = {
 117	COMMON_BASE_ENGINE_INSTANCE,
 118};
 119
 120/* XE_LPD - Video Enhancement Per-Class */
 121static const struct __guc_mmio_reg_descr xe_lpd_vec_class_regs[] = {
 122	COMMON_GEN12BASE_VEC,
 123};
 124
 125/* GEN9/XE_LPD - Video Enhancement Per-Engine-Instance */
 126static const struct __guc_mmio_reg_descr xe_lpd_vec_inst_regs[] = {
 127	COMMON_BASE_ENGINE_INSTANCE,
 128};
 129
 130/* GEN9/XE_LPD - Blitter Per-Engine-Instance */
 131static const struct __guc_mmio_reg_descr xe_lpd_blt_inst_regs[] = {
 132	COMMON_BASE_ENGINE_INSTANCE,
 133};
 134
 135/* XE_LPD - GSC Per-Engine-Instance */
 136static const struct __guc_mmio_reg_descr xe_lpd_gsc_inst_regs[] = {
 137	COMMON_BASE_ENGINE_INSTANCE,
 138};
 139
 140/* GEN9 - Global */
 141static const struct __guc_mmio_reg_descr default_global_regs[] = {
 142	COMMON_BASE_GLOBAL,
 143	COMMON_GEN9BASE_GLOBAL,
 144};
 145
 146static const struct __guc_mmio_reg_descr default_rc_class_regs[] = {
 147	COMMON_BASE_HAS_EU,
 148	COMMON_BASE_RENDER,
 149};
 150
 151/*
 152 * Empty lists:
 153 * GEN9/XE_LPD - Blitter Per-Class
 154 * GEN9/XE_LPD - Media Decode/Encode Per-Class
 155 * GEN9 - VEC Class
 156 */
 157static const struct __guc_mmio_reg_descr empty_regs_list[] = {
 158};
 159
 160#define TO_GCAP_DEF_OWNER(x) (GUC_CAPTURE_LIST_INDEX_##x)
 161#define TO_GCAP_DEF_TYPE(x) (GUC_CAPTURE_LIST_TYPE_##x)
 162#define MAKE_REGLIST(regslist, regsowner, regstype, class) \
 163	{ \
 164		regslist, \
 165		ARRAY_SIZE(regslist), \
 166		TO_GCAP_DEF_OWNER(regsowner), \
 167		TO_GCAP_DEF_TYPE(regstype), \
 168		class, \
 169		NULL, \
 170	}
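/*
 * For example, MAKE_REGLIST(default_global_regs, PF, GLOBAL, 0) expands
 * mechanically (per the macro above) to the initializer:
 *
 *	{
 *		default_global_regs,
 *		ARRAY_SIZE(default_global_regs),
 *		GUC_CAPTURE_LIST_INDEX_PF,
 *		GUC_CAPTURE_LIST_TYPE_GLOBAL,
 *		0,
 *		NULL,
 *	}
 */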
 171
 172/* List of lists */
 173static const struct __guc_mmio_reg_descr_group default_lists[] = {
 174	MAKE_REGLIST(default_global_regs, PF, GLOBAL, 0),
 175	MAKE_REGLIST(default_rc_class_regs, PF, ENGINE_CLASS, GUC_RENDER_CLASS),
 176	MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_RENDER_CLASS),
 177	MAKE_REGLIST(default_rc_class_regs, PF, ENGINE_CLASS, GUC_COMPUTE_CLASS),
 178	MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_COMPUTE_CLASS),
 179	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_VIDEO_CLASS),
 180	MAKE_REGLIST(xe_lpd_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEO_CLASS),
 181	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_VIDEOENHANCE_CLASS),
 182	MAKE_REGLIST(xe_lpd_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEOENHANCE_CLASS),
 183	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_BLITTER_CLASS),
 184	MAKE_REGLIST(xe_lpd_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_BLITTER_CLASS),
 185	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_GSC_OTHER_CLASS),
 186	MAKE_REGLIST(xe_lpd_gsc_inst_regs, PF, ENGINE_INSTANCE, GUC_GSC_OTHER_CLASS),
 187	{}
 188};
 189
 190static const struct __guc_mmio_reg_descr_group xe_lpd_lists[] = {
 191	MAKE_REGLIST(xe_lpd_global_regs, PF, GLOBAL, 0),
 192	MAKE_REGLIST(xe_lpd_rc_class_regs, PF, ENGINE_CLASS, GUC_RENDER_CLASS),
 193	MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_RENDER_CLASS),
 194	MAKE_REGLIST(xe_lpd_rc_class_regs, PF, ENGINE_CLASS, GUC_COMPUTE_CLASS),
 195	MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_COMPUTE_CLASS),
 196	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_VIDEO_CLASS),
 197	MAKE_REGLIST(xe_lpd_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEO_CLASS),
 198	MAKE_REGLIST(xe_lpd_vec_class_regs, PF, ENGINE_CLASS, GUC_VIDEOENHANCE_CLASS),
 199	MAKE_REGLIST(xe_lpd_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEOENHANCE_CLASS),
 200	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_BLITTER_CLASS),
 201	MAKE_REGLIST(xe_lpd_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_BLITTER_CLASS),
 202	MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_GSC_OTHER_CLASS),
 203	MAKE_REGLIST(xe_lpd_gsc_inst_regs, PF, ENGINE_INSTANCE, GUC_GSC_OTHER_CLASS),
 204	{}
 205};
 206
 207static const struct __guc_mmio_reg_descr_group *
 208guc_capture_get_one_list(const struct __guc_mmio_reg_descr_group *reglists,
 209			 u32 owner, u32 type, u32 id)
 210{
 211	int i;
 212
 213	if (!reglists)
 214		return NULL;
 215
 216	for (i = 0; reglists[i].list; ++i) {
 217		if (reglists[i].owner == owner && reglists[i].type == type &&
 218		    (reglists[i].engine == id || reglists[i].type == GUC_CAPTURE_LIST_TYPE_GLOBAL))
 219			return &reglists[i];
 220	}
 221
 222	return NULL;
 223}
 224
 225static struct __guc_mmio_reg_descr_group *
 226guc_capture_get_one_ext_list(struct __guc_mmio_reg_descr_group *reglists,
 227			     u32 owner, u32 type, u32 id)
 228{
 229	int i;
 230
 231	if (!reglists)
 232		return NULL;
 233
 234	for (i = 0; reglists[i].extlist; ++i) {
 235		if (reglists[i].owner == owner && reglists[i].type == type &&
 236		    (reglists[i].engine == id || reglists[i].type == GUC_CAPTURE_LIST_TYPE_GLOBAL))
 237			return &reglists[i];
 238	}
 239
 240	return NULL;
 241}
 242
 243static void guc_capture_free_extlists(struct __guc_mmio_reg_descr_group *reglists)
 244{
 245	int i = 0;
 246
 247	if (!reglists)
 248		return;
 249
 250	while (reglists[i].extlist)
 251		kfree(reglists[i++].extlist);
 252}
 253
 254struct __ext_steer_reg {
 255	const char *name;
 256	i915_mcr_reg_t reg;
 257};
 258
 259static const struct __ext_steer_reg xe_extregs[] = {
 260	{"GEN8_SAMPLER_INSTDONE", GEN8_SAMPLER_INSTDONE},
 261	{"GEN8_ROW_INSTDONE", GEN8_ROW_INSTDONE}
 262};
 263
 264static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext,
 265			   const struct __ext_steer_reg *extlist,
 266			   int slice_id, int subslice_id)
 267{
 268	ext->reg = _MMIO(i915_mmio_reg_offset(extlist->reg));
 269	ext->flags = FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id);
 270	ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id);
 271	ext->regname = extlist->name;
 272}
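/*
 * Illustrative example: for slice_id = 1 and subslice_id = 2, the helper
 * above produces
 *
 *	flags = FIELD_PREP(GUC_REGSET_STEERING_GROUP, 1) |
 *		FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, 2);
 *
 * i.e. the steering target is packed into the descriptor's flags so GuC
 * can steer the multicast register read before capturing its value.
 */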
 273
 274static int
 275__alloc_ext_regs(struct __guc_mmio_reg_descr_group *newlist,
 276		 const struct __guc_mmio_reg_descr_group *rootlist, int num_regs)
 277{
 278	struct __guc_mmio_reg_descr *list;
 279
 280	list = kcalloc(num_regs, sizeof(struct __guc_mmio_reg_descr), GFP_KERNEL);
 281	if (!list)
 282		return -ENOMEM;
 283
 284	newlist->extlist = list;
 285	newlist->num_regs = num_regs;
 286	newlist->owner = rootlist->owner;
 287	newlist->engine = rootlist->engine;
 288	newlist->type = rootlist->type;
 289
 290	return 0;
 291}
 292
 293static void
 294guc_capture_alloc_steered_lists_xe_lpd(struct intel_guc *guc,
 295				       const struct __guc_mmio_reg_descr_group *lists)
 296{
 297	struct intel_gt *gt = guc_to_gt(guc);
 298	int slice, subslice, iter, i, num_steer_regs, num_tot_regs = 0;
 299	const struct __guc_mmio_reg_descr_group *list;
 300	struct __guc_mmio_reg_descr_group *extlists;
 301	struct __guc_mmio_reg_descr *extarray;
 302	struct sseu_dev_info *sseu;
 303
 304	/* In XE_LPD we only have steered registers for the render-class */
 305	list = guc_capture_get_one_list(lists, GUC_CAPTURE_LIST_INDEX_PF,
 306					GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS, GUC_RENDER_CLASS);
 307	/* skip if extlists was previously allocated */
 308	if (!list || guc->capture->extlists)
 309		return;
 310
 311	num_steer_regs = ARRAY_SIZE(xe_extregs);
 312
 313	sseu = &gt->info.sseu;
 314	for_each_ss_steering(iter, gt, slice, subslice)
 315		num_tot_regs += num_steer_regs;
 316
 317	if (!num_tot_regs)
 318		return;
 319
 320	/* allocate an extra for an end marker */
 321	extlists = kcalloc(2, sizeof(struct __guc_mmio_reg_descr_group), GFP_KERNEL);
 322	if (!extlists)
 323		return;
 324
 325	if (__alloc_ext_regs(&extlists[0], list, num_tot_regs)) {
 326		kfree(extlists);
 327		return;
 328	}
 329
 330	extarray = extlists[0].extlist;
 331	for_each_ss_steering(iter, gt, slice, subslice) {
 332		for (i = 0; i < num_steer_regs; ++i) {
 333			__fill_ext_reg(extarray, &xe_extregs[i], slice, subslice);
 334			++extarray;
 335		}
 336	}
 337
 338	guc->capture->extlists = extlists;
 339}
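/*
 * Back-of-envelope for the allocation above (illustrative): with
 * ARRAY_SIZE(xe_extregs) == 2 and N slice/subslice combinations walked by
 * for_each_ss_steering(), num_tot_regs == 2 * N descriptors in a single
 * extension group, with the second (zeroed) group acting as the end marker.
 */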
 340
 341static const struct __ext_steer_reg xehpg_extregs[] = {
 342	{"XEHPG_INSTDONE_GEOM_SVG", XEHPG_INSTDONE_GEOM_SVG}
 343};
 344
 345static bool __has_xehpg_extregs(u32 ipver)
 346{
 347	return (ipver >= IP_VER(12, 55));
 348}
 349
 350static void
 351guc_capture_alloc_steered_lists_xe_hpg(struct intel_guc *guc,
 352				       const struct __guc_mmio_reg_descr_group *lists,
 353				       u32 ipver)
 354{
 355	struct intel_gt *gt = guc_to_gt(guc);
 356	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
 357	struct sseu_dev_info *sseu;
 358	int slice, subslice, i, iter, num_steer_regs, num_tot_regs = 0;
 359	const struct __guc_mmio_reg_descr_group *list;
 360	struct __guc_mmio_reg_descr_group *extlists;
 361	struct __guc_mmio_reg_descr *extarray;
 362
 363	/* In XE_LP / HPG we only have render-class steering registers during error-capture */
 364	list = guc_capture_get_one_list(lists, GUC_CAPTURE_LIST_INDEX_PF,
 365					GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS, GUC_RENDER_CLASS);
 366	/* skip if extlists was previously allocated */
 367	if (!list || guc->capture->extlists)
 368		return;
 369
 370	num_steer_regs = ARRAY_SIZE(xe_extregs);
 371	if (__has_xehpg_extregs(ipver))
 372		num_steer_regs += ARRAY_SIZE(xehpg_extregs);
 373
 374	sseu = &gt->info.sseu;
 375	for_each_ss_steering(iter, gt, slice, subslice)
 376		num_tot_regs += num_steer_regs;
 377
 378	if (!num_tot_regs)
 379		return;
 380
 381	/* allocate an extra for an end marker */
 382	extlists = kcalloc(2, sizeof(struct __guc_mmio_reg_descr_group), GFP_KERNEL);
 383	if (!extlists)
 384		return;
 385
 386	if (__alloc_ext_regs(&extlists[0], list, num_tot_regs)) {
 387		kfree(extlists);
 388		return;
 389	}
 390
 391	extarray = extlists[0].extlist;
 392	for_each_ss_steering(iter, gt, slice, subslice) {
 393		for (i = 0; i < ARRAY_SIZE(xe_extregs); ++i) {
 394			__fill_ext_reg(extarray, &xe_extregs[i], slice, subslice);
 395			++extarray;
 396		}
 397		if (__has_xehpg_extregs(ipver)) {
 398			for (i = 0; i < ARRAY_SIZE(xehpg_extregs); ++i) {
 399				__fill_ext_reg(extarray, &xehpg_extregs[i], slice, subslice);
 400				++extarray;
 401			}
 402		}
 403	}
 404
 405	drm_dbg(&i915->drm, "GuC-capture found %d ext-regs.\n", num_tot_regs);
 406	guc->capture->extlists = extlists;
 407}
 408
 409static const struct __guc_mmio_reg_descr_group *
 410guc_capture_get_device_reglist(struct intel_guc *guc)
 411{
 412	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
 413
 414	if (GRAPHICS_VER(i915) > 11) {
 415		/*
 416		 * For certain engine classes, there are slice and subslice
 417		 * level registers requiring steering. We allocate and populate
 418		 * these at init time based on hw config and add them as an extension
 419		 * list at the end of the pre-populated render list.
 420		 */
 421		if (IS_DG2(i915))
 422			guc_capture_alloc_steered_lists_xe_hpg(guc, xe_lpd_lists, IP_VER(12, 55));
 423		else if (IS_XEHPSDV(i915))
 424			guc_capture_alloc_steered_lists_xe_hpg(guc, xe_lpd_lists, IP_VER(12, 50));
 425		else
 426			guc_capture_alloc_steered_lists_xe_lpd(guc, xe_lpd_lists);
 427
 428		return xe_lpd_lists;
 429	}
 430
 431	/* if GuC submission is enabled on a non-POR platform, just use a common baseline */
 432	return default_lists;
 433}
 434
 435static const char *
 436__stringify_type(u32 type)
 437{
 438	switch (type) {
 439	case GUC_CAPTURE_LIST_TYPE_GLOBAL:
 440		return "Global";
 441	case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
 442		return "Class";
 443	case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
 444		return "Instance";
 445	default:
 446		break;
 447	}
 448
 449	return "unknown";
 450}
 451
 452static const char *
 453__stringify_engclass(u32 class)
 454{
 455	switch (class) {
 456	case GUC_RENDER_CLASS:
 457		return "Render";
 458	case GUC_VIDEO_CLASS:
 459		return "Video";
 460	case GUC_VIDEOENHANCE_CLASS:
 461		return "VideoEnhance";
 462	case GUC_BLITTER_CLASS:
 463		return "Blitter";
 464	case GUC_COMPUTE_CLASS:
 465		return "Compute";
 466	case GUC_GSC_OTHER_CLASS:
 467		return "GSC-Other";
 468	default:
 469		break;
 470	}
 471
 472	return "unknown";
 473}
 474
 475static int
 476guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
 477		      struct guc_mmio_reg *ptr, u16 num_entries)
 478{
 479	u32 i = 0, j = 0;
 480	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
 481	const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists;
 482	struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists;
 483	const struct __guc_mmio_reg_descr_group *match;
 484	struct __guc_mmio_reg_descr_group *matchext;
 485
 486	if (!reglists)
 487		return -ENODEV;
 488
 489	match = guc_capture_get_one_list(reglists, owner, type, classid);
 490	if (!match)
 491		return -ENODATA;
 492
 493	for (i = 0; i < num_entries && i < match->num_regs; ++i) {
 494		ptr[i].offset = match->list[i].reg.reg;
 495		ptr[i].value = 0xDEADF00D;
 496		ptr[i].flags = match->list[i].flags;
 497		ptr[i].mask = match->list[i].mask;
 498	}
 499
 500	matchext = guc_capture_get_one_ext_list(extlists, owner, type, classid);
 501	if (matchext) {
 502		for (i = match->num_regs, j = 0; i < num_entries &&
 503		     i < (match->num_regs + matchext->num_regs) &&
 504			j < matchext->num_regs; ++i, ++j) {
 505			ptr[i].offset = matchext->extlist[j].reg.reg;
 506			ptr[i].value = 0xDEADF00D;
 507			ptr[i].flags = matchext->extlist[j].flags;
 508			ptr[i].mask = matchext->extlist[j].mask;
 509		}
 510	}
 511	if (i < num_entries)
 512		drm_dbg(&i915->drm, "GuC-capture: Init reglist short %d out of %d.\n",
 513			(int)i, (int)num_entries);
 514
 515	return 0;
 516}
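/*
 * For illustration: with COMMON_BASE_GLOBAL listed first, the first ADS
 * entry emitted above is effectively
 *
 *	ptr[0].offset = i915_mmio_reg_offset(FORCEWAKE_MT);
 *	ptr[0].value  = 0xDEADF00D;
 *	ptr[0].flags  = 0;
 *	ptr[0].mask   = 0;
 *
 * Only offset/flags/mask are meaningful input for GuC; 0xDEADF00D is just a
 * recognizable placeholder, since captured values come back through the
 * error-capture output buffer instead.
 */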
 517
 518static int
 519guc_cap_list_num_regs(struct intel_guc_state_capture *gc, u32 owner, u32 type, u32 classid)
 520{
 521	const struct __guc_mmio_reg_descr_group *match;
 522	struct __guc_mmio_reg_descr_group *matchext;
 523	int num_regs;
 524
 525	match = guc_capture_get_one_list(gc->reglists, owner, type, classid);
 526	if (!match)
 527		return 0;
 528
 529	num_regs = match->num_regs;
 530
 531	matchext = guc_capture_get_one_ext_list(gc->extlists, owner, type, classid);
 532	if (matchext)
 533		num_regs += matchext->num_regs;
 534
 535	return num_regs;
 536}
 537
 538static int
 539guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
 540			size_t *size, bool is_purpose_est)
 541{
 542	struct intel_guc_state_capture *gc = guc->capture;
 543	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
 544	struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
 545	int num_regs;
 546
 547	if (!gc->reglists) {
 548		drm_warn(&i915->drm, "GuC-capture: No reglist on this device\n");
 549		return -ENODEV;
 550	}
 551
 552	if (cache->is_valid) {
 553		*size = cache->size;
 554		return cache->status;
 555	}
 556
 557	if (!is_purpose_est && owner == GUC_CAPTURE_LIST_INDEX_PF &&
 558	    !guc_capture_get_one_list(gc->reglists, owner, type, classid)) {
 559		if (type == GUC_CAPTURE_LIST_TYPE_GLOBAL)
 560			drm_warn(&i915->drm, "Missing GuC-Err-Cap reglist Global!\n");
 561		else
 562			drm_warn(&i915->drm, "Missing GuC-Err-Cap reglist %s(%u):%s(%u)!\n",
 563				 __stringify_type(type), type,
 564				 __stringify_engclass(classid), classid);
 565		return -ENODATA;
 566	}
 567
 568	num_regs = guc_cap_list_num_regs(gc, owner, type, classid);
 569	/* intentionally empty lists can exist depending on hw config */
 570	if (!num_regs)
 571		return -ENODATA;
 572
 573	if (size)
 574		*size = PAGE_ALIGN((sizeof(struct guc_debug_capture_list)) +
 575				   (num_regs * sizeof(struct guc_mmio_reg)));
 576
 577	return 0;
 578}
 579
 580int
 581intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
 582			      size_t *size)
 583{
 584	return guc_capture_getlistsize(guc, owner, type, classid, size, false);
 585}
 586
 587static void guc_capture_create_prealloc_nodes(struct intel_guc *guc);
 588
 589int
 590intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
 591			  void **outptr)
 592{
 593	struct intel_guc_state_capture *gc = guc->capture;
 594	struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
 595	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
 596	struct guc_debug_capture_list *listnode;
 597	int ret, num_regs;
 598	u8 *caplist, *tmp;
 599	size_t size = 0;
 600
 601	if (!gc->reglists)
 602		return -ENODEV;
 603
 604	if (cache->is_valid) {
 605		*outptr = cache->ptr;
 606		return cache->status;
 607	}
 608
 609	/*
 610	 * ADS population of input registers is a good
 611	 * time to pre-allocate cachelist output nodes
 612	 */
 613	guc_capture_create_prealloc_nodes(guc);
 614
 615	ret = intel_guc_capture_getlistsize(guc, owner, type, classid, &size);
 616	if (ret) {
 617		cache->is_valid = true;
 618		cache->ptr = NULL;
 619		cache->size = 0;
 620		cache->status = ret;
 621		return ret;
 622	}
 623
 624	caplist = kzalloc(size, GFP_KERNEL);
 625	if (!caplist) {
 626		drm_dbg(&i915->drm, "GuC-capture: failed to alloc cached caplist");
 627		return -ENOMEM;
 628	}
 629
 630	/* populate capture list header */
 631	tmp = caplist;
 632	num_regs = guc_cap_list_num_regs(guc->capture, owner, type, classid);
 633	listnode = (struct guc_debug_capture_list *)tmp;
 634	listnode->header.info = FIELD_PREP(GUC_CAPTURELISTHDR_NUMDESCR, (u32)num_regs);
 635
 636	/* populate list of register descriptors */
 637	tmp += sizeof(struct guc_debug_capture_list);
 638	guc_capture_list_init(guc, owner, type, classid, (struct guc_mmio_reg *)tmp, num_regs);
 639
 640	/* cache this list */
 641	cache->is_valid = true;
 642	cache->ptr = caplist;
 643	cache->size = size;
 644	cache->status = 0;
 645
 646	*outptr = caplist;
 647
 648	return 0;
 649}
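/*
 * A minimal caller sketch (hedged; the real consumer is the ADS module and
 * its exact usage may differ):
 *
 *	size_t size;
 *	void *ptr;
 *
 *	if (!intel_guc_capture_getlistsize(guc, owner, type, classid, &size) &&
 *	    !intel_guc_capture_getlist(guc, owner, type, classid, &ptr)) {
 *		// copy 'size' bytes from 'ptr' into the ADS blob; 'ptr' is
 *		// cached by this module, so the caller must not free it
 *	}
 */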
 650
 651int
 652intel_guc_capture_getnullheader(struct intel_guc *guc,
 653				void **outptr, size_t *size)
 654{
 655	struct intel_guc_state_capture *gc = guc->capture;
 656	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
 657	int tmp = sizeof(u32) * 4;
 658	void *null_header;
 659
 660	if (gc->ads_null_cache) {
 661		*outptr = gc->ads_null_cache;
 662		*size = tmp;
 663		return 0;
 664	}
 665
 666	null_header = kzalloc(tmp, GFP_KERNEL);
 667	if (!null_header) {
 668		drm_dbg(&i915->drm, "GuC-capture: failed to alloc cached nulllist");
 669		return -ENOMEM;
 670	}
 671
 672	gc->ads_null_cache = null_header;
 673	*outptr = null_header;
 674	*size = tmp;
 675
 676	return 0;
 677}
 678
 679static int
 680guc_capture_output_min_size_est(struct intel_guc *guc)
 681{
 682	struct intel_gt *gt = guc_to_gt(guc);
 683	struct intel_engine_cs *engine;
 684	enum intel_engine_id id;
 685	int worst_min_size = 0;
 686	size_t tmp = 0;
 687
 688	if (!guc->capture)
 689		return -ENODEV;
 690
 691	/*
 692	 * If every single engine-instance suffered a failure in quick succession and
 693	 * the failures were all unrelated, then a burst of multiple error-capture events
 694	 * would dump registers for each engine instance, one at a time. In this case, GuC
 695	 * would even dump the global-registers repeatedly.
 696	 *
 697	 * For each engine instance, there would be 1 x guc_state_capture_group_t output
 698	 * followed by 3 x guc_state_capture_t lists. The latter is how the register
 699	 * dumps are split across the different register types (where the '3' covers
 700	 * global, class and instance).
 701	 */
 702	for_each_engine(engine, gt, id) {
 703		worst_min_size += sizeof(struct guc_state_capture_group_header_t) +
 704					 (3 * sizeof(struct guc_state_capture_header_t));
 705
 706		if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &tmp, true))
 707			worst_min_size += tmp;
 708
 709		if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
 710					     engine->class, &tmp, true)) {
 711			worst_min_size += tmp;
 712		}
 713		if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
 714					     engine->class, &tmp, true)) {
 715			worst_min_size += tmp;
 716		}
 717	}
 718
 719	return worst_min_size;
 720}
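/*
 * Restating the loop above as a formula (illustrative):
 *
 *	worst_min_size = num_engines *
 *			 (sizeof(group header) + 3 * sizeof(capture header) +
 *			  global_list_size + class_list_size + instance_list_size)
 *
 * where the three list sizes are the per-type results of
 * guc_capture_getlistsize() for that engine's class.
 */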
 721
 722/*
 723 * Add on a 3x multiplier to allow for multiple back-to-back captures occurring
 724 * before the i915 can read the data out and process it
 725 */
 726#define GUC_CAPTURE_OVERBUFFER_MULTIPLIER 3
 727
 728static void check_guc_capture_size(struct intel_guc *guc)
 729{
 730	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
 731	int min_size = guc_capture_output_min_size_est(guc);
 732	int spare_size = min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER;
 733	u32 buffer_size = intel_guc_log_section_size_capture(&guc->log);
 734
 735	/*
 736	 * NOTE: min_size is much smaller than the capture region allocation (DG2: <80K vs 1MB).
 737	 * Additionally, it's based on the space needed to fit all engines getting reset at once
 738	 * within the same G2H handler task slot, which is very unlikely. However, if GuC really
 739	 * does run out of space for whatever reason, we will see a separate warning message
 740	 * when processing the G2H event capture-notification; search for:
 741	 * INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE.
 742	 */
 743	if (min_size < 0)
 744		drm_warn(&i915->drm, "Failed to calculate GuC error state capture buffer minimum size: %d!\n",
 745			 min_size);
 746	else if (min_size > buffer_size)
 747		drm_warn(&i915->drm, "GuC error state capture buffer may be too small: %d < %d\n",
 748			 buffer_size, min_size);
 749	else if (spare_size > buffer_size)
 750		drm_dbg(&i915->drm, "GuC error state capture buffer lacks spare size: %d < %d (min = %d)\n",
 751			buffer_size, spare_size, min_size);
 752}
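/*
 * Worked example with the DG2 figures quoted above (illustrative): if
 * min_size is just under 80K, spare_size is just under 240K, and both fit
 * comfortably in the 1MB capture section, so neither message is emitted.
 */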
 753
 754/*
 755 * KMD Init time flows:
 756 * --------------------
 757 *     --> alloc A: GuC input capture regs lists (registered to GuC via ADS).
 758 *                  intel_guc_ads acquires the register lists by calling
 759 *                  intel_guc_capture_getlistsize and intel_guc_capture_getlist 'n' times,
 760 *                  where n = 1 for global-reg-list +
 761 *                            num_engine_classes for class-reg-list +
 762 *                            num_engine_classes for instance-reg-list
 763 *                               (since all instances of the same engine-class type
 764 *                                have an identical engine-instance register-list).
 765 *                  ADS module also calls separately for PF vs VF.
 766 *
 767 *     --> alloc B: GuC output capture buf (registered via guc_init_params(log_param))
 768 *                  Size = #define CAPTURE_BUFFER_SIZE (warns if too small)
 769 *                  Note: 'x 3' to hold multiple capture groups
 770 *
 771 * GUC Runtime notify capture:
 772 * --------------------------
 773 *     --> G2H STATE_CAPTURE_NOTIFICATION
 774 *                   L--> intel_guc_capture_process
 775 *                           L--> Loop through B (head..tail) and for each engine instance's
 776 *                                err-state-captured register-list we find, we alloc 'C':
 777 *      --> alloc C: A capture-output-node structure that includes misc capture info along
 778 *                   with 3 register list dumps (global, engine-class and engine-instance).
 779 *                   This node is created from a pre-allocated list of blank nodes in
 780 *                   guc->capture->cachelist, populated with the error-capture
 781 *                   data from GuC, and then added into the guc->capture->outlist linked
 782 *                   list. This list is used for matchup and printout by i915_gpu_coredump
 783 *                   and err_print_gt (when the user invokes the error capture sysfs).
 784 *
 785 * GUC --> notify context reset:
 786 * -----------------------------
 787 *     --> G2H CONTEXT RESET
 788 *                   L--> guc_handle_context_reset --> i915_capture_error_state
 789 *                          L--> i915_gpu_coredump(..IS_GUC_CAPTURE) --> gt_record_engines
 790 *                               --> capture_engine(..IS_GUC_CAPTURE)
 791 *                               L--> intel_guc_capture_get_matching_node is where
 792 *                                    we detach C from the internal linked list and add
 793 *                                    it into the intel_engine_coredump struct (if the
 794 *                                    context and engine of the event notification match
 795 *                                    a node in the linked list).
 796 *
 797 * User Sysfs / Debugfs
 798 * --------------------
 799 *      --> i915_gpu_coredump_copy_to_buffer->
 800 *                   L--> err_print_to_sgl --> err_print_gt
 801 *                        L--> error_print_guc_captures
 802 *                             L--> intel_guc_capture_print_node prints the
 803 *                                  register lists values of the attached node
 804 *                                  on the error-engine-dump being reported.
 805 *                   L--> i915_reset_error_state ... -->__i915_gpu_coredump_free
 806 *                        L--> ... cleanup_gt -->
 807 *                             L--> intel_guc_capture_free_node returns the
 808 *                                  capture-output-node back to the internal
 809 *                                  cachelist for reuse.
 810 *
 811 */
 812
 813static int guc_capture_buf_cnt(struct __guc_capture_bufstate *buf)
 814{
 815	if (buf->wr >= buf->rd)
 816		return (buf->wr - buf->rd);
 817	return (buf->size - buf->rd) + buf->wr;
 818}
 819
 820static int guc_capture_buf_cnt_to_end(struct __guc_capture_bufstate *buf)
 821{
 822	if (buf->rd > buf->wr)
 823		return (buf->size - buf->rd);
 824	return (buf->wr - buf->rd);
 825}
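/*
 * Worked example for the two helpers above (illustrative numbers): with
 * size = 0x1000, rd = 0xff0 and wr = 0x010 (i.e. the writer has wrapped),
 *
 *	guc_capture_buf_cnt()        = (0x1000 - 0xff0) + 0x010 = 0x20
 *	guc_capture_buf_cnt_to_end() =  0x1000 - 0xff0          = 0x10
 *
 * so 0x20 bytes are pending overall, of which only 0x10 are contiguous
 * before the ring wraps back to offset 0.
 */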
 826
 827/*
 828 * GuC's error-capture output is a ring buffer populated in a byte-stream fashion:
 829 *
 830 * The GuC Log buffer region for error-capture is managed like a ring buffer.
 831 * The GuC firmware dumps error capture logs into this ring in a byte-stream flow.
 832 * Additionally, both now and for the foreseeable future, all packed error-
 833 * capture output structures are dword aligned.
 834 *
 835 * That said, if the GuC firmware is in the midst of writing a structure that is larger
 836 * than one dword but the tail end of the err-capture buffer-region has less space left,
 837 * we would need to extract that structure one dword at a time straddled across the end,
 838 * onto the start of the ring.
 839 *
 840 * The function below, guc_capture_log_remove_dw, is a helper for that. Callers of this
 841 * function typically do a straight-up memcpy from the ring contents and will only
 842 * call this helper if their structure-extraction straddles the end of the
 843 * ring. GuC firmware does not add any padding; the lack of padding eases
 844 * scalability for future expansion of output data types without requiring a redesign
 845 * of the flow controls.
 846 */
 847static int
 848guc_capture_log_remove_dw(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
 849			  u32 *dw)
 850{
 851	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
 852	int tries = 2;
 853	int avail = 0;
 854	u32 *src_data;
 855
 856	if (!guc_capture_buf_cnt(buf))
 857		return 0;
 858
 859	while (tries--) {
 860		avail = guc_capture_buf_cnt_to_end(buf);
 861		if (avail >= sizeof(u32)) {
 862			src_data = (u32 *)(buf->data + buf->rd);
 863			*dw = *src_data;
 864			buf->rd += 4;
 865			return 4;
 866		}
 867		if (avail)
 868			drm_dbg(&i915->drm, "GuC-Cap-Logs not dword aligned, skipping.\n");
 869		buf->rd = 0;
 870	}
 871
 872	return 0;
 873}
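/*
 * Straddle example for the helper above (illustrative): with size = 0x1000
 * and rd = 0xffc, extracting an 8-byte header takes two calls. The first
 * returns the dword at 0xffc and advances rd to 0x1000; the second sees no
 * whole dword left before the end, wraps rd to 0, and returns the dword at
 * offset 0.
 */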
 874
 875static bool
 876guc_capture_data_extracted(struct __guc_capture_bufstate *b,
 877			   int size, void *dest)
 878{
 879	if (guc_capture_buf_cnt_to_end(b) >= size) {
 880		memcpy(dest, (b->data + b->rd), size);
 881		b->rd += size;
 882		return true;
 883	}
 884	return false;
 885}
 886
 887static int
 888guc_capture_log_get_group_hdr(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
 889			      struct guc_state_capture_group_header_t *ghdr)
 890{
 891	int read = 0;
 892	int fullsize = sizeof(struct guc_state_capture_group_header_t);
 893
 894	if (fullsize > guc_capture_buf_cnt(buf))
 895		return -1;
 896
 897	if (guc_capture_data_extracted(buf, fullsize, (void *)ghdr))
 898		return 0;
 899
 900	read += guc_capture_log_remove_dw(guc, buf, &ghdr->owner);
 901	read += guc_capture_log_remove_dw(guc, buf, &ghdr->info);
 902	if (read != fullsize)
 903		return -1;
 904
 905	return 0;
 906}
 907
 908static int
 909guc_capture_log_get_data_hdr(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
 910			     struct guc_state_capture_header_t *hdr)
 911{
 912	int read = 0;
 913	int fullsize = sizeof(struct guc_state_capture_header_t);
 914
 915	if (fullsize > guc_capture_buf_cnt(buf))
 916		return -1;
 917
 918	if (guc_capture_data_extracted(buf, fullsize, (void *)hdr))
 919		return 0;
 920
 921	read += guc_capture_log_remove_dw(guc, buf, &hdr->owner);
 922	read += guc_capture_log_remove_dw(guc, buf, &hdr->info);
 923	read += guc_capture_log_remove_dw(guc, buf, &hdr->lrca);
 924	read += guc_capture_log_remove_dw(guc, buf, &hdr->guc_id);
 925	read += guc_capture_log_remove_dw(guc, buf, &hdr->num_mmios);
 926	if (read != fullsize)
 927		return -1;
 928
 929	return 0;
 930}
 931
 932static int
 933guc_capture_log_get_register(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
 934			     struct guc_mmio_reg *reg)
 935{
 936	int read = 0;
 937	int fullsize = sizeof(struct guc_mmio_reg);
 938
 939	if (fullsize > guc_capture_buf_cnt(buf))
 940		return -1;
 941
 942	if (guc_capture_data_extracted(buf, fullsize, (void *)reg))
 943		return 0;
 944
 945	read += guc_capture_log_remove_dw(guc, buf, &reg->offset);
 946	read += guc_capture_log_remove_dw(guc, buf, &reg->value);
 947	read += guc_capture_log_remove_dw(guc, buf, &reg->flags);
 948	read += guc_capture_log_remove_dw(guc, buf, &reg->mask);
 949	if (read != fullsize)
 950		return -1;
 951
 952	return 0;
 953}
 954
 955static void
 956guc_capture_delete_one_node(struct intel_guc *guc, struct __guc_capture_parsed_output *node)
 957{
 958	int i;
 959
 960	for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i)
 961		kfree(node->reginfo[i].regs);
 962	list_del(&node->link);
 963	kfree(node);
 964}
 965
 966static void
 967guc_capture_delete_prealloc_nodes(struct intel_guc *guc)
 968{
 969	struct __guc_capture_parsed_output *n, *ntmp;
 970
 971	/*
 972	 * NOTE: At the end of driver operation, we must assume that we
 973	 * have prealloc nodes in both the cachelist and the outlist
 974	 * if unclaimed error capture events occurred prior to shutdown.
 975	 */
 976	list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link)
 977		guc_capture_delete_one_node(guc, n);
 978
 979	list_for_each_entry_safe(n, ntmp, &guc->capture->cachelist, link)
 980		guc_capture_delete_one_node(guc, n);
 981}
 982
 983static void
 984guc_capture_add_node_to_list(struct __guc_capture_parsed_output *node,
 985			     struct list_head *list)
 986{
 987	list_add_tail(&node->link, list);
 988}
 989
 990static void
 991guc_capture_add_node_to_outlist(struct intel_guc_state_capture *gc,
 992				struct __guc_capture_parsed_output *node)
 993{
 994	guc_capture_add_node_to_list(node, &gc->outlist);
 995}
 996
 997static void
 998guc_capture_add_node_to_cachelist(struct intel_guc_state_capture *gc,
 999				  struct __guc_capture_parsed_output *node)
1000{
1001	guc_capture_add_node_to_list(node, &gc->cachelist);
1002}
1003
1004static void
1005guc_capture_init_node(struct intel_guc *guc, struct __guc_capture_parsed_output *node)
1006{
1007	struct guc_mmio_reg *tmp[GUC_CAPTURE_LIST_TYPE_MAX];
1008	int i;
1009
1010	for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
1011		tmp[i] = node->reginfo[i].regs;
1012		memset(tmp[i], 0, sizeof(struct guc_mmio_reg) *
1013		       guc->capture->max_mmio_per_node);
1014	}
1015	memset(node, 0, sizeof(*node));
1016	for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i)
1017		node->reginfo[i].regs = tmp[i];
1018
1019	INIT_LIST_HEAD(&node->link);
1020}
1021
1022static struct __guc_capture_parsed_output *
1023guc_capture_get_prealloc_node(struct intel_guc *guc)
1024{
1025	struct __guc_capture_parsed_output *found = NULL;
1026
1027	if (!list_empty(&guc->capture->cachelist)) {
1028		struct __guc_capture_parsed_output *n, *ntmp;
1029
1030		/* get first avail node from the cache list */
1031		list_for_each_entry_safe(n, ntmp, &guc->capture->cachelist, link) {
1032			found = n;
1033			list_del(&n->link);
1034			break;
1035		}
1036	} else {
1037		struct __guc_capture_parsed_output *n, *ntmp;
1038
1039		/* traverse down and steal back the oldest node already allocated */
1040		list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) {
1041			found = n;
1042		}
1043		if (found)
1044			list_del(&found->link);
1045	}
1046	if (found)
1047		guc_capture_init_node(guc, found);
1048
1049	return found;
1050}
1051
1052static struct __guc_capture_parsed_output *
1053guc_capture_alloc_one_node(struct intel_guc *guc)
1054{
1055	struct __guc_capture_parsed_output *new;
1056	int i;
1057
1058	new = kzalloc(sizeof(*new), GFP_KERNEL);
1059	if (!new)
1060		return NULL;
1061
1062	for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
1063		new->reginfo[i].regs = kcalloc(guc->capture->max_mmio_per_node,
1064					       sizeof(struct guc_mmio_reg), GFP_KERNEL);
1065		if (!new->reginfo[i].regs) {
1066			while (i)
1067				kfree(new->reginfo[--i].regs);
1068			kfree(new);
1069			return NULL;
1070		}
1071	}
1072	guc_capture_init_node(guc, new);
1073
1074	return new;
1075}
1076
1077static struct __guc_capture_parsed_output *
1078guc_capture_clone_node(struct intel_guc *guc, struct __guc_capture_parsed_output *original,
1079		       u32 keep_reglist_mask)
1080{
1081	struct __guc_capture_parsed_output *new;
1082	int i;
1083
1084	new = guc_capture_get_prealloc_node(guc);
1085	if (!new)
1086		return NULL;
1087	if (!original)
1088		return new;
1089
1090	new->is_partial = original->is_partial;
1091
1092	/* copy reg-lists that we want to clone */
1093	for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
1094		if (keep_reglist_mask & BIT(i)) {
1095			GEM_BUG_ON(original->reginfo[i].num_regs  >
1096				   guc->capture->max_mmio_per_node);
1097
1098			memcpy(new->reginfo[i].regs, original->reginfo[i].regs,
1099			       original->reginfo[i].num_regs * sizeof(struct guc_mmio_reg));
1100
1101			new->reginfo[i].num_regs = original->reginfo[i].num_regs;
1102			new->reginfo[i].vfid  = original->reginfo[i].vfid;
1103
1104			if (i == GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS) {
1105				new->eng_class = original->eng_class;
1106			} else if (i == GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE) {
1107				new->eng_inst = original->eng_inst;
1108				new->guc_id = original->guc_id;
1109				new->lrca = original->lrca;
1110			}
1111		}
1112	}
1113
1114	return new;
1115}
1116
1117static void
1118__guc_capture_create_prealloc_nodes(struct intel_guc *guc)
1119{
1120	struct __guc_capture_parsed_output *node = NULL;
1121	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
1122	int i;
1123
1124	for (i = 0; i < PREALLOC_NODES_MAX_COUNT; ++i) {
1125		node = guc_capture_alloc_one_node(guc);
1126		if (!node) {
1127			drm_warn(&i915->drm, "GuC Capture pre-alloc-cache failure\n");
1128			/* don't free the priors; use what we got and clean up at shutdown */
1129			return;
1130		}
1131		guc_capture_add_node_to_cachelist(guc->capture, node);
1132	}
1133}
1134
1135static int
1136guc_get_max_reglist_count(struct intel_guc *guc)
1137{
1138	int i, j, k, tmp, maxregcount = 0;
1139
1140	for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; ++i) {
1141		for (j = 0; j < GUC_CAPTURE_LIST_TYPE_MAX; ++j) {
1142			for (k = 0; k < GUC_MAX_ENGINE_CLASSES; ++k) {
1143				if (j == GUC_CAPTURE_LIST_TYPE_GLOBAL && k > 0)
1144					continue;
1145
1146				tmp = guc_cap_list_num_regs(guc->capture, i, j, k);
1147				if (tmp > maxregcount)
1148					maxregcount = tmp;
1149			}
1150		}
1151	}
1152	if (!maxregcount)
1153		maxregcount = PREALLOC_NODES_DEFAULT_NUMREGS;
1154
1155	return maxregcount;
1156}
1157
1158static void
1159guc_capture_create_prealloc_nodes(struct intel_guc *guc)
1160{
1161	/* skip if we've already done the pre-alloc */
1162	if (guc->capture->max_mmio_per_node)
1163		return;
1164
1165	guc->capture->max_mmio_per_node = guc_get_max_reglist_count(guc);
1166	__guc_capture_create_prealloc_nodes(guc);
1167}
1168
1169static int
1170guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstate *buf)
1171{
1172	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
1173	struct guc_state_capture_group_header_t ghdr = {0};
1174	struct guc_state_capture_header_t hdr = {0};
1175	struct __guc_capture_parsed_output *node = NULL;
1176	struct guc_mmio_reg *regs = NULL;
1177	int i, numlists, numregs, ret = 0;
1178	enum guc_capture_type datatype;
1179	struct guc_mmio_reg tmp;
1180	bool is_partial = false;
1181
1182	i = guc_capture_buf_cnt(buf);
1183	if (!i)
1184		return -ENODATA;
1185	if (i % sizeof(u32)) {
1186		drm_warn(&i915->drm, "GuC Capture new entries unaligned\n");
1187		ret = -EIO;
1188		goto bailout;
1189	}
1190
1191	/* first get the capture group header */
1192	if (guc_capture_log_get_group_hdr(guc, buf, &ghdr)) {
1193		ret = -EIO;
1194		goto bailout;
1195	}
1196	/*
1197	 * We would typically expect a layout as below, with at least three captures
1198	 * per group, and more than three if multiple dependent engine instances
1199	 * are being reset together.
1200	 * ____________________________________________
1201	 * | Capture Group                            |
1202	 * | ________________________________________ |
1203	 * | | Capture Group Header:                | |
1204	 * | |  - num_captures = 5                  | |
1205	 * | |______________________________________| |
1206	 * | ________________________________________ |
1207	 * | | Capture1:                            | |
1208	 * | |  Hdr: GLOBAL, numregs=a              | |
1209	 * | | ____________________________________ | |
1210	 * | | | Reglist                          | | |
1211	 * | | | - reg1, reg2, ... rega           | | |
1212	 * | | |__________________________________| | |
1213	 * | |______________________________________| |
1214	 * | ________________________________________ |
1215	 * | | Capture2:                            | |
1216	 * | |  Hdr: CLASS=RENDER/COMPUTE, numregs=b| |
1217	 * | | ____________________________________ | |
1218	 * | | | Reglist                          | | |
1219	 * | | | - reg1, reg2, ... regb           | | |
1220	 * | | |__________________________________| | |
1221	 * | |______________________________________| |
1222	 * | ________________________________________ |
1223	 * | | Capture3:                            | |
1224	 * | |  Hdr: INSTANCE=RCS, numregs=c        | |
1225	 * | | ____________________________________ | |
1226	 * | | | Reglist                          | | |
1227	 * | | | - reg1, reg2, ... regc           | | |
1228	 * | | |__________________________________| | |
1229	 * | |______________________________________| |
1230	 * | ________________________________________ |
1231	 * | | Capture4:                            | |
1232	 * | |  Hdr: CLASS=RENDER/COMPUTE, numregs=d| |
1233	 * | | ____________________________________ | |
1234	 * | | | Reglist                          | | |
1235	 * | | | - reg1, reg2, ... regd           | | |
1236	 * | | |__________________________________| | |
1237	 * | |______________________________________| |
1238	 * | ________________________________________ |
1239	 * | | Capture5:                            | |
1240	 * | |  Hdr: INSTANCE=CCS0, numregs=e       | |
1241	 * | | ____________________________________ | |
1242	 * | | | Reglist                          | | |
1243	 * | | | - reg1, reg2, ... rege           | | |
1244	 * | | |__________________________________| | |
1245	 * | |______________________________________| |
1246	 * |__________________________________________|
1247	 */
1248	is_partial = FIELD_GET(CAP_GRP_HDR_CAPTURE_TYPE, ghdr.info);
1249	numlists = FIELD_GET(CAP_GRP_HDR_NUM_CAPTURES, ghdr.info);
1250
1251	while (numlists--) {
1252		if (guc_capture_log_get_data_hdr(guc, buf, &hdr)) {
1253			ret = -EIO;
1254			break;
1255		}
1256
1257		datatype = FIELD_GET(CAP_HDR_CAPTURE_TYPE, hdr.info);
1258		if (datatype > GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE) {
1259			/* unknown capture type - skip over to next capture set */
1260			numregs = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios);
1261			while (numregs--) {
1262				if (guc_capture_log_get_register(guc, buf, &tmp)) {
1263					ret = -EIO;
1264					break;
1265				}
1266			}
1267			continue;
1268		} else if (node) {
1269			/*
1270			 * Based on the current capture type and what we have so far,
1271			 * decide if we should add the current node into the internal
1272			 * linked list for match-up when i915_gpu_coredump calls later
1273			 * (and alloc a blank node for the next set of reglists)
1274			 * or continue with the same node or clone the current node
1275			 * but only retain the global or class registers (such as the
1276			 * case of dependent engine resets).
1277			 */
1278			if (datatype == GUC_CAPTURE_LIST_TYPE_GLOBAL) {
1279				guc_capture_add_node_to_outlist(guc->capture, node);
1280				node = NULL;
1281			} else if (datatype == GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS &&
1282				   node->reginfo[GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS].num_regs) {
1283				/* Add to list, clone node and duplicate global list */
1284				guc_capture_add_node_to_outlist(guc->capture, node);
1285				node = guc_capture_clone_node(guc, node,
1286							      GCAP_PARSED_REGLIST_INDEX_GLOBAL);
1287			} else if (datatype == GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE &&
1288				   node->reginfo[GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE].num_regs) {
1289				/* Add to list, clone node and duplicate global + class lists */
1290				guc_capture_add_node_to_outlist(guc->capture, node);
1291				node = guc_capture_clone_node(guc, node,
1292							      (GCAP_PARSED_REGLIST_INDEX_GLOBAL |
1293							      GCAP_PARSED_REGLIST_INDEX_ENGCLASS));
1294			}
1295		}
1296
1297		if (!node) {
1298			node = guc_capture_get_prealloc_node(guc);
1299			if (!node) {
1300				ret = -ENOMEM;
1301				break;
1302			}
1303			if (datatype != GUC_CAPTURE_LIST_TYPE_GLOBAL)
1304				drm_dbg(&i915->drm, "GuC Capture missing global dump: %08x!\n",
1305					datatype);
1306		}
1307		node->is_partial = is_partial;
1308		node->reginfo[datatype].vfid = FIELD_GET(CAP_HDR_CAPTURE_VFID, hdr.owner);
1309		switch (datatype) {
1310		case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
1311			node->eng_class = FIELD_GET(CAP_HDR_ENGINE_CLASS, hdr.info);
1312			node->eng_inst = FIELD_GET(CAP_HDR_ENGINE_INSTANCE, hdr.info);
1313			node->lrca = hdr.lrca;
1314			node->guc_id = hdr.guc_id;
1315			break;
1316		case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
1317			node->eng_class = FIELD_GET(CAP_HDR_ENGINE_CLASS, hdr.info);
1318			break;
1319		default:
1320			break;
1321		}
1322
1323		numregs = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios);
1324		if (numregs > guc->capture->max_mmio_per_node) {
1325			drm_dbg(&i915->drm, "GuC Capture list extraction clipped by prealloc!\n");
1326			numregs = guc->capture->max_mmio_per_node;
1327		}
1328		node->reginfo[datatype].num_regs = numregs;
1329		regs = node->reginfo[datatype].regs;
1330		i = 0;
1331		while (numregs--) {
1332			if (guc_capture_log_get_register(guc, buf, &regs[i++])) {
1333				ret = -EIO;
1334				break;
1335			}
1336		}
1337	}
1338
1339bailout:
1340	if (node) {
1341		/* If we have data, add to linked list for match-up when i915_gpu_coredump calls */
1342		for (i = GUC_CAPTURE_LIST_TYPE_GLOBAL; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
1343			if (node->reginfo[i].regs) {
1344				guc_capture_add_node_to_outlist(guc->capture, node);
1345				node = NULL;
1346				break;
1347			}
1348		}
1349		if (node) /* else return it back to cache list */
1350			guc_capture_add_node_to_cachelist(guc->capture, node);
1351	}
1352	return ret;
1353}
1354
1355static int __guc_capture_flushlog_complete(struct intel_guc *guc)
1356{
1357	u32 action[] = {
1358		INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE,
1359		GUC_CAPTURE_LOG_BUFFER
1360	};
1361
1362	return intel_guc_send_nb(guc, action, ARRAY_SIZE(action), 0);
1363
1364}
1365
1366static void __guc_capture_process_output(struct intel_guc *guc)
1367{
1368	unsigned int buffer_size, read_offset, write_offset, full_count;
1369	struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
1370	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
1371	struct guc_log_buffer_state log_buf_state_local;
1372	struct guc_log_buffer_state *log_buf_state;
1373	struct __guc_capture_bufstate buf;
1374	void *src_data = NULL;
1375	bool new_overflow;
1376	int ret;
1377
1378	log_buf_state = guc->log.buf_addr +
1379			(sizeof(struct guc_log_buffer_state) * GUC_CAPTURE_LOG_BUFFER);
1380	src_data = guc->log.buf_addr +
1381		   intel_guc_get_log_buffer_offset(&guc->log, GUC_CAPTURE_LOG_BUFFER);
1382
1383	/*
1384	 * Make a copy of the state structure, which lives inside the GuC log
1385	 * buffer (an uncached mapping), on the stack to avoid reading
1386	 * from it multiple times.
1387	 */
1388	memcpy(&log_buf_state_local, log_buf_state, sizeof(struct guc_log_buffer_state));
1389	buffer_size = intel_guc_get_log_buffer_size(&guc->log, GUC_CAPTURE_LOG_BUFFER);
1390	read_offset = log_buf_state_local.read_ptr;
1391	write_offset = log_buf_state_local.sampled_write_ptr;
1392	full_count = log_buf_state_local.buffer_full_cnt;
1393
1394	/* Bookkeeping stuff */
1395	guc->log.stats[GUC_CAPTURE_LOG_BUFFER].flush += log_buf_state_local.flush_to_file;
1396	new_overflow = intel_guc_check_log_buf_overflow(&guc->log, GUC_CAPTURE_LOG_BUFFER,
1397							full_count);
1398
1399	/* Now copy the actual logs. */
1400	if (unlikely(new_overflow)) {
1401		/* copy the whole buffer in case of overflow */
1402		read_offset = 0;
1403		write_offset = buffer_size;
1404	} else if (unlikely((read_offset > buffer_size) ||
1405			(write_offset > buffer_size))) {
1406		drm_err(&i915->drm, "invalid GuC log capture buffer state!\n");
1407		/* copy whole buffer as offsets are unreliable */
1408		read_offset = 0;
1409		write_offset = buffer_size;
1410	}
1411
1412	buf.size = buffer_size;
1413	buf.rd = read_offset;
1414	buf.wr = write_offset;
1415	buf.data = src_data;
1416
1417	if (!uc->reset_in_progress) {
1418		do {
1419			ret = guc_capture_extract_reglists(guc, &buf);
1420		} while (ret >= 0);
1421	}
1422
1423	/* Update the state of the err-cap log buffer */
1424	log_buf_state->read_ptr = write_offset;
1425	log_buf_state->flush_to_file = 0;
1426	__guc_capture_flushlog_complete(guc);
1427}
1428
1429#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
1430
1431static const char *
1432guc_capture_reg_to_str(const struct intel_guc *guc, u32 owner, u32 type,
1433		       u32 class, u32 id, u32 offset, u32 *is_ext)
1434{
1435	const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists;
1436	struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists;
1437	const struct __guc_mmio_reg_descr_group *match;
1438	struct __guc_mmio_reg_descr_group *matchext;
1439	int j;
1440
1441	*is_ext = 0;
1442	if (!reglists)
1443		return NULL;
1444
1445	match = guc_capture_get_one_list(reglists, owner, type, id);
1446	if (!match)
1447		return NULL;
1448
1449	for (j = 0; j < match->num_regs; ++j) {
1450		if (offset == match->list[j].reg.reg)
1451			return match->list[j].regname;
1452	}
1453	if (extlists) {
1454		matchext = guc_capture_get_one_ext_list(extlists, owner, type, id);
1455		if (!matchext)
1456			return NULL;
1457		for (j = 0; j < matchext->num_regs; ++j) {
1458			if (offset == matchext->extlist[j].reg.reg) {
1459				*is_ext = 1;
1460				return matchext->extlist[j].regname;
1461			}
1462		}
1463	}
1464
1465	return NULL;
1466}
1467
1468#define GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng) \
1469	do { \
1470		i915_error_printf(ebuf, "    i915-Eng-Name: %s command stream\n", \
1471				  (eng)->name); \
1472		i915_error_printf(ebuf, "    i915-Eng-Inst-Class: 0x%02x\n", (eng)->class); \
1473		i915_error_printf(ebuf, "    i915-Eng-Inst-Id: 0x%02x\n", (eng)->instance); \
1474		i915_error_printf(ebuf, "    i915-Eng-LogicalMask: 0x%08x\n", \
1475				  (eng)->logical_mask); \
1476	} while (0)
1477
1478#define GCAP_PRINT_GUC_INST_INFO(ebuf, node) \
1479	do { \
1480		i915_error_printf(ebuf, "    GuC-Engine-Inst-Id: 0x%08x\n", \
1481				  (node)->eng_inst); \
1482		i915_error_printf(ebuf, "    GuC-Context-Id: 0x%08x\n", (node)->guc_id); \
1483		i915_error_printf(ebuf, "    LRCA: 0x%08x\n", (node)->lrca); \
1484	} while (0)
1485
1486int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
1487					const struct intel_engine_coredump *ee)
1488{
1489	const char *grptype[GUC_STATE_CAPTURE_GROUP_TYPE_MAX] = {
1490		"full-capture",
1491		"partial-capture"
1492	};
1493	const char *datatype[GUC_CAPTURE_LIST_TYPE_MAX] = {
1494		"Global",
1495		"Engine-Class",
1496		"Engine-Instance"
1497	};
1498	struct intel_guc_state_capture *cap;
1499	struct __guc_capture_parsed_output *node;
1500	struct intel_engine_cs *eng;
1501	struct guc_mmio_reg *regs;
1502	struct intel_guc *guc;
1503	const char *str;
1504	int numregs, i, j;
1505	u32 is_ext;
1506
1507	if (!ebuf || !ee)
1508		return -EINVAL;
1509	cap = ee->capture;
1510	if (!cap || !ee->engine)
1511		return -ENODEV;
1512
1513	guc = &ee->engine->gt->uc.guc;
1514
1515	i915_error_printf(ebuf, "global --- GuC Error Capture on %s command stream:\n",
1516			  ee->engine->name);
1517
1518	node = ee->guc_capture_node;
1519	if (!node) {
1520		i915_error_printf(ebuf, "  No matching ee-node\n");
1521		return 0;
1522	}
1523
1524	i915_error_printf(ebuf, "Coverage:  %s\n", grptype[node->is_partial]);
1525
1526	for (i = GUC_CAPTURE_LIST_TYPE_GLOBAL; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
1527		i915_error_printf(ebuf, "  RegListType: %s\n",
1528				  datatype[i % GUC_CAPTURE_LIST_TYPE_MAX]);
1529		i915_error_printf(ebuf, "    Owner-Id: %d\n", node->reginfo[i].vfid);
1530
1531		switch (i) {
1532		case GUC_CAPTURE_LIST_TYPE_GLOBAL:
1533		default:
1534			break;
1535		case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
1536			i915_error_printf(ebuf, "    GuC-Eng-Class: %d\n", node->eng_class);
1537			i915_error_printf(ebuf, "    i915-Eng-Class: %d\n",
1538					  guc_class_to_engine_class(node->eng_class));
1539			break;
1540		case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
1541			eng = intel_guc_lookup_engine(guc, node->eng_class, node->eng_inst);
1542			if (eng)
1543				GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng);
1544			else
1545				i915_error_printf(ebuf, "    i915-Eng-Lookup Fail!\n");
1546			GCAP_PRINT_GUC_INST_INFO(ebuf, node);
1547			break;
1548		}
1549
1550		numregs = node->reginfo[i].num_regs;
1551		i915_error_printf(ebuf, "    NumRegs: %d\n", numregs);
1552		j = 0;
1553		while (numregs--) {
1554			regs = node->reginfo[i].regs;
1555			str = guc_capture_reg_to_str(guc, GUC_CAPTURE_LIST_INDEX_PF, i,
1556						     node->eng_class, 0, regs[j].offset, &is_ext);
1557			if (!str)
1558				i915_error_printf(ebuf, "      REG-0x%08x", regs[j].offset);
1559			else
1560				i915_error_printf(ebuf, "      %s", str);
1561			if (is_ext)
1562				i915_error_printf(ebuf, "[%ld][%ld]",
1563					FIELD_GET(GUC_REGSET_STEERING_GROUP, regs[j].flags),
1564					FIELD_GET(GUC_REGSET_STEERING_INSTANCE, regs[j].flags));
1565			i915_error_printf(ebuf, ":  0x%08x\n", regs[j].value);
1566			++j;
1567		}
1568	}
1569	return 0;
1570}
1571
1572#endif /* CONFIG_DRM_I915_CAPTURE_ERROR */
1573
1574void intel_guc_capture_free_node(struct intel_engine_coredump *ee)
1575{
1576	if (!ee || !ee->guc_capture_node)
1577		return;
1578
1579	guc_capture_add_node_to_cachelist(ee->capture, ee->guc_capture_node);
1580	ee->capture = NULL;
1581	ee->guc_capture_node = NULL;
1582}
1583
1584void intel_guc_capture_get_matching_node(struct intel_gt *gt,
1585					 struct intel_engine_coredump *ee,
1586					 struct intel_context *ce)
1587{
1588	struct __guc_capture_parsed_output *n, *ntmp;
1589	struct drm_i915_private *i915;
1590	struct intel_guc *guc;
1591
1592	if (!gt || !ee || !ce)
1593		return;
1594
1595	i915 = gt->i915;
1596	guc = &gt->uc.guc;
1597	if (!guc->capture)
1598		return;
1599
1600	GEM_BUG_ON(ee->guc_capture_node);
1601	/*
1602	 * Look for a matching GuC reported error capture node from
1603	 * the internal output linked list based on lrca, guc-id and engine
1604	 * identification.
1605	 */
1606	list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) {
1607		if (n->eng_inst == GUC_ID_TO_ENGINE_INSTANCE(ee->engine->guc_id) &&
1608		    n->eng_class == GUC_ID_TO_ENGINE_CLASS(ee->engine->guc_id) &&
1609		    n->guc_id && n->guc_id == ce->guc_id.id &&
1610		    (n->lrca & CTX_GTT_ADDRESS_MASK) && (n->lrca & CTX_GTT_ADDRESS_MASK) ==
1611		    (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) {
1612			list_del(&n->link);
1613			ee->guc_capture_node = n;
1614			ee->capture = guc->capture;
1615			return;
1616		}
1617	}
1618	drm_dbg(&i915->drm, "GuC capture can't match ee to node\n");
1619}
1620
1621void intel_guc_capture_process(struct intel_guc *guc)
1622{
1623	if (guc->capture)
1624		__guc_capture_process_output(guc);
1625}
1626
1627static void
1628guc_capture_free_ads_cache(struct intel_guc_state_capture *gc)
1629{
1630	int i, j, k;
1631	struct __guc_capture_ads_cache *cache;
1632
1633	for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; ++i) {
1634		for (j = 0; j < GUC_CAPTURE_LIST_TYPE_MAX; ++j) {
1635			for (k = 0; k < GUC_MAX_ENGINE_CLASSES; ++k) {
1636				cache = &gc->ads_cache[i][j][k];
1637				if (cache->is_valid)
1638					kfree(cache->ptr);
1639			}
1640		}
1641	}
1642	kfree(gc->ads_null_cache);
1643}
1644
1645void intel_guc_capture_destroy(struct intel_guc *guc)
1646{
1647	if (!guc->capture)
1648		return;
1649
1650	guc_capture_free_ads_cache(guc->capture);
1651
1652	guc_capture_delete_prealloc_nodes(guc);
1653
1654	guc_capture_free_extlists(guc->capture->extlists);
1655	kfree(guc->capture->extlists);
1656
1657	kfree(guc->capture);
1658	guc->capture = NULL;
1659}
1660
1661int intel_guc_capture_init(struct intel_guc *guc)
1662{
1663	guc->capture = kzalloc(sizeof(*guc->capture), GFP_KERNEL);
1664	if (!guc->capture)
1665		return -ENOMEM;
1666
1667	guc->capture->reglists = guc_capture_get_device_reglist(guc);
1668
1669	INIT_LIST_HEAD(&guc->capture->outlist);
1670	INIT_LIST_HEAD(&guc->capture->cachelist);
1671
1672	check_guc_capture_size(guc);
1673
1674	return 0;
1675}