/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drm_print.h>

#include "intel_device_info.h"
#include "i915_drv.h"

#define PLATFORM_NAME(x) [INTEL_##x] = #x
static const char * const platform_names[] = {
	PLATFORM_NAME(I830),
	PLATFORM_NAME(I845G),
	PLATFORM_NAME(I85X),
	PLATFORM_NAME(I865G),
	PLATFORM_NAME(I915G),
	PLATFORM_NAME(I915GM),
	PLATFORM_NAME(I945G),
	PLATFORM_NAME(I945GM),
	PLATFORM_NAME(G33),
	PLATFORM_NAME(PINEVIEW),
	PLATFORM_NAME(I965G),
	PLATFORM_NAME(I965GM),
	PLATFORM_NAME(G45),
	PLATFORM_NAME(GM45),
	PLATFORM_NAME(IRONLAKE),
	PLATFORM_NAME(SANDYBRIDGE),
	PLATFORM_NAME(IVYBRIDGE),
	PLATFORM_NAME(VALLEYVIEW),
	PLATFORM_NAME(HASWELL),
	PLATFORM_NAME(BROADWELL),
	PLATFORM_NAME(CHERRYVIEW),
	PLATFORM_NAME(SKYLAKE),
	PLATFORM_NAME(BROXTON),
	PLATFORM_NAME(KABYLAKE),
	PLATFORM_NAME(GEMINILAKE),
	PLATFORM_NAME(COFFEELAKE),
	PLATFORM_NAME(CANNONLAKE),
	PLATFORM_NAME(ICELAKE),
	PLATFORM_NAME(ELKHARTLAKE),
	PLATFORM_NAME(TIGERLAKE),
};
#undef PLATFORM_NAME

const char *intel_platform_name(enum intel_platform platform)
{
	BUILD_BUG_ON(ARRAY_SIZE(platform_names) != INTEL_MAX_PLATFORMS);

	if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) ||
			 platform_names[platform] == NULL))
		return "<unknown>";

	return platform_names[platform];
}
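
/*
 * Usage sketch (illustrative, not part of this file): callers typically feed
 * the platform enum straight from the device info, e.g.
 *
 *	drm_printf(p, "platform: %s\n",
 *		   intel_platform_name(INTEL_INFO(i915)->platform));
 */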

void intel_device_info_dump_flags(const struct intel_device_info *info,
				  struct drm_printer *p)
{
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name));
	DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}

static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
{
	int s;

	drm_printf(p, "slice total: %u, mask=%04x\n",
		   hweight8(sseu->slice_mask), sseu->slice_mask);
	drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslices, mask=%04x\n",
			   s, intel_sseu_subslices_per_slice(sseu, s),
			   sseu->subslice_mask[s]);
	}
	drm_printf(p, "EU total: %u\n", sseu->eu_total);
	drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
	drm_printf(p, "has slice power gating: %s\n",
		   yesno(sseu->has_slice_pg));
	drm_printf(p, "has subslice power gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
}

void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
				    struct drm_printer *p)
{
	sseu_dump(&info->sseu, p);

	drm_printf(p, "CS timestamp frequency: %u kHz\n",
		   info->cs_timestamp_frequency_khz);
}

static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
		       int subslice)
{
	int subslice_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
	int slice_stride = sseu->max_subslices * subslice_stride;

	return slice * slice_stride + subslice * subslice_stride;
}
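
/*
 * Worked example (assuming GEN_SSEU_STRIDE() rounds a bit count up to whole
 * bytes, i.e. DIV_ROUND_UP(max_eus_per_subslice, BITS_PER_BYTE)): with
 * max_eus_per_subslice = 8 the stride is one byte per subslice, so on a part
 * with max_subslices = 4, slice 1 / subslice 2 starts at byte
 * 1 * 4 + 2 = 6 of sseu->eu_mask[].
 */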

static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
			int subslice)
{
	int i, offset = sseu_eu_idx(sseu, slice, subslice);
	u16 eu_mask = 0;

	for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
		eu_mask |= ((u16)sseu->eu_mask[offset + i]) <<
			(i * BITS_PER_BYTE);
	}

	return eu_mask;
}

static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
			 u16 eu_mask)
{
	int i, offset = sseu_eu_idx(sseu, slice, subslice);

	for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
		sseu->eu_mask[offset + i] =
			(eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
	}
}
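
/*
 * sseu_set_eus() and sseu_get_eus() are inverses: both treat sseu->eu_mask[]
 * as a little-endian byte array. The multi-byte path only matters when
 * max_eus_per_subslice > 8; e.g. storing 0x1ff with a two-byte stride writes
 * 0xff at the base offset and 0x01 at the following byte.
 */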

void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
				     struct drm_printer *p)
{
	int s, ss;

	if (sseu->max_slices == 0) {
		drm_printf(p, "Unavailable\n");
		return;
	}

	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n",
			   s, intel_sseu_subslices_per_slice(sseu, s),
			   sseu->subslice_mask[s]);

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u16 enabled_eus = sseu_get_eus(sseu, s, ss);

			drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
				   ss, hweight16(enabled_eus), enabled_eus);
		}
	}
}

static u16 compute_eu_total(const struct sseu_dev_info *sseu)
{
	u16 i, total = 0;

	for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
		total += hweight8(sseu->eu_mask[i]);

	return total;
}

static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u8 s_en;
	u32 ss_en, ss_en_mask;
	u8 eu_en;
	int s;

	if (IS_ELKHARTLAKE(dev_priv)) {
		sseu->max_slices = 1;
		sseu->max_subslices = 4;
		sseu->max_eus_per_subslice = 8;
	} else {
		sseu->max_slices = 1;
		sseu->max_subslices = 8;
		sseu->max_eus_per_subslice = 8;
	}

	s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
	ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
	ss_en_mask = BIT(sseu->max_subslices) - 1;
	eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);

	for (s = 0; s < sseu->max_slices; s++) {
		if (s_en & BIT(s)) {
			int ss_idx = sseu->max_subslices * s;
			int ss;

			sseu->slice_mask |= BIT(s);
			sseu->subslice_mask[s] = (ss_en >> ss_idx) & ss_en_mask;
			for (ss = 0; ss < sseu->max_subslices; ss++) {
				if (sseu->subslice_mask[s] & BIT(ss))
					sseu_set_eus(sseu, s, ss, eu_en);
			}
		}
	}
	sseu->eu_per_subslice = hweight8(eu_en);
	sseu->eu_total = compute_eu_total(sseu);

	/* ICL has no power gating restrictions. */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}
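
/*
 * Illustrative fuse decode (hypothetical register values): on an 8-subslice
 * Gen11 part with GEN11_GT_SUBSLICE_DISABLE reading 0xf0, ss_en = ~0xf0
 * keeps the low four subslice bits, so subslice_mask[0] becomes 0x0f and
 * each of those four subslices is assigned the same eu_en mask derived from
 * GEN11_EU_DISABLE.
 */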

static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	const u32 fuse2 = I915_READ(GEN8_FUSE2);
	int s, ss;
	const int eu_mask = 0xff;
	u32 subslice_mask, eu_en;

	sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
			    GEN10_F2_S_ENA_SHIFT;
	sseu->max_slices = 6;
	sseu->max_subslices = 4;
	sseu->max_eus_per_subslice = 8;

	subslice_mask = (1 << 4) - 1;
	subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
			   GEN10_F2_SS_DIS_SHIFT);

	/*
	 * Slice0 can have up to 3 subslices, but there are only 2 in
	 * slice1/2.
	 */
	sseu->subslice_mask[0] = subslice_mask;
	for (s = 1; s < sseu->max_slices; s++)
		sseu->subslice_mask[s] = subslice_mask & 0x3;

	/* Slice0 */
	eu_en = ~I915_READ(GEN8_EU_DISABLE0);
	for (ss = 0; ss < sseu->max_subslices; ss++)
		sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
	/* Slice1 */
	sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE1);
	sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
	/* Slice2 */
	sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
	/* Slice3 */
	sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE2);
	sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
	/* Slice4 */
	sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
	/* Slice5 */
	sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN10_EU_DISABLE3);
	sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);

	/*
	 * Do a second pass where we mark the subslices disabled if all
	 * their EUs are off.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			if (sseu_get_eus(sseu, s, ss) == 0)
				sseu->subslice_mask[s] &= ~BIT(ss);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CNL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;

	/* No restrictions on Power Gating */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}
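
/*
 * Layout note (derived from the reads above): the EU-disable fuses pack one
 * 8-bit field per subslice back to back across GEN8_EU_DISABLE0/1/2 and
 * GEN10_EU_DISABLE3, without regard for 32-bit register boundaries; that is
 * why eu_en is re-read at each register edge and the per-subslice fields are
 * picked out at shifting byte offsets rather than in one uniform loop.
 */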

static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse;

	fuse = I915_READ(CHV_FUSE_GT);

	sseu->slice_mask = BIT(0);
	sseu->max_slices = 1;
	sseu->max_subslices = 2;
	sseu->max_eus_per_subslice = 8;

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);

		sseu->subslice_mask[0] |= BIT(0);
		sseu_set_eus(sseu, 0, 0, ~disabled_mask);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);

		sseu->subslice_mask[0] |= BIT(1);
		sseu_set_eus(sseu, 0, 1, ~disabled_mask);
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CHV is expected to always have a uniform distribution of EU
	 * across subslices.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				sseu->eu_total /
					intel_sseu_subslice_total(sseu) :
				0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
}
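
/*
 * Worked example (hypothetical fuse values): each subslice's EU-disable mask
 * is assembled from two 4-bit fuse fields, R0 in the low nibble and R1 in
 * the high nibble. With R0 = 0x3 and R1 = 0x0, disabled_mask = 0x03;
 * ~disabled_mask is then stored with a one-byte SSEU stride (the
 * sign-extended upper bits are dropped), recording EUs 2-7, i.e. 6 EUs, for
 * that subslice.
 */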

static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, eu_disable, subslice_mask;
	const u8 eu_mask = 0xff;

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;

	/* BXT has a single slice and at most 3 subslices. */
	sseu->max_slices = IS_GEN9_LP(dev_priv) ? 1 : 3;
	sseu->max_subslices = IS_GEN9_LP(dev_priv) ? 3 : 4;
	sseu->max_eus_per_subslice = 8;

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = (1 << sseu->max_subslices) - 1;
	subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
			   GEN9_F2_SS_DIS_SHIFT);

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		sseu->subslice_mask[s] = subslice_mask;

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			int eu_per_ss;
			u8 eu_disabled_mask;

			if (!(sseu->subslice_mask[s] & BIT(ss)))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			eu_per_ss = sseu->max_eus_per_subslice -
				hweight8(eu_disabled_mask);

			/*
			 * Record which subslices have 7 EUs; we can
			 * tune the hash used to spread work among
			 * subslices if they are unbalanced.
			 */
			if (eu_per_ss == 7)
				sseu->subslice_7eu[s] |= BIT(ss);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;
	/*
	 * SKL+ supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT+ supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	sseu->has_slice_pg =
		!IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg =
		IS_GEN9_LP(dev_priv) && intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = sseu->eu_per_subslice > 2;

	if (IS_GEN9_LP(dev_priv)) {
#define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask[0] & BIT(ss)))
		info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;

		sseu->min_eu_in_pool = 0;
		if (info->has_pooled_eu) {
			if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
				sseu->min_eu_in_pool = 3;
			else if (IS_SS_DISABLED(1))
				sseu->min_eu_in_pool = 6;
			else
				sseu->min_eu_in_pool = 9;
		}
#undef IS_SS_DISABLED
	}
}
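
/*
 * Example fuse decode (hypothetical values, SKL-style 3x4 layout): if
 * GEN8_FUSE2 reports slices 0-1 enabled and the global subslice-disable
 * field fuses off subslice 3, subslice_mask becomes 0x7 and is applied to
 * both enabled slices; each slice then unpacks its own GEN9_EU_DISABLE
 * register, 8 bits per subslice.
 */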

static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	sseu->max_slices = 3;
	sseu->max_subslices = 3;
	sseu->max_eus_per_subslice = 8;

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
	subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
			   GEN8_F2_SS_DIS_SHIFT);

	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		sseu->subslice_mask[s] = subslice_mask;

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u8 eu_disabled_mask;
			u32 n_disabled;

			if (!(sseu->subslice_mask[s] & BIT(ss)))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask =
				eu_disable[s] >> (ss * sseu->max_eus_per_subslice);

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			n_disabled = hweight8(eu_disabled_mask);

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (sseu->max_eus_per_subslice - n_disabled == 7)
				sseu->subslice_7eu[s] |= 1 << ss;
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     intel_sseu_subslice_total(sseu)) :
				0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}
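
/*
 * Layout note (derived from the stitching above): each BDW slice needs
 * 3 subslices x 8 bits = 24 bits of EU-disable data, which does not align to
 * the 32-bit fuse registers; slice1's field therefore spans the top of
 * GEN8_EU_DISABLE0 and the bottom of GEN8_EU_DISABLE1, and slice2's spans
 * DISABLE1 and DISABLE2, hence the shift-and-or reassembly into eu_disable[].
 */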

static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 fuse1;
	int s, ss;

	/*
	 * There isn't a register to tell us how many slices/subslices
	 * there are. We derive that from the GT level in the PCI ID.
	 */
	switch (INTEL_INFO(dev_priv)->gt) {
	default:
		MISSING_CASE(INTEL_INFO(dev_priv)->gt);
		/* fall through */
	case 1:
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] = BIT(0);
		break;
	case 2:
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] = BIT(0) | BIT(1);
		break;
	case 3:
		sseu->slice_mask = BIT(0) | BIT(1);
		sseu->subslice_mask[0] = BIT(0) | BIT(1);
		sseu->subslice_mask[1] = BIT(0) | BIT(1);
		break;
	}

	sseu->max_slices = hweight8(sseu->slice_mask);
	sseu->max_subslices = hweight8(sseu->subslice_mask[0]);

	fuse1 = I915_READ(HSW_PAVP_FUSE1);
	switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
	default:
		MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
			     HSW_F1_EU_DIS_SHIFT);
		/* fall through */
	case HSW_F1_EU_DIS_10EUS:
		sseu->eu_per_subslice = 10;
		break;
	case HSW_F1_EU_DIS_8EUS:
		sseu->eu_per_subslice = 8;
		break;
	case HSW_F1_EU_DIS_6EUS:
		sseu->eu_per_subslice = 6;
		break;
	}
	sseu->max_eus_per_subslice = sseu->eu_per_subslice;

	for (s = 0; s < sseu->max_slices; s++) {
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			sseu_set_eus(sseu, s, ss,
				     (1UL << sseu->eu_per_subslice) - 1);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/* No powergating for you. */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}
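
/*
 * Worked example (hypothetical part): a GT2 device yields one slice with two
 * subslices; if FUSE1 reports HSW_F1_EU_DIS_10EUS, each subslice gets the
 * full (1UL << 10) - 1 EU mask and eu_total comes out as 20.
 */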

static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
{
	u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE);
	u32 base_freq, frac_freq;

	base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
	base_freq *= 1000;

	frac_freq = ((ts_override &
		      GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
	frac_freq = 1000 / (frac_freq + 1);

	return base_freq + frac_freq;
}
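
/*
 * Worked arithmetic (hypothetical override values): a divider field of 18
 * gives base_freq = (18 + 1) * 1000 = 19000 kHz, and a denominator field of
 * 4 gives frac_freq = 1000 / (4 + 1) = 200 kHz, i.e. a 19.2 MHz reference
 * reported as 19200 kHz.
 */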

static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;
	u32 crystal_clock = (rpm_config_reg &
			     GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;
	u32 f25_mhz = 25000;
	u32 f38_4_mhz = 38400;
	u32 crystal_clock = (rpm_config_reg &
			     GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
		return f38_4_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
		return f25_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
{
	u32 f12_5_mhz = 12500;
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;

	if (INTEL_GEN(dev_priv) <= 4) {
		/* PRMs say:
		 *
		 *     "The value in this register increments once every 16
		 *      hclks." (through the “Clocking Configuration”
		 *      (“CLKCFG”) MCHBAR register)
		 */
		return dev_priv->rawclk_freq / 16;
	} else if (INTEL_GEN(dev_priv) <= 8) {
		/* PRMs say:
		 *
		 *     "The PCU TSC counts 10ns increments; this timestamp
		 *      reflects bits 38:3 of the TSC (i.e. 80ns granularity,
		 *      rolling over every 1.5 hours)."
		 */
		return f12_5_mhz;
	} else if (INTEL_GEN(dev_priv) <= 9) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz;

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycles).
			 */
			freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
				      CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	} else if (INTEL_GEN(dev_priv) <= 12) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		/* First figure out the reference frequency. There are 2 ways
		 * we can compute the frequency, either through the
		 * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
		 * tells us which one we should use.
		 */
		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			u32 rpm_config_reg = I915_READ(RPM_CONFIG0);

			if (INTEL_GEN(dev_priv) <= 10)
				freq = gen10_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);
			else
				freq = gen11_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycles).
			 */
			freq >>= 3 - ((rpm_config_reg &
				       GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
				      GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	}

	MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
	return 0;
}
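
/*
 * Shift example (hypothetical configuration, Gen9 path): with a 24 MHz
 * crystal and a CTC shift parameter of 0, the command streamer timestamp
 * ticks at 24000 >> 3 = 3000 kHz, i.e. one increment every eight crystal
 * cycles.
 */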

#undef INTEL_VGA_DEVICE
#define INTEL_VGA_DEVICE(id, info) (id)

static const u16 subplatform_ult_ids[] = {
	INTEL_HSW_ULT_GT1_IDS(0),
	INTEL_HSW_ULT_GT2_IDS(0),
	INTEL_HSW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_GT1_IDS(0),
	INTEL_BDW_ULT_GT2_IDS(0),
	INTEL_BDW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_RSVD_IDS(0),
	INTEL_SKL_ULT_GT1_IDS(0),
	INTEL_SKL_ULT_GT2_IDS(0),
	INTEL_SKL_ULT_GT3_IDS(0),
	INTEL_KBL_ULT_GT1_IDS(0),
	INTEL_KBL_ULT_GT2_IDS(0),
	INTEL_KBL_ULT_GT3_IDS(0),
	INTEL_CFL_U_GT2_IDS(0),
	INTEL_CFL_U_GT3_IDS(0),
	INTEL_WHL_U_GT1_IDS(0),
	INTEL_WHL_U_GT2_IDS(0),
	INTEL_WHL_U_GT3_IDS(0),
};

static const u16 subplatform_ulx_ids[] = {
	INTEL_HSW_ULX_GT1_IDS(0),
	INTEL_HSW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT1_IDS(0),
	INTEL_BDW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT3_IDS(0),
	INTEL_BDW_ULX_RSVD_IDS(0),
	INTEL_SKL_ULX_GT1_IDS(0),
	INTEL_SKL_ULX_GT2_IDS(0),
	INTEL_KBL_ULX_GT1_IDS(0),
	INTEL_KBL_ULX_GT2_IDS(0),
	INTEL_AML_KBL_GT2_IDS(0),
	INTEL_AML_CFL_GT2_IDS(0),
};

static const u16 subplatform_portf_ids[] = {
	INTEL_CNL_PORT_F_IDS(0),
	INTEL_ICL_PORT_F_IDS(0),
};

static bool find_devid(u16 id, const u16 *p, unsigned int num)
{
	for (; num; num--, p++) {
		if (*p == id)
			return true;
	}

	return false;
}
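
/*
 * A plain linear scan is enough here: the ID tables above are small and
 * intel_device_info_subplatform_init() walks them only once at probe time.
 */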

void intel_device_info_subplatform_init(struct drm_i915_private *i915)
{
	const struct intel_device_info *info = INTEL_INFO(i915);
	const struct intel_runtime_info *rinfo = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(rinfo, info->platform);
	const unsigned int pb = __platform_mask_bit(rinfo, info->platform);
	u16 devid = INTEL_DEVID(i915);
	u32 mask = 0;

	/* Make sure IS_<platform> checks are working. */
	RUNTIME_INFO(i915)->platform_mask[pi] = BIT(pb);

	/* Find and mark subplatform bits based on the PCI device id. */
	if (find_devid(devid, subplatform_ult_ids,
		       ARRAY_SIZE(subplatform_ult_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_ULT);
	} else if (find_devid(devid, subplatform_ulx_ids,
			      ARRAY_SIZE(subplatform_ulx_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_ULX);
		if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
			/* ULX machines are also considered ULT. */
			mask |= BIT(INTEL_SUBPLATFORM_ULT);
		}
	} else if (find_devid(devid, subplatform_portf_ids,
			      ARRAY_SIZE(subplatform_portf_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_PORTF);
	}

	GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_BITS);

	RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
}
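
/*
 * Usage sketch (assuming the IS_SUBPLATFORM()-based helpers in i915_drv.h):
 * once the mask is filled in, a check such as
 *
 *	IS_HSW_ULT(i915)
 *
 * reduces to testing the INTEL_SUBPLATFORM_ULT bit of platform_mask[] for
 * INTEL_HASWELL.
 */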

/**
 * intel_device_info_runtime_init - initialize runtime info
 * @dev_priv: the i915 device
 *
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - filling n static structures with the limit is judged too laborious
 *     when a simple if statement does the job,
 *   - run-time checks (e.g. reading fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been set up as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) >= 10) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_scalers[pipe] = 2;
	} else if (IS_GEN(dev_priv, 9)) {
		runtime->num_scalers[PIPE_A] = 2;
		runtime->num_scalers[PIPE_B] = 2;
		runtime->num_scalers[PIPE_C] = 1;
	}

	BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);

	if (INTEL_GEN(dev_priv) >= 11)
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 6;
	else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 3;
	else if (IS_BROXTON(dev_priv)) {
		/*
		 * Skylake and Broxton currently don't expose the topmost plane as its
		 * use is exclusive with the legacy cursor and we only want to expose
		 * one of those, not both. Until we can safely expose the topmost plane
		 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
		 * we don't expose the topmost plane at all to prevent ABI breakage
		 * down the line.
		 */

		runtime->num_sprites[PIPE_A] = 2;
		runtime->num_sprites[PIPE_B] = 2;
		runtime->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 2;
	} else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 1;
	}

	if (i915_modparams.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (HAS_DISPLAY(dev_priv) &&
		   (IS_GEN_RANGE(dev_priv, 7, 8)) &&
		   HAS_PCH_SPLIT(dev_priv)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (HAS_PCH_CPT(dev_priv) &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			DRM_INFO("PipeC fused off\n");
			info->num_pipes -= 1;
		}
	} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
		u32 dfsm = I915_READ(SKL_DFSM);
		u8 enabled_mask = BIT(info->num_pipes) - 1;

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
			enabled_mask &= ~BIT(PIPE_A);
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
			enabled_mask &= ~BIT(PIPE_B);
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
			enabled_mask &= ~BIT(PIPE_C);
		if (INTEL_GEN(dev_priv) >= 12 &&
		    (dfsm & TGL_DFSM_PIPE_D_DISABLE))
			enabled_mask &= ~BIT(PIPE_D);

		/*
		 * At least one pipe should be enabled and if there are
		 * disabled pipes, they should be the last ones, with no holes
		 * in the mask.
		 */
		if (enabled_mask == 0 || !is_power_of_2(enabled_mask + 1))
			DRM_ERROR("invalid pipe fuse configuration: enabled_mask=0x%x\n",
				  enabled_mask);
		else
			info->num_pipes = hweight8(enabled_mask);
	}

	/* Initialize slice/subslice/EU info */
	if (IS_HASWELL(dev_priv))
		haswell_sseu_info_init(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_sseu_info_init(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		broadwell_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 9))
		gen9_sseu_info_init(dev_priv);
	else if (IS_GEN(dev_priv, 10))
		gen10_sseu_info_init(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 11)
		gen11_sseu_info_init(dev_priv);

	if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
		DRM_INFO("Disabling ppGTT for VT-d support\n");
		info->ppgtt_type = INTEL_PPGTT_NONE;
	}

	/* Initialize command stream timestamp frequency */
	runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
}
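
/*
 * Pipe-fuse example (hypothetical Gen9+ readout): on a three-pipe part with
 * SKL_DFSM_PIPE_C_DISABLE set, enabled_mask becomes 0x3; that passes the
 * "no holes" power-of-two check above, so num_pipes is reduced to 2.
 */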

void intel_driver_caps_print(const struct intel_driver_caps *caps,
			     struct drm_printer *p)
{
	drm_printf(p, "Has logical contexts? %s\n",
		   yesno(caps->has_logical_contexts));
	drm_printf(p, "scheduler: %x\n", caps->scheduler);
}

/*
 * Determine which engines are fused off in our particular hardware. Since the
 * fuse register is in the blitter powerwell, we need forcewake to be ready at
 * this point (but later we need to prune the forcewake domains for engines that
 * are indeed fused off).
 */
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	unsigned int logical_vdbox = 0;
	unsigned int i;
	u32 media_fuse;
	u16 vdbox_mask;
	u16 vebox_mask;

	if (INTEL_GEN(dev_priv) < 11)
		return;

	media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);

	vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
	vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
		      GEN11_GT_VEBOX_DISABLE_SHIFT;

	for (i = 0; i < I915_MAX_VCS; i++) {
		if (!HAS_ENGINE(dev_priv, _VCS(i)))
			continue;

		if (!(BIT(i) & vdbox_mask)) {
			info->engine_mask &= ~BIT(_VCS(i));
			DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
			continue;
		}

		/*
		 * In Gen11, only even numbered logical VDBOXes are
		 * hooked up to an SFC (Scaler & Format Converter) unit.
		 * In TGL each VDBOX has access to an SFC.
		 */
		if (IS_TIGERLAKE(dev_priv) || logical_vdbox++ % 2 == 0)
			RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
	}
	DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
			 vdbox_mask, VDBOX_MASK(dev_priv));
	GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));

	for (i = 0; i < I915_MAX_VECS; i++) {
		if (!HAS_ENGINE(dev_priv, _VECS(i)))
			continue;

		if (!(BIT(i) & vebox_mask)) {
			info->engine_mask &= ~BIT(_VECS(i));
			DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
		}
	}
	DRM_DEBUG_DRIVER("vebox enable: %04x, instances: %04lx\n",
			 vebox_mask, VEBOX_MASK(dev_priv));
	GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
}
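
/*
 * SFC example (derived from the loop above, hypothetical fusing): on a Gen11
 * part with vdbox_mask = 0x5 (vcs0 and vcs2 enabled), vcs0 is logical vdbox
 * 0 and gains SFC access, while vcs2 is logical vdbox 1 and does not; on TGL
 * every enabled vdbox gets its vdbox_sfc_access bit.
 */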