Linux Audio

Check our new training course

Loading...
v5.9
  1/*
  2 * Copyright © 2016 Intel Corporation
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice (including the next
 12 * paragraph) shall be included in all copies or substantial portions of the
 13 * Software.
 14 *
 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 21 * IN THE SOFTWARE.
 22 *
 23 */
 24
 25#include <drm/drm_print.h>
 26#include <drm/i915_pciids.h>
 27
 28#include "display/intel_cdclk.h"
 29#include "display/intel_de.h"
 30#include "intel_device_info.h"
 31#include "i915_drv.h"
 32
/*
 * Table of human-readable platform names, indexed by enum intel_platform.
 * PLATFORM_NAME() uses designated initializers plus stringification so each
 * INTEL_<x> enum slot maps to the string "<x>".  Consumed by
 * intel_platform_name() below; its size is build-checked against
 * INTEL_MAX_PLATFORMS there.
 */
#define PLATFORM_NAME(x) [INTEL_##x] = #x
static const char * const platform_names[] = {
	PLATFORM_NAME(I830),
	PLATFORM_NAME(I845G),
	PLATFORM_NAME(I85X),
	PLATFORM_NAME(I865G),
	PLATFORM_NAME(I915G),
	PLATFORM_NAME(I915GM),
	PLATFORM_NAME(I945G),
	PLATFORM_NAME(I945GM),
	PLATFORM_NAME(G33),
	PLATFORM_NAME(PINEVIEW),
	PLATFORM_NAME(I965G),
	PLATFORM_NAME(I965GM),
	PLATFORM_NAME(G45),
	PLATFORM_NAME(GM45),
	PLATFORM_NAME(IRONLAKE),
	PLATFORM_NAME(SANDYBRIDGE),
	PLATFORM_NAME(IVYBRIDGE),
	PLATFORM_NAME(VALLEYVIEW),
	PLATFORM_NAME(HASWELL),
	PLATFORM_NAME(BROADWELL),
	PLATFORM_NAME(CHERRYVIEW),
	PLATFORM_NAME(SKYLAKE),
	PLATFORM_NAME(BROXTON),
	PLATFORM_NAME(KABYLAKE),
	PLATFORM_NAME(GEMINILAKE),
	PLATFORM_NAME(COFFEELAKE),
	PLATFORM_NAME(COMETLAKE),
	PLATFORM_NAME(CANNONLAKE),
	PLATFORM_NAME(ICELAKE),
	PLATFORM_NAME(ELKHARTLAKE),
	PLATFORM_NAME(TIGERLAKE),
	PLATFORM_NAME(ROCKETLAKE),
	PLATFORM_NAME(DG1),
};
#undef PLATFORM_NAME
 70
 71const char *intel_platform_name(enum intel_platform platform)
 72{
 73	BUILD_BUG_ON(ARRAY_SIZE(platform_names) != INTEL_MAX_PLATFORMS);
 74
 75	if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) ||
 76			 platform_names[platform] == NULL))
 77		return "<unknown>";
 78
 79	return platform_names[platform];
 80}
 81
 82static const char *iommu_name(void)
 83{
 84	const char *msg = "n/a";
 85
 86#ifdef CONFIG_INTEL_IOMMU
 87	msg = enableddisabled(intel_iommu_gfx_mapped);
 88#endif
 89
 90	return msg;
 91}
 92
/*
 * intel_device_info_print_static - dump the static device info
 * @info: device info (filled at compile/probe time) to print
 * @p: drm_printer destination (debugfs, dmesg, ...)
 *
 * Prints the fixed capability fields first, then every boolean feature
 * flag: the device-wide DEV_INFO flags followed by the display-specific
 * ones, each as "name: yes/no".
 */
void intel_device_info_print_static(const struct intel_device_info *info,
				    struct drm_printer *p)
{
	drm_printf(p, "gen: %d\n", info->gen);
	drm_printf(p, "gt: %d\n", info->gt);
	drm_printf(p, "iommu: %s\n", iommu_name());
	drm_printf(p, "memory-regions: %x\n", info->memory_regions);
	drm_printf(p, "page-sizes: %x\n", info->page_sizes);
	drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
	drm_printf(p, "ppgtt-size: %d\n", info->ppgtt_size);
	drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type);
	drm_printf(p, "dma_mask_size: %u\n", info->dma_mask_size);

	/* One "flag: yes/no" line per device-wide feature flag. */
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG

	/* Same, for the display-only feature flags. */
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name));
	DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}
114
/*
 * intel_device_info_print_runtime - dump the runtime-probed device info
 * @info: runtime info (filled in intel_device_info_runtime_init()) to print
 * @p: drm_printer destination
 */
void intel_device_info_print_runtime(const struct intel_runtime_info *info,
				     struct drm_printer *p)
{
	drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq);
	drm_printf(p, "CS timestamp frequency: %u Hz\n",
		   info->cs_timestamp_frequency_hz);
}
122
123static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
124{
125	u32 ts_override = intel_uncore_read(&dev_priv->uncore,
126					    GEN9_TIMESTAMP_OVERRIDE);
127	u32 base_freq, frac_freq;
128
129	base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
130		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
131	base_freq *= 1000000;
132
133	frac_freq = ((ts_override &
134		      GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
135		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
136	frac_freq = 1000000 / (frac_freq + 1);
137
138	return base_freq + frac_freq;
139}
140
141static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
142					u32 rpm_config_reg)
143{
144	u32 f19_2_mhz = 19200000;
145	u32 f24_mhz = 24000000;
146	u32 crystal_clock = (rpm_config_reg &
147			     GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
148			    GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
149
150	switch (crystal_clock) {
151	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
152		return f19_2_mhz;
153	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
154		return f24_mhz;
155	default:
156		MISSING_CASE(crystal_clock);
157		return 0;
158	}
159}
160
161static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
162					u32 rpm_config_reg)
163{
164	u32 f19_2_mhz = 19200000;
165	u32 f24_mhz = 24000000;
166	u32 f25_mhz = 25000000;
167	u32 f38_4_mhz = 38400000;
168	u32 crystal_clock = (rpm_config_reg &
169			     GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
170			    GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
171
172	switch (crystal_clock) {
173	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
174		return f24_mhz;
175	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
176		return f19_2_mhz;
177	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
178		return f38_4_mhz;
179	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
180		return f25_mhz;
181	default:
182		MISSING_CASE(crystal_clock);
183		return 0;
184	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
185}
186
/*
 * read_timestamp_frequency - determine the command streamer timestamp
 * tick rate in Hz
 * @dev_priv: i915 device
 *
 * Each GPU generation derives the timestamp clock differently: gen4 and
 * earlier tie it to the raw clock, gen5-8 use a fixed 12.5 MHz tick,
 * gen9 reads CTC_MODE (falling back to TIMESTAMP_OVERRIDE when the
 * divide logic is selected), and gen10-12 additionally consult
 * RPM_CONFIG0 for the crystal clock.  Returns 0 for unknown generations.
 */
static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 f12_5_mhz = 12500000;
	u32 f19_2_mhz = 19200000;
	u32 f24_mhz = 24000000;

	if (INTEL_GEN(dev_priv) <= 4) {
		/* PRMs say:
		 *
		 *     "The value in this register increments once every 16
		 *      hclks." (through the “Clocking Configuration”
		 *      (“CLKCFG”) MCHBAR register)
		 */
		return RUNTIME_INFO(dev_priv)->rawclk_freq * 1000 / 16;
	} else if (INTEL_GEN(dev_priv) <= 8) {
		/* PRMs say:
		 *
		 *     "The PCU TSC counts 10ns increments; this timestamp
		 *      reflects bits 38:3 of the TSC (i.e. 80ns granularity,
		 *      rolling over every 1.5 hours).
		 */
		return f12_5_mhz;
	} else if (INTEL_GEN(dev_priv) <= 9) {
		u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
		u32 freq = 0;

		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			/* Fixed crystal: 19.2 MHz on gen9 LP, 24 MHz otherwise. */
			freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz;

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycle).
			 */
			freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
				      CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	} else if (INTEL_GEN(dev_priv) <= 12) {
		u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
		u32 freq = 0;

		/* First figure out the reference frequency. There are 2 ways
		 * we can compute the frequency, either through the
		 * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
		 * tells us which one we should use.
		 */
		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			u32 rpm_config_reg = intel_uncore_read(uncore, RPM_CONFIG0);

			/* The crystal field encoding changed on gen11. */
			if (INTEL_GEN(dev_priv) <= 10)
				freq = gen10_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);
			else
				freq = gen11_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycle).
			 */
			freq >>= 3 - ((rpm_config_reg &
				       GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
				      GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	}

	MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
	return 0;
}
264
/*
 * Re-purpose INTEL_VGA_DEVICE() so that the PCI ID list macros from
 * i915_pciids.h expand to bare device ids instead of full table entries,
 * letting us build plain u16 id tables for the subplatform lookups below.
 */
#undef INTEL_VGA_DEVICE
#define INTEL_VGA_DEVICE(id, info) (id)

/* Device ids belonging to the ULT subplatform. */
static const u16 subplatform_ult_ids[] = {
	INTEL_HSW_ULT_GT1_IDS(0),
	INTEL_HSW_ULT_GT2_IDS(0),
	INTEL_HSW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_GT1_IDS(0),
	INTEL_BDW_ULT_GT2_IDS(0),
	INTEL_BDW_ULT_GT3_IDS(0),
	INTEL_BDW_ULT_RSVD_IDS(0),
	INTEL_SKL_ULT_GT1_IDS(0),
	INTEL_SKL_ULT_GT2_IDS(0),
	INTEL_SKL_ULT_GT3_IDS(0),
	INTEL_KBL_ULT_GT1_IDS(0),
	INTEL_KBL_ULT_GT2_IDS(0),
	INTEL_KBL_ULT_GT3_IDS(0),
	INTEL_CFL_U_GT2_IDS(0),
	INTEL_CFL_U_GT3_IDS(0),
	INTEL_WHL_U_GT1_IDS(0),
	INTEL_WHL_U_GT2_IDS(0),
	INTEL_WHL_U_GT3_IDS(0),
	INTEL_CML_U_GT1_IDS(0),
	INTEL_CML_U_GT2_IDS(0),
};

/* Device ids belonging to the ULX subplatform. */
static const u16 subplatform_ulx_ids[] = {
	INTEL_HSW_ULX_GT1_IDS(0),
	INTEL_HSW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT1_IDS(0),
	INTEL_BDW_ULX_GT2_IDS(0),
	INTEL_BDW_ULX_GT3_IDS(0),
	INTEL_BDW_ULX_RSVD_IDS(0),
	INTEL_SKL_ULX_GT1_IDS(0),
	INTEL_SKL_ULX_GT2_IDS(0),
	INTEL_KBL_ULX_GT1_IDS(0),
	INTEL_KBL_ULX_GT2_IDS(0),
	INTEL_AML_KBL_GT2_IDS(0),
	INTEL_AML_CFL_GT2_IDS(0),
};

/* Device ids (CNL/ICL) with display port F present. */
static const u16 subplatform_portf_ids[] = {
	INTEL_CNL_PORT_F_IDS(0),
	INTEL_ICL_PORT_F_IDS(0),
};
310
311static bool find_devid(u16 id, const u16 *p, unsigned int num)
312{
313	for (; num; num--, p++) {
314		if (*p == id)
315			return true;
316	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
317
318	return false;
319}
320
/*
 * intel_device_info_subplatform_init - initialize the platform/subplatform
 * bits of the runtime platform mask
 * @i915: i915 device
 *
 * Sets the bit for the current platform (making IS_<platform>() checks
 * work), then marks the matching subplatform (ULT, ULX or port F) based
 * on the PCI device id.  A device matches at most one of the id tables;
 * the only combination is HSW/BDW ULX, which also implies ULT.
 */
void intel_device_info_subplatform_init(struct drm_i915_private *i915)
{
	const struct intel_device_info *info = INTEL_INFO(i915);
	const struct intel_runtime_info *rinfo = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(rinfo, info->platform);
	const unsigned int pb = __platform_mask_bit(rinfo, info->platform);
	u16 devid = INTEL_DEVID(i915);
	u32 mask = 0;

	/* Make sure IS_<platform> checks are working. */
	RUNTIME_INFO(i915)->platform_mask[pi] = BIT(pb);

	/* Find and mark subplatform bits based on the PCI device id. */
	if (find_devid(devid, subplatform_ult_ids,
		       ARRAY_SIZE(subplatform_ult_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_ULT);
	} else if (find_devid(devid, subplatform_ulx_ids,
			      ARRAY_SIZE(subplatform_ulx_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_ULX);
		if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
			/* ULX machines are also considered ULT. */
			mask |= BIT(INTEL_SUBPLATFORM_ULT);
		}
	} else if (find_devid(devid, subplatform_portf_ids,
			      ARRAY_SIZE(subplatform_portf_ids))) {
		mask = BIT(INTEL_SUBPLATFORM_PORTF);
	}

	/* Subplatform bits must fit in the bits reserved for them. */
	GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_BITS);

	RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
}
353
354/**
355 * intel_device_info_runtime_init - initialize runtime info
356 * @dev_priv: the i915 device
357 *
358 * Determine various intel_device_info fields at runtime.
359 *
360 * Use it when either:
361 *   - it's judged too laborious to fill n static structures with the limit
362 *     when a simple if statement does the job,
363 *   - run-time checks (eg read fuse/strap registers) are needed.
364 *
365 * This function needs to be called:
366 *   - after the MMIO has been setup as we are reading registers,
367 *   - after the PCH has been detected,
368 *   - before the first usage of the fields it can tweak.
369 */
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
	enum pipe pipe;

	/* Number of pipe scalers per pipe, by generation. */
	if (INTEL_GEN(dev_priv) >= 10) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_scalers[pipe] = 2;
	} else if (IS_GEN(dev_priv, 9)) {
		runtime->num_scalers[PIPE_A] = 2;
		runtime->num_scalers[PIPE_B] = 2;
		runtime->num_scalers[PIPE_C] = 1;
	}

	BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);

	/* Number of sprite planes per pipe, by platform/generation. */
	if (IS_ROCKETLAKE(dev_priv))
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 4;
	else if (INTEL_GEN(dev_priv) >= 11)
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 6;
	else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 3;
	else if (IS_BROXTON(dev_priv)) {
		/*
		 * Skylake and Broxton currently don't expose the topmost plane as its
		 * use is exclusive with the legacy cursor and we only want to expose
		 * one of those, not both. Until we can safely expose the topmost plane
		 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
		 * we don't expose the topmost plane at all to prevent ABI breakage
		 * down the line.
		 */

		runtime->num_sprites[PIPE_A] = 2;
		runtime->num_sprites[PIPE_B] = 2;
		runtime->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 2;
	} else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			runtime->num_sprites[pipe] = 1;
	}

	/* Detect display parts fused off via strap registers (gen7/8 + PCH). */
	if (HAS_DISPLAY(dev_priv) && IS_GEN_RANGE(dev_priv, 7, 8) &&
	    HAS_PCH_SPLIT(dev_priv)) {
		u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
		u32 sfuse_strap = intel_de_read(dev_priv, SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (HAS_PCH_CPT(dev_priv) &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			drm_info(&dev_priv->drm,
				 "Display fused off, disabling\n");
			info->pipe_mask = 0;
			info->cpu_transcoder_mask = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			drm_info(&dev_priv->drm, "PipeC fused off\n");
			info->pipe_mask &= ~BIT(PIPE_C);
			info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
		}
	} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
		/* Gen9+: per-pipe and per-feature fusing is read from DFSM. */
		u32 dfsm = intel_de_read(dev_priv, SKL_DFSM);

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
			info->pipe_mask &= ~BIT(PIPE_A);
			info->cpu_transcoder_mask &= ~BIT(TRANSCODER_A);
		}
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE) {
			info->pipe_mask &= ~BIT(PIPE_B);
			info->cpu_transcoder_mask &= ~BIT(TRANSCODER_B);
		}
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE) {
			info->pipe_mask &= ~BIT(PIPE_C);
			info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
		}
		if (INTEL_GEN(dev_priv) >= 12 &&
		    (dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
			info->pipe_mask &= ~BIT(PIPE_D);
			info->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
		}

		if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
			info->display.has_hdcp = 0;

		if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
			info->display.has_fbc = 0;

		if (INTEL_GEN(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
			info->display.has_csr = 0;

		if (INTEL_GEN(dev_priv) >= 10 &&
		    (dfsm & CNL_DFSM_DISPLAY_DSC_DISABLE))
			info->display.has_dsc = 0;
	}

	/* ppGTT is disabled on gen6 when VT-d is active. */
	if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
		drm_info(&dev_priv->drm,
			 "Disabling ppGTT for VT-d support\n");
		info->ppgtt_type = INTEL_PPGTT_NONE;
	}

	runtime->rawclk_freq = intel_read_rawclk(dev_priv);
	drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", runtime->rawclk_freq);

	/* Initialize command stream timestamp frequency */
	runtime->cs_timestamp_frequency_hz =
		read_timestamp_frequency(dev_priv);
	if (runtime->cs_timestamp_frequency_hz) {
		runtime->cs_timestamp_period_ns =
			i915_cs_timestamp_ticks_to_ns(dev_priv, 1);
		drm_dbg(&dev_priv->drm,
			"CS timestamp wraparound in %lldms\n",
			div_u64(mul_u32_u32(runtime->cs_timestamp_period_ns,
					    S32_MAX),
				USEC_PER_SEC));
	}
}
501
/*
 * intel_driver_caps_print - dump the driver capability flags
 * @caps: driver caps to print
 * @p: drm_printer destination
 */
void intel_driver_caps_print(const struct intel_driver_caps *caps,
			     struct drm_printer *p)
{
	drm_printf(p, "Has logical contexts? %s\n",
		   yesno(caps->has_logical_contexts));
	drm_printf(p, "scheduler: %x\n", caps->scheduler);
}
v4.10.11
  1/*
  2 * Copyright © 2016 Intel Corporation
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice (including the next
 12 * paragraph) shall be included in all copies or substantial portions of the
 13 * Software.
 14 *
 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 21 * IN THE SOFTWARE.
 22 *
 23 */
 24
 
 
 
 
 
 
 25#include "i915_drv.h"
 26
/*
 * intel_device_info_dump - log the device info and feature flags
 * @dev_priv: i915 device
 *
 * Emits the generation, PCI device id/revision and every DEV_INFO
 * boolean flag to the driver debug log.
 */
void intel_device_info_dump(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = &dev_priv->info;

	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x",
			 info->gen,
			 dev_priv->drm.pdev->device,
			 dev_priv->drm.pdev->revision);
	/* One "name: yes/no" debug line per feature flag. */
#define PRINT_FLAG(name) \
	DRM_DEBUG_DRIVER("i915 device info: " #name ": %s", yesno(info->name))
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}
 40
/*
 * cherryview_sseu_info_init - read the CHV slice/subslice/EU fuse config
 * @dev_priv: i915 device
 *
 * Decodes CHV_FUSE_GT into the sseu_dev_info: CHV has a single slice
 * with up to two subslices of 8 EUs each; disabled EUs are counted from
 * the per-subslice EU disable fields.
 */
static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
	u32 fuse, eu_dis;

	fuse = I915_READ(CHV_FUSE_GT);

	sseu->slice_mask = BIT(0);

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		sseu->subslice_mask |= BIT(0);
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
				 CHV_FGT_EU_DIS_SS0_R1_MASK);
		/* 8 EUs per subslice minus the fused-off ones. */
		sseu->eu_total += 8 - hweight32(eu_dis);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		sseu->subslice_mask |= BIT(1);
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
				 CHV_FGT_EU_DIS_SS1_R1_MASK);
		sseu->eu_total += 8 - hweight32(eu_dis);
	}

	/*
	 * CHV expected to always have a uniform distribution of EU
	 * across subslices.
	*/
	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
				sseu->eu_total / sseu_subslice_total(sseu) :
				0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	*/
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
}
 80
/*
 * gen9_sseu_info_init - read the gen9 slice/subslice/EU fuse config
 * @dev_priv: i915 device
 *
 * Decodes GEN8_FUSE2 and the per-slice GEN9_EU_DISABLE registers into
 * the sseu_dev_info (up to 3 slices x 4 subslices x 8 EUs), derives the
 * power-gating capabilities, and applies the BXT 2x6 pooled-EU
 * workaround where needed.
 */
static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct sseu_dev_info *sseu = &info->sseu;
	int s_max = 3, ss_max = 4, eu_max = 8;
	int s, ss;
	u32 fuse2, eu_disable;
	u8 eu_mask = 0xff;

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	*/
	sseu->subslice_mask = (1 << ss_max) - 1;
	sseu->subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
				 GEN9_F2_SS_DIS_SHIFT);

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	*/
	for (s = 0; s < s_max; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
		for (ss = 0; ss < ss_max; ss++) {
			int eu_per_ss;

			if (!(sseu->subslice_mask & BIT(ss)))
				/* skip disabled subslice */
				continue;

			/* 8 disable bits per subslice within the register. */
			eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
						      eu_mask);

			/*
			 * Record which subslice(s) has(have) 7 EUs. we
			 * can tune the hash used to spread work among
			 * subslices if they are unbalanced.
			 */
			if (eu_per_ss == 7)
				sseu->subslice_7eu[s] |= BIT(ss);

			sseu->eu_total += eu_per_ss;
		}
	}

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	*/
	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     sseu_subslice_total(sseu)) : 0;
	/*
	 * SKL supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	*/
	sseu->has_slice_pg =
		(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
		hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg =
		IS_BROXTON(dev_priv) && sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = sseu->eu_per_subslice > 2;

	if (IS_BROXTON(dev_priv)) {
#define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask & BIT(ss)))
		/*
		 * There is a HW issue in 2x6 fused down parts that requires
		 * Pooled EU to be enabled as a WA. The pool configuration
		 * changes depending upon which subslice is fused down. This
		 * doesn't affect if the device has all 3 subslices enabled.
		 */
		/* WaEnablePooledEuFor2x6:bxt */
		info->has_pooled_eu = ((hweight8(sseu->subslice_mask) == 3) ||
				       (hweight8(sseu->subslice_mask) == 2 &&
					INTEL_REVID(dev_priv) < BXT_REVID_C0));

		/* Minimum pool size depends on which subslice was fused. */
		sseu->min_eu_in_pool = 0;
		if (info->has_pooled_eu) {
			if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
				sseu->min_eu_in_pool = 3;
			else if (IS_SS_DISABLED(1))
				sseu->min_eu_in_pool = 6;
			else
				sseu->min_eu_in_pool = 9;
		}
#undef IS_SS_DISABLED
	}
}
183
/*
 * broadwell_sseu_info_init - read the BDW slice/subslice/EU fuse config
 * @dev_priv: i915 device
 *
 * Decodes GEN8_FUSE2 plus the three GEN8_EU_DISABLE registers (whose
 * per-slice fields straddle register boundaries) into the sseu_dev_info:
 * up to 3 slices x 3 subslices x 8 EUs.
 */
static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
	const int s_max = 3, ss_max = 3, eu_max = 8;
	int s, ss;
	u32 fuse2, eu_disable[3]; /* s_max */

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	sseu->subslice_mask = BIT(ss_max) - 1;
	sseu->subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
				 GEN8_F2_SS_DIS_SHIFT);

	/* Reassemble the per-slice 24-bit EU disable fields from the
	 * three 32-bit registers they are packed into. */
	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < s_max; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		for (ss = 0; ss < ss_max; ss++) {
			u32 n_disabled;

			if (!(sseu->subslice_mask & BIT(ss)))
				/* skip disabled subslice */
				continue;

			n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (eu_max - n_disabled == 7)
				sseu->subslice_7eu[s] |= 1 << ss;

			sseu->eu_total += eu_max - n_disabled;
		}
	}

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     sseu_subslice_total(sseu)) : 0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}
254
255/*
 
 
 
256 * Determine various intel_device_info fields at runtime.
257 *
258 * Use it when either:
259 *   - it's judged too laborious to fill n static structures with the limit
260 *     when a simple if statement does the job,
261 *   - run-time checks (eg read fuse/strap registers) are needed.
262 *
263 * This function needs to be called:
264 *   - after the MMIO has been setup as we are reading registers,
265 *   - after the PCH has been detected,
266 *   - before the first usage of the fields it can tweak.
267 */
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	enum pipe pipe;

	/*
	 * Skylake and Broxton currently don't expose the topmost plane as its
	 * use is exclusive with the legacy cursor and we only want to expose
	 * one of those, not both. Until we can safely expose the topmost plane
	 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
	 * we don't expose the topmost plane at all to prevent ABI breakage
	 * down the line.
	 */
	if (IS_BROXTON(dev_priv)) {
		info->num_sprites[PIPE_A] = 2;
		info->num_sprites[PIPE_B] = 2;
		info->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 2;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 1;
	}

	/* Display can be disabled by module parameter or fuse straps. */
	if (i915.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (info->num_pipes > 0 &&
		   (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
		   HAS_PCH_SPLIT(dev_priv)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (dev_priv->pch_type == PCH_CPT &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			DRM_INFO("PipeC fused off\n");
			info->num_pipes -= 1;
		}
	} else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
		u32 dfsm = I915_READ(SKL_DFSM);
		u8 disabled_mask = 0;
		bool invalid;
		int num_bits;

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
			disabled_mask |= BIT(PIPE_A);
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
			disabled_mask |= BIT(PIPE_B);
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
			disabled_mask |= BIT(PIPE_C);

		num_bits = hweight8(disabled_mask);

		/* Only trailing-pipe fusing is representable by num_pipes. */
		switch (disabled_mask) {
		case BIT(PIPE_A):
		case BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_C):
			invalid = true;
			break;
		default:
			invalid = false;
		}

		if (num_bits > info->num_pipes || invalid)
			DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
				  disabled_mask);
		else
			info->num_pipes -= num_bits;
	}

	/* Initialize slice/subslice/EU info */
	if (IS_CHERRYVIEW(dev_priv))
		cherryview_sseu_info_init(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		broadwell_sseu_info_init(dev_priv);
	else if (INTEL_INFO(dev_priv)->gen >= 9)
		gen9_sseu_info_init(dev_priv);

	info->has_snoop = !info->has_llc;

	/* Snooping is broken on BXT A stepping. */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		info->has_snoop = false;

	DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask);
	DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask));
	DRM_DEBUG_DRIVER("subslice total: %u\n",
			 sseu_subslice_total(&info->sseu));
	DRM_DEBUG_DRIVER("subslice mask %04x\n", info->sseu.subslice_mask);
	DRM_DEBUG_DRIVER("subslice per slice: %u\n",
			 hweight8(info->sseu.subslice_mask));
	DRM_DEBUG_DRIVER("EU total: %u\n", info->sseu.eu_total);
	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->sseu.eu_per_subslice);
	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
			 info->sseu.has_slice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
			 info->sseu.has_subslice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
			 info->sseu.has_eu_pg ? "y" : "n");
}