v5.14.15
   1// SPDX-License-Identifier: MIT
   2/*
   3 * Copyright © 2020 Intel Corporation
   4 */
   5
   6#include <drm/drm_debugfs.h>
   7#include <drm/drm_fourcc.h>
   8
   9#include "i915_debugfs.h"
  10#include "intel_display_debugfs.h"
  11#include "intel_display_power.h"
  12#include "intel_de.h"
  13#include "intel_display_types.h"
  14#include "intel_dmc.h"
  15#include "intel_dp.h"
  16#include "intel_fbc.h"
  17#include "intel_hdcp.h"
  18#include "intel_hdmi.h"
  19#include "intel_pm.h"
  20#include "intel_psr.h"
  21#include "intel_sideband.h"
  22#include "intel_sprite.h"
  23
  24static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
  25{
  26	return to_i915(node->minor->dev);
  27}
  28
  29static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
  30{
  31	struct drm_i915_private *dev_priv = node_to_i915(m->private);
  32
  33	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
  34		   dev_priv->fb_tracking.busy_bits);
  35
  36	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
  37		   dev_priv->fb_tracking.flip_bits);
  38
  39	return 0;
  40}
  41
  42static int i915_fbc_status(struct seq_file *m, void *unused)
  43{
  44	struct drm_i915_private *dev_priv = node_to_i915(m->private);
  45	struct intel_fbc *fbc = &dev_priv->fbc;
  46	intel_wakeref_t wakeref;
  47
  48	if (!HAS_FBC(dev_priv))
  49		return -ENODEV;
  50
  51	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
  52	mutex_lock(&fbc->lock);
  53
  54	if (intel_fbc_is_active(dev_priv))
  55		seq_puts(m, "FBC enabled\n");
  56	else
  57		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
  58
  59	if (intel_fbc_is_active(dev_priv)) {
  60		u32 mask;
  61
  62		if (DISPLAY_VER(dev_priv) >= 8)
  63			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
  64		else if (DISPLAY_VER(dev_priv) >= 7)
  65			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
  66		else if (DISPLAY_VER(dev_priv) >= 5)
  67			mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
  68		else if (IS_G4X(dev_priv))
  69			mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
  70		else
  71			mask = intel_de_read(dev_priv, FBC_STATUS) &
  72				(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
  73
  74		seq_printf(m, "Compressing: %s\n", yesno(mask));
  75	}
  76
  77	mutex_unlock(&fbc->lock);
  78	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
  79
  80	return 0;
  81}
  82
  83static int i915_fbc_false_color_get(void *data, u64 *val)
  84{
  85	struct drm_i915_private *dev_priv = data;
  86
  87	if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv))
  88		return -ENODEV;
  89
  90	*val = dev_priv->fbc.false_color;
  91
  92	return 0;
  93}
  94
  95static int i915_fbc_false_color_set(void *data, u64 val)
  96{
  97	struct drm_i915_private *dev_priv = data;
  98	u32 reg;
  99
 100	if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv))
 101		return -ENODEV;
 102
 103	mutex_lock(&dev_priv->fbc.lock);
 104
 105	reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
 106	dev_priv->fbc.false_color = val;
 107
 108	intel_de_write(dev_priv, ILK_DPFC_CONTROL,
 109		       val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));
 110
 111	mutex_unlock(&dev_priv->fbc.lock);
 112	return 0;
 113}
 114
 115DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
 116			i915_fbc_false_color_get, i915_fbc_false_color_set,
 117			"%llu\n");
 118
 119static int i915_ips_status(struct seq_file *m, void *unused)
 120{
 121	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 122	intel_wakeref_t wakeref;
 123
 124	if (!HAS_IPS(dev_priv))
 125		return -ENODEV;
 126
 127	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 128
 129	seq_printf(m, "Enabled by kernel parameter: %s\n",
 130		   yesno(dev_priv->params.enable_ips));
 131
 132	if (DISPLAY_VER(dev_priv) >= 8) {
 133		seq_puts(m, "Currently: unknown\n");
 134	} else {
 135		if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
 136			seq_puts(m, "Currently: enabled\n");
 137		else
 138			seq_puts(m, "Currently: disabled\n");
 139	}
 140
 141	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 142
 143	return 0;
 144}
 145
 146static int i915_sr_status(struct seq_file *m, void *unused)
 147{
 148	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 149	intel_wakeref_t wakeref;
 150	bool sr_enabled = false;
 151
 152	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 153
 154	if (DISPLAY_VER(dev_priv) >= 9)
 155		/* no global SR status; inspect per-plane WM */;
 156	else if (HAS_PCH_SPLIT(dev_priv))
 157		sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
 158	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
 159		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
 160		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
 161	else if (IS_I915GM(dev_priv))
 162		sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
 163	else if (IS_PINEVIEW(dev_priv))
 164		sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
 165	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 166		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
 167
 168	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
 169
 170	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
 171
 172	return 0;
 173}
 174
 175static int i915_opregion(struct seq_file *m, void *unused)
 176{
 177	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
 178
 179	if (opregion->header)
 180		seq_write(m, opregion->header, OPREGION_SIZE);
 181
 182	return 0;
 183}
 184
 185static int i915_vbt(struct seq_file *m, void *unused)
 186{
 187	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
 188
 189	if (opregion->vbt)
 190		seq_write(m, opregion->vbt, opregion->vbt_size);
 191
 192	return 0;
 193}
 194
 195static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 196{
 197	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 198	struct drm_device *dev = &dev_priv->drm;
 199	struct intel_framebuffer *fbdev_fb = NULL;
 200	struct drm_framebuffer *drm_fb;
 201
 202#ifdef CONFIG_DRM_FBDEV_EMULATION
 203	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
 204		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
 205
 206		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
 207			   fbdev_fb->base.width,
 208			   fbdev_fb->base.height,
 209			   fbdev_fb->base.format->depth,
 210			   fbdev_fb->base.format->cpp[0] * 8,
 211			   fbdev_fb->base.modifier,
 212			   drm_framebuffer_read_refcount(&fbdev_fb->base));
 213		i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
 214		seq_putc(m, '\n');
 215	}
 216#endif
 217
 218	mutex_lock(&dev->mode_config.fb_lock);
 219	drm_for_each_fb(drm_fb, dev) {
 220		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
 221		if (fb == fbdev_fb)
 222			continue;
 223
 224		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
 225			   fb->base.width,
 226			   fb->base.height,
 227			   fb->base.format->depth,
 228			   fb->base.format->cpp[0] * 8,
 229			   fb->base.modifier,
 230			   drm_framebuffer_read_refcount(&fb->base));
 231		i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
 232		seq_putc(m, '\n');
 233	}
 234	mutex_unlock(&dev->mode_config.fb_lock);
 235
 236	return 0;
 237}
 238
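/*
 * Decode and report the sink-side PSR state read from the
 * DP_PSR_STATUS DPCD register.
 */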
 239static int i915_psr_sink_status_show(struct seq_file *m, void *data)
 240{
 241	u8 val;
 242	static const char * const sink_status[] = {
 243		"inactive",
 244		"transition to active, capture and display",
 245		"active, display from RFB",
 246		"active, capture and display on sink device timings",
 247		"transition to inactive, capture and display, timing re-sync",
 248		"reserved",
 249		"reserved",
 250		"sink internal error",
 251	};
 252	struct drm_connector *connector = m->private;
 253	struct intel_dp *intel_dp =
 254		intel_attached_dp(to_intel_connector(connector));
 255	int ret;
 256
 257	if (!CAN_PSR(intel_dp)) {
 258		seq_puts(m, "PSR Unsupported\n");
 259		return -ENODEV;
 260	}
 261
 262	if (connector->status != connector_status_connected)
 263		return -ENODEV;
 264
 265	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
 266
 267	if (ret == 1) {
 268		const char *str = "unknown";
 269
 270		val &= DP_PSR_SINK_STATE_MASK;
 271		if (val < ARRAY_SIZE(sink_status))
 272			str = sink_status[val];
 273		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
 274	} else {
 275		return ret;
 276	}
 277
 278	return 0;
 279}
 280DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
 281
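/*
 * Report the source-side PSR/PSR2 live state decoded from the
 * EDP_PSR(2)_STATUS register.
 */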
 282static void
 283psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
 284{
 285	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 286	const char *status = "unknown";
 287	u32 val, status_val;
 288
 289	if (intel_dp->psr.psr2_enabled) {
 290		static const char * const live_status[] = {
 291			"IDLE",
 292			"CAPTURE",
 293			"CAPTURE_FS",
 294			"SLEEP",
 295			"BUFON_FW",
 296			"ML_UP",
 297			"SU_STANDBY",
 298			"FAST_SLEEP",
 299			"DEEP_SLEEP",
 300			"BUF_ON",
 301			"TG_ON"
 302		};
 303		val = intel_de_read(dev_priv,
 304				    EDP_PSR2_STATUS(intel_dp->psr.transcoder));
 305		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
 306			      EDP_PSR2_STATUS_STATE_SHIFT;
 307		if (status_val < ARRAY_SIZE(live_status))
 308			status = live_status[status_val];
 309	} else {
 310		static const char * const live_status[] = {
 311			"IDLE",
 312			"SRDONACK",
 313			"SRDENT",
 314			"BUFOFF",
 315			"BUFON",
 316			"AUXACK",
 317			"SRDOFFACK",
 318			"SRDENT_ON",
 319		};
 320		val = intel_de_read(dev_priv,
 321				    EDP_PSR_STATUS(intel_dp->psr.transcoder));
 322		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
 323			      EDP_PSR_STATUS_STATE_SHIFT;
 324		if (status_val < ARRAY_SIZE(live_status))
 325			status = live_status[status_val];
 326	}
 327
 328	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
 329}
 330
 331static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
 332{
 333	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 334	struct intel_psr *psr = &intel_dp->psr;
 335	intel_wakeref_t wakeref;
 336	const char *status;
 337	bool enabled;
 338	u32 val;
 339
 340	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
 341	if (psr->sink_support)
 342		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
 343	seq_puts(m, "\n");
 344
 345	if (!psr->sink_support)
 346		return 0;
 347
 348	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 349	mutex_lock(&psr->lock);
 350
 351	if (psr->enabled)
 352		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
 353	else
 354		status = "disabled";
 355	seq_printf(m, "PSR mode: %s\n", status);
 356
 357	if (!psr->enabled) {
 358		seq_printf(m, "PSR sink not reliable: %s\n",
 359			   yesno(psr->sink_not_reliable));
 360
 361		goto unlock;
 362	}
 363
 364	if (psr->psr2_enabled) {
 365		val = intel_de_read(dev_priv,
 366				    EDP_PSR2_CTL(intel_dp->psr.transcoder));
 367		enabled = val & EDP_PSR2_ENABLE;
 368	} else {
 369		val = intel_de_read(dev_priv,
 370				    EDP_PSR_CTL(intel_dp->psr.transcoder));
 371		enabled = val & EDP_PSR_ENABLE;
 372	}
 373	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
 374		   enableddisabled(enabled), val);
 375	psr_source_status(intel_dp, m);
 376	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
 377		   psr->busy_frontbuffer_bits);
 378
 379	/*
  380	 * SKL+ Perf counter is reset to 0 every time DC state is entered
 381	 */
 382	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 383		val = intel_de_read(dev_priv,
 384				    EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
 385		val &= EDP_PSR_PERF_CNT_MASK;
 386		seq_printf(m, "Performance counter: %u\n", val);
 387	}
 388
 389	if (psr->debug & I915_PSR_DEBUG_IRQ) {
 390		seq_printf(m, "Last attempted entry at: %lld\n",
 391			   psr->last_entry_attempt);
 392		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
 393	}
 394
 395	if (psr->psr2_enabled) {
 396		u32 su_frames_val[3];
 397		int frame;
 398
 399		/*
  400		 * Read all 3 registers beforehand to minimize crossing a
 401		 * frame boundary between register reads
 402		 */
 403		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
 404			val = intel_de_read(dev_priv,
 405					    PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
 406			su_frames_val[frame / 3] = val;
 407		}
 408
 409		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
 410
 411		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
 412			u32 su_blocks;
 413
 414			su_blocks = su_frames_val[frame / 3] &
 415				    PSR2_SU_STATUS_MASK(frame);
 416			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
 417			seq_printf(m, "%d\t%d\n", frame, su_blocks);
 418		}
 419
 420		seq_printf(m, "PSR2 selective fetch: %s\n",
 421			   enableddisabled(psr->psr2_sel_fetch_enabled));
 422	}
 423
 424unlock:
 425	mutex_unlock(&psr->lock);
 426	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 427
 428	return 0;
 429}
 430
 431static int i915_edp_psr_status(struct seq_file *m, void *data)
 432{
 433	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 434	struct intel_dp *intel_dp = NULL;
 435	struct intel_encoder *encoder;
 436
 437	if (!HAS_PSR(dev_priv))
 438		return -ENODEV;
 439
  440	/* Find the first eDP that supports PSR */
 441	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
 442		intel_dp = enc_to_intel_dp(encoder);
 443		break;
 444	}
 445
 446	if (!intel_dp)
 447		return -ENODEV;
 448
 449	return intel_psr_status(m, intel_dp);
 450}
 451
 452static int
 453i915_edp_psr_debug_set(void *data, u64 val)
 454{
 455	struct drm_i915_private *dev_priv = data;
 456	struct intel_encoder *encoder;
 457	intel_wakeref_t wakeref;
 458	int ret = -ENODEV;
 459
 460	if (!HAS_PSR(dev_priv))
 461		return ret;
 462
 463	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
 464		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 465
 466		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
 467
 468		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 469
  470		// TODO: split this into per-transcoder PSR debug state
 471		ret = intel_psr_debug_set(intel_dp, val);
 472
 473		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 474	}
 475
 476	return ret;
 477}
 478
 479static int
 480i915_edp_psr_debug_get(void *data, u64 *val)
 481{
 482	struct drm_i915_private *dev_priv = data;
 483	struct intel_encoder *encoder;
 484
 485	if (!HAS_PSR(dev_priv))
 486		return -ENODEV;
 487
 488	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
 489		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 490
  491		// TODO: split this into per-transcoder PSR debug state
 492		*val = READ_ONCE(intel_dp->psr.debug);
 493		return 0;
 494	}
 495
 496	return -ENODEV;
 497}
 498
 499DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
 500			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
 501			"%llu\n");
 502
 503static int i915_power_domain_info(struct seq_file *m, void *unused)
 504{
 505	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 506	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 507	int i;
 508
 509	mutex_lock(&power_domains->lock);
 510
 511	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
 512	for (i = 0; i < power_domains->power_well_count; i++) {
 513		struct i915_power_well *power_well;
 514		enum intel_display_power_domain power_domain;
 515
 516		power_well = &power_domains->power_wells[i];
 517		seq_printf(m, "%-25s %d\n", power_well->desc->name,
 518			   power_well->count);
 519
 520		for_each_power_domain(power_domain, power_well->desc->domains)
 521			seq_printf(m, "  %-23s %d\n",
 522				 intel_display_power_domain_str(power_domain),
 523				 power_domains->domain_use_count[power_domain]);
 524	}
 525
 526	mutex_unlock(&power_domains->lock);
 527
 528	return 0;
 529}
 530
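/*
 * Report DMC firmware load status, version and the platform's
 * DC-state residency counters.
 */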
 531static int i915_dmc_info(struct seq_file *m, void *unused)
 532{
 533	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 534	intel_wakeref_t wakeref;
 535	struct intel_dmc *dmc;
 536	i915_reg_t dc5_reg, dc6_reg = {};
 537
 538	if (!HAS_DMC(dev_priv))
 539		return -ENODEV;
 540
 541	dmc = &dev_priv->dmc;
 542
 543	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 544
 545	seq_printf(m, "fw loaded: %s\n", yesno(intel_dmc_has_payload(dev_priv)));
 546	seq_printf(m, "path: %s\n", dmc->fw_path);
 547
 548	if (!intel_dmc_has_payload(dev_priv))
 549		goto out;
 550
 551	seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version),
 552		   DMC_VERSION_MINOR(dmc->version));
 553
 554	if (DISPLAY_VER(dev_priv) >= 12) {
 555		if (IS_DGFX(dev_priv)) {
 556			dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
 557		} else {
 558			dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
 559			dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
 560		}
 561
 562		/*
 563		 * NOTE: DMC_DEBUG3 is a general purpose reg.
 564		 * According to B.Specs:49196 DMC f/w reuses DC5/6 counter
 565		 * reg for DC3CO debugging and validation,
  566		 * but the TGL DMC f/w uses the DMC_DEBUG3 reg for the DC3CO counter.
 567		 */
 568		seq_printf(m, "DC3CO count: %d\n",
 569			   intel_de_read(dev_priv, DMC_DEBUG3));
 570	} else {
 571		dc5_reg = IS_BROXTON(dev_priv) ? BXT_DMC_DC3_DC5_COUNT :
 572						 SKL_DMC_DC3_DC5_COUNT;
 573		if (!IS_GEMINILAKE(dev_priv) && !IS_BROXTON(dev_priv))
 574			dc6_reg = SKL_DMC_DC5_DC6_COUNT;
 575	}
 576
 577	seq_printf(m, "DC3 -> DC5 count: %d\n",
 578		   intel_de_read(dev_priv, dc5_reg));
 579	if (dc6_reg.reg)
 580		seq_printf(m, "DC5 -> DC6 count: %d\n",
 581			   intel_de_read(dev_priv, dc6_reg));
 582
 583out:
 584	seq_printf(m, "program base: 0x%08x\n",
 585		   intel_de_read(dev_priv, DMC_PROGRAM(0)));
 586	seq_printf(m, "ssp base: 0x%08x\n",
 587		   intel_de_read(dev_priv, DMC_SSP_BASE));
 588	seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, DMC_HTP_SKL));
 589
 590	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 591
 592	return 0;
 593}
 594
 595static void intel_seq_print_mode(struct seq_file *m, int tabs,
 596				 const struct drm_display_mode *mode)
 597{
 598	int i;
 599
 600	for (i = 0; i < tabs; i++)
 601		seq_putc(m, '\t');
 602
 603	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
 604}
 605
 606static void intel_encoder_info(struct seq_file *m,
 607			       struct intel_crtc *crtc,
 608			       struct intel_encoder *encoder)
 609{
 610	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 611	struct drm_connector_list_iter conn_iter;
 612	struct drm_connector *connector;
 613
 614	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
 615		   encoder->base.base.id, encoder->base.name);
 616
 617	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
 618	drm_for_each_connector_iter(connector, &conn_iter) {
 619		const struct drm_connector_state *conn_state =
 620			connector->state;
 621
 622		if (conn_state->best_encoder != &encoder->base)
 623			continue;
 624
 625		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
 626			   connector->base.id, connector->name);
 627	}
 628	drm_connector_list_iter_end(&conn_iter);
 629}
 630
 631static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
 632{
 633	const struct drm_display_mode *mode = panel->fixed_mode;
 634
 635	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
 636}
 637
 638static void intel_hdcp_info(struct seq_file *m,
 639			    struct intel_connector *intel_connector)
 640{
 641	bool hdcp_cap, hdcp2_cap;
 642
 643	if (!intel_connector->hdcp.shim) {
 644		seq_puts(m, "No Connector Support");
 645		goto out;
 646	}
 647
 648	hdcp_cap = intel_hdcp_capable(intel_connector);
 649	hdcp2_cap = intel_hdcp2_capable(intel_connector);
 650
 651	if (hdcp_cap)
 652		seq_puts(m, "HDCP1.4 ");
 653	if (hdcp2_cap)
 654		seq_puts(m, "HDCP2.2 ");
 655
 656	if (!hdcp_cap && !hdcp2_cap)
 657		seq_puts(m, "None");
 658
 659out:
 660	seq_puts(m, "\n");
 661}
 662
 663static void intel_dp_info(struct seq_file *m,
 664			  struct intel_connector *intel_connector)
 665{
 666	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
 667	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
 668	const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr;
 669
 670	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
 671	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
 672	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
 673		intel_panel_info(m, &intel_connector->panel);
 674
 675	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
 676				edid ? edid->data : NULL, &intel_dp->aux);
 677}
 678
 679static void intel_dp_mst_info(struct seq_file *m,
 680			      struct intel_connector *intel_connector)
 681{
 682	bool has_audio = intel_connector->port->has_audio;
 683
 684	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
 685}
 686
 687static void intel_hdmi_info(struct seq_file *m,
 688			    struct intel_connector *intel_connector)
 689{
 690	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
 691	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
 692
 693	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
 694}
 695
 696static void intel_lvds_info(struct seq_file *m,
 697			    struct intel_connector *intel_connector)
 698{
 699	intel_panel_info(m, &intel_connector->panel);
 700}
 701
 702static void intel_connector_info(struct seq_file *m,
 703				 struct drm_connector *connector)
 704{
 705	struct intel_connector *intel_connector = to_intel_connector(connector);
 706	const struct drm_connector_state *conn_state = connector->state;
 707	struct intel_encoder *encoder =
 708		to_intel_encoder(conn_state->best_encoder);
 709	const struct drm_display_mode *mode;
 710
 711	seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
 712		   connector->base.id, connector->name,
 713		   drm_get_connector_status_name(connector->status));
 714
 715	if (connector->status == connector_status_disconnected)
 716		return;
 717
 718	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
 719		   connector->display_info.width_mm,
 720		   connector->display_info.height_mm);
 721	seq_printf(m, "\tsubpixel order: %s\n",
 722		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
 723	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
 724
 725	if (!encoder)
 726		return;
 727
 728	switch (connector->connector_type) {
 729	case DRM_MODE_CONNECTOR_DisplayPort:
 730	case DRM_MODE_CONNECTOR_eDP:
 731		if (encoder->type == INTEL_OUTPUT_DP_MST)
 732			intel_dp_mst_info(m, intel_connector);
 733		else
 734			intel_dp_info(m, intel_connector);
 735		break;
 736	case DRM_MODE_CONNECTOR_LVDS:
 737		if (encoder->type == INTEL_OUTPUT_LVDS)
 738			intel_lvds_info(m, intel_connector);
 739		break;
 740	case DRM_MODE_CONNECTOR_HDMIA:
 741		if (encoder->type == INTEL_OUTPUT_HDMI ||
 742		    encoder->type == INTEL_OUTPUT_DDI)
 743			intel_hdmi_info(m, intel_connector);
 744		break;
 745	default:
 746		break;
 747	}
 748
 749	seq_puts(m, "\tHDCP version: ");
 750	intel_hdcp_info(m, intel_connector);
 751
 752	seq_printf(m, "\tmodes:\n");
 753	list_for_each_entry(mode, &connector->modes, head)
 754		intel_seq_print_mode(m, 2, mode);
 755}
 756
 757static const char *plane_type(enum drm_plane_type type)
 758{
 759	switch (type) {
 760	case DRM_PLANE_TYPE_OVERLAY:
 761		return "OVL";
 762	case DRM_PLANE_TYPE_PRIMARY:
 763		return "PRI";
 764	case DRM_PLANE_TYPE_CURSOR:
 765		return "CUR";
 766	/*
 767	 * Deliberately omitting default: to generate compiler warnings
 768	 * when a new drm_plane_type gets added.
 769	 */
 770	}
 771
 772	return "unknown";
 773}
 774
 775static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
 776{
 777	/*
  778	 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
  779	 * but print them all so any misused values are easy to spot
 780	 */
 781	snprintf(buf, bufsize,
 782		 "%s%s%s%s%s%s(0x%08x)",
 783		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
 784		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
 785		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
 786		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
 787		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
 788		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
 789		 rotation);
 790}
 791
 792static const char *plane_visibility(const struct intel_plane_state *plane_state)
 793{
 794	if (plane_state->uapi.visible)
 795		return "visible";
 796
 797	if (plane_state->planar_slave)
 798		return "planar-slave";
 799
 800	return "hidden";
 801}
 802
 803static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
 804{
 805	const struct intel_plane_state *plane_state =
 806		to_intel_plane_state(plane->base.state);
 807	const struct drm_framebuffer *fb = plane_state->uapi.fb;
 808	struct drm_rect src, dst;
 809	char rot_str[48];
 810
 811	src = drm_plane_state_src(&plane_state->uapi);
 812	dst = drm_plane_state_dest(&plane_state->uapi);
 813
 814	plane_rotation(rot_str, sizeof(rot_str),
 815		       plane_state->uapi.rotation);
 816
 817	seq_puts(m, "\t\tuapi: [FB:");
 818	if (fb)
 819		seq_printf(m, "%d] %p4cc,0x%llx,%dx%d", fb->base.id,
 820			   &fb->format->format, fb->modifier, fb->width,
 821			   fb->height);
 822	else
 823		seq_puts(m, "0] n/a,0x0,0x0,");
 824	seq_printf(m, ", visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT
 825		   ", rotation=%s\n", plane_visibility(plane_state),
 826		   DRM_RECT_FP_ARG(&src), DRM_RECT_ARG(&dst), rot_str);
 827
 828	if (plane_state->planar_linked_plane)
 829		seq_printf(m, "\t\tplanar: Linked to [PLANE:%d:%s] as a %s\n",
 830			   plane_state->planar_linked_plane->base.base.id, plane_state->planar_linked_plane->base.name,
 831			   plane_state->planar_slave ? "slave" : "master");
 832}
 833
 834static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
 835{
 836	const struct intel_plane_state *plane_state =
 837		to_intel_plane_state(plane->base.state);
 838	const struct drm_framebuffer *fb = plane_state->hw.fb;
 839	char rot_str[48];
 840
 841	if (!fb)
 842		return;
 843
 844	plane_rotation(rot_str, sizeof(rot_str),
 845		       plane_state->hw.rotation);
 846
 847	seq_printf(m, "\t\thw: [FB:%d] %p4cc,0x%llx,%dx%d, visible=%s, src="
 848		   DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
 849		   fb->base.id, &fb->format->format,
 850		   fb->modifier, fb->width, fb->height,
 851		   yesno(plane_state->uapi.visible),
 852		   DRM_RECT_FP_ARG(&plane_state->uapi.src),
 853		   DRM_RECT_ARG(&plane_state->uapi.dst),
 854		   rot_str);
 855}
 856
 857static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
 858{
 859	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 860	struct intel_plane *plane;
 861
 862	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
 863		seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
 864			   plane->base.base.id, plane->base.name,
 865			   plane_type(plane->base.type));
 866		intel_plane_uapi_info(m, plane);
 867		intel_plane_hw_info(m, plane);
 868	}
 869}
 870
 871static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
 872{
 873	const struct intel_crtc_state *crtc_state =
 874		to_intel_crtc_state(crtc->base.state);
 875	int num_scalers = crtc->num_scalers;
 876	int i;
 877
  878	/* Not all platforms have a scaler */
 879	if (num_scalers) {
 880		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
 881			   num_scalers,
 882			   crtc_state->scaler_state.scaler_users,
 883			   crtc_state->scaler_state.scaler_id);
 884
 885		for (i = 0; i < num_scalers; i++) {
 886			const struct intel_scaler *sc =
 887				&crtc_state->scaler_state.scalers[i];
 888
 889			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
 890				   i, yesno(sc->in_use), sc->mode);
 891		}
 892		seq_puts(m, "\n");
 893	} else {
 894		seq_puts(m, "\tNo scalers available on this platform\n");
 895	}
 896}
 897
 898#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
 899static void crtc_updates_info(struct seq_file *m,
 900			      struct intel_crtc *crtc,
 901			      const char *hdr)
 902{
 903	u64 count;
 904	int row;
 905
 906	count = 0;
 907	for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++)
 908		count += crtc->debug.vbl.times[row];
 909	seq_printf(m, "%sUpdates: %llu\n", hdr, count);
 910	if (!count)
 911		return;
 912
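	/*
	 * Log2 histogram of update times: each row is a power-of-two
	 * bucket, odd rows are labelled with the bucket boundary (us/ms)
	 * and the bar length is ilog2() of the count in that bucket.
	 */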
 913	for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) {
 914		char columns[80] = "       |";
 915		unsigned int x;
 916
 917		if (row & 1) {
 918			const char *units;
 919
 920			if (row > 10) {
 921				x = 1000000;
 922				units = "ms";
 923			} else {
 924				x = 1000;
 925				units = "us";
 926			}
 927
 928			snprintf(columns, sizeof(columns), "%4ld%s |",
 929				 DIV_ROUND_CLOSEST(BIT(row + 9), x), units);
 930		}
 931
 932		if (crtc->debug.vbl.times[row]) {
 933			x = ilog2(crtc->debug.vbl.times[row]);
 934			memset(columns + 8, '*', x);
 935			columns[8 + x] = '\0';
 936		}
 937
 938		seq_printf(m, "%s%s\n", hdr, columns);
 939	}
 940
 941	seq_printf(m, "%sMin update: %lluns\n",
 942		   hdr, crtc->debug.vbl.min);
 943	seq_printf(m, "%sMax update: %lluns\n",
 944		   hdr, crtc->debug.vbl.max);
 945	seq_printf(m, "%sAverage update: %lluns\n",
 946		   hdr, div64_u64(crtc->debug.vbl.sum,  count));
 947	seq_printf(m, "%sOverruns > %uus: %u\n",
 948		   hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over);
 949}
 950
 951static int crtc_updates_show(struct seq_file *m, void *data)
 952{
 953	crtc_updates_info(m, m->private, "");
 954	return 0;
 955}
 956
 957static int crtc_updates_open(struct inode *inode, struct file *file)
 958{
 959	return single_open(file, crtc_updates_show, inode->i_private);
 960}
 961
 962static ssize_t crtc_updates_write(struct file *file,
 963				  const char __user *ubuf,
 964				  size_t len, loff_t *offp)
 965{
 966	struct seq_file *m = file->private_data;
 967	struct intel_crtc *crtc = m->private;
 968
 969	/* May race with an update. Meh. */
 970	memset(&crtc->debug.vbl, 0, sizeof(crtc->debug.vbl));
 971
 972	return len;
 973}
 974
 975static const struct file_operations crtc_updates_fops = {
 976	.owner = THIS_MODULE,
 977	.open = crtc_updates_open,
 978	.read = seq_read,
 979	.llseek = seq_lseek,
 980	.release = single_release,
 981	.write = crtc_updates_write
 982};
 983
 984static void crtc_updates_add(struct drm_crtc *crtc)
 985{
 986	debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry,
 987			    to_intel_crtc(crtc), &crtc_updates_fops);
 988}
 989
 990#else
 991static void crtc_updates_info(struct seq_file *m,
 992			      struct intel_crtc *crtc,
 993			      const char *hdr)
 994{
 995}
 996
 997static void crtc_updates_add(struct drm_crtc *crtc)
 998{
 999}
1000#endif
1001
1002static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
1003{
1004	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1005	const struct intel_crtc_state *crtc_state =
1006		to_intel_crtc_state(crtc->base.state);
1007	struct intel_encoder *encoder;
1008
1009	seq_printf(m, "[CRTC:%d:%s]:\n",
1010		   crtc->base.base.id, crtc->base.name);
1011
1012	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
1013		   yesno(crtc_state->uapi.enable),
1014		   yesno(crtc_state->uapi.active),
1015		   DRM_MODE_ARG(&crtc_state->uapi.mode));
1016
1017	if (crtc_state->hw.enable) {
1018		seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
1019			   yesno(crtc_state->hw.active),
1020			   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
1021
1022		seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
1023			   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
1024			   yesno(crtc_state->dither), crtc_state->pipe_bpp);
1025
1026		intel_scaler_info(m, crtc);
1027	}
1028
1029	if (crtc_state->bigjoiner)
1030		seq_printf(m, "\tLinked to [CRTC:%d:%s] as a %s\n",
1031			   crtc_state->bigjoiner_linked_crtc->base.base.id,
1032			   crtc_state->bigjoiner_linked_crtc->base.name,
1033			   crtc_state->bigjoiner_slave ? "slave" : "master");
1034
1035	for_each_intel_encoder_mask(&dev_priv->drm, encoder,
1036				    crtc_state->uapi.encoder_mask)
1037		intel_encoder_info(m, crtc, encoder);
1038
1039	intel_plane_info(m, crtc);
1040
1041	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
1042		   yesno(!crtc->cpu_fifo_underrun_disabled),
1043		   yesno(!crtc->pch_fifo_underrun_disabled));
1044
1045	crtc_updates_info(m, crtc, "\t");
1046}
1047
1048static int i915_display_info(struct seq_file *m, void *unused)
1049{
1050	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1051	struct drm_device *dev = &dev_priv->drm;
1052	struct intel_crtc *crtc;
1053	struct drm_connector *connector;
1054	struct drm_connector_list_iter conn_iter;
1055	intel_wakeref_t wakeref;
1056
1057	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1058
1059	drm_modeset_lock_all(dev);
1060
1061	seq_printf(m, "CRTC info\n");
1062	seq_printf(m, "---------\n");
1063	for_each_intel_crtc(dev, crtc)
1064		intel_crtc_info(m, crtc);
1065
1066	seq_printf(m, "\n");
1067	seq_printf(m, "Connector info\n");
1068	seq_printf(m, "--------------\n");
1069	drm_connector_list_iter_begin(dev, &conn_iter);
1070	drm_for_each_connector_iter(connector, &conn_iter)
1071		intel_connector_info(m, connector);
1072	drm_connector_list_iter_end(&conn_iter);
1073
1074	drm_modeset_unlock_all(dev);
1075
1076	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1077
1078	return 0;
1079}
1080
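/*
 * Dump the reference clock frequencies and the tracked state of
 * every shared DPLL.
 */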
1081static int i915_shared_dplls_info(struct seq_file *m, void *unused)
1082{
1083	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1084	struct drm_device *dev = &dev_priv->drm;
1085	int i;
1086
1087	drm_modeset_lock_all(dev);
1088
1089	seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
1090		   dev_priv->dpll.ref_clks.nssc,
1091		   dev_priv->dpll.ref_clks.ssc);
1092
1093	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
1094		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
1095
1096		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
1097			   pll->info->id);
1098		seq_printf(m, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
1099			   pll->state.pipe_mask, pll->active_mask, yesno(pll->on));
1100		seq_printf(m, " tracked hardware state:\n");
1101		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
1102		seq_printf(m, " dpll_md: 0x%08x\n",
1103			   pll->state.hw_state.dpll_md);
1104		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
1105		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
1106		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
1107		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
1108		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
1109		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
1110			   pll->state.hw_state.mg_refclkin_ctl);
1111		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
1112			   pll->state.hw_state.mg_clktop2_coreclkctl1);
1113		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
1114			   pll->state.hw_state.mg_clktop2_hsclkctl);
1115		seq_printf(m, " mg_pll_div0:  0x%08x\n",
1116			   pll->state.hw_state.mg_pll_div0);
1117		seq_printf(m, " mg_pll_div1:  0x%08x\n",
1118			   pll->state.hw_state.mg_pll_div1);
1119		seq_printf(m, " mg_pll_lf:    0x%08x\n",
1120			   pll->state.hw_state.mg_pll_lf);
1121		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
1122			   pll->state.hw_state.mg_pll_frac_lock);
1123		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
1124			   pll->state.hw_state.mg_pll_ssc);
1125		seq_printf(m, " mg_pll_bias:  0x%08x\n",
1126			   pll->state.hw_state.mg_pll_bias);
1127		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
1128			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
1129	}
1130	drm_modeset_unlock_all(dev);
1131
1132	return 0;
1133}
1134
1135static int i915_ipc_status_show(struct seq_file *m, void *data)
1136{
1137	struct drm_i915_private *dev_priv = m->private;
1138
1139	seq_printf(m, "Isochronous Priority Control: %s\n",
1140			yesno(dev_priv->ipc_enabled));
1141	return 0;
1142}
1143
1144static int i915_ipc_status_open(struct inode *inode, struct file *file)
1145{
1146	struct drm_i915_private *dev_priv = inode->i_private;
1147
1148	if (!HAS_IPC(dev_priv))
1149		return -ENODEV;
1150
1151	return single_open(file, i915_ipc_status_show, dev_priv);
1152}
1153
1154static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
1155				     size_t len, loff_t *offp)
1156{
1157	struct seq_file *m = file->private_data;
1158	struct drm_i915_private *dev_priv = m->private;
1159	intel_wakeref_t wakeref;
1160	bool enable;
1161	int ret;
1162
1163	ret = kstrtobool_from_user(ubuf, len, &enable);
1164	if (ret < 0)
1165		return ret;
1166
1167	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1168		if (!dev_priv->ipc_enabled && enable)
1169			drm_info(&dev_priv->drm,
1170				 "Enabling IPC: WM will be proper only after next commit\n");
1171		dev_priv->ipc_enabled = enable;
1172		intel_enable_ipc(dev_priv);
1173	}
1174
1175	return len;
1176}
1177
1178static const struct file_operations i915_ipc_status_fops = {
1179	.owner = THIS_MODULE,
1180	.open = i915_ipc_status_open,
1181	.read = seq_read,
1182	.llseek = seq_lseek,
1183	.release = single_release,
1184	.write = i915_ipc_status_write
1185};
1186
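/* Dump the SKL+ DDB (display data buffer) allocation per pipe and plane. */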
1187static int i915_ddb_info(struct seq_file *m, void *unused)
1188{
1189	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1190	struct drm_device *dev = &dev_priv->drm;
1191	struct skl_ddb_entry *entry;
1192	struct intel_crtc *crtc;
1193
1194	if (DISPLAY_VER(dev_priv) < 9)
1195		return -ENODEV;
1196
1197	drm_modeset_lock_all(dev);
1198
1199	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
1200
1201	for_each_intel_crtc(&dev_priv->drm, crtc) {
1202		struct intel_crtc_state *crtc_state =
1203			to_intel_crtc_state(crtc->base.state);
1204		enum pipe pipe = crtc->pipe;
1205		enum plane_id plane_id;
1206
1207		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
1208
1209		for_each_plane_id_on_crtc(crtc, plane_id) {
1210			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
1211			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
1212				   entry->start, entry->end,
1213				   skl_ddb_entry_size(entry));
1214		}
1215
1216		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
1217		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
1218			   entry->end, skl_ddb_entry_size(entry));
1219	}
1220
1221	drm_modeset_unlock_all(dev);
1222
1223	return 0;
1224}
1225
1226static void drrs_status_per_crtc(struct seq_file *m,
1227				 struct drm_device *dev,
1228				 struct intel_crtc *intel_crtc)
1229{
1230	struct drm_i915_private *dev_priv = to_i915(dev);
1231	struct i915_drrs *drrs = &dev_priv->drrs;
1232	int vrefresh = 0;
1233	struct drm_connector *connector;
1234	struct drm_connector_list_iter conn_iter;
1235
1236	drm_connector_list_iter_begin(dev, &conn_iter);
1237	drm_for_each_connector_iter(connector, &conn_iter) {
1238		bool supported = false;
1239
1240		if (connector->state->crtc != &intel_crtc->base)
1241			continue;
1242
1243		seq_printf(m, "%s:\n", connector->name);
1244
1245		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
1246		    drrs->type == SEAMLESS_DRRS_SUPPORT)
1247			supported = true;
1248
1249		seq_printf(m, "\tDRRS Supported: %s\n", yesno(supported));
1250	}
1251	drm_connector_list_iter_end(&conn_iter);
1252
1253	seq_puts(m, "\n");
1254
1255	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
1256		struct intel_panel *panel;
1257
1258		mutex_lock(&drrs->mutex);
1259		/* DRRS Supported */
1260		seq_puts(m, "\tDRRS Enabled: Yes\n");
1261
1262		/* disable_drrs() will make drrs->dp NULL */
1263		if (!drrs->dp) {
1264			seq_puts(m, "Idleness DRRS: Disabled\n");
1265			mutex_unlock(&drrs->mutex);
1266			return;
1267		}
1268
1269		panel = &drrs->dp->attached_connector->panel;
1270		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
1271					drrs->busy_frontbuffer_bits);
1272
1273		seq_puts(m, "\n\t\t");
1274		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
1275			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
1276			vrefresh = drm_mode_vrefresh(panel->fixed_mode);
1277		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
1278			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
1279			vrefresh = drm_mode_vrefresh(panel->downclock_mode);
1280		} else {
1281			seq_printf(m, "DRRS_State: Unknown(%d)\n",
1282						drrs->refresh_rate_type);
1283			mutex_unlock(&drrs->mutex);
1284			return;
1285		}
1286		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
1287
1288		seq_puts(m, "\n\t\t");
1289		mutex_unlock(&drrs->mutex);
1290	} else {
 1291		/* DRRS not supported. Print the VBT parameter */
1292		seq_puts(m, "\tDRRS Enabled : No");
1293	}
1294	seq_puts(m, "\n");
1295}
1296
1297static int i915_drrs_status(struct seq_file *m, void *unused)
1298{
1299	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1300	struct drm_device *dev = &dev_priv->drm;
1301	struct intel_crtc *intel_crtc;
1302	int active_crtc_cnt = 0;
1303
1304	drm_modeset_lock_all(dev);
1305	for_each_intel_crtc(dev, intel_crtc) {
1306		if (intel_crtc->base.state->active) {
1307			active_crtc_cnt++;
1308			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
1309
1310			drrs_status_per_crtc(m, dev, intel_crtc);
1311		}
1312	}
1313	drm_modeset_unlock_all(dev);
1314
1315	if (!active_crtc_cnt)
1316		seq_puts(m, "No active crtc found\n");
1317
1318	return 0;
1319}
1320
1321#define LPSP_STATUS(COND) (COND ? seq_puts(m, "LPSP: enabled\n") : \
1322				seq_puts(m, "LPSP: disabled\n"))
1323
1324static bool
1325intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
1326			      enum i915_power_well_id power_well_id)
1327{
1328	intel_wakeref_t wakeref;
1329	bool is_enabled;
1330
1331	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1332	is_enabled = intel_display_power_well_is_enabled(i915,
1333							 power_well_id);
1334	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1335
1336	return is_enabled;
1337}
1338
1339static int i915_lpsp_status(struct seq_file *m, void *unused)
1340{
1341	struct drm_i915_private *i915 = node_to_i915(m->private);
1342
1343	if (DISPLAY_VER(i915) >= 13) {
1344		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915,
1345							   SKL_DISP_PW_2));
1346		return 0;
1347	}
1348
1349	switch (DISPLAY_VER(i915)) {
1350	case 12:
1351	case 11:
1352		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3));
1353		break;
1354	case 10:
1355	case 9:
1356		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2));
1357		break;
1358	default:
1359		/*
 1360		 * Apart from HASWELL/BROADWELL, no other legacy platform
 1361		 * supports LPSP.
1362		 */
1363		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
1364			LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL));
1365		else
1366			seq_puts(m, "LPSP: not supported\n");
1367	}
1368
1369	return 0;
1370}
1371
1372static int i915_dp_mst_info(struct seq_file *m, void *unused)
1373{
1374	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1375	struct drm_device *dev = &dev_priv->drm;
1376	struct intel_encoder *intel_encoder;
1377	struct intel_digital_port *dig_port;
1378	struct drm_connector *connector;
1379	struct drm_connector_list_iter conn_iter;
1380
1381	drm_connector_list_iter_begin(dev, &conn_iter);
1382	drm_for_each_connector_iter(connector, &conn_iter) {
1383		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
1384			continue;
1385
1386		intel_encoder = intel_attached_encoder(to_intel_connector(connector));
1387		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
1388			continue;
1389
1390		dig_port = enc_to_dig_port(intel_encoder);
1391		if (!dig_port->dp.can_mst)
1392			continue;
1393
1394		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
1395			   dig_port->base.base.base.id,
1396			   dig_port->base.base.name);
1397		drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
1398	}
1399	drm_connector_list_iter_end(&conn_iter);
1400
1401	return 0;
1402}
1403
1404static ssize_t i915_displayport_test_active_write(struct file *file,
1405						  const char __user *ubuf,
1406						  size_t len, loff_t *offp)
1407{
1408	char *input_buffer;
1409	int status = 0;
1410	struct drm_device *dev;
1411	struct drm_connector *connector;
1412	struct drm_connector_list_iter conn_iter;
1413	struct intel_dp *intel_dp;
1414	int val = 0;
1415
1416	dev = ((struct seq_file *)file->private_data)->private;
1417
1418	if (len == 0)
1419		return 0;
1420
1421	input_buffer = memdup_user_nul(ubuf, len);
1422	if (IS_ERR(input_buffer))
1423		return PTR_ERR(input_buffer);
1424
1425	drm_dbg(&to_i915(dev)->drm,
1426		"Copied %d bytes from user\n", (unsigned int)len);
1427
1428	drm_connector_list_iter_begin(dev, &conn_iter);
1429	drm_for_each_connector_iter(connector, &conn_iter) {
1430		struct intel_encoder *encoder;
1431
1432		if (connector->connector_type !=
1433		    DRM_MODE_CONNECTOR_DisplayPort)
1434			continue;
1435
1436		encoder = to_intel_encoder(connector->encoder);
1437		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1438			continue;
1439
1440		if (encoder && connector->status == connector_status_connected) {
1441			intel_dp = enc_to_intel_dp(encoder);
1442			status = kstrtoint(input_buffer, 10, &val);
1443			if (status < 0)
1444				break;
1445			drm_dbg(&to_i915(dev)->drm,
1446				"Got %d for test active\n", val);
1447			/* To prevent erroneous activation of the compliance
1448			 * testing code, only accept an actual value of 1 here
1449			 */
1450			if (val == 1)
1451				intel_dp->compliance.test_active = true;
1452			else
1453				intel_dp->compliance.test_active = false;
1454		}
1455	}
1456	drm_connector_list_iter_end(&conn_iter);
1457	kfree(input_buffer);
1458	if (status < 0)
1459		return status;
1460
1461	*offp += len;
1462	return len;
1463}
1464
1465static int i915_displayport_test_active_show(struct seq_file *m, void *data)
1466{
1467	struct drm_i915_private *dev_priv = m->private;
1468	struct drm_device *dev = &dev_priv->drm;
1469	struct drm_connector *connector;
1470	struct drm_connector_list_iter conn_iter;
1471	struct intel_dp *intel_dp;
1472
1473	drm_connector_list_iter_begin(dev, &conn_iter);
1474	drm_for_each_connector_iter(connector, &conn_iter) {
1475		struct intel_encoder *encoder;
1476
1477		if (connector->connector_type !=
1478		    DRM_MODE_CONNECTOR_DisplayPort)
1479			continue;
1480
1481		encoder = to_intel_encoder(connector->encoder);
1482		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1483			continue;
1484
1485		if (encoder && connector->status == connector_status_connected) {
1486			intel_dp = enc_to_intel_dp(encoder);
1487			if (intel_dp->compliance.test_active)
1488				seq_puts(m, "1");
1489			else
1490				seq_puts(m, "0");
1491		} else
1492			seq_puts(m, "0");
1493	}
1494	drm_connector_list_iter_end(&conn_iter);
1495
1496	return 0;
1497}
1498
1499static int i915_displayport_test_active_open(struct inode *inode,
1500					     struct file *file)
1501{
1502	return single_open(file, i915_displayport_test_active_show,
1503			   inode->i_private);
1504}
1505
1506static const struct file_operations i915_displayport_test_active_fops = {
1507	.owner = THIS_MODULE,
1508	.open = i915_displayport_test_active_open,
1509	.read = seq_read,
1510	.llseek = seq_lseek,
1511	.release = single_release,
1512	.write = i915_displayport_test_active_write
1513};
1514
1515static int i915_displayport_test_data_show(struct seq_file *m, void *data)
1516{
1517	struct drm_i915_private *dev_priv = m->private;
1518	struct drm_device *dev = &dev_priv->drm;
1519	struct drm_connector *connector;
1520	struct drm_connector_list_iter conn_iter;
1521	struct intel_dp *intel_dp;
1522
1523	drm_connector_list_iter_begin(dev, &conn_iter);
1524	drm_for_each_connector_iter(connector, &conn_iter) {
1525		struct intel_encoder *encoder;
1526
1527		if (connector->connector_type !=
1528		    DRM_MODE_CONNECTOR_DisplayPort)
1529			continue;
1530
1531		encoder = to_intel_encoder(connector->encoder);
1532		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1533			continue;
1534
1535		if (encoder && connector->status == connector_status_connected) {
1536			intel_dp = enc_to_intel_dp(encoder);
1537			if (intel_dp->compliance.test_type ==
1538			    DP_TEST_LINK_EDID_READ)
1539				seq_printf(m, "%lx",
1540					   intel_dp->compliance.test_data.edid);
1541			else if (intel_dp->compliance.test_type ==
1542				 DP_TEST_LINK_VIDEO_PATTERN) {
1543				seq_printf(m, "hdisplay: %d\n",
1544					   intel_dp->compliance.test_data.hdisplay);
1545				seq_printf(m, "vdisplay: %d\n",
1546					   intel_dp->compliance.test_data.vdisplay);
1547				seq_printf(m, "bpc: %u\n",
1548					   intel_dp->compliance.test_data.bpc);
1549			} else if (intel_dp->compliance.test_type ==
1550				   DP_TEST_LINK_PHY_TEST_PATTERN) {
1551				seq_printf(m, "pattern: %d\n",
1552					   intel_dp->compliance.test_data.phytest.phy_pattern);
1553				seq_printf(m, "Number of lanes: %d\n",
1554					   intel_dp->compliance.test_data.phytest.num_lanes);
1555				seq_printf(m, "Link Rate: %d\n",
1556					   intel_dp->compliance.test_data.phytest.link_rate);
1557				seq_printf(m, "level: %02x\n",
1558					   intel_dp->train_set[0]);
1559			}
1560		} else
1561			seq_puts(m, "0");
1562	}
1563	drm_connector_list_iter_end(&conn_iter);
1564
1565	return 0;
1566}
1567DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
1568
1569static int i915_displayport_test_type_show(struct seq_file *m, void *data)
1570{
1571	struct drm_i915_private *dev_priv = m->private;
1572	struct drm_device *dev = &dev_priv->drm;
1573	struct drm_connector *connector;
1574	struct drm_connector_list_iter conn_iter;
1575	struct intel_dp *intel_dp;
1576
1577	drm_connector_list_iter_begin(dev, &conn_iter);
1578	drm_for_each_connector_iter(connector, &conn_iter) {
1579		struct intel_encoder *encoder;
1580
1581		if (connector->connector_type !=
1582		    DRM_MODE_CONNECTOR_DisplayPort)
1583			continue;
1584
1585		encoder = to_intel_encoder(connector->encoder);
1586		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1587			continue;
1588
1589		if (encoder && connector->status == connector_status_connected) {
1590			intel_dp = enc_to_intel_dp(encoder);
1591			seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
1592		} else
1593			seq_puts(m, "0");
1594	}
1595	drm_connector_list_iter_end(&conn_iter);
1596
1597	return 0;
1598}
1599DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
1600
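/* Print the per-level memory watermark latencies in microseconds. */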
1601static void wm_latency_show(struct seq_file *m, const u16 wm[8])
1602{
1603	struct drm_i915_private *dev_priv = m->private;
1604	struct drm_device *dev = &dev_priv->drm;
1605	int level;
1606	int num_levels;
1607
1608	if (IS_CHERRYVIEW(dev_priv))
1609		num_levels = 3;
1610	else if (IS_VALLEYVIEW(dev_priv))
1611		num_levels = 1;
1612	else if (IS_G4X(dev_priv))
1613		num_levels = 3;
1614	else
1615		num_levels = ilk_wm_max_level(dev_priv) + 1;
1616
1617	drm_modeset_lock_all(dev);
1618
1619	for (level = 0; level < num_levels; level++) {
1620		unsigned int latency = wm[level];
1621
1622		/*
1623		 * - WM1+ latency values in 0.5us units
1624		 * - latencies are in us on gen9/vlv/chv
1625		 */
1626		if (DISPLAY_VER(dev_priv) >= 9 ||
1627		    IS_VALLEYVIEW(dev_priv) ||
1628		    IS_CHERRYVIEW(dev_priv) ||
1629		    IS_G4X(dev_priv))
1630			latency *= 10;
1631		else if (level > 0)
1632			latency *= 5;
1633
1634		seq_printf(m, "WM%d %u (%u.%u usec)\n",
1635			   level, wm[level], latency / 10, latency % 10);
1636	}
1637
1638	drm_modeset_unlock_all(dev);
1639}
1640
1641static int pri_wm_latency_show(struct seq_file *m, void *data)
1642{
1643	struct drm_i915_private *dev_priv = m->private;
1644	const u16 *latencies;
1645
1646	if (DISPLAY_VER(dev_priv) >= 9)
1647		latencies = dev_priv->wm.skl_latency;
1648	else
1649		latencies = dev_priv->wm.pri_latency;
1650
1651	wm_latency_show(m, latencies);
1652
1653	return 0;
1654}
1655
1656static int spr_wm_latency_show(struct seq_file *m, void *data)
1657{
1658	struct drm_i915_private *dev_priv = m->private;
1659	const u16 *latencies;
1660
1661	if (DISPLAY_VER(dev_priv) >= 9)
1662		latencies = dev_priv->wm.skl_latency;
1663	else
1664		latencies = dev_priv->wm.spr_latency;
1665
1666	wm_latency_show(m, latencies);
1667
1668	return 0;
1669}
1670
1671static int cur_wm_latency_show(struct seq_file *m, void *data)
1672{
1673	struct drm_i915_private *dev_priv = m->private;
1674	const u16 *latencies;
1675
1676	if (DISPLAY_VER(dev_priv) >= 9)
1677		latencies = dev_priv->wm.skl_latency;
1678	else
1679		latencies = dev_priv->wm.cur_latency;
1680
1681	wm_latency_show(m, latencies);
1682
1683	return 0;
1684}
1685
1686static int pri_wm_latency_open(struct inode *inode, struct file *file)
1687{
1688	struct drm_i915_private *dev_priv = inode->i_private;
1689
1690	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
1691		return -ENODEV;
1692
1693	return single_open(file, pri_wm_latency_show, dev_priv);
1694}
1695
1696static int spr_wm_latency_open(struct inode *inode, struct file *file)
1697{
1698	struct drm_i915_private *dev_priv = inode->i_private;
1699
1700	if (HAS_GMCH(dev_priv))
1701		return -ENODEV;
1702
1703	return single_open(file, spr_wm_latency_show, dev_priv);
1704}
1705
1706static int cur_wm_latency_open(struct inode *inode, struct file *file)
1707{
1708	struct drm_i915_private *dev_priv = inode->i_private;
1709
1710	if (HAS_GMCH(dev_priv))
1711		return -ENODEV;
1712
1713	return single_open(file, cur_wm_latency_show, dev_priv);
1714}
1715
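/*
 * Parse space-separated watermark latency values from userspace;
 * exactly num_levels values must be supplied.
 */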
1716static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
1717				size_t len, loff_t *offp, u16 wm[8])
1718{
1719	struct seq_file *m = file->private_data;
1720	struct drm_i915_private *dev_priv = m->private;
1721	struct drm_device *dev = &dev_priv->drm;
1722	u16 new[8] = { 0 };
1723	int num_levels;
1724	int level;
1725	int ret;
1726	char tmp[32];
1727
1728	if (IS_CHERRYVIEW(dev_priv))
1729		num_levels = 3;
1730	else if (IS_VALLEYVIEW(dev_priv))
1731		num_levels = 1;
1732	else if (IS_G4X(dev_priv))
1733		num_levels = 3;
1734	else
1735		num_levels = ilk_wm_max_level(dev_priv) + 1;
1736
1737	if (len >= sizeof(tmp))
1738		return -EINVAL;
1739
1740	if (copy_from_user(tmp, ubuf, len))
1741		return -EFAULT;
1742
1743	tmp[len] = '\0';
1744
1745	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
1746		     &new[0], &new[1], &new[2], &new[3],
1747		     &new[4], &new[5], &new[6], &new[7]);
1748	if (ret != num_levels)
1749		return -EINVAL;
1750
1751	drm_modeset_lock_all(dev);
1752
1753	for (level = 0; level < num_levels; level++)
1754		wm[level] = new[level];
1755
1756	drm_modeset_unlock_all(dev);
1757
1758	return len;
1759}
1760
1761
1762static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
1763				    size_t len, loff_t *offp)
1764{
1765	struct seq_file *m = file->private_data;
1766	struct drm_i915_private *dev_priv = m->private;
1767	u16 *latencies;
1768
1769	if (DISPLAY_VER(dev_priv) >= 9)
1770		latencies = dev_priv->wm.skl_latency;
1771	else
1772		latencies = dev_priv->wm.pri_latency;
1773
1774	return wm_latency_write(file, ubuf, len, offp, latencies);
1775}
1776
1777static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
1778				    size_t len, loff_t *offp)
1779{
1780	struct seq_file *m = file->private_data;
1781	struct drm_i915_private *dev_priv = m->private;
1782	u16 *latencies;
1783
1784	if (DISPLAY_VER(dev_priv) >= 9)
1785		latencies = dev_priv->wm.skl_latency;
1786	else
1787		latencies = dev_priv->wm.spr_latency;
1788
1789	return wm_latency_write(file, ubuf, len, offp, latencies);
1790}
1791
1792static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
1793				    size_t len, loff_t *offp)
1794{
1795	struct seq_file *m = file->private_data;
1796	struct drm_i915_private *dev_priv = m->private;
1797	u16 *latencies;
1798
1799	if (DISPLAY_VER(dev_priv) >= 9)
1800		latencies = dev_priv->wm.skl_latency;
1801	else
1802		latencies = dev_priv->wm.cur_latency;
1803
1804	return wm_latency_write(file, ubuf, len, offp, latencies);
1805}
1806
1807static const struct file_operations i915_pri_wm_latency_fops = {
1808	.owner = THIS_MODULE,
1809	.open = pri_wm_latency_open,
1810	.read = seq_read,
1811	.llseek = seq_lseek,
1812	.release = single_release,
1813	.write = pri_wm_latency_write
1814};
1815
1816static const struct file_operations i915_spr_wm_latency_fops = {
1817	.owner = THIS_MODULE,
1818	.open = spr_wm_latency_open,
1819	.read = seq_read,
1820	.llseek = seq_lseek,
1821	.release = single_release,
1822	.write = spr_wm_latency_write
1823};
1824
1825static const struct file_operations i915_cur_wm_latency_fops = {
1826	.owner = THIS_MODULE,
1827	.open = cur_wm_latency_open,
1828	.read = seq_read,
1829	.llseek = seq_lseek,
1830	.release = single_release,
1831	.write = cur_wm_latency_write
1832};
1833
1834static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
1835{
1836	struct drm_i915_private *dev_priv = m->private;
1837	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1838
1839	/* Synchronize with everything first in case there's been an HPD
1840	 * storm, but we haven't finished handling it in the kernel yet
1841	 */
1842	intel_synchronize_irq(dev_priv);
1843	flush_work(&dev_priv->hotplug.dig_port_work);
1844	flush_delayed_work(&dev_priv->hotplug.hotplug_work);
1845
1846	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
1847	seq_printf(m, "Detected: %s\n",
1848		   yesno(delayed_work_pending(&hotplug->reenable_work)));
1849
1850	return 0;
1851}
1852
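/*
 * Writes to i915_hpd_storm_ctl (handler below) accept either a decimal
 * threshold or the literal string "reset", which restores
 * HPD_STORM_DEFAULT_THRESHOLD; a threshold of 0 disables storm detection.
 * Illustrative usage from a shell:
 *
 *   echo 5 > i915_hpd_storm_ctl
 *   echo reset > i915_hpd_storm_ctl
 */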
1853static ssize_t i915_hpd_storm_ctl_write(struct file *file,
1854					const char __user *ubuf, size_t len,
1855					loff_t *offp)
1856{
1857	struct seq_file *m = file->private_data;
1858	struct drm_i915_private *dev_priv = m->private;
1859	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1860	unsigned int new_threshold;
1861	int i;
1862	char *newline;
1863	char tmp[16];
1864
1865	if (len >= sizeof(tmp))
1866		return -EINVAL;
1867
1868	if (copy_from_user(tmp, ubuf, len))
1869		return -EFAULT;
1870
1871	tmp[len] = '\0';
1872
1873	/* Strip newline, if any */
1874	newline = strchr(tmp, '\n');
1875	if (newline)
1876		*newline = '\0';
1877
1878	if (strcmp(tmp, "reset") == 0)
1879		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
1880	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
1881		return -EINVAL;
1882
1883	if (new_threshold > 0)
1884		drm_dbg_kms(&dev_priv->drm,
1885			    "Setting HPD storm detection threshold to %d\n",
1886			    new_threshold);
1887	else
1888		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
1889
1890	spin_lock_irq(&dev_priv->irq_lock);
1891	hotplug->hpd_storm_threshold = new_threshold;
1892	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1893	for_each_hpd_pin(i)
1894		hotplug->stats[i].count = 0;
1895	spin_unlock_irq(&dev_priv->irq_lock);
1896
1897	/* Re-enable hpd immediately if we were in an irq storm */
1898	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1899
1900	return len;
1901}
1902
1903static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
1904{
1905	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
1906}
1907
1908static const struct file_operations i915_hpd_storm_ctl_fops = {
1909	.owner = THIS_MODULE,
1910	.open = i915_hpd_storm_ctl_open,
1911	.read = seq_read,
1912	.llseek = seq_lseek,
1913	.release = single_release,
1914	.write = i915_hpd_storm_ctl_write
1915};
1916
1917static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
1918{
1919	struct drm_i915_private *dev_priv = m->private;
1920
1921	seq_printf(m, "Enabled: %s\n",
1922		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
1923
1924	return 0;
1925}
1926
1927static int
1928i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
1929{
1930	return single_open(file, i915_hpd_short_storm_ctl_show,
1931			   inode->i_private);
1932}
1933
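/*
 * i915_hpd_short_storm_ctl (handler below) takes a boolean as understood
 * by kstrtobool(), or "reset" to restore the platform default of
 * !HAS_DP_MST(). Illustrative usage:
 *
 *   echo 1 > i915_hpd_short_storm_ctl
 */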
1934static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
1935					      const char __user *ubuf,
1936					      size_t len, loff_t *offp)
1937{
1938	struct seq_file *m = file->private_data;
1939	struct drm_i915_private *dev_priv = m->private;
1940	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1941	char *newline;
1942	char tmp[16];
1943	int i;
1944	bool new_state;
1945
1946	if (len >= sizeof(tmp))
1947		return -EINVAL;
1948
1949	if (copy_from_user(tmp, ubuf, len))
1950		return -EFAULT;
1951
1952	tmp[len] = '\0';
1953
1954	/* Strip newline, if any */
1955	newline = strchr(tmp, '\n');
1956	if (newline)
1957		*newline = '\0';
1958
1959	/* Reset to the "default" state for this system */
1960	if (strcmp(tmp, "reset") == 0)
1961		new_state = !HAS_DP_MST(dev_priv);
1962	else if (kstrtobool(tmp, &new_state) != 0)
1963		return -EINVAL;
1964
1965	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
1966		    new_state ? "En" : "Dis");
1967
1968	spin_lock_irq(&dev_priv->irq_lock);
1969	hotplug->hpd_short_storm_enabled = new_state;
1970	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1971	for_each_hpd_pin(i)
1972		hotplug->stats[i].count = 0;
1973	spin_unlock_irq(&dev_priv->irq_lock);
1974
1975	/* Re-enable hpd immediately if we were in an irq storm */
1976	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1977
1978	return len;
1979}
1980
1981static const struct file_operations i915_hpd_short_storm_ctl_fops = {
1982	.owner = THIS_MODULE,
1983	.open = i915_hpd_short_storm_ctl_open,
1984	.read = seq_read,
1985	.llseek = seq_lseek,
1986	.release = single_release,
1987	.write = i915_hpd_short_storm_ctl_write,
1988};
1989
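/*
 * i915_drrs_ctl (set handler below): writing a non-zero value manually
 * enables DRRS on every active eDP output whose current crtc state has
 * has_drrs set; writing 0 disables it again. The file returns -ENODEV
 * on display versions older than 7.
 */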
1990static int i915_drrs_ctl_set(void *data, u64 val)
1991{
1992	struct drm_i915_private *dev_priv = data;
1993	struct drm_device *dev = &dev_priv->drm;
1994	struct intel_crtc *crtc;
1995
1996	if (DISPLAY_VER(dev_priv) < 7)
1997		return -ENODEV;
1998
1999	for_each_intel_crtc(dev, crtc) {
2000		struct drm_connector_list_iter conn_iter;
2001		struct intel_crtc_state *crtc_state;
2002		struct drm_connector *connector;
2003		struct drm_crtc_commit *commit;
2004		int ret;
2005
2006		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
2007		if (ret)
2008			return ret;
2009
2010		crtc_state = to_intel_crtc_state(crtc->base.state);
2011
2012		if (!crtc_state->hw.active ||
2013		    !crtc_state->has_drrs)
2014			goto out;
2015
2016		commit = crtc_state->uapi.commit;
2017		if (commit) {
2018			ret = wait_for_completion_interruptible(&commit->hw_done);
2019			if (ret)
2020				goto out;
2021		}
2022
2023		drm_connector_list_iter_begin(dev, &conn_iter);
2024		drm_for_each_connector_iter(connector, &conn_iter) {
2025			struct intel_encoder *encoder;
2026			struct intel_dp *intel_dp;
2027
2028			if (!(crtc_state->uapi.connector_mask &
2029			      drm_connector_mask(connector)))
2030				continue;
2031
2032			encoder = intel_attached_encoder(to_intel_connector(connector));
2033			if (encoder->type != INTEL_OUTPUT_EDP)
2034				continue;
2035
2036			drm_dbg(&dev_priv->drm,
2037				"Manually %sabling DRRS. %llu\n",
2038				val ? "en" : "dis", val);
2039
2040			intel_dp = enc_to_intel_dp(encoder);
2041			if (val)
2042				intel_edp_drrs_enable(intel_dp,
2043						      crtc_state);
2044			else
2045				intel_edp_drrs_disable(intel_dp,
2046						       crtc_state);
2047		}
2048		drm_connector_list_iter_end(&conn_iter);
2049
2050out:
2051		drm_modeset_unlock(&crtc->base.mutex);
2052		if (ret)
2053			return ret;
2054	}
2055
2056	return 0;
2057}
2058
2059DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
2060
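/*
 * Writing a truthy value to i915_fifo_underrun_reset (handler below)
 * re-arms FIFO underrun reporting on all active pipes and also resets the
 * FBC underrun state; a falsy write is accepted but does nothing.
 * Illustrative usage:
 *
 *   echo 1 > i915_fifo_underrun_reset
 */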
2061static ssize_t
2062i915_fifo_underrun_reset_write(struct file *filp,
2063			       const char __user *ubuf,
2064			       size_t cnt, loff_t *ppos)
2065{
2066	struct drm_i915_private *dev_priv = filp->private_data;
2067	struct intel_crtc *intel_crtc;
2068	struct drm_device *dev = &dev_priv->drm;
2069	int ret;
2070	bool reset;
2071
2072	ret = kstrtobool_from_user(ubuf, cnt, &reset);
2073	if (ret)
2074		return ret;
2075
2076	if (!reset)
2077		return cnt;
2078
2079	for_each_intel_crtc(dev, intel_crtc) {
2080		struct drm_crtc_commit *commit;
2081		struct intel_crtc_state *crtc_state;
2082
2083		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
2084		if (ret)
2085			return ret;
2086
2087		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
2088		commit = crtc_state->uapi.commit;
2089		if (commit) {
2090			ret = wait_for_completion_interruptible(&commit->hw_done);
2091			if (!ret)
2092				ret = wait_for_completion_interruptible(&commit->flip_done);
2093		}
2094
2095		if (!ret && crtc_state->hw.active) {
2096			drm_dbg_kms(&dev_priv->drm,
2097				    "Re-arming FIFO underruns on pipe %c\n",
2098				    pipe_name(intel_crtc->pipe));
2099
2100			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
2101		}
2102
2103		drm_modeset_unlock(&intel_crtc->base.mutex);
2104
2105		if (ret)
2106			return ret;
2107	}
2108
2109	ret = intel_fbc_reset_underrun(dev_priv);
2110	if (ret)
2111		return ret;
2112
2113	return cnt;
2114}
2115
2116static const struct file_operations i915_fifo_underrun_reset_ops = {
2117	.owner = THIS_MODULE,
2118	.open = simple_open,
2119	.write = i915_fifo_underrun_reset_write,
2120	.llseek = default_llseek,
2121};
2122
2123static const struct drm_info_list intel_display_debugfs_list[] = {
2124	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
2125	{"i915_fbc_status", i915_fbc_status, 0},
2126	{"i915_ips_status", i915_ips_status, 0},
2127	{"i915_sr_status", i915_sr_status, 0},
2128	{"i915_opregion", i915_opregion, 0},
2129	{"i915_vbt", i915_vbt, 0},
2130	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
2131	{"i915_edp_psr_status", i915_edp_psr_status, 0},
2132	{"i915_power_domain_info", i915_power_domain_info, 0},
2133	{"i915_dmc_info", i915_dmc_info, 0},
2134	{"i915_display_info", i915_display_info, 0},
2135	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
2136	{"i915_dp_mst_info", i915_dp_mst_info, 0},
2137	{"i915_ddb_info", i915_ddb_info, 0},
2138	{"i915_drrs_status", i915_drrs_status, 0},
2139	{"i915_lpsp_status", i915_lpsp_status, 0},
2140};
2141
2142static const struct {
2143	const char *name;
2144	const struct file_operations *fops;
2145} intel_display_debugfs_files[] = {
2146	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
2147	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
2148	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
2149	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
2150	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
2151	{"i915_dp_test_data", &i915_displayport_test_data_fops},
2152	{"i915_dp_test_type", &i915_displayport_test_type_fops},
2153	{"i915_dp_test_active", &i915_displayport_test_active_fops},
2154	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
2155	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
2156	{"i915_ipc_status", &i915_ipc_status_fops},
2157	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
2158	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
2159};
2160
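/*
 * All of the entries above are created under the DRM minor's debugfs root,
 * which typically ends up at /sys/kernel/debug/dri/<minor>/ (assuming
 * debugfs is mounted in its usual location).
 */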
2161void intel_display_debugfs_register(struct drm_i915_private *i915)
2162{
2163	struct drm_minor *minor = i915->drm.primary;
2164	int i;
2165
2166	for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
2167		debugfs_create_file(intel_display_debugfs_files[i].name,
2168				    S_IRUGO | S_IWUSR,
2169				    minor->debugfs_root,
2170				    to_i915(minor->dev),
2171				    intel_display_debugfs_files[i].fops);
2172	}
2173
2174	drm_debugfs_create_files(intel_display_debugfs_list,
2175				 ARRAY_SIZE(intel_display_debugfs_list),
2176				 minor->debugfs_root, minor);
2177}
2178
2179static int i915_panel_show(struct seq_file *m, void *data)
2180{
2181	struct drm_connector *connector = m->private;
2182	struct intel_dp *intel_dp =
2183		intel_attached_dp(to_intel_connector(connector));
2184
2185	if (connector->status != connector_status_connected)
2186		return -ENODEV;
2187
2188	seq_printf(m, "Panel power up delay: %d\n",
2189		   intel_dp->pps.panel_power_up_delay);
2190	seq_printf(m, "Panel power down delay: %d\n",
2191		   intel_dp->pps.panel_power_down_delay);
2192	seq_printf(m, "Backlight on delay: %d\n",
2193		   intel_dp->pps.backlight_on_delay);
2194	seq_printf(m, "Backlight off delay: %d\n",
2195		   intel_dp->pps.backlight_off_delay);
2196
2197	return 0;
2198}
2199DEFINE_SHOW_ATTRIBUTE(i915_panel);
2200
2201static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
2202{
2203	struct drm_connector *connector = m->private;
2204	struct drm_i915_private *i915 = to_i915(connector->dev);
2205	struct intel_connector *intel_connector = to_intel_connector(connector);
2206	int ret;
2207
2208	ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
2209	if (ret)
2210		return ret;
2211
2212	if (!connector->encoder || connector->status != connector_status_connected) {
2213		ret = -ENODEV;
2214		goto out;
2215	}
2216
2217	seq_printf(m, "%s:%d HDCP version: ", connector->name,
2218		   connector->base.id);
2219	intel_hdcp_info(m, intel_connector);
2220
2221out:
2222	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
2223
2224	return ret;
2225}
2226DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
2227
2228static int i915_psr_status_show(struct seq_file *m, void *data)
2229{
2230	struct drm_connector *connector = m->private;
2231	struct intel_dp *intel_dp =
2232		intel_attached_dp(to_intel_connector(connector));
2233
2234	return intel_psr_status(m, intel_dp);
2235}
2236DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
2237
2238#define LPSP_CAPABLE(COND) (COND ? seq_puts(m, "LPSP: capable\n") : \
2239				seq_puts(m, "LPSP: incapable\n"))
2240
2241static int i915_lpsp_capability_show(struct seq_file *m, void *data)
2242{
2243	struct drm_connector *connector = m->private;
2244	struct drm_i915_private *i915 = to_i915(connector->dev);
2245	struct intel_encoder *encoder;
2246
2247	encoder = intel_attached_encoder(to_intel_connector(connector));
2248	if (!encoder)
2249		return -ENODEV;
2250
2251	if (connector->status != connector_status_connected)
2252		return -ENODEV;
2253
2254	switch (DISPLAY_VER(i915)) {
2255	case 12:
2256		/*
2257		 * TGL can actually drive LPSP on ports up to DDI_C, but no
2258		 * TGL SKU has DDI_C physically connected, and the driver does
2259		 * not even initialize the DDI_C port on gen12.
2260		 */
2261		LPSP_CAPABLE(encoder->port <= PORT_B);
2262		break;
2263	case 11:
2264		LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2265			     connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2266		break;
2267	case 10:
2268	case 9:
2269		LPSP_CAPABLE(encoder->port == PORT_A &&
2270			     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2271			     connector->connector_type == DRM_MODE_CONNECTOR_eDP  ||
2272			     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
2273		break;
2274	default:
2275		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
2276			LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2277	}
2278
2279	return 0;
2280}
2281DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
2282
2283static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
2284{
2285	struct drm_connector *connector = m->private;
2286	struct drm_device *dev = connector->dev;
2287	struct drm_crtc *crtc;
2288	struct intel_dp *intel_dp;
2289	struct drm_modeset_acquire_ctx ctx;
2290	struct intel_crtc_state *crtc_state = NULL;
2291	int ret = 0;
2292	bool try_again = false;
2293
2294	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2295
2296	do {
2297		try_again = false;
2298		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
2299				       &ctx);
2300		if (ret) {
2301			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
2302				try_again = true;
2303				continue;
2304			}
2305			break;
2306		}
2307		crtc = connector->state->crtc;
2308		if (connector->status != connector_status_connected || !crtc) {
2309			ret = -ENODEV;
2310			break;
2311		}
2312		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2313		if (ret == -EDEADLK) {
2314			ret = drm_modeset_backoff(&ctx);
2315			if (!ret) {
2316				try_again = true;
2317				continue;
2318			}
2319			break;
2320		} else if (ret) {
2321			break;
2322		}
2323		intel_dp = intel_attached_dp(to_intel_connector(connector));
2324		crtc_state = to_intel_crtc_state(crtc->state);
2325		seq_printf(m, "DSC_Enabled: %s\n",
2326			   yesno(crtc_state->dsc.compression_enable));
2327		seq_printf(m, "DSC_Sink_Support: %s\n",
2328			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
2329		seq_printf(m, "Force_DSC_Enable: %s\n",
2330			   yesno(intel_dp->force_dsc_en));
2331		if (!intel_dp_is_edp(intel_dp))
2332			seq_printf(m, "FEC_Sink_Support: %s\n",
2333				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
2334	} while (try_again);
2335
2336	drm_modeset_drop_locks(&ctx);
2337	drm_modeset_acquire_fini(&ctx);
2338
2339	return ret;
2340}
2341
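/*
 * Writes to i915_dsc_fec_support (handler below) parse a boolean with
 * kstrtobool() and simply latch it into intel_dp->force_dsc_en; the forced
 * value is presumably honoured on the connector's next modeset.
 * Illustrative usage:
 *
 *   echo 1 > i915_dsc_fec_support
 */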
2342static ssize_t i915_dsc_fec_support_write(struct file *file,
2343					  const char __user *ubuf,
2344					  size_t len, loff_t *offp)
2345{
2346	bool dsc_enable = false;
2347	int ret;
2348	struct drm_connector *connector =
2349		((struct seq_file *)file->private_data)->private;
2350	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
2351	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2352	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2353
2354	if (len == 0)
2355		return 0;
2356
2357	drm_dbg(&i915->drm,
2358		"Copied %zu bytes from user to force DSC\n", len);
2359
2360	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
2361	if (ret < 0)
2362		return ret;
2363
2364	drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
2365		(dsc_enable) ? "true" : "false");
2366	intel_dp->force_dsc_en = dsc_enable;
2367
2368	*offp += len;
2369	return len;
2370}
2371
2372static int i915_dsc_fec_support_open(struct inode *inode,
2373				     struct file *file)
2374{
2375	return single_open(file, i915_dsc_fec_support_show,
2376			   inode->i_private);
2377}
2378
2379static const struct file_operations i915_dsc_fec_support_fops = {
2380	.owner = THIS_MODULE,
2381	.open = i915_dsc_fec_support_open,
2382	.read = seq_read,
2383	.llseek = seq_lseek,
2384	.release = single_release,
2385	.write = i915_dsc_fec_support_write
2386};
2387
2388/**
2389 * intel_connector_debugfs_add - add i915 specific connector debugfs files
2390 * @connector: pointer to a registered drm_connector
2391 *
2392 * Cleanup will be done by drm_connector_unregister() through a call to
2393 * drm_debugfs_connector_remove().
2394 *
2395 * Returns 0 on success, negative error codes on error.
2396 */
2397int intel_connector_debugfs_add(struct drm_connector *connector)
2398{
2399	struct dentry *root = connector->debugfs_entry;
2400	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2401
2402	/* The connector must have been registered beforehand. */
2403	if (!root)
2404		return -ENODEV;
2405
2406	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
2407		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
2408				    connector, &i915_panel_fops);
2409		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
2410				    connector, &i915_psr_sink_status_fops);
2411	}
2412
2413	if (HAS_PSR(dev_priv) &&
2414	    connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
2415		debugfs_create_file("i915_psr_status", 0444, root,
2416				    connector, &i915_psr_status_fops);
2417	}
2418
2419	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2420	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2421	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
2422		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
2423				    connector, &i915_hdcp_sink_capability_fops);
2424	}
2425
2426	if ((DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) &&
	    ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
	      !to_intel_connector(connector)->mst_port) ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
2427		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
2428				    connector, &i915_dsc_fec_support_fops);
2429
2430	/* Legacy panels don't support LPSP on any platform */
2431	if ((DISPLAY_VER(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
2432	     IS_BROADWELL(dev_priv)) &&
2433	     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2434	     connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
2435	     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2436	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2437	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIB))
2438		debugfs_create_file("i915_lpsp_capability", 0444, root,
2439				    connector, &i915_lpsp_capability_fops);
2440
2441	return 0;
2442}
2443
2444/**
2445 * intel_crtc_debugfs_add - add i915 specific crtc debugfs files
2446 * @crtc: pointer to a drm_crtc
2447 *
2448 * Returns 0 on success, negative error codes on error.
2449 *
2450 * Failure to add debugfs entries should generally be ignored.
2451 */
2452int intel_crtc_debugfs_add(struct drm_crtc *crtc)
2453{
2454	if (!crtc->debugfs_entry)
2455		return -ENODEV;
2456
2457	crtc_updates_add(crtc);
2458	return 0;
2459}
v5.9
   1// SPDX-License-Identifier: MIT
   2/*
   3 * Copyright © 2020 Intel Corporation
   4 */
   5
   6#include <drm/drm_debugfs.h>
   7#include <drm/drm_fourcc.h>
   8
   9#include "i915_debugfs.h"
  10#include "intel_csr.h"
  11#include "intel_display_debugfs.h"
  12#include "intel_display_power.h"
 
  13#include "intel_display_types.h"
 
  14#include "intel_dp.h"
  15#include "intel_fbc.h"
  16#include "intel_hdcp.h"
  17#include "intel_hdmi.h"
  18#include "intel_pm.h"
  19#include "intel_psr.h"
  20#include "intel_sideband.h"
 
  21
  22static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
  23{
  24	return to_i915(node->minor->dev);
  25}
  26
  27static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
  28{
  29	struct drm_i915_private *dev_priv = node_to_i915(m->private);
  30
  31	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
  32		   dev_priv->fb_tracking.busy_bits);
  33
  34	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
  35		   dev_priv->fb_tracking.flip_bits);
  36
  37	return 0;
  38}
  39
  40static int i915_fbc_status(struct seq_file *m, void *unused)
  41{
  42	struct drm_i915_private *dev_priv = node_to_i915(m->private);
  43	struct intel_fbc *fbc = &dev_priv->fbc;
  44	intel_wakeref_t wakeref;
  45
  46	if (!HAS_FBC(dev_priv))
  47		return -ENODEV;
  48
  49	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
  50	mutex_lock(&fbc->lock);
  51
  52	if (intel_fbc_is_active(dev_priv))
  53		seq_puts(m, "FBC enabled\n");
  54	else
  55		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
  56
  57	if (intel_fbc_is_active(dev_priv)) {
  58		u32 mask;
  59
  60		if (INTEL_GEN(dev_priv) >= 8)
  61			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
  62		else if (INTEL_GEN(dev_priv) >= 7)
  63			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
  64		else if (INTEL_GEN(dev_priv) >= 5)
  65			mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
  66		else if (IS_G4X(dev_priv))
  67			mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
  68		else
  69			mask = intel_de_read(dev_priv, FBC_STATUS) &
  70				(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
  71
  72		seq_printf(m, "Compressing: %s\n", yesno(mask));
  73	}
  74
  75	mutex_unlock(&fbc->lock);
  76	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
  77
  78	return 0;
  79}
  80
  81static int i915_fbc_false_color_get(void *data, u64 *val)
  82{
  83	struct drm_i915_private *dev_priv = data;
  84
  85	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
  86		return -ENODEV;
  87
  88	*val = dev_priv->fbc.false_color;
  89
  90	return 0;
  91}
  92
  93static int i915_fbc_false_color_set(void *data, u64 val)
  94{
  95	struct drm_i915_private *dev_priv = data;
  96	u32 reg;
  97
  98	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
  99		return -ENODEV;
 100
 101	mutex_lock(&dev_priv->fbc.lock);
 102
 103	reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
 104	dev_priv->fbc.false_color = val;
 105
 106	intel_de_write(dev_priv, ILK_DPFC_CONTROL,
 107		       val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));
 108
 109	mutex_unlock(&dev_priv->fbc.lock);
 110	return 0;
 111}
 112
 113DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
 114			i915_fbc_false_color_get, i915_fbc_false_color_set,
 115			"%llu\n");
 116
 117static int i915_ips_status(struct seq_file *m, void *unused)
 118{
 119	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 120	intel_wakeref_t wakeref;
 121
 122	if (!HAS_IPS(dev_priv))
 123		return -ENODEV;
 124
 125	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 126
 127	seq_printf(m, "Enabled by kernel parameter: %s\n",
 128		   yesno(dev_priv->params.enable_ips));
 129
 130	if (INTEL_GEN(dev_priv) >= 8) {
 131		seq_puts(m, "Currently: unknown\n");
 132	} else {
 133		if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
 134			seq_puts(m, "Currently: enabled\n");
 135		else
 136			seq_puts(m, "Currently: disabled\n");
 137	}
 138
 139	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 140
 141	return 0;
 142}
 143
 144static int i915_sr_status(struct seq_file *m, void *unused)
 145{
 146	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 147	intel_wakeref_t wakeref;
 148	bool sr_enabled = false;
 149
 150	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 151
 152	if (INTEL_GEN(dev_priv) >= 9)
 153		/* no global SR status; inspect per-plane WM */;
 154	else if (HAS_PCH_SPLIT(dev_priv))
 155		sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
 156	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
 157		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
 158		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
 159	else if (IS_I915GM(dev_priv))
 160		sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
 161	else if (IS_PINEVIEW(dev_priv))
 162		sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
 163	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 164		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
 165
 166	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
 167
 168	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
 169
 170	return 0;
 171}
 172
 173static int i915_opregion(struct seq_file *m, void *unused)
 174{
 175	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
 176
 177	if (opregion->header)
 178		seq_write(m, opregion->header, OPREGION_SIZE);
 179
 180	return 0;
 181}
 182
 183static int i915_vbt(struct seq_file *m, void *unused)
 184{
 185	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
 186
 187	if (opregion->vbt)
 188		seq_write(m, opregion->vbt, opregion->vbt_size);
 189
 190	return 0;
 191}
 192
 193static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 194{
 195	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 196	struct drm_device *dev = &dev_priv->drm;
 197	struct intel_framebuffer *fbdev_fb = NULL;
 198	struct drm_framebuffer *drm_fb;
 199
 200#ifdef CONFIG_DRM_FBDEV_EMULATION
 201	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
 202		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
 203
 204		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
 205			   fbdev_fb->base.width,
 206			   fbdev_fb->base.height,
 207			   fbdev_fb->base.format->depth,
 208			   fbdev_fb->base.format->cpp[0] * 8,
 209			   fbdev_fb->base.modifier,
 210			   drm_framebuffer_read_refcount(&fbdev_fb->base));
 211		i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
 212		seq_putc(m, '\n');
 213	}
 214#endif
 215
 216	mutex_lock(&dev->mode_config.fb_lock);
 217	drm_for_each_fb(drm_fb, dev) {
 218		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
 219		if (fb == fbdev_fb)
 220			continue;
 221
 222		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
 223			   fb->base.width,
 224			   fb->base.height,
 225			   fb->base.format->depth,
 226			   fb->base.format->cpp[0] * 8,
 227			   fb->base.modifier,
 228			   drm_framebuffer_read_refcount(&fb->base));
 229		i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
 230		seq_putc(m, '\n');
 231	}
 232	mutex_unlock(&dev->mode_config.fb_lock);
 233
 234	return 0;
 235}
 236
 237static int i915_psr_sink_status_show(struct seq_file *m, void *data)
 238{
 239	u8 val;
 240	static const char * const sink_status[] = {
 241		"inactive",
 242		"transition to active, capture and display",
 243		"active, display from RFB",
 244		"active, capture and display on sink device timings",
 245		"transition to inactive, capture and display, timing re-sync",
 246		"reserved",
 247		"reserved",
 248		"sink internal error",
 249	};
 250	struct drm_connector *connector = m->private;
 251	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 252	struct intel_dp *intel_dp =
 253		intel_attached_dp(to_intel_connector(connector));
 254	int ret;
 255
 256	if (!CAN_PSR(dev_priv)) {
 257		seq_puts(m, "PSR Unsupported\n");
 258		return -ENODEV;
 259	}
 260
 261	if (connector->status != connector_status_connected)
 262		return -ENODEV;
 263
 264	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
 265
 266	if (ret == 1) {
 267		const char *str = "unknown";
 268
 269		val &= DP_PSR_SINK_STATE_MASK;
 270		if (val < ARRAY_SIZE(sink_status))
 271			str = sink_status[val];
 272		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
 273	} else {
 274		return ret;
 275	}
 276
 277	return 0;
 278}
 279DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
 280
 281static void
 282psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
 283{
 
 
 284	u32 val, status_val;
 285	const char *status = "unknown";
 286
 287	if (dev_priv->psr.psr2_enabled) {
 288		static const char * const live_status[] = {
 289			"IDLE",
 290			"CAPTURE",
 291			"CAPTURE_FS",
 292			"SLEEP",
 293			"BUFON_FW",
 294			"ML_UP",
 295			"SU_STANDBY",
 296			"FAST_SLEEP",
 297			"DEEP_SLEEP",
 298			"BUF_ON",
 299			"TG_ON"
 300		};
 301		val = intel_de_read(dev_priv,
 302				    EDP_PSR2_STATUS(dev_priv->psr.transcoder));
 303		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
 304			      EDP_PSR2_STATUS_STATE_SHIFT;
 305		if (status_val < ARRAY_SIZE(live_status))
 306			status = live_status[status_val];
 307	} else {
 308		static const char * const live_status[] = {
 309			"IDLE",
 310			"SRDONACK",
 311			"SRDENT",
 312			"BUFOFF",
 313			"BUFON",
 314			"AUXACK",
 315			"SRDOFFACK",
 316			"SRDENT_ON",
 317		};
 318		val = intel_de_read(dev_priv,
 319				    EDP_PSR_STATUS(dev_priv->psr.transcoder));
 320		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
 321			      EDP_PSR_STATUS_STATE_SHIFT;
 322		if (status_val < ARRAY_SIZE(live_status))
 323			status = live_status[status_val];
 324	}
 325
 326	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
 327}
 328
 329static int i915_edp_psr_status(struct seq_file *m, void *data)
 330{
 331	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 332	struct i915_psr *psr = &dev_priv->psr;
 333	intel_wakeref_t wakeref;
 334	const char *status;
 335	bool enabled;
 336	u32 val;
 337
 338	if (!HAS_PSR(dev_priv))
 339		return -ENODEV;
 340
 341	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
 342	if (psr->dp)
 343		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
 344	seq_puts(m, "\n");
 345
 346	if (!psr->sink_support)
 347		return 0;
 348
 349	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 350	mutex_lock(&psr->lock);
 351
 352	if (psr->enabled)
 353		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
 354	else
 355		status = "disabled";
 356	seq_printf(m, "PSR mode: %s\n", status);
 357
 358	if (!psr->enabled) {
 359		seq_printf(m, "PSR sink not reliable: %s\n",
 360			   yesno(psr->sink_not_reliable));
 361
 362		goto unlock;
 363	}
 364
 365	if (psr->psr2_enabled) {
 366		val = intel_de_read(dev_priv,
 367				    EDP_PSR2_CTL(dev_priv->psr.transcoder));
 368		enabled = val & EDP_PSR2_ENABLE;
 369	} else {
 370		val = intel_de_read(dev_priv,
 371				    EDP_PSR_CTL(dev_priv->psr.transcoder));
 372		enabled = val & EDP_PSR_ENABLE;
 373	}
 374	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
 375		   enableddisabled(enabled), val);
 376	psr_source_status(dev_priv, m);
 377	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
 378		   psr->busy_frontbuffer_bits);
 379
 380	/*
 381	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
 382	 */
 383	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 384		val = intel_de_read(dev_priv,
 385				    EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
 386		val &= EDP_PSR_PERF_CNT_MASK;
 387		seq_printf(m, "Performance counter: %u\n", val);
 388	}
 389
 390	if (psr->debug & I915_PSR_DEBUG_IRQ) {
 391		seq_printf(m, "Last attempted entry at: %lld\n",
 392			   psr->last_entry_attempt);
 393		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
 394	}
 395
 396	if (psr->psr2_enabled) {
 397		u32 su_frames_val[3];
 398		int frame;
 399
 400		/*
 401		 * Reading all 3 registers before hand to minimize crossing a
 402		 * frame boundary between register reads
 403		 */
 404		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
 405			val = intel_de_read(dev_priv,
 406					    PSR2_SU_STATUS(dev_priv->psr.transcoder, frame));
 407			su_frames_val[frame / 3] = val;
 408		}
 409
 410		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
 411
 412		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
 413			u32 su_blocks;
 414
 415			su_blocks = su_frames_val[frame / 3] &
 416				    PSR2_SU_STATUS_MASK(frame);
 417			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
 418			seq_printf(m, "%d\t%d\n", frame, su_blocks);
 419		}
 
 
 
 420	}
 421
 422unlock:
 423	mutex_unlock(&psr->lock);
 424	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 425
 426	return 0;
 427}
 428
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 429static int
 430i915_edp_psr_debug_set(void *data, u64 val)
 431{
 432	struct drm_i915_private *dev_priv = data;
 
 433	intel_wakeref_t wakeref;
 434	int ret;
 
 
 
 435
 436	if (!CAN_PSR(dev_priv))
 437		return -ENODEV;
 438
 439	drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
 440
 441	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 442
 443	ret = intel_psr_debug_set(dev_priv, val);
 
 444
 445	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 
 446
 447	return ret;
 448}
 449
 450static int
 451i915_edp_psr_debug_get(void *data, u64 *val)
 452{
 453	struct drm_i915_private *dev_priv = data;
 
 454
 455	if (!CAN_PSR(dev_priv))
 456		return -ENODEV;
 457
 458	*val = READ_ONCE(dev_priv->psr.debug);
 459	return 0;
 
 
 
 
 
 
 
 460}
 461
 462DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
 463			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
 464			"%llu\n");
 465
 466static int i915_power_domain_info(struct seq_file *m, void *unused)
 467{
 468	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 469	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 470	int i;
 471
 472	mutex_lock(&power_domains->lock);
 473
 474	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
 475	for (i = 0; i < power_domains->power_well_count; i++) {
 476		struct i915_power_well *power_well;
 477		enum intel_display_power_domain power_domain;
 478
 479		power_well = &power_domains->power_wells[i];
 480		seq_printf(m, "%-25s %d\n", power_well->desc->name,
 481			   power_well->count);
 482
 483		for_each_power_domain(power_domain, power_well->desc->domains)
 484			seq_printf(m, "  %-23s %d\n",
 485				 intel_display_power_domain_str(power_domain),
 486				 power_domains->domain_use_count[power_domain]);
 487	}
 488
 489	mutex_unlock(&power_domains->lock);
 490
 491	return 0;
 492}
 493
 494static int i915_dmc_info(struct seq_file *m, void *unused)
 495{
 496	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 497	intel_wakeref_t wakeref;
 498	struct intel_csr *csr;
 499	i915_reg_t dc5_reg, dc6_reg = {};
 500
 501	if (!HAS_CSR(dev_priv))
 502		return -ENODEV;
 503
 504	csr = &dev_priv->csr;
 505
 506	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 507
 508	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
 509	seq_printf(m, "path: %s\n", csr->fw_path);
 510
 511	if (!csr->dmc_payload)
 512		goto out;
 513
 514	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
 515		   CSR_VERSION_MINOR(csr->version));
 
 
 
 
 
 
 
 
 516
 517	if (INTEL_GEN(dev_priv) >= 12) {
 518		dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
 519		dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
 520		/*
 521		 * NOTE: DMC_DEBUG3 is a general purpose reg.
 522		 * According to B.Specs:49196 DMC f/w reuses DC5/6 counter
 523		 * reg for DC3CO debugging and validation,
 524		 * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter.
 525		 */
 526		seq_printf(m, "DC3CO count: %d\n",
 527			   intel_de_read(dev_priv, DMC_DEBUG3));
 528	} else {
 529		dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
 530						 SKL_CSR_DC3_DC5_COUNT;
 531		if (!IS_GEN9_LP(dev_priv))
 532			dc6_reg = SKL_CSR_DC5_DC6_COUNT;
 533	}
 534
 535	seq_printf(m, "DC3 -> DC5 count: %d\n",
 536		   intel_de_read(dev_priv, dc5_reg));
 537	if (dc6_reg.reg)
 538		seq_printf(m, "DC5 -> DC6 count: %d\n",
 539			   intel_de_read(dev_priv, dc6_reg));
 540
 541out:
 542	seq_printf(m, "program base: 0x%08x\n",
 543		   intel_de_read(dev_priv, CSR_PROGRAM(0)));
 544	seq_printf(m, "ssp base: 0x%08x\n",
 545		   intel_de_read(dev_priv, CSR_SSP_BASE));
 546	seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, CSR_HTP_SKL));
 547
 548	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 549
 550	return 0;
 551}
 552
 553static void intel_seq_print_mode(struct seq_file *m, int tabs,
 554				 const struct drm_display_mode *mode)
 555{
 556	int i;
 557
 558	for (i = 0; i < tabs; i++)
 559		seq_putc(m, '\t');
 560
 561	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
 562}
 563
 564static void intel_encoder_info(struct seq_file *m,
 565			       struct intel_crtc *crtc,
 566			       struct intel_encoder *encoder)
 567{
 568	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 569	struct drm_connector_list_iter conn_iter;
 570	struct drm_connector *connector;
 571
 572	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
 573		   encoder->base.base.id, encoder->base.name);
 574
 575	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
 576	drm_for_each_connector_iter(connector, &conn_iter) {
 577		const struct drm_connector_state *conn_state =
 578			connector->state;
 579
 580		if (conn_state->best_encoder != &encoder->base)
 581			continue;
 582
 583		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
 584			   connector->base.id, connector->name);
 585	}
 586	drm_connector_list_iter_end(&conn_iter);
 587}
 588
 589static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
 590{
 591	const struct drm_display_mode *mode = panel->fixed_mode;
 592
 593	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
 594}
 595
 596static void intel_hdcp_info(struct seq_file *m,
 597			    struct intel_connector *intel_connector)
 598{
 599	bool hdcp_cap, hdcp2_cap;
 600
 
 
 
 
 
 601	hdcp_cap = intel_hdcp_capable(intel_connector);
 602	hdcp2_cap = intel_hdcp2_capable(intel_connector);
 603
 604	if (hdcp_cap)
 605		seq_puts(m, "HDCP1.4 ");
 606	if (hdcp2_cap)
 607		seq_puts(m, "HDCP2.2 ");
 608
 609	if (!hdcp_cap && !hdcp2_cap)
 610		seq_puts(m, "None");
 611
 
 612	seq_puts(m, "\n");
 613}
 614
 615static void intel_dp_info(struct seq_file *m,
 616			  struct intel_connector *intel_connector)
 617{
 618	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
 619	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
 
 620
 621	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
 622	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
 623	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
 624		intel_panel_info(m, &intel_connector->panel);
 625
 626	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
 627				&intel_dp->aux);
 628	if (intel_connector->hdcp.shim) {
 629		seq_puts(m, "\tHDCP version: ");
 630		intel_hdcp_info(m, intel_connector);
 631	}
 632}
 633
 634static void intel_dp_mst_info(struct seq_file *m,
 635			      struct intel_connector *intel_connector)
 636{
 637	bool has_audio = intel_connector->port->has_audio;
 638
 639	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
 640}
 641
 642static void intel_hdmi_info(struct seq_file *m,
 643			    struct intel_connector *intel_connector)
 644{
 645	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
 646	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
 647
 648	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
 649	if (intel_connector->hdcp.shim) {
 650		seq_puts(m, "\tHDCP version: ");
 651		intel_hdcp_info(m, intel_connector);
 652	}
 653}
 654
 655static void intel_lvds_info(struct seq_file *m,
 656			    struct intel_connector *intel_connector)
 657{
 658	intel_panel_info(m, &intel_connector->panel);
 659}
 660
 661static void intel_connector_info(struct seq_file *m,
 662				 struct drm_connector *connector)
 663{
 664	struct intel_connector *intel_connector = to_intel_connector(connector);
 665	const struct drm_connector_state *conn_state = connector->state;
 666	struct intel_encoder *encoder =
 667		to_intel_encoder(conn_state->best_encoder);
 668	const struct drm_display_mode *mode;
 669
 670	seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
 671		   connector->base.id, connector->name,
 672		   drm_get_connector_status_name(connector->status));
 673
 674	if (connector->status == connector_status_disconnected)
 675		return;
 676
 677	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
 678		   connector->display_info.width_mm,
 679		   connector->display_info.height_mm);
 680	seq_printf(m, "\tsubpixel order: %s\n",
 681		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
 682	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
 683
 684	if (!encoder)
 685		return;
 686
 687	switch (connector->connector_type) {
 688	case DRM_MODE_CONNECTOR_DisplayPort:
 689	case DRM_MODE_CONNECTOR_eDP:
 690		if (encoder->type == INTEL_OUTPUT_DP_MST)
 691			intel_dp_mst_info(m, intel_connector);
 692		else
 693			intel_dp_info(m, intel_connector);
 694		break;
 695	case DRM_MODE_CONNECTOR_LVDS:
 696		if (encoder->type == INTEL_OUTPUT_LVDS)
 697			intel_lvds_info(m, intel_connector);
 698		break;
 699	case DRM_MODE_CONNECTOR_HDMIA:
 700		if (encoder->type == INTEL_OUTPUT_HDMI ||
 701		    encoder->type == INTEL_OUTPUT_DDI)
 702			intel_hdmi_info(m, intel_connector);
 703		break;
 704	default:
 705		break;
 706	}
 707
 
 
 
 708	seq_printf(m, "\tmodes:\n");
 709	list_for_each_entry(mode, &connector->modes, head)
 710		intel_seq_print_mode(m, 2, mode);
 711}
 712
 713static const char *plane_type(enum drm_plane_type type)
 714{
 715	switch (type) {
 716	case DRM_PLANE_TYPE_OVERLAY:
 717		return "OVL";
 718	case DRM_PLANE_TYPE_PRIMARY:
 719		return "PRI";
 720	case DRM_PLANE_TYPE_CURSOR:
 721		return "CUR";
 722	/*
 723	 * Deliberately omitting default: to generate compiler warnings
 724	 * when a new drm_plane_type gets added.
 725	 */
 726	}
 727
 728	return "unknown";
 729}
 730
 731static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
 732{
 733	/*
 734	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
 735	 * will print them all to visualize if the values are misused
 736	 */
 737	snprintf(buf, bufsize,
 738		 "%s%s%s%s%s%s(0x%08x)",
 739		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
 740		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
 741		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
 742		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
 743		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
 744		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
 745		 rotation);
 746}
 747
 
 
 
 
 
 
 
 
 
 
 
 748static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
 749{
 750	const struct intel_plane_state *plane_state =
 751		to_intel_plane_state(plane->base.state);
 752	const struct drm_framebuffer *fb = plane_state->uapi.fb;
 753	struct drm_format_name_buf format_name;
 754	struct drm_rect src, dst;
 755	char rot_str[48];
 756
 757	src = drm_plane_state_src(&plane_state->uapi);
 758	dst = drm_plane_state_dest(&plane_state->uapi);
 759
 760	if (fb)
 761		drm_get_format_name(fb->format->format, &format_name);
 762
 763	plane_rotation(rot_str, sizeof(rot_str),
 764		       plane_state->uapi.rotation);
 765
 766	seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
 767		   fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
 768		   fb ? fb->width : 0, fb ? fb->height : 0,
 769		   DRM_RECT_FP_ARG(&src),
 770		   DRM_RECT_ARG(&dst),
 771		   rot_str);
 
 
 
 
 
 
 
 
 
 772}
 773
 774static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
 775{
 776	const struct intel_plane_state *plane_state =
 777		to_intel_plane_state(plane->base.state);
 778	const struct drm_framebuffer *fb = plane_state->hw.fb;
 779	struct drm_format_name_buf format_name;
 780	char rot_str[48];
 781
 782	if (!fb)
 783		return;
 784
 785	drm_get_format_name(fb->format->format, &format_name);
 786
 787	plane_rotation(rot_str, sizeof(rot_str),
 788		       plane_state->hw.rotation);
 789
 790	seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
 791		   fb->base.id, format_name.str,
 792		   fb->width, fb->height,
 
 793		   yesno(plane_state->uapi.visible),
 794		   DRM_RECT_FP_ARG(&plane_state->uapi.src),
 795		   DRM_RECT_ARG(&plane_state->uapi.dst),
 796		   rot_str);
 797}
 798
 799static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
 800{
 801	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 802	struct intel_plane *plane;
 803
 804	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
 805		seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
 806			   plane->base.base.id, plane->base.name,
 807			   plane_type(plane->base.type));
 808		intel_plane_uapi_info(m, plane);
 809		intel_plane_hw_info(m, plane);
 810	}
 811}
 812
 813static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
 814{
 815	const struct intel_crtc_state *crtc_state =
 816		to_intel_crtc_state(crtc->base.state);
 817	int num_scalers = crtc->num_scalers;
 818	int i;
 819
 820	/* Not all platformas have a scaler */
 821	if (num_scalers) {
 822		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
 823			   num_scalers,
 824			   crtc_state->scaler_state.scaler_users,
 825			   crtc_state->scaler_state.scaler_id);
 826
 827		for (i = 0; i < num_scalers; i++) {
 828			const struct intel_scaler *sc =
 829				&crtc_state->scaler_state.scalers[i];
 830
 831			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
 832				   i, yesno(sc->in_use), sc->mode);
 833		}
 834		seq_puts(m, "\n");
 835	} else {
 836		seq_puts(m, "\tNo scalers available on this platform\n");
 837	}
 838}
 839
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 840static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
 841{
 842	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 843	const struct intel_crtc_state *crtc_state =
 844		to_intel_crtc_state(crtc->base.state);
 845	struct intel_encoder *encoder;
 846
 847	seq_printf(m, "[CRTC:%d:%s]:\n",
 848		   crtc->base.base.id, crtc->base.name);
 849
 850	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
 851		   yesno(crtc_state->uapi.enable),
 852		   yesno(crtc_state->uapi.active),
 853		   DRM_MODE_ARG(&crtc_state->uapi.mode));
 854
 855	if (crtc_state->hw.enable) {
 856		seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
 857			   yesno(crtc_state->hw.active),
 858			   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
 859
 860		seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
 861			   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
 862			   yesno(crtc_state->dither), crtc_state->pipe_bpp);
 863
 864		intel_scaler_info(m, crtc);
 865	}
 866
 
 
 
 
 
 
 867	for_each_intel_encoder_mask(&dev_priv->drm, encoder,
 868				    crtc_state->uapi.encoder_mask)
 869		intel_encoder_info(m, crtc, encoder);
 870
 871	intel_plane_info(m, crtc);
 872
 873	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
 874		   yesno(!crtc->cpu_fifo_underrun_disabled),
 875		   yesno(!crtc->pch_fifo_underrun_disabled));
 
 
 876}
 877
 878static int i915_display_info(struct seq_file *m, void *unused)
 879{
 880	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 881	struct drm_device *dev = &dev_priv->drm;
 882	struct intel_crtc *crtc;
 883	struct drm_connector *connector;
 884	struct drm_connector_list_iter conn_iter;
 885	intel_wakeref_t wakeref;
 886
 887	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 888
 889	drm_modeset_lock_all(dev);
 890
 891	seq_printf(m, "CRTC info\n");
 892	seq_printf(m, "---------\n");
 893	for_each_intel_crtc(dev, crtc)
 894		intel_crtc_info(m, crtc);
 895
 896	seq_printf(m, "\n");
 897	seq_printf(m, "Connector info\n");
 898	seq_printf(m, "--------------\n");
 899	drm_connector_list_iter_begin(dev, &conn_iter);
 900	drm_for_each_connector_iter(connector, &conn_iter)
 901		intel_connector_info(m, connector);
 902	drm_connector_list_iter_end(&conn_iter);
 903
 904	drm_modeset_unlock_all(dev);
 905
 906	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 907
 908	return 0;
 909}
 910
 911static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 912{
 913	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 914	struct drm_device *dev = &dev_priv->drm;
 915	int i;
 916
 917	drm_modeset_lock_all(dev);
 918
 919	seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
 920		   dev_priv->dpll.ref_clks.nssc,
 921		   dev_priv->dpll.ref_clks.ssc);
 922
 923	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
 924		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
 925
 926		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
 927			   pll->info->id);
 928		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
 929			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
 930		seq_printf(m, " tracked hardware state:\n");
 931		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
 932		seq_printf(m, " dpll_md: 0x%08x\n",
 933			   pll->state.hw_state.dpll_md);
 934		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
 935		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
 936		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
 937		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
 938		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
 939		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
 940			   pll->state.hw_state.mg_refclkin_ctl);
 941		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
 942			   pll->state.hw_state.mg_clktop2_coreclkctl1);
 943		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
 944			   pll->state.hw_state.mg_clktop2_hsclkctl);
 945		seq_printf(m, " mg_pll_div0:  0x%08x\n",
 946			   pll->state.hw_state.mg_pll_div0);
 947		seq_printf(m, " mg_pll_div1:  0x%08x\n",
 948			   pll->state.hw_state.mg_pll_div1);
 949		seq_printf(m, " mg_pll_lf:    0x%08x\n",
 950			   pll->state.hw_state.mg_pll_lf);
 951		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
 952			   pll->state.hw_state.mg_pll_frac_lock);
 953		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
 954			   pll->state.hw_state.mg_pll_ssc);
 955		seq_printf(m, " mg_pll_bias:  0x%08x\n",
 956			   pll->state.hw_state.mg_pll_bias);
 957		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
 958			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
 959	}
 960	drm_modeset_unlock_all(dev);
 961
 962	return 0;
 963}
 964
 965static int i915_ipc_status_show(struct seq_file *m, void *data)
 966{
 967	struct drm_i915_private *dev_priv = m->private;
 968
 969	seq_printf(m, "Isochronous Priority Control: %s\n",
 970			yesno(dev_priv->ipc_enabled));
 971	return 0;
 972}
 973
 974static int i915_ipc_status_open(struct inode *inode, struct file *file)
 975{
 976	struct drm_i915_private *dev_priv = inode->i_private;
 977
 978	if (!HAS_IPC(dev_priv))
 979		return -ENODEV;
 980
 981	return single_open(file, i915_ipc_status_show, dev_priv);
 982}
 983
 984static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
 985				     size_t len, loff_t *offp)
 986{
 987	struct seq_file *m = file->private_data;
 988	struct drm_i915_private *dev_priv = m->private;
 989	intel_wakeref_t wakeref;
 990	bool enable;
 991	int ret;
 992
 993	ret = kstrtobool_from_user(ubuf, len, &enable);
 994	if (ret < 0)
 995		return ret;
 996
 997	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
 998		if (!dev_priv->ipc_enabled && enable)
 999			drm_info(&dev_priv->drm,
1000				 "Enabling IPC: WM will be proper only after next commit\n");
1001		dev_priv->wm.distrust_bios_wm = true;
1002		dev_priv->ipc_enabled = enable;
1003		intel_enable_ipc(dev_priv);
1004	}
1005
1006	return len;
1007}
1008
1009static const struct file_operations i915_ipc_status_fops = {
1010	.owner = THIS_MODULE,
1011	.open = i915_ipc_status_open,
1012	.read = seq_read,
1013	.llseek = seq_lseek,
1014	.release = single_release,
1015	.write = i915_ipc_status_write
1016};
1017
1018static int i915_ddb_info(struct seq_file *m, void *unused)
1019{
1020	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1021	struct drm_device *dev = &dev_priv->drm;
1022	struct skl_ddb_entry *entry;
1023	struct intel_crtc *crtc;
1024
1025	if (INTEL_GEN(dev_priv) < 9)
1026		return -ENODEV;
1027
1028	drm_modeset_lock_all(dev);
1029
1030	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
1031
1032	for_each_intel_crtc(&dev_priv->drm, crtc) {
1033		struct intel_crtc_state *crtc_state =
1034			to_intel_crtc_state(crtc->base.state);
1035		enum pipe pipe = crtc->pipe;
1036		enum plane_id plane_id;
1037
1038		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
1039
1040		for_each_plane_id_on_crtc(crtc, plane_id) {
1041			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
1042			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
1043				   entry->start, entry->end,
1044				   skl_ddb_entry_size(entry));
1045		}
1046
1047		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
1048		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
1049			   entry->end, skl_ddb_entry_size(entry));
1050	}
1051
1052	drm_modeset_unlock_all(dev);
1053
1054	return 0;
1055}
1056
1057static void drrs_status_per_crtc(struct seq_file *m,
1058				 struct drm_device *dev,
1059				 struct intel_crtc *intel_crtc)
1060{
1061	struct drm_i915_private *dev_priv = to_i915(dev);
1062	struct i915_drrs *drrs = &dev_priv->drrs;
1063	int vrefresh = 0;
1064	struct drm_connector *connector;
1065	struct drm_connector_list_iter conn_iter;
1066
1067	drm_connector_list_iter_begin(dev, &conn_iter);
1068	drm_for_each_connector_iter(connector, &conn_iter) {
 
 
1069		if (connector->state->crtc != &intel_crtc->base)
1070			continue;
1071
1072		seq_printf(m, "%s:\n", connector->name);
 
 
 
 
 
 
1073	}
1074	drm_connector_list_iter_end(&conn_iter);
1075
1076	seq_puts(m, "\n");
1077
1078	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
1079		struct intel_panel *panel;
1080
1081		mutex_lock(&drrs->mutex);
1082		/* DRRS Supported */
1083		seq_puts(m, "\tDRRS Supported: Yes\n");
1084
1085		/* disable_drrs() will make drrs->dp NULL */
1086		if (!drrs->dp) {
1087			seq_puts(m, "Idleness DRRS: Disabled\n");
1088			if (dev_priv->psr.enabled)
1089				seq_puts(m,
1090				"\tAs PSR is enabled, DRRS is not enabled\n");
1091			mutex_unlock(&drrs->mutex);
1092			return;
1093		}
1094
1095		panel = &drrs->dp->attached_connector->panel;
1096		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
1097					drrs->busy_frontbuffer_bits);
1098
1099		seq_puts(m, "\n\t\t");
1100		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
1101			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
1102			vrefresh = drm_mode_vrefresh(panel->fixed_mode);
1103		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
1104			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
1105			vrefresh = drm_mode_vrefresh(panel->downclock_mode);
1106		} else {
1107			seq_printf(m, "DRRS_State: Unknown(%d)\n",
1108						drrs->refresh_rate_type);
1109			mutex_unlock(&drrs->mutex);
1110			return;
1111		}
1112		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
1113
1114		seq_puts(m, "\n\t\t");
1115		mutex_unlock(&drrs->mutex);
1116	} else {
1118		/* DRRS not supported on this CRTC */
1119		seq_puts(m, "\tDRRS Supported : No");
1119	}
1120	seq_puts(m, "\n");
1121}
1122
1123static int i915_drrs_status(struct seq_file *m, void *unused)
1124{
1125	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1126	struct drm_device *dev = &dev_priv->drm;
1127	struct intel_crtc *intel_crtc;
1128	int active_crtc_cnt = 0;
1129
1130	drm_modeset_lock_all(dev);
1131	for_each_intel_crtc(dev, intel_crtc) {
1132		if (intel_crtc->base.state->active) {
1133			active_crtc_cnt++;
1134			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
1135
1136			drrs_status_per_crtc(m, dev, intel_crtc);
1137		}
1138	}
1139	drm_modeset_unlock_all(dev);
1140
1141	if (!active_crtc_cnt)
1142		seq_puts(m, "No active crtc found\n");
1143
1144	return 0;
1145}
1146
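/*
 * LPSP (Low Power Single Pipe) status. LPSP is inferred from the state of
 * the platform's non-LPSP display power well: when that power well is off,
 * the display is running in the LPSP configuration.
 */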
1147#define LPSP_STATUS(COND) (COND ? seq_puts(m, "LPSP: enabled\n") : \
1148				seq_puts(m, "LPSP: disabled\n"))
1149
1150static bool
1151intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
1152			      enum i915_power_well_id power_well_id)
1153{
1154	intel_wakeref_t wakeref;
1155	bool is_enabled;
1156
1157	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1158	is_enabled = intel_display_power_well_is_enabled(i915,
1159							 power_well_id);
1160	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1161
1162	return is_enabled;
1163}
1164
1165static int i915_lpsp_status(struct seq_file *m, void *unused)
1166{
1167	struct drm_i915_private *i915 = node_to_i915(m->private);
1168
1169	switch (INTEL_GEN(i915)) {
1170	case 12:
1171	case 11:
1172		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3));
1173		break;
1174	case 10:
1175	case 9:
1176		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2));
1177		break;
1178	default:
1179		/*
1180		 * Apart from HASWELL/BROADWELL, no other legacy platform
1181		 * supports LPSP.
1182		 */
1183		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
1184			LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL));
1185		else
1186			seq_puts(m, "LPSP: not supported\n");
1187	}
1188
1189	return 0;
1190}
1191
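/*
 * Dump the DP MST topology below every MST-capable DisplayPort source port.
 */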
1192static int i915_dp_mst_info(struct seq_file *m, void *unused)
1193{
1194	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1195	struct drm_device *dev = &dev_priv->drm;
1196	struct intel_encoder *intel_encoder;
1197	struct intel_digital_port *dig_port;
1198	struct drm_connector *connector;
1199	struct drm_connector_list_iter conn_iter;
1200
1201	drm_connector_list_iter_begin(dev, &conn_iter);
1202	drm_for_each_connector_iter(connector, &conn_iter) {
1203		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
1204			continue;
1205
1206		intel_encoder = intel_attached_encoder(to_intel_connector(connector));
1207		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
1208			continue;
1209
1210		dig_port = enc_to_dig_port(intel_encoder);
1211		if (!dig_port->dp.can_mst)
1212			continue;
1213
1214		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
1215			   dig_port->base.base.base.id,
1216			   dig_port->base.base.name);
1217		drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
1218	}
1219	drm_connector_list_iter_end(&conn_iter);
1220
1221	return 0;
1222}
1223
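/*
 * DP compliance test support. Writing "1" to i915_dp_test_active flags the
 * connected DP sinks' compliance test as active (any other value clears it);
 * reading the file reports the flag. i915_dp_test_data and i915_dp_test_type
 * expose the test parameters and test type requested by the sink.
 */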
1224static ssize_t i915_displayport_test_active_write(struct file *file,
1225						  const char __user *ubuf,
1226						  size_t len, loff_t *offp)
1227{
1228	char *input_buffer;
1229	int status = 0;
1230	struct drm_device *dev;
1231	struct drm_connector *connector;
1232	struct drm_connector_list_iter conn_iter;
1233	struct intel_dp *intel_dp;
1234	int val = 0;
1235
1236	dev = ((struct seq_file *)file->private_data)->private;
1237
1238	if (len == 0)
1239		return 0;
1240
1241	input_buffer = memdup_user_nul(ubuf, len);
1242	if (IS_ERR(input_buffer))
1243		return PTR_ERR(input_buffer);
1244
1245	drm_dbg(&to_i915(dev)->drm,
1246		"Copied %d bytes from user\n", (unsigned int)len);
1247
1248	drm_connector_list_iter_begin(dev, &conn_iter);
1249	drm_for_each_connector_iter(connector, &conn_iter) {
1250		struct intel_encoder *encoder;
1251
1252		if (connector->connector_type !=
1253		    DRM_MODE_CONNECTOR_DisplayPort)
1254			continue;
1255
1256		encoder = to_intel_encoder(connector->encoder);
1257		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1258			continue;
1259
1260		if (encoder && connector->status == connector_status_connected) {
1261			intel_dp = enc_to_intel_dp(encoder);
1262			status = kstrtoint(input_buffer, 10, &val);
1263			if (status < 0)
1264				break;
1265			drm_dbg(&to_i915(dev)->drm,
1266				"Got %d for test active\n", val);
1267			/* To prevent erroneous activation of the compliance
1268			 * testing code, only accept an actual value of 1 here
1269			 */
1270			if (val == 1)
1271				intel_dp->compliance.test_active = true;
1272			else
1273				intel_dp->compliance.test_active = false;
1274		}
1275	}
1276	drm_connector_list_iter_end(&conn_iter);
1277	kfree(input_buffer);
1278	if (status < 0)
1279		return status;
1280
1281	*offp += len;
1282	return len;
1283}
1284
1285static int i915_displayport_test_active_show(struct seq_file *m, void *data)
1286{
1287	struct drm_i915_private *dev_priv = m->private;
1288	struct drm_device *dev = &dev_priv->drm;
1289	struct drm_connector *connector;
1290	struct drm_connector_list_iter conn_iter;
1291	struct intel_dp *intel_dp;
1292
1293	drm_connector_list_iter_begin(dev, &conn_iter);
1294	drm_for_each_connector_iter(connector, &conn_iter) {
1295		struct intel_encoder *encoder;
1296
1297		if (connector->connector_type !=
1298		    DRM_MODE_CONNECTOR_DisplayPort)
1299			continue;
1300
1301		encoder = to_intel_encoder(connector->encoder);
1302		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1303			continue;
1304
1305		if (encoder && connector->status == connector_status_connected) {
1306			intel_dp = enc_to_intel_dp(encoder);
1307			if (intel_dp->compliance.test_active)
1308				seq_puts(m, "1");
1309			else
1310				seq_puts(m, "0");
1311		} else
1312			seq_puts(m, "0");
1313	}
1314	drm_connector_list_iter_end(&conn_iter);
1315
1316	return 0;
1317}
1318
1319static int i915_displayport_test_active_open(struct inode *inode,
1320					     struct file *file)
1321{
1322	return single_open(file, i915_displayport_test_active_show,
1323			   inode->i_private);
1324}
1325
1326static const struct file_operations i915_displayport_test_active_fops = {
1327	.owner = THIS_MODULE,
1328	.open = i915_displayport_test_active_open,
1329	.read = seq_read,
1330	.llseek = seq_lseek,
1331	.release = single_release,
1332	.write = i915_displayport_test_active_write
1333};
1334
1335static int i915_displayport_test_data_show(struct seq_file *m, void *data)
1336{
1337	struct drm_i915_private *dev_priv = m->private;
1338	struct drm_device *dev = &dev_priv->drm;
1339	struct drm_connector *connector;
1340	struct drm_connector_list_iter conn_iter;
1341	struct intel_dp *intel_dp;
1342
1343	drm_connector_list_iter_begin(dev, &conn_iter);
1344	drm_for_each_connector_iter(connector, &conn_iter) {
1345		struct intel_encoder *encoder;
1346
1347		if (connector->connector_type !=
1348		    DRM_MODE_CONNECTOR_DisplayPort)
1349			continue;
1350
1351		encoder = to_intel_encoder(connector->encoder);
1352		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1353			continue;
1354
1355		if (encoder && connector->status == connector_status_connected) {
1356			intel_dp = enc_to_intel_dp(encoder);
1357			if (intel_dp->compliance.test_type ==
1358			    DP_TEST_LINK_EDID_READ)
1359				seq_printf(m, "%lx",
1360					   intel_dp->compliance.test_data.edid);
1361			else if (intel_dp->compliance.test_type ==
1362				 DP_TEST_LINK_VIDEO_PATTERN) {
1363				seq_printf(m, "hdisplay: %d\n",
1364					   intel_dp->compliance.test_data.hdisplay);
1365				seq_printf(m, "vdisplay: %d\n",
1366					   intel_dp->compliance.test_data.vdisplay);
1367				seq_printf(m, "bpc: %u\n",
1368					   intel_dp->compliance.test_data.bpc);
1369			} else if (intel_dp->compliance.test_type ==
1370				   DP_TEST_LINK_PHY_TEST_PATTERN) {
1371				seq_printf(m, "pattern: %d\n",
1372					   intel_dp->compliance.test_data.phytest.phy_pattern);
1373				seq_printf(m, "Number of lanes: %d\n",
1374					   intel_dp->compliance.test_data.phytest.num_lanes);
1375				seq_printf(m, "Link Rate: %d\n",
1376					   intel_dp->compliance.test_data.phytest.link_rate);
1377				seq_printf(m, "level: %02x\n",
1378					   intel_dp->train_set[0]);
1379			}
1380		} else
1381			seq_puts(m, "0");
1382	}
1383	drm_connector_list_iter_end(&conn_iter);
1384
1385	return 0;
1386}
1387DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
1388
1389static int i915_displayport_test_type_show(struct seq_file *m, void *data)
1390{
1391	struct drm_i915_private *dev_priv = m->private;
1392	struct drm_device *dev = &dev_priv->drm;
1393	struct drm_connector *connector;
1394	struct drm_connector_list_iter conn_iter;
1395	struct intel_dp *intel_dp;
1396
1397	drm_connector_list_iter_begin(dev, &conn_iter);
1398	drm_for_each_connector_iter(connector, &conn_iter) {
1399		struct intel_encoder *encoder;
1400
1401		if (connector->connector_type !=
1402		    DRM_MODE_CONNECTOR_DisplayPort)
1403			continue;
1404
1405		encoder = to_intel_encoder(connector->encoder);
1406		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1407			continue;
1408
1409		if (encoder && connector->status == connector_status_connected) {
1410			intel_dp = enc_to_intel_dp(encoder);
1411			seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
1412		} else
1413			seq_puts(m, "0");
1414	}
1415	drm_connector_list_iter_end(&conn_iter);
1416
1417	return 0;
1418}
1419DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
1420
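/*
 * Watermark latency debugfs files. The raw latency values use different
 * units depending on platform and watermark level (see the comment in
 * wm_latency_show()), so the output is normalised to tenths of a
 * microsecond before printing.
 */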
1421static void wm_latency_show(struct seq_file *m, const u16 wm[8])
1422{
1423	struct drm_i915_private *dev_priv = m->private;
1424	struct drm_device *dev = &dev_priv->drm;
1425	int level;
1426	int num_levels;
1427
1428	if (IS_CHERRYVIEW(dev_priv))
1429		num_levels = 3;
1430	else if (IS_VALLEYVIEW(dev_priv))
1431		num_levels = 1;
1432	else if (IS_G4X(dev_priv))
1433		num_levels = 3;
1434	else
1435		num_levels = ilk_wm_max_level(dev_priv) + 1;
1436
1437	drm_modeset_lock_all(dev);
1438
1439	for (level = 0; level < num_levels; level++) {
1440		unsigned int latency = wm[level];
1441
1442		/*
1443		 * - WM1+ latency values are in 0.5us units
1444		 * - latencies are in us on gen9+/vlv/chv/g4x
1445		 */
1446		if (INTEL_GEN(dev_priv) >= 9 ||
1447		    IS_VALLEYVIEW(dev_priv) ||
1448		    IS_CHERRYVIEW(dev_priv) ||
1449		    IS_G4X(dev_priv))
1450			latency *= 10;
1451		else if (level > 0)
1452			latency *= 5;
1453
1454		seq_printf(m, "WM%d %u (%u.%u usec)\n",
1455			   level, wm[level], latency / 10, latency % 10);
1456	}
1457
1458	drm_modeset_unlock_all(dev);
1459}
1460
1461static int pri_wm_latency_show(struct seq_file *m, void *data)
1462{
1463	struct drm_i915_private *dev_priv = m->private;
1464	const u16 *latencies;
1465
1466	if (INTEL_GEN(dev_priv) >= 9)
1467		latencies = dev_priv->wm.skl_latency;
1468	else
1469		latencies = dev_priv->wm.pri_latency;
1470
1471	wm_latency_show(m, latencies);
1472
1473	return 0;
1474}
1475
1476static int spr_wm_latency_show(struct seq_file *m, void *data)
1477{
1478	struct drm_i915_private *dev_priv = m->private;
1479	const u16 *latencies;
1480
1481	if (INTEL_GEN(dev_priv) >= 9)
1482		latencies = dev_priv->wm.skl_latency;
1483	else
1484		latencies = dev_priv->wm.spr_latency;
1485
1486	wm_latency_show(m, latencies);
1487
1488	return 0;
1489}
1490
1491static int cur_wm_latency_show(struct seq_file *m, void *data)
1492{
1493	struct drm_i915_private *dev_priv = m->private;
1494	const u16 *latencies;
1495
1496	if (INTEL_GEN(dev_priv) >= 9)
1497		latencies = dev_priv->wm.skl_latency;
1498	else
1499		latencies = dev_priv->wm.cur_latency;
1500
1501	wm_latency_show(m, latencies);
1502
1503	return 0;
1504}
1505
1506static int pri_wm_latency_open(struct inode *inode, struct file *file)
1507{
1508	struct drm_i915_private *dev_priv = inode->i_private;
1509
1510	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
1511		return -ENODEV;
1512
1513	return single_open(file, pri_wm_latency_show, dev_priv);
1514}
1515
1516static int spr_wm_latency_open(struct inode *inode, struct file *file)
1517{
1518	struct drm_i915_private *dev_priv = inode->i_private;
1519
1520	if (HAS_GMCH(dev_priv))
1521		return -ENODEV;
1522
1523	return single_open(file, spr_wm_latency_show, dev_priv);
1524}
1525
1526static int cur_wm_latency_open(struct inode *inode, struct file *file)
1527{
1528	struct drm_i915_private *dev_priv = inode->i_private;
1529
1530	if (HAS_GMCH(dev_priv))
1531		return -ENODEV;
1532
1533	return single_open(file, cur_wm_latency_show, dev_priv);
1534}
1535
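/*
 * Override the watermark latency values from userspace. The write expects
 * one decimal value per supported watermark level, separated by spaces.
 */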
1536static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
1537				size_t len, loff_t *offp, u16 wm[8])
1538{
1539	struct seq_file *m = file->private_data;
1540	struct drm_i915_private *dev_priv = m->private;
1541	struct drm_device *dev = &dev_priv->drm;
1542	u16 new[8] = { 0 };
1543	int num_levels;
1544	int level;
1545	int ret;
1546	char tmp[32];
1547
1548	if (IS_CHERRYVIEW(dev_priv))
1549		num_levels = 3;
1550	else if (IS_VALLEYVIEW(dev_priv))
1551		num_levels = 1;
1552	else if (IS_G4X(dev_priv))
1553		num_levels = 3;
1554	else
1555		num_levels = ilk_wm_max_level(dev_priv) + 1;
1556
1557	if (len >= sizeof(tmp))
1558		return -EINVAL;
1559
1560	if (copy_from_user(tmp, ubuf, len))
1561		return -EFAULT;
1562
1563	tmp[len] = '\0';
1564
1565	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
1566		     &new[0], &new[1], &new[2], &new[3],
1567		     &new[4], &new[5], &new[6], &new[7]);
1568	if (ret != num_levels)
1569		return -EINVAL;
1570
1571	drm_modeset_lock_all(dev);
1572
1573	for (level = 0; level < num_levels; level++)
1574		wm[level] = new[level];
1575
1576	drm_modeset_unlock_all(dev);
1577
1578	return len;
1579}
1580
1581
1582static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
1583				    size_t len, loff_t *offp)
1584{
1585	struct seq_file *m = file->private_data;
1586	struct drm_i915_private *dev_priv = m->private;
1587	u16 *latencies;
1588
1589	if (INTEL_GEN(dev_priv) >= 9)
1590		latencies = dev_priv->wm.skl_latency;
1591	else
1592		latencies = dev_priv->wm.pri_latency;
1593
1594	return wm_latency_write(file, ubuf, len, offp, latencies);
1595}
1596
1597static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
1598				    size_t len, loff_t *offp)
1599{
1600	struct seq_file *m = file->private_data;
1601	struct drm_i915_private *dev_priv = m->private;
1602	u16 *latencies;
1603
1604	if (INTEL_GEN(dev_priv) >= 9)
1605		latencies = dev_priv->wm.skl_latency;
1606	else
1607		latencies = dev_priv->wm.spr_latency;
1608
1609	return wm_latency_write(file, ubuf, len, offp, latencies);
1610}
1611
1612static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
1613				    size_t len, loff_t *offp)
1614{
1615	struct seq_file *m = file->private_data;
1616	struct drm_i915_private *dev_priv = m->private;
1617	u16 *latencies;
1618
1619	if (INTEL_GEN(dev_priv) >= 9)
1620		latencies = dev_priv->wm.skl_latency;
1621	else
1622		latencies = dev_priv->wm.cur_latency;
1623
1624	return wm_latency_write(file, ubuf, len, offp, latencies);
1625}
1626
1627static const struct file_operations i915_pri_wm_latency_fops = {
1628	.owner = THIS_MODULE,
1629	.open = pri_wm_latency_open,
1630	.read = seq_read,
1631	.llseek = seq_lseek,
1632	.release = single_release,
1633	.write = pri_wm_latency_write
1634};
1635
1636static const struct file_operations i915_spr_wm_latency_fops = {
1637	.owner = THIS_MODULE,
1638	.open = spr_wm_latency_open,
1639	.read = seq_read,
1640	.llseek = seq_lseek,
1641	.release = single_release,
1642	.write = spr_wm_latency_write
1643};
1644
1645static const struct file_operations i915_cur_wm_latency_fops = {
1646	.owner = THIS_MODULE,
1647	.open = cur_wm_latency_open,
1648	.read = seq_read,
1649	.llseek = seq_lseek,
1650	.release = single_release,
1651	.write = cur_wm_latency_write
1652};
1653
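/*
 * HPD storm detection control. Reading i915_hpd_storm_ctl reports the
 * current storm threshold and whether a storm has been detected; writing a
 * number sets a new threshold, "reset" restores the default, and 0 disables
 * detection. The per-pin statistics are cleared on every write.
 */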
1654static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
1655{
1656	struct drm_i915_private *dev_priv = m->private;
1657	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1658
1659	/* Synchronize with everything first in case there's been an HPD
1660	 * storm, but we haven't finished handling it in the kernel yet
1661	 */
1662	intel_synchronize_irq(dev_priv);
1663	flush_work(&dev_priv->hotplug.dig_port_work);
1664	flush_delayed_work(&dev_priv->hotplug.hotplug_work);
1665
1666	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
1667	seq_printf(m, "Detected: %s\n",
1668		   yesno(delayed_work_pending(&hotplug->reenable_work)));
1669
1670	return 0;
1671}
1672
1673static ssize_t i915_hpd_storm_ctl_write(struct file *file,
1674					const char __user *ubuf, size_t len,
1675					loff_t *offp)
1676{
1677	struct seq_file *m = file->private_data;
1678	struct drm_i915_private *dev_priv = m->private;
1679	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1680	unsigned int new_threshold;
1681	int i;
1682	char *newline;
1683	char tmp[16];
1684
1685	if (len >= sizeof(tmp))
1686		return -EINVAL;
1687
1688	if (copy_from_user(tmp, ubuf, len))
1689		return -EFAULT;
1690
1691	tmp[len] = '\0';
1692
1693	/* Strip newline, if any */
1694	newline = strchr(tmp, '\n');
1695	if (newline)
1696		*newline = '\0';
1697
1698	if (strcmp(tmp, "reset") == 0)
1699		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
1700	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
1701		return -EINVAL;
1702
1703	if (new_threshold > 0)
1704		drm_dbg_kms(&dev_priv->drm,
1705			    "Setting HPD storm detection threshold to %d\n",
1706			    new_threshold);
1707	else
1708		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
1709
1710	spin_lock_irq(&dev_priv->irq_lock);
1711	hotplug->hpd_storm_threshold = new_threshold;
1712	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1713	for_each_hpd_pin(i)
1714		hotplug->stats[i].count = 0;
1715	spin_unlock_irq(&dev_priv->irq_lock);
1716
1717	/* Re-enable hpd immediately if we were in an irq storm */
1718	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1719
1720	return len;
1721}
1722
1723static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
1724{
1725	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
1726}
1727
1728static const struct file_operations i915_hpd_storm_ctl_fops = {
1729	.owner = THIS_MODULE,
1730	.open = i915_hpd_storm_ctl_open,
1731	.read = seq_read,
1732	.llseek = seq_lseek,
1733	.release = single_release,
1734	.write = i915_hpd_storm_ctl_write
1735};
1736
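/*
 * Control whether short HPD pulses count towards HPD storm detection.
 * Writing a boolean enables or disables the accounting; "reset" restores
 * the platform default (enabled only when the platform lacks DP MST
 * support).
 */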
1737static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
1738{
1739	struct drm_i915_private *dev_priv = m->private;
1740
1741	seq_printf(m, "Enabled: %s\n",
1742		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
1743
1744	return 0;
1745}
1746
1747static int
1748i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
1749{
1750	return single_open(file, i915_hpd_short_storm_ctl_show,
1751			   inode->i_private);
1752}
1753
1754static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
1755					      const char __user *ubuf,
1756					      size_t len, loff_t *offp)
1757{
1758	struct seq_file *m = file->private_data;
1759	struct drm_i915_private *dev_priv = m->private;
1760	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1761	char *newline;
1762	char tmp[16];
1763	int i;
1764	bool new_state;
1765
1766	if (len >= sizeof(tmp))
1767		return -EINVAL;
1768
1769	if (copy_from_user(tmp, ubuf, len))
1770		return -EFAULT;
1771
1772	tmp[len] = '\0';
1773
1774	/* Strip newline, if any */
1775	newline = strchr(tmp, '\n');
1776	if (newline)
1777		*newline = '\0';
1778
1779	/* Reset to the "default" state for this system */
1780	if (strcmp(tmp, "reset") == 0)
1781		new_state = !HAS_DP_MST(dev_priv);
1782	else if (kstrtobool(tmp, &new_state) != 0)
1783		return -EINVAL;
1784
1785	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
1786		    new_state ? "En" : "Dis");
1787
1788	spin_lock_irq(&dev_priv->irq_lock);
1789	hotplug->hpd_short_storm_enabled = new_state;
1790	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1791	for_each_hpd_pin(i)
1792		hotplug->stats[i].count = 0;
1793	spin_unlock_irq(&dev_priv->irq_lock);
1794
1795	/* Re-enable hpd immediately if we were in an irq storm */
1796	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1797
1798	return len;
1799}
1800
1801static const struct file_operations i915_hpd_short_storm_ctl_fops = {
1802	.owner = THIS_MODULE,
1803	.open = i915_hpd_short_storm_ctl_open,
1804	.read = seq_read,
1805	.llseek = seq_lseek,
1806	.release = single_release,
1807	.write = i915_hpd_short_storm_ctl_write,
1808};
1809
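/*
 * i915_drrs_ctl: manually enable (non-zero) or disable (zero) eDP DRRS on
 * every active CRTC that supports it, after waiting for any pending commit
 * on that CRTC to complete.
 */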
1810static int i915_drrs_ctl_set(void *data, u64 val)
1811{
1812	struct drm_i915_private *dev_priv = data;
1813	struct drm_device *dev = &dev_priv->drm;
1814	struct intel_crtc *crtc;
1815
1816	if (INTEL_GEN(dev_priv) < 7)
1817		return -ENODEV;
1818
1819	for_each_intel_crtc(dev, crtc) {
1820		struct drm_connector_list_iter conn_iter;
1821		struct intel_crtc_state *crtc_state;
1822		struct drm_connector *connector;
1823		struct drm_crtc_commit *commit;
1824		int ret;
1825
1826		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
1827		if (ret)
1828			return ret;
1829
1830		crtc_state = to_intel_crtc_state(crtc->base.state);
1831
1832		if (!crtc_state->hw.active ||
1833		    !crtc_state->has_drrs)
1834			goto out;
1835
1836		commit = crtc_state->uapi.commit;
1837		if (commit) {
1838			ret = wait_for_completion_interruptible(&commit->hw_done);
1839			if (ret)
1840				goto out;
1841		}
1842
1843		drm_connector_list_iter_begin(dev, &conn_iter);
1844		drm_for_each_connector_iter(connector, &conn_iter) {
1845			struct intel_encoder *encoder;
1846			struct intel_dp *intel_dp;
1847
1848			if (!(crtc_state->uapi.connector_mask &
1849			      drm_connector_mask(connector)))
1850				continue;
1851
1852			encoder = intel_attached_encoder(to_intel_connector(connector));
1853			if (encoder->type != INTEL_OUTPUT_EDP)
1854				continue;
1855
1856			drm_dbg(&dev_priv->drm,
1857				"Manually %sabling DRRS. %llu\n",
1858				val ? "en" : "dis", val);
1859
1860			intel_dp = enc_to_intel_dp(encoder);
1861			if (val)
1862				intel_edp_drrs_enable(intel_dp,
1863						      crtc_state);
1864			else
1865				intel_edp_drrs_disable(intel_dp,
1866						       crtc_state);
1867		}
1868		drm_connector_list_iter_end(&conn_iter);
1869
1870out:
1871		drm_modeset_unlock(&crtc->base.mutex);
1872		if (ret)
1873			return ret;
1874	}
1875
1876	return 0;
1877}
1878
1879DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
1880
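/*
 * Writing a true value to i915_fifo_underrun_reset re-arms FIFO underrun
 * reporting on every active pipe (reporting is normally turned off after
 * the first underrun to avoid flooding the log) and also resets the FBC
 * underrun state.
 */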
1881static ssize_t
1882i915_fifo_underrun_reset_write(struct file *filp,
1883			       const char __user *ubuf,
1884			       size_t cnt, loff_t *ppos)
1885{
1886	struct drm_i915_private *dev_priv = filp->private_data;
1887	struct intel_crtc *intel_crtc;
1888	struct drm_device *dev = &dev_priv->drm;
1889	int ret;
1890	bool reset;
1891
1892	ret = kstrtobool_from_user(ubuf, cnt, &reset);
1893	if (ret)
1894		return ret;
1895
1896	if (!reset)
1897		return cnt;
1898
1899	for_each_intel_crtc(dev, intel_crtc) {
1900		struct drm_crtc_commit *commit;
1901		struct intel_crtc_state *crtc_state;
1902
1903		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
1904		if (ret)
1905			return ret;
1906
1907		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
1908		commit = crtc_state->uapi.commit;
1909		if (commit) {
1910			ret = wait_for_completion_interruptible(&commit->hw_done);
1911			if (!ret)
1912				ret = wait_for_completion_interruptible(&commit->flip_done);
1913		}
1914
1915		if (!ret && crtc_state->hw.active) {
1916			drm_dbg_kms(&dev_priv->drm,
1917				    "Re-arming FIFO underruns on pipe %c\n",
1918				    pipe_name(intel_crtc->pipe));
1919
1920			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
1921		}
1922
1923		drm_modeset_unlock(&intel_crtc->base.mutex);
1924
1925		if (ret)
1926			return ret;
1927	}
1928
1929	ret = intel_fbc_reset_underrun(dev_priv);
1930	if (ret)
1931		return ret;
1932
1933	return cnt;
1934}
1935
1936static const struct file_operations i915_fifo_underrun_reset_ops = {
1937	.owner = THIS_MODULE,
1938	.open = simple_open,
1939	.write = i915_fifo_underrun_reset_write,
1940	.llseek = default_llseek,
1941};
1942
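/*
 * Read-only informational entries and writable control files registered in
 * the DRM minor's debugfs directory (typically /sys/kernel/debug/dri/<minor>/)
 * by intel_display_debugfs_register().
 */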
1943static const struct drm_info_list intel_display_debugfs_list[] = {
1944	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
1945	{"i915_fbc_status", i915_fbc_status, 0},
1946	{"i915_ips_status", i915_ips_status, 0},
1947	{"i915_sr_status", i915_sr_status, 0},
1948	{"i915_opregion", i915_opregion, 0},
1949	{"i915_vbt", i915_vbt, 0},
1950	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
1951	{"i915_edp_psr_status", i915_edp_psr_status, 0},
1952	{"i915_power_domain_info", i915_power_domain_info, 0},
1953	{"i915_dmc_info", i915_dmc_info, 0},
1954	{"i915_display_info", i915_display_info, 0},
1955	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
1956	{"i915_dp_mst_info", i915_dp_mst_info, 0},
1957	{"i915_ddb_info", i915_ddb_info, 0},
1958	{"i915_drrs_status", i915_drrs_status, 0},
1959	{"i915_lpsp_status", i915_lpsp_status, 0},
1960};
1961
1962static const struct {
1963	const char *name;
1964	const struct file_operations *fops;
1965} intel_display_debugfs_files[] = {
1966	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
1967	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
1968	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
1969	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
1970	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
1971	{"i915_dp_test_data", &i915_displayport_test_data_fops},
1972	{"i915_dp_test_type", &i915_displayport_test_type_fops},
1973	{"i915_dp_test_active", &i915_displayport_test_active_fops},
1974	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
1975	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
1976	{"i915_ipc_status", &i915_ipc_status_fops},
1977	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
1978	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
1979};
1980
1981void intel_display_debugfs_register(struct drm_i915_private *i915)
1982{
1983	struct drm_minor *minor = i915->drm.primary;
1984	int i;
1985
1986	for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
1987		debugfs_create_file(intel_display_debugfs_files[i].name,
1988				    S_IRUGO | S_IWUSR,
1989				    minor->debugfs_root,
1990				    to_i915(minor->dev),
1991				    intel_display_debugfs_files[i].fops);
1992	}
1993
1994	drm_debugfs_create_files(intel_display_debugfs_list,
1995				 ARRAY_SIZE(intel_display_debugfs_list),
1996				 minor->debugfs_root, minor);
1997}
1998
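/*
 * Per-connector debugfs files start here. i915_panel_timings reports the
 * eDP panel power sequencing and backlight delays of a connected panel.
 */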
1999static int i915_panel_show(struct seq_file *m, void *data)
2000{
2001	struct drm_connector *connector = m->private;
2002	struct intel_dp *intel_dp =
2003		intel_attached_dp(to_intel_connector(connector));
2004
2005	if (connector->status != connector_status_connected)
2006		return -ENODEV;
2007
2008	seq_printf(m, "Panel power up delay: %d\n",
2009		   intel_dp->panel_power_up_delay);
2010	seq_printf(m, "Panel power down delay: %d\n",
2011		   intel_dp->panel_power_down_delay);
2012	seq_printf(m, "Backlight on delay: %d\n",
2013		   intel_dp->backlight_on_delay);
2014	seq_printf(m, "Backlight off delay: %d\n",
2015		   intel_dp->backlight_off_delay);
2016
2017	return 0;
2018}
2019DEFINE_SHOW_ATTRIBUTE(i915_panel);
2020
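/*
 * Report the HDCP capability of the connected sink, provided the connector
 * has an HDCP shim registered.
 */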
2021static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
2022{
2023	struct drm_connector *connector = m->private;
2024	struct intel_connector *intel_connector = to_intel_connector(connector);
2025
2026	if (connector->status != connector_status_connected)
2027		return -ENODEV;
2028
2029	/* Bail out if the connector does not support HDCP (no shim registered) */
2030	if (!intel_connector->hdcp.shim)
2031		return -EINVAL;
2032
2033	seq_printf(m, "%s:%d HDCP version: ", connector->name,
2034		   connector->base.id);
2035	intel_hdcp_info(m, intel_connector);
2036
2037	return 0;
2038}
2039DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
2040
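/*
 * Report whether this connector's output can be driven while remaining in
 * the LPSP configuration; the eligible ports and connector types are
 * platform specific.
 */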
2041#define LPSP_CAPABLE(COND) (COND ? seq_puts(m, "LPSP: capable\n") : \
2042				seq_puts(m, "LPSP: incapable\n"))
2043
2044static int i915_lpsp_capability_show(struct seq_file *m, void *data)
2045{
2046	struct drm_connector *connector = m->private;
2047	struct drm_i915_private *i915 = to_i915(connector->dev);
2048	struct intel_encoder *encoder;
2049
2050	encoder = intel_attached_encoder(to_intel_connector(connector));
2051	if (!encoder)
2052		return -ENODEV;
2053
2054	if (connector->status != connector_status_connected)
2055		return -ENODEV;
2056
2057	switch (INTEL_GEN(i915)) {
2058	case 12:
2059		/*
2060		 * TGL can actually drive LPSP on ports up to DDI_C, but no TGL
2061		 * SKU has DDI_C physically connected, and the driver does not
2062		 * initialize DDI_C for gen12.
2063		 */
2064		LPSP_CAPABLE(encoder->port <= PORT_B);
2065		break;
2066	case 11:
2067		LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2068			     connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2069		break;
2070	case 10:
2071	case 9:
2072		LPSP_CAPABLE(encoder->port == PORT_A &&
2073			     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2074			     connector->connector_type == DRM_MODE_CONNECTOR_eDP  ||
2075			     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
2076		break;
2077	default:
2078		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
2079			LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2080	}
2081
2082	return 0;
2083}
2084DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
2085
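/*
 * i915_dsc_fec_support: report DSC and FEC capability/state for a connected
 * DP or eDP connector. Writing a boolean forces DSC on or off via
 * intel_dp->force_dsc_en, which is honoured on the next modeset.
 */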
2086static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
2087{
2088	struct drm_connector *connector = m->private;
2089	struct drm_device *dev = connector->dev;
2090	struct drm_crtc *crtc;
2091	struct intel_dp *intel_dp;
2092	struct drm_modeset_acquire_ctx ctx;
2093	struct intel_crtc_state *crtc_state = NULL;
2094	int ret = 0;
2095	bool try_again = false;
2096
2097	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2098
2099	do {
2100		try_again = false;
2101		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
2102				       &ctx);
2103		if (ret) {
2104			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
2105				try_again = true;
2106				continue;
2107			}
2108			break;
2109		}
2110		crtc = connector->state->crtc;
2111		if (connector->status != connector_status_connected || !crtc) {
2112			ret = -ENODEV;
2113			break;
2114		}
2115		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2116		if (ret == -EDEADLK) {
2117			ret = drm_modeset_backoff(&ctx);
2118			if (!ret) {
2119				try_again = true;
2120				continue;
2121			}
2122			break;
2123		} else if (ret) {
2124			break;
2125		}
2126		intel_dp = intel_attached_dp(to_intel_connector(connector));
2127		crtc_state = to_intel_crtc_state(crtc->state);
2128		seq_printf(m, "DSC_Enabled: %s\n",
2129			   yesno(crtc_state->dsc.compression_enable));
2130		seq_printf(m, "DSC_Sink_Support: %s\n",
2131			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
2132		seq_printf(m, "Force_DSC_Enable: %s\n",
2133			   yesno(intel_dp->force_dsc_en));
2134		if (!intel_dp_is_edp(intel_dp))
2135			seq_printf(m, "FEC_Sink_Support: %s\n",
2136				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
2137	} while (try_again);
2138
2139	drm_modeset_drop_locks(&ctx);
2140	drm_modeset_acquire_fini(&ctx);
2141
2142	return ret;
2143}
2144
2145static ssize_t i915_dsc_fec_support_write(struct file *file,
2146					  const char __user *ubuf,
2147					  size_t len, loff_t *offp)
2148{
2149	bool dsc_enable = false;
2150	int ret;
2151	struct drm_connector *connector =
2152		((struct seq_file *)file->private_data)->private;
2153	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
2154	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2155	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2156
2157	if (len == 0)
2158		return 0;
2159
2160	drm_dbg(&i915->drm,
2161		"Copied %zu bytes from user to force DSC\n", len);
2162
2163	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
2164	if (ret < 0)
2165		return ret;
2166
2167	drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
2168		(dsc_enable) ? "true" : "false");
2169	intel_dp->force_dsc_en = dsc_enable;
2170
2171	*offp += len;
2172	return len;
2173}
2174
2175static int i915_dsc_fec_support_open(struct inode *inode,
2176				     struct file *file)
2177{
2178	return single_open(file, i915_dsc_fec_support_show,
2179			   inode->i_private);
2180}
2181
2182static const struct file_operations i915_dsc_fec_support_fops = {
2183	.owner = THIS_MODULE,
2184	.open = i915_dsc_fec_support_open,
2185	.read = seq_read,
2186	.llseek = seq_lseek,
2187	.release = single_release,
2188	.write = i915_dsc_fec_support_write
2189};
2190
2191/**
2192 * intel_connector_debugfs_add - add i915 specific connector debugfs files
2193 * @connector: pointer to a registered drm_connector
2194 *
2195 * Cleanup will be done by drm_connector_unregister() through a call to
2196 * drm_debugfs_connector_remove().
2197 *
2198 * Returns 0 on success, negative error codes on error.
2199 */
2200int intel_connector_debugfs_add(struct drm_connector *connector)
2201{
2202	struct dentry *root = connector->debugfs_entry;
2203	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2204
2205	/* The connector must have been registered beforehand. */
2206	if (!root)
2207		return -ENODEV;
2208
2209	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
2210		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
2211				    connector, &i915_panel_fops);
2212		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
2213				    connector, &i915_psr_sink_status_fops);
2214	}
2215
2216	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2217	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2218	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
2219		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
2220				    connector, &i915_hdcp_sink_capability_fops);
2221	}
2222
2223	if (INTEL_GEN(dev_priv) >= 10 &&
2224	    ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
2225	      !to_intel_connector(connector)->mst_port) ||
2226	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
2227		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
2228				    connector, &i915_dsc_fec_support_fops);
2229
2230	/* Legacy panels don't support LPSP on any platform */
2231	if ((INTEL_GEN(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
2232	     IS_BROADWELL(dev_priv)) &&
2233	     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2234	     connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
2235	     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2236	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2237	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIB))
2238		debugfs_create_file("i915_lpsp_capability", 0444, root,
2239				    connector, &i915_lpsp_capability_fops);
2240
2241	return 0;
2242}