   1/*
   2 * Copyright © 2008 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *    Eric Anholt <eric@anholt.net>
  25 *    Keith Packard <keithp@keithp.com>
  26 *
  27 */
  28
  29#include <linux/seq_file.h>
  30#include <linux/debugfs.h>
  31#include <linux/slab.h>
  32#include "drmP.h"
  33#include "drm.h"
  34#include "intel_drv.h"
  35#include "intel_ringbuffer.h"
  36#include "i915_drm.h"
  37#include "i915_drv.h"
  38
  39#define DRM_I915_RING_DEBUG 1
  40
  41
  42#if defined(CONFIG_DEBUG_FS)
  43
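     /* Selects which GEM object list i915_gem_object_list_info() walks;
      * passed in via drm_info_node->info_ent->data. */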
  44enum {
  45	ACTIVE_LIST,
  46	FLUSHING_LIST,
  47	INACTIVE_LIST,
  48	PINNED_LIST,
  49	DEFERRED_FREE_LIST,
  50};
  51
  52static const char *yesno(int v)
  53{
  54	return v ? "yes" : "no";
  55}
  56
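     /* Dump the device generation and each intel_device_info feature flag
      * as yes/no. */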
  57static int i915_capabilities(struct seq_file *m, void *data)
  58{
  59	struct drm_info_node *node = (struct drm_info_node *) m->private;
  60	struct drm_device *dev = node->minor->dev;
  61	const struct intel_device_info *info = INTEL_INFO(dev);
  62
  63	seq_printf(m, "gen: %d\n", info->gen);
  64#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
  65	B(is_mobile);
  66	B(is_i85x);
  67	B(is_i915g);
  68	B(is_i945gm);
  69	B(is_g33);
  70	B(need_gfx_hws);
  71	B(is_g4x);
  72	B(is_pineview);
  73	B(is_broadwater);
  74	B(is_crestline);
  75	B(has_fbc);
  76	B(has_pipe_cxsr);
  77	B(has_hotplug);
  78	B(cursor_needs_physical);
  79	B(has_overlay);
  80	B(overlay_needs_physical);
  81	B(supports_tv);
  82	B(has_bsd_ring);
  83	B(has_blt_ring);
  84#undef B
  85
  86	return 0;
  87}
  88
  89static const char *get_pin_flag(struct drm_i915_gem_object *obj)
  90{
  91	if (obj->user_pin_count > 0)
  92		return "P";
  93	else if (obj->pin_count > 0)
  94		return "p";
  95	else
  96		return " ";
  97}
  98
  99static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
 100{
  101	switch (obj->tiling_mode) {
  102	default:
  103	case I915_TILING_NONE: return " ";
  104	case I915_TILING_X: return "X";
  105	case I915_TILING_Y: return "Y";
  106	}
 107}
 108
 109static const char *cache_level_str(int type)
 110{
 111	switch (type) {
 112	case I915_CACHE_NONE: return " uncached";
 113	case I915_CACHE_LLC: return " snooped (LLC)";
 114	case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
 115	default: return "";
 116	}
 117}
 118
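     /* Emit a one-line summary of a GEM object: pin/tiling flags, size,
      * domains, seqnos, plus optional name, fence, GTT and ring details. */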
 119static void
 120describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 121{
 122	seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
 123		   &obj->base,
 124		   get_pin_flag(obj),
 125		   get_tiling_flag(obj),
 126		   obj->base.size,
 127		   obj->base.read_domains,
 128		   obj->base.write_domain,
 129		   obj->last_rendering_seqno,
 130		   obj->last_fenced_seqno,
 131		   cache_level_str(obj->cache_level),
 132		   obj->dirty ? " dirty" : "",
 133		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 134	if (obj->base.name)
 135		seq_printf(m, " (name: %d)", obj->base.name);
 136	if (obj->fence_reg != I915_FENCE_REG_NONE)
 137		seq_printf(m, " (fence: %d)", obj->fence_reg);
 138	if (obj->gtt_space != NULL)
 139		seq_printf(m, " (gtt offset: %08x, size: %08x)",
 140			   obj->gtt_offset, (unsigned int)obj->gtt_space->size);
 141	if (obj->pin_mappable || obj->fault_mappable) {
 142		char s[3], *t = s;
 143		if (obj->pin_mappable)
 144			*t++ = 'p';
 145		if (obj->fault_mappable)
 146			*t++ = 'f';
 147		*t = '\0';
 148		seq_printf(m, " (%s mappable)", s);
 149	}
 150	if (obj->ring != NULL)
 151		seq_printf(m, " (%s)", obj->ring->name);
 152}
 153
 154static int i915_gem_object_list_info(struct seq_file *m, void *data)
 155{
 156	struct drm_info_node *node = (struct drm_info_node *) m->private;
 157	uintptr_t list = (uintptr_t) node->info_ent->data;
 158	struct list_head *head;
 159	struct drm_device *dev = node->minor->dev;
 160	drm_i915_private_t *dev_priv = dev->dev_private;
 161	struct drm_i915_gem_object *obj;
 162	size_t total_obj_size, total_gtt_size;
 163	int count, ret;
 164
 165	ret = mutex_lock_interruptible(&dev->struct_mutex);
 166	if (ret)
 167		return ret;
 168
 169	switch (list) {
 170	case ACTIVE_LIST:
 171		seq_printf(m, "Active:\n");
 172		head = &dev_priv->mm.active_list;
 173		break;
 174	case INACTIVE_LIST:
 175		seq_printf(m, "Inactive:\n");
 176		head = &dev_priv->mm.inactive_list;
 177		break;
 178	case PINNED_LIST:
 179		seq_printf(m, "Pinned:\n");
 180		head = &dev_priv->mm.pinned_list;
 181		break;
 182	case FLUSHING_LIST:
 183		seq_printf(m, "Flushing:\n");
 184		head = &dev_priv->mm.flushing_list;
 185		break;
 186	case DEFERRED_FREE_LIST:
 187		seq_printf(m, "Deferred free:\n");
 188		head = &dev_priv->mm.deferred_free_list;
 189		break;
 190	default:
 191		mutex_unlock(&dev->struct_mutex);
 192		return -EINVAL;
 193	}
 194
 195	total_obj_size = total_gtt_size = count = 0;
 196	list_for_each_entry(obj, head, mm_list) {
 197		seq_printf(m, "   ");
 198		describe_obj(m, obj);
 199		seq_printf(m, "\n");
 200		total_obj_size += obj->base.size;
 201		total_gtt_size += obj->gtt_space->size;
 202		count++;
 203	}
 204	mutex_unlock(&dev->struct_mutex);
 205
 206	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
 207		   count, total_obj_size, total_gtt_size);
 208	return 0;
 209}
 210
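     /* Accumulate object count and GTT size over a list; expects count,
      * size, mappable_count, mappable_size and obj in the calling scope. */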
 211#define count_objects(list, member) do { \
 212	list_for_each_entry(obj, list, member) { \
 213		size += obj->gtt_space->size; \
 214		++count; \
 215		if (obj->map_and_fenceable) { \
 216			mappable_size += obj->gtt_space->size; \
 217			++mappable_count; \
 218		} \
 219	} \
 220} while(0)
 221
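     /* Summarize GEM memory usage: overall totals, per-list breakdowns,
      * and the pinned/fault-mappable aperture footprint. */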
 222static int i915_gem_object_info(struct seq_file *m, void* data)
 223{
 224	struct drm_info_node *node = (struct drm_info_node *) m->private;
 225	struct drm_device *dev = node->minor->dev;
 226	struct drm_i915_private *dev_priv = dev->dev_private;
 227	u32 count, mappable_count;
 228	size_t size, mappable_size;
 229	struct drm_i915_gem_object *obj;
 230	int ret;
 231
 232	ret = mutex_lock_interruptible(&dev->struct_mutex);
 233	if (ret)
 234		return ret;
 235
 236	seq_printf(m, "%u objects, %zu bytes\n",
 237		   dev_priv->mm.object_count,
 238		   dev_priv->mm.object_memory);
 239
 240	size = count = mappable_size = mappable_count = 0;
 241	count_objects(&dev_priv->mm.gtt_list, gtt_list);
 242	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
 243		   count, mappable_count, size, mappable_size);
 244
 245	size = count = mappable_size = mappable_count = 0;
 246	count_objects(&dev_priv->mm.active_list, mm_list);
 247	count_objects(&dev_priv->mm.flushing_list, mm_list);
 248	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
 249		   count, mappable_count, size, mappable_size);
 250
 251	size = count = mappable_size = mappable_count = 0;
 252	count_objects(&dev_priv->mm.pinned_list, mm_list);
 253	seq_printf(m, "  %u [%u] pinned objects, %zu [%zu] bytes\n",
 254		   count, mappable_count, size, mappable_size);
 255
 256	size = count = mappable_size = mappable_count = 0;
 257	count_objects(&dev_priv->mm.inactive_list, mm_list);
 258	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
 259		   count, mappable_count, size, mappable_size);
 260
 261	size = count = mappable_size = mappable_count = 0;
 262	count_objects(&dev_priv->mm.deferred_free_list, mm_list);
 263	seq_printf(m, "  %u [%u] freed objects, %zu [%zu] bytes\n",
 264		   count, mappable_count, size, mappable_size);
 265
 266	size = count = mappable_size = mappable_count = 0;
 267	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
 268		if (obj->fault_mappable) {
 269			size += obj->gtt_space->size;
 270			++count;
 271		}
 272		if (obj->pin_mappable) {
 273			mappable_size += obj->gtt_space->size;
 274			++mappable_count;
 275		}
 276	}
 277	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
 278		   mappable_count, mappable_size);
 279	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
 280		   count, size);
 281
 282	seq_printf(m, "%zu [%zu] gtt total\n",
 283		   dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
 284
 285	mutex_unlock(&dev->struct_mutex);
 286
 287	return 0;
 288}
 289
 290static int i915_gem_gtt_info(struct seq_file *m, void* data)
 291{
 292	struct drm_info_node *node = (struct drm_info_node *) m->private;
 293	struct drm_device *dev = node->minor->dev;
 294	struct drm_i915_private *dev_priv = dev->dev_private;
 295	struct drm_i915_gem_object *obj;
 296	size_t total_obj_size, total_gtt_size;
 297	int count, ret;
 298
 299	ret = mutex_lock_interruptible(&dev->struct_mutex);
 300	if (ret)
 301		return ret;
 302
 303	total_obj_size = total_gtt_size = count = 0;
 304	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
 305		seq_printf(m, "   ");
 306		describe_obj(m, obj);
 307		seq_printf(m, "\n");
 308		total_obj_size += obj->base.size;
 309		total_gtt_size += obj->gtt_space->size;
 310		count++;
 311	}
 312
 313	mutex_unlock(&dev->struct_mutex);
 314
 315	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
 316		   count, total_obj_size, total_gtt_size);
 317
 318	return 0;
 319}
 320
 321
 322static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 323{
 324	struct drm_info_node *node = (struct drm_info_node *) m->private;
 325	struct drm_device *dev = node->minor->dev;
 326	unsigned long flags;
 327	struct intel_crtc *crtc;
 328
 329	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
 330		const char pipe = pipe_name(crtc->pipe);
 331		const char plane = plane_name(crtc->plane);
 332		struct intel_unpin_work *work;
 333
 334		spin_lock_irqsave(&dev->event_lock, flags);
 335		work = crtc->unpin_work;
 336		if (work == NULL) {
 337			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
 338				   pipe, plane);
 339		} else {
 340			if (!work->pending) {
 341				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
 342					   pipe, plane);
 343			} else {
 344				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
 345					   pipe, plane);
 346			}
 347			if (work->enable_stall_check)
 348				seq_printf(m, "Stall check enabled, ");
 349			else
 350				seq_printf(m, "Stall check waiting for page flip ioctl, ");
 351			seq_printf(m, "%d prepares\n", work->pending);
 352
 353			if (work->old_fb_obj) {
 354				struct drm_i915_gem_object *obj = work->old_fb_obj;
 355				if (obj)
 356					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
 357			}
 358			if (work->pending_flip_obj) {
 359				struct drm_i915_gem_object *obj = work->pending_flip_obj;
 360				if (obj)
 361					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
 362			}
 363		}
 364		spin_unlock_irqrestore(&dev->event_lock, flags);
 365	}
 366
 367	return 0;
 368}
 369
 370static int i915_gem_request_info(struct seq_file *m, void *data)
 371{
 372	struct drm_info_node *node = (struct drm_info_node *) m->private;
 373	struct drm_device *dev = node->minor->dev;
 374	drm_i915_private_t *dev_priv = dev->dev_private;
 375	struct drm_i915_gem_request *gem_request;
 376	int ret, count;
 377
 378	ret = mutex_lock_interruptible(&dev->struct_mutex);
 379	if (ret)
 380		return ret;
 381
 382	count = 0;
 383	if (!list_empty(&dev_priv->ring[RCS].request_list)) {
 384		seq_printf(m, "Render requests:\n");
 385		list_for_each_entry(gem_request,
 386				    &dev_priv->ring[RCS].request_list,
 387				    list) {
 388			seq_printf(m, "    %d @ %d\n",
 389				   gem_request->seqno,
 390				   (int) (jiffies - gem_request->emitted_jiffies));
 391		}
 392		count++;
 393	}
 394	if (!list_empty(&dev_priv->ring[VCS].request_list)) {
 395		seq_printf(m, "BSD requests:\n");
 396		list_for_each_entry(gem_request,
 397				    &dev_priv->ring[VCS].request_list,
 398				    list) {
 399			seq_printf(m, "    %d @ %d\n",
 400				   gem_request->seqno,
 401				   (int) (jiffies - gem_request->emitted_jiffies));
 402		}
 403		count++;
 404	}
 405	if (!list_empty(&dev_priv->ring[BCS].request_list)) {
 406		seq_printf(m, "BLT requests:\n");
 407		list_for_each_entry(gem_request,
 408				    &dev_priv->ring[BCS].request_list,
 409				    list) {
 410			seq_printf(m, "    %d @ %d\n",
 411				   gem_request->seqno,
 412				   (int) (jiffies - gem_request->emitted_jiffies));
 413		}
 414		count++;
 415	}
 416	mutex_unlock(&dev->struct_mutex);
 417
 418	if (count == 0)
 419		seq_printf(m, "No requests\n");
 420
 421	return 0;
 422}
 423
 424static void i915_ring_seqno_info(struct seq_file *m,
 425				 struct intel_ring_buffer *ring)
 426{
 427	if (ring->get_seqno) {
 428		seq_printf(m, "Current sequence (%s): %d\n",
 429			   ring->name, ring->get_seqno(ring));
 430		seq_printf(m, "Waiter sequence (%s):  %d\n",
 431			   ring->name, ring->waiting_seqno);
 432		seq_printf(m, "IRQ sequence (%s):     %d\n",
 433			   ring->name, ring->irq_seqno);
 434	}
 435}
 436
 437static int i915_gem_seqno_info(struct seq_file *m, void *data)
 438{
 439	struct drm_info_node *node = (struct drm_info_node *) m->private;
 440	struct drm_device *dev = node->minor->dev;
 441	drm_i915_private_t *dev_priv = dev->dev_private;
 442	int ret, i;
 443
 444	ret = mutex_lock_interruptible(&dev->struct_mutex);
 445	if (ret)
 446		return ret;
 447
 448	for (i = 0; i < I915_NUM_RINGS; i++)
 449		i915_ring_seqno_info(m, &dev_priv->ring[i]);
 450
 451	mutex_unlock(&dev->struct_mutex);
 452
 453	return 0;
 454}
 455
 456
 457static int i915_interrupt_info(struct seq_file *m, void *data)
 458{
 459	struct drm_info_node *node = (struct drm_info_node *) m->private;
 460	struct drm_device *dev = node->minor->dev;
 461	drm_i915_private_t *dev_priv = dev->dev_private;
 462	int ret, i, pipe;
 463
 464	ret = mutex_lock_interruptible(&dev->struct_mutex);
 465	if (ret)
 466		return ret;
 467
 468	if (!HAS_PCH_SPLIT(dev)) {
 469		seq_printf(m, "Interrupt enable:    %08x\n",
 470			   I915_READ(IER));
 471		seq_printf(m, "Interrupt identity:  %08x\n",
 472			   I915_READ(IIR));
 473		seq_printf(m, "Interrupt mask:      %08x\n",
 474			   I915_READ(IMR));
 475		for_each_pipe(pipe)
 476			seq_printf(m, "Pipe %c stat:         %08x\n",
 477				   pipe_name(pipe),
 478				   I915_READ(PIPESTAT(pipe)));
 479	} else {
 480		seq_printf(m, "North Display Interrupt enable:		%08x\n",
 481			   I915_READ(DEIER));
 482		seq_printf(m, "North Display Interrupt identity:	%08x\n",
 483			   I915_READ(DEIIR));
 484		seq_printf(m, "North Display Interrupt mask:		%08x\n",
 485			   I915_READ(DEIMR));
 486		seq_printf(m, "South Display Interrupt enable:		%08x\n",
 487			   I915_READ(SDEIER));
 488		seq_printf(m, "South Display Interrupt identity:	%08x\n",
 489			   I915_READ(SDEIIR));
 490		seq_printf(m, "South Display Interrupt mask:		%08x\n",
 491			   I915_READ(SDEIMR));
 492		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
 493			   I915_READ(GTIER));
 494		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
 495			   I915_READ(GTIIR));
 496		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
 497			   I915_READ(GTIMR));
 498	}
 499	seq_printf(m, "Interrupts received: %d\n",
 500		   atomic_read(&dev_priv->irq_received));
 501	for (i = 0; i < I915_NUM_RINGS; i++) {
 502		if (IS_GEN6(dev) || IS_GEN7(dev)) {
 503			seq_printf(m, "Graphics Interrupt mask (%s):	%08x\n",
 504				   dev_priv->ring[i].name,
 505				   I915_READ_IMR(&dev_priv->ring[i]));
 506		}
 507		i915_ring_seqno_info(m, &dev_priv->ring[i]);
 508	}
 509	mutex_unlock(&dev->struct_mutex);
 510
 511	return 0;
 512}
 513
 514static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 515{
 516	struct drm_info_node *node = (struct drm_info_node *) m->private;
 517	struct drm_device *dev = node->minor->dev;
 518	drm_i915_private_t *dev_priv = dev->dev_private;
 519	int i, ret;
 520
 521	ret = mutex_lock_interruptible(&dev->struct_mutex);
 522	if (ret)
 523		return ret;
 524
 525	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
 526	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
 527	for (i = 0; i < dev_priv->num_fence_regs; i++) {
 528		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
 529
 530		seq_printf(m, "Fenced object[%2d] = ", i);
 531		if (obj == NULL)
 532			seq_printf(m, "unused");
 533		else
 534			describe_obj(m, obj);
 535		seq_printf(m, "\n");
 536	}
 537
 538	mutex_unlock(&dev->struct_mutex);
 539	return 0;
 540}
 541
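     /* Dump the first 1 KiB of the ring's hardware status page, four
      * dwords per line. */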
 542static int i915_hws_info(struct seq_file *m, void *data)
 543{
 544	struct drm_info_node *node = (struct drm_info_node *) m->private;
 545	struct drm_device *dev = node->minor->dev;
 546	drm_i915_private_t *dev_priv = dev->dev_private;
 547	struct intel_ring_buffer *ring;
 548	const volatile u32 __iomem *hws;
 549	int i;
 550
 551	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
 552	hws = (volatile u32 __iomem *)ring->status_page.page_addr;
 553	if (hws == NULL)
 554		return 0;
 555
 556	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
 557		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
 558			   i * 4,
 559			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
 560	}
 561	return 0;
 562}
 563
 564static void i915_dump_object(struct seq_file *m,
 565			     struct io_mapping *mapping,
 566			     struct drm_i915_gem_object *obj)
 567{
 568	int page, page_count, i;
 569
 570	page_count = obj->base.size / PAGE_SIZE;
 571	for (page = 0; page < page_count; page++) {
 572		u32 *mem = io_mapping_map_wc(mapping,
 573					     obj->gtt_offset + page * PAGE_SIZE);
 574		for (i = 0; i < PAGE_SIZE; i += 4)
 575			seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
 576		io_mapping_unmap(mem);
 577	}
 578}
 579
 580static int i915_batchbuffer_info(struct seq_file *m, void *data)
 581{
 582	struct drm_info_node *node = (struct drm_info_node *) m->private;
 583	struct drm_device *dev = node->minor->dev;
 584	drm_i915_private_t *dev_priv = dev->dev_private;
 585	struct drm_i915_gem_object *obj;
 586	int ret;
 587
 588	ret = mutex_lock_interruptible(&dev->struct_mutex);
 589	if (ret)
 590		return ret;
 591
 592	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
 593		if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
 594		    seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
 595		    i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
 596		}
 597	}
 598
 599	mutex_unlock(&dev->struct_mutex);
 600	return 0;
 601}
 602
 603static int i915_ringbuffer_data(struct seq_file *m, void *data)
 604{
 605	struct drm_info_node *node = (struct drm_info_node *) m->private;
 606	struct drm_device *dev = node->minor->dev;
 607	drm_i915_private_t *dev_priv = dev->dev_private;
 608	struct intel_ring_buffer *ring;
 609	int ret;
 610
 611	ret = mutex_lock_interruptible(&dev->struct_mutex);
 612	if (ret)
 613		return ret;
 614
 615	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
 616	if (!ring->obj) {
 617		seq_printf(m, "No ringbuffer setup\n");
 618	} else {
 619		const u8 __iomem *virt = ring->virtual_start;
 620		uint32_t off;
 621
 622		for (off = 0; off < ring->size; off += 4) {
 623			uint32_t *ptr = (uint32_t *)(virt + off);
 624			seq_printf(m, "%08x :  %08x\n", off, *ptr);
 625		}
 626	}
 627	mutex_unlock(&dev->struct_mutex);
 628
 629	return 0;
 630}
 631
 632static int i915_ringbuffer_info(struct seq_file *m, void *data)
 633{
 634	struct drm_info_node *node = (struct drm_info_node *) m->private;
 635	struct drm_device *dev = node->minor->dev;
 636	drm_i915_private_t *dev_priv = dev->dev_private;
 637	struct intel_ring_buffer *ring;
 638
 639	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
 640	if (ring->size == 0)
 641		return 0;
 642
 643	seq_printf(m, "Ring %s:\n", ring->name);
 644	seq_printf(m, "  Head :    %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
 645	seq_printf(m, "  Tail :    %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
 646	seq_printf(m, "  Size :    %08x\n", ring->size);
 647	seq_printf(m, "  Active :  %08x\n", intel_ring_get_active_head(ring));
 648	seq_printf(m, "  NOPID :   %08x\n", I915_READ_NOPID(ring));
 649	if (IS_GEN6(dev)) {
 650		seq_printf(m, "  Sync 0 :   %08x\n", I915_READ_SYNC_0(ring));
 651		seq_printf(m, "  Sync 1 :   %08x\n", I915_READ_SYNC_1(ring));
 652	}
 653	seq_printf(m, "  Control : %08x\n", I915_READ_CTL(ring));
 654	seq_printf(m, "  Start :   %08x\n", I915_READ_START(ring));
 655
 656	return 0;
 657}
 658
 659static const char *ring_str(int ring)
 660{
 661	switch (ring) {
 662	case RING_RENDER: return " render";
 663	case RING_BSD: return " bsd";
 664	case RING_BLT: return " blt";
 665	default: return "";
 666	}
 667}
 668
 669static const char *pin_flag(int pinned)
 670{
 671	if (pinned > 0)
 672		return " P";
 673	else if (pinned < 0)
 674		return " p";
 675	else
 676		return "";
 677}
 678
 679static const char *tiling_flag(int tiling)
 680{
 681	switch (tiling) {
 682	default:
 683	case I915_TILING_NONE: return "";
 684	case I915_TILING_X: return " X";
 685	case I915_TILING_Y: return " Y";
 686	}
 687}
 688
 689static const char *dirty_flag(int dirty)
 690{
 691	return dirty ? " dirty" : "";
 692}
 693
 694static const char *purgeable_flag(int purgeable)
 695{
 696	return purgeable ? " purgeable" : "";
 697}
 698
 699static void print_error_buffers(struct seq_file *m,
 700				const char *name,
 701				struct drm_i915_error_buffer *err,
 702				int count)
 703{
 704	seq_printf(m, "%s [%d]:\n", name, count);
 705
 706	while (count--) {
 707		seq_printf(m, "  %08x %8u %04x %04x %08x%s%s%s%s%s%s",
 708			   err->gtt_offset,
 709			   err->size,
 710			   err->read_domains,
 711			   err->write_domain,
 712			   err->seqno,
 713			   pin_flag(err->pinned),
 714			   tiling_flag(err->tiling),
 715			   dirty_flag(err->dirty),
 716			   purgeable_flag(err->purgeable),
 717			   ring_str(err->ring),
 718			   cache_level_str(err->cache_level));
 719
 720		if (err->name)
 721			seq_printf(m, " (name: %d)", err->name);
 722		if (err->fence_reg != I915_FENCE_REG_NONE)
 723			seq_printf(m, " (fence: %d)", err->fence_reg);
 724
 725		seq_printf(m, "\n");
 726		err++;
 727	}
 728}
 729
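     /* Dump the most recently captured GPU error state: timestamp, per-ring
      * registers, fence registers, buffer lists and batch/ring contents. */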
 730static int i915_error_state(struct seq_file *m, void *unused)
 731{
 732	struct drm_info_node *node = (struct drm_info_node *) m->private;
 733	struct drm_device *dev = node->minor->dev;
 734	drm_i915_private_t *dev_priv = dev->dev_private;
 735	struct drm_i915_error_state *error;
 736	unsigned long flags;
 737	int i, page, offset, elt;
 738
 739	spin_lock_irqsave(&dev_priv->error_lock, flags);
 740	if (!dev_priv->first_error) {
 741		seq_printf(m, "no error state collected\n");
 742		goto out;
 743	}
 744
 745	error = dev_priv->first_error;
 746
 747	seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
 748		   error->time.tv_usec);
 749	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
 750	seq_printf(m, "EIR: 0x%08x\n", error->eir);
 751	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
 752	if (INTEL_INFO(dev)->gen >= 6) {
 753		seq_printf(m, "ERROR: 0x%08x\n", error->error);
 754		seq_printf(m, "Blitter command stream:\n");
 755		seq_printf(m, "  ACTHD:    0x%08x\n", error->bcs_acthd);
 756		seq_printf(m, "  IPEIR:    0x%08x\n", error->bcs_ipeir);
 757		seq_printf(m, "  IPEHR:    0x%08x\n", error->bcs_ipehr);
 758		seq_printf(m, "  INSTDONE: 0x%08x\n", error->bcs_instdone);
 759		seq_printf(m, "  seqno:    0x%08x\n", error->bcs_seqno);
 760		seq_printf(m, "Video (BSD) command stream:\n");
 761		seq_printf(m, "  ACTHD:    0x%08x\n", error->vcs_acthd);
 762		seq_printf(m, "  IPEIR:    0x%08x\n", error->vcs_ipeir);
 763		seq_printf(m, "  IPEHR:    0x%08x\n", error->vcs_ipehr);
 764		seq_printf(m, "  INSTDONE: 0x%08x\n", error->vcs_instdone);
 765		seq_printf(m, "  seqno:    0x%08x\n", error->vcs_seqno);
 766	}
 767	seq_printf(m, "Render command stream:\n");
 768	seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
 769	seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir);
 770	seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
 771	seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
 772	if (INTEL_INFO(dev)->gen >= 4) {
 773		seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
 774		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
 775	}
 776	seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm);
 777	seq_printf(m, "  seqno: 0x%08x\n", error->seqno);
 778
 779	for (i = 0; i < dev_priv->num_fence_regs; i++)
 780		seq_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
 781
 782	if (error->active_bo)
 783		print_error_buffers(m, "Active",
 784				    error->active_bo,
 785				    error->active_bo_count);
 786
 787	if (error->pinned_bo)
 788		print_error_buffers(m, "Pinned",
 789				    error->pinned_bo,
 790				    error->pinned_bo_count);
 791
 792	for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
 793		if (error->batchbuffer[i]) {
 794			struct drm_i915_error_object *obj = error->batchbuffer[i];
 795
 796			seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
 797				   dev_priv->ring[i].name,
 798				   obj->gtt_offset);
 799			offset = 0;
 800			for (page = 0; page < obj->page_count; page++) {
 801				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
 802					seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
 803					offset += 4;
 804				}
 805			}
 806		}
 807	}
 808
 809	for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) {
 810		if (error->ringbuffer[i]) {
 811			struct drm_i915_error_object *obj = error->ringbuffer[i];
 812			seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
 813				   dev_priv->ring[i].name,
 814				   obj->gtt_offset);
 815			offset = 0;
 816			for (page = 0; page < obj->page_count; page++) {
 817				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
 818					seq_printf(m, "%08x :  %08x\n",
 819						   offset,
 820						   obj->pages[page][elt]);
 821					offset += 4;
 822				}
 823			}
 824		}
 825	}
 826
 827	if (error->overlay)
 828		intel_overlay_print_error_state(m, error->overlay);
 829
 830	if (error->display)
 831		intel_display_print_error_state(m, dev, error->display);
 832
 833out:
 834	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
 835
 836	return 0;
 837}
 838
 839static int i915_rstdby_delays(struct seq_file *m, void *unused)
 840{
 841	struct drm_info_node *node = (struct drm_info_node *) m->private;
 842	struct drm_device *dev = node->minor->dev;
 843	drm_i915_private_t *dev_priv = dev->dev_private;
 844	u16 crstanddelay = I915_READ16(CRSTANDVID);
 845
 846	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
 847
 848	return 0;
 849}
 850
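     /* Report frequency/P-state info: Ironlake reads MEMSWCTL/MEMSTAT,
      * gen6+ samples the RPS counters with forcewake held. */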
 851static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 852{
 853	struct drm_info_node *node = (struct drm_info_node *) m->private;
 854	struct drm_device *dev = node->minor->dev;
 855	drm_i915_private_t *dev_priv = dev->dev_private;
 856	int ret;
 857
 858	if (IS_GEN5(dev)) {
 859		u16 rgvswctl = I915_READ16(MEMSWCTL);
 860		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
 861
 862		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
 863		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
 864		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
 865			   MEMSTAT_VID_SHIFT);
 866		seq_printf(m, "Current P-state: %d\n",
 867			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
 868	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
 869		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 870		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
 871		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 872		u32 rpstat;
 873		u32 rpupei, rpcurup, rpprevup;
 874		u32 rpdownei, rpcurdown, rpprevdown;
 875		int max_freq;
 876
 877		/* RPSTAT1 is in the GT power well */
 878		ret = mutex_lock_interruptible(&dev->struct_mutex);
 879		if (ret)
 880			return ret;
 881
 882		gen6_gt_force_wake_get(dev_priv);
 883
 884		rpstat = I915_READ(GEN6_RPSTAT1);
 885		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
 886		rpcurup = I915_READ(GEN6_RP_CUR_UP);
 887		rpprevup = I915_READ(GEN6_RP_PREV_UP);
 888		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
 889		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
 890		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
 891
 892		gen6_gt_force_wake_put(dev_priv);
 893		mutex_unlock(&dev->struct_mutex);
 894
 895		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
 896		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
 897		seq_printf(m, "Render p-state ratio: %d\n",
 898			   (gt_perf_status & 0xff00) >> 8);
 899		seq_printf(m, "Render p-state VID: %d\n",
 900			   gt_perf_status & 0xff);
 901		seq_printf(m, "Render p-state limit: %d\n",
 902			   rp_state_limits & 0xff);
 903		seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
 904						GEN6_CAGF_SHIFT) * 50);
 905		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
 906			   GEN6_CURICONT_MASK);
 907		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
 908			   GEN6_CURBSYTAVG_MASK);
 909		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
 910			   GEN6_CURBSYTAVG_MASK);
 911		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
 912			   GEN6_CURIAVG_MASK);
 913		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
 914			   GEN6_CURBSYTAVG_MASK);
 915		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
 916			   GEN6_CURBSYTAVG_MASK);
 917
 918		max_freq = (rp_state_cap & 0xff0000) >> 16;
 919		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
 920			   max_freq * 50);
 921
 922		max_freq = (rp_state_cap & 0xff00) >> 8;
 923		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
 924			   max_freq * 50);
 925
 926		max_freq = rp_state_cap & 0xff;
 927		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 928			   max_freq * 50);
 929	} else {
 930		seq_printf(m, "no P-state info available\n");
 931	}
 932
 933	return 0;
 934}
 935
 936static int i915_delayfreq_table(struct seq_file *m, void *unused)
 937{
 938	struct drm_info_node *node = (struct drm_info_node *) m->private;
 939	struct drm_device *dev = node->minor->dev;
 940	drm_i915_private_t *dev_priv = dev->dev_private;
 941	u32 delayfreq;
 942	int i;
 943
 944	for (i = 0; i < 16; i++) {
 945		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
 946		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
 947			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
 948	}
 949
 950	return 0;
 951}
 952
 953static inline int MAP_TO_MV(int map)
 954{
 955	return 1250 - (map * 25);
 956}
 957
 958static int i915_inttoext_table(struct seq_file *m, void *unused)
 959{
 960	struct drm_info_node *node = (struct drm_info_node *) m->private;
 961	struct drm_device *dev = node->minor->dev;
 962	drm_i915_private_t *dev_priv = dev->dev_private;
 963	u32 inttoext;
 964	int i;
 965
 966	for (i = 1; i <= 32; i++) {
 967		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
 968		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
 969	}
 970
 971	return 0;
 972}
 973
 974static int i915_drpc_info(struct seq_file *m, void *unused)
 975{
 976	struct drm_info_node *node = (struct drm_info_node *) m->private;
 977	struct drm_device *dev = node->minor->dev;
 978	drm_i915_private_t *dev_priv = dev->dev_private;
 979	u32 rgvmodectl = I915_READ(MEMMODECTL);
 980	u32 rstdbyctl = I915_READ(RSTDBYCTL);
 981	u16 crstandvid = I915_READ16(CRSTANDVID);
 982
 983	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
 984		   "yes" : "no");
 985	seq_printf(m, "Boost freq: %d\n",
 986		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
 987		   MEMMODE_BOOST_FREQ_SHIFT);
 988	seq_printf(m, "HW control enabled: %s\n",
 989		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
 990	seq_printf(m, "SW control enabled: %s\n",
 991		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
 992	seq_printf(m, "Gated voltage change: %s\n",
 993		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
 994	seq_printf(m, "Starting frequency: P%d\n",
 995		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
 996	seq_printf(m, "Max P-state: P%d\n",
 997		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
 998	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
 999	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1000	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1001	seq_printf(m, "Render standby enabled: %s\n",
1002		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
1003	seq_printf(m, "Current RS state: ");
1004	switch (rstdbyctl & RSX_STATUS_MASK) {
1005	case RSX_STATUS_ON:
1006		seq_printf(m, "on\n");
1007		break;
1008	case RSX_STATUS_RC1:
1009		seq_printf(m, "RC1\n");
1010		break;
1011	case RSX_STATUS_RC1E:
1012		seq_printf(m, "RC1E\n");
1013		break;
1014	case RSX_STATUS_RS1:
1015		seq_printf(m, "RS1\n");
1016		break;
1017	case RSX_STATUS_RS2:
1018		seq_printf(m, "RS2 (RC6)\n");
1019		break;
1020	case RSX_STATUS_RS3:
1021		seq_printf(m, "RC3 (RC6+)\n");
1022		break;
1023	default:
1024		seq_printf(m, "unknown\n");
1025		break;
1026	}
1027
1028	return 0;
1029}
1030
1031static int i915_fbc_status(struct seq_file *m, void *unused)
1032{
1033	struct drm_info_node *node = (struct drm_info_node *) m->private;
1034	struct drm_device *dev = node->minor->dev;
1035	drm_i915_private_t *dev_priv = dev->dev_private;
1036
1037	if (!I915_HAS_FBC(dev)) {
1038		seq_printf(m, "FBC unsupported on this chipset\n");
1039		return 0;
1040	}
1041
1042	if (intel_fbc_enabled(dev)) {
1043		seq_printf(m, "FBC enabled\n");
1044	} else {
1045		seq_printf(m, "FBC disabled: ");
1046		switch (dev_priv->no_fbc_reason) {
1047		case FBC_NO_OUTPUT:
1048			seq_printf(m, "no outputs");
1049			break;
1050		case FBC_STOLEN_TOO_SMALL:
1051			seq_printf(m, "not enough stolen memory");
1052			break;
1053		case FBC_UNSUPPORTED_MODE:
1054			seq_printf(m, "mode not supported");
1055			break;
1056		case FBC_MODE_TOO_LARGE:
1057			seq_printf(m, "mode too large");
1058			break;
1059		case FBC_BAD_PLANE:
1060			seq_printf(m, "FBC unsupported on plane");
1061			break;
1062		case FBC_NOT_TILED:
1063			seq_printf(m, "scanout buffer not tiled");
1064			break;
1065		case FBC_MULTIPLE_PIPES:
1066			seq_printf(m, "multiple pipes are enabled");
1067			break;
1068		case FBC_MODULE_PARAM:
1069			seq_printf(m, "disabled per module param (default off)");
1070			break;
1071		default:
1072			seq_printf(m, "unknown reason");
1073		}
1074		seq_printf(m, "\n");
1075	}
1076	return 0;
1077}
1078
1079static int i915_sr_status(struct seq_file *m, void *unused)
1080{
1081	struct drm_info_node *node = (struct drm_info_node *) m->private;
1082	struct drm_device *dev = node->minor->dev;
1083	drm_i915_private_t *dev_priv = dev->dev_private;
1084	bool sr_enabled = false;
1085
1086	if (HAS_PCH_SPLIT(dev))
1087		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1088	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
1089		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1090	else if (IS_I915GM(dev))
1091		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1092	else if (IS_PINEVIEW(dev))
1093		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1094
1095	seq_printf(m, "self-refresh: %s\n",
1096		   sr_enabled ? "enabled" : "disabled");
1097
1098	return 0;
1099}
1100
1101static int i915_emon_status(struct seq_file *m, void *unused)
1102{
1103	struct drm_info_node *node = (struct drm_info_node *) m->private;
1104	struct drm_device *dev = node->minor->dev;
1105	drm_i915_private_t *dev_priv = dev->dev_private;
1106	unsigned long temp, chipset, gfx;
1107	int ret;
1108
1109	ret = mutex_lock_interruptible(&dev->struct_mutex);
1110	if (ret)
1111		return ret;
1112
1113	temp = i915_mch_val(dev_priv);
1114	chipset = i915_chipset_val(dev_priv);
1115	gfx = i915_gfx_val(dev_priv);
1116	mutex_unlock(&dev->struct_mutex);
1117
1118	seq_printf(m, "GMCH temp: %ld\n", temp);
1119	seq_printf(m, "Chipset power: %ld\n", chipset);
1120	seq_printf(m, "GFX power: %ld\n", gfx);
1121	seq_printf(m, "Total power: %ld\n", chipset + gfx);
1122
1123	return 0;
1124}
1125
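     /* Walk the supported GPU frequencies and query the pcode mailbox for
      * the effective CPU (IA) frequency paired with each (gen6/gen7 only). */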
1126static int i915_ring_freq_table(struct seq_file *m, void *unused)
1127{
1128	struct drm_info_node *node = (struct drm_info_node *) m->private;
1129	struct drm_device *dev = node->minor->dev;
1130	drm_i915_private_t *dev_priv = dev->dev_private;
1131	int ret;
1132	int gpu_freq, ia_freq;
1133
1134	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
1135		seq_printf(m, "unsupported on this chipset\n");
1136		return 0;
1137	}
1138
1139	ret = mutex_lock_interruptible(&dev->struct_mutex);
1140	if (ret)
1141		return ret;
1142
1143	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
1144
1145	for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
1146	     gpu_freq++) {
1147		I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
1148		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
1149			   GEN6_PCODE_READ_MIN_FREQ_TABLE);
1150		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
1151			      GEN6_PCODE_READY) == 0, 10)) {
1152			DRM_ERROR("pcode read of freq table timed out\n");
1153			continue;
1154		}
1155		ia_freq = I915_READ(GEN6_PCODE_DATA);
1156		seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
1157	}
1158
1159	mutex_unlock(&dev->struct_mutex);
1160
1161	return 0;
1162}
1163
1164static int i915_gfxec(struct seq_file *m, void *unused)
1165{
1166	struct drm_info_node *node = (struct drm_info_node *) m->private;
1167	struct drm_device *dev = node->minor->dev;
1168	drm_i915_private_t *dev_priv = dev->dev_private;
1169
1170	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
1171
1172	return 0;
1173}
1174
1175static int i915_opregion(struct seq_file *m, void *unused)
1176{
1177	struct drm_info_node *node = (struct drm_info_node *) m->private;
1178	struct drm_device *dev = node->minor->dev;
1179	drm_i915_private_t *dev_priv = dev->dev_private;
1180	struct intel_opregion *opregion = &dev_priv->opregion;
1181	int ret;
1182
1183	ret = mutex_lock_interruptible(&dev->struct_mutex);
1184	if (ret)
1185		return ret;
1186
1187	if (opregion->header)
1188		seq_write(m, opregion->header, OPREGION_SIZE);
1189
1190	mutex_unlock(&dev->struct_mutex);
1191
1192	return 0;
1193}
1194
1195static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1196{
1197	struct drm_info_node *node = (struct drm_info_node *) m->private;
1198	struct drm_device *dev = node->minor->dev;
1199	drm_i915_private_t *dev_priv = dev->dev_private;
1200	struct intel_fbdev *ifbdev;
1201	struct intel_framebuffer *fb;
1202	int ret;
1203
1204	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
1205	if (ret)
1206		return ret;
1207
1208	ifbdev = dev_priv->fbdev;
1209	fb = to_intel_framebuffer(ifbdev->helper.fb);
1210
1211	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
1212		   fb->base.width,
1213		   fb->base.height,
1214		   fb->base.depth,
1215		   fb->base.bits_per_pixel);
1216	describe_obj(m, fb->obj);
1217	seq_printf(m, "\n");
1218
1219	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
1220		if (&fb->base == ifbdev->helper.fb)
1221			continue;
1222
1223		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
1224			   fb->base.width,
1225			   fb->base.height,
1226			   fb->base.depth,
1227			   fb->base.bits_per_pixel);
1228		describe_obj(m, fb->obj);
1229		seq_printf(m, "\n");
1230	}
1231
1232	mutex_unlock(&dev->mode_config.mutex);
1233
1234	return 0;
1235}
1236
1237static int i915_context_status(struct seq_file *m, void *unused)
1238{
1239	struct drm_info_node *node = (struct drm_info_node *) m->private;
1240	struct drm_device *dev = node->minor->dev;
1241	drm_i915_private_t *dev_priv = dev->dev_private;
1242	int ret;
1243
1244	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
1245	if (ret)
1246		return ret;
1247
1248	if (dev_priv->pwrctx) {
1249		seq_printf(m, "power context ");
1250		describe_obj(m, dev_priv->pwrctx);
1251		seq_printf(m, "\n");
1252	}
1253
1254	if (dev_priv->renderctx) {
1255		seq_printf(m, "render context ");
1256		describe_obj(m, dev_priv->renderctx);
1257		seq_printf(m, "\n");
1258	}
1259
1260	mutex_unlock(&dev->mode_config.mutex);
1261
1262	return 0;
1263}
1264
1265static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
1266{
1267	struct drm_info_node *node = (struct drm_info_node *) m->private;
1268	struct drm_device *dev = node->minor->dev;
1269	struct drm_i915_private *dev_priv = dev->dev_private;
1270
1271	seq_printf(m, "forcewake count = %d\n",
1272		   atomic_read(&dev_priv->forcewake_count));
1273
1274	return 0;
1275}
1276
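     /* i915_wedged: reading reports the current wedged status; writing a
      * value feeds it to i915_handle_error() to simulate a GPU hang, e.g.
      * (assuming debugfs is mounted at /sys/kernel/debug and DRM minor 0):
      *
      *   echo 1 > /sys/kernel/debug/dri/0/i915_wedged
      */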
1277static int
1278i915_wedged_open(struct inode *inode,
1279		 struct file *filp)
1280{
1281	filp->private_data = inode->i_private;
1282	return 0;
1283}
1284
1285static ssize_t
1286i915_wedged_read(struct file *filp,
1287		 char __user *ubuf,
1288		 size_t max,
1289		 loff_t *ppos)
1290{
1291	struct drm_device *dev = filp->private_data;
1292	drm_i915_private_t *dev_priv = dev->dev_private;
1293	char buf[80];
1294	int len;
1295
1296	len = snprintf(buf, sizeof (buf),
1297		       "wedged :  %d\n",
1298		       atomic_read(&dev_priv->mm.wedged));
1299
1300	if (len > sizeof (buf))
1301		len = sizeof (buf);
1302
1303	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1304}
1305
1306static ssize_t
1307i915_wedged_write(struct file *filp,
1308		  const char __user *ubuf,
1309		  size_t cnt,
1310		  loff_t *ppos)
1311{
1312	struct drm_device *dev = filp->private_data;
1313	char buf[20];
1314	int val = 1;
1315
1316	if (cnt > 0) {
1317		if (cnt > sizeof (buf) - 1)
1318			return -EINVAL;
1319
1320		if (copy_from_user(buf, ubuf, cnt))
1321			return -EFAULT;
1322		buf[cnt] = 0;
1323
1324		val = simple_strtoul(buf, NULL, 0);
1325	}
1326
1327	DRM_INFO("Manually setting wedged to %d\n", val);
1328	i915_handle_error(dev, val);
1329
1330	return cnt;
1331}
1332
1333static const struct file_operations i915_wedged_fops = {
1334	.owner = THIS_MODULE,
1335	.open = i915_wedged_open,
1336	.read = i915_wedged_read,
1337	.write = i915_wedged_write,
1338	.llseek = default_llseek,
1339};
1340
1341static int
1342i915_max_freq_open(struct inode *inode,
1343		   struct file *filp)
1344{
1345	filp->private_data = inode->i_private;
1346	return 0;
1347}
1348
1349static ssize_t
1350i915_max_freq_read(struct file *filp,
1351		   char __user *ubuf,
1352		   size_t max,
1353		   loff_t *ppos)
1354{
1355	struct drm_device *dev = filp->private_data;
1356	drm_i915_private_t *dev_priv = dev->dev_private;
1357	char buf[80];
1358	int len;
1359
1360	len = snprintf(buf, sizeof (buf),
1361		       "max freq: %d\n", dev_priv->max_delay * 50);
1362
1363	if (len > sizeof (buf))
1364		len = sizeof (buf);
1365
1366	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1367}
1368
1369static ssize_t
1370i915_max_freq_write(struct file *filp,
1371		  const char __user *ubuf,
1372		  size_t cnt,
1373		  loff_t *ppos)
1374{
1375	struct drm_device *dev = filp->private_data;
1376	struct drm_i915_private *dev_priv = dev->dev_private;
1377	char buf[20];
1378	int val = 1;
1379
1380	if (cnt > 0) {
1381		if (cnt > sizeof (buf) - 1)
1382			return -EINVAL;
1383
1384		if (copy_from_user(buf, ubuf, cnt))
1385			return -EFAULT;
1386		buf[cnt] = 0;
1387
1388		val = simple_strtoul(buf, NULL, 0);
1389	}
1390
1391	DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
1392
1393	/*
1394	 * Turbo will still be enabled, but won't go above the set value.
1395	 */
1396	dev_priv->max_delay = val / 50;
1397
1398	gen6_set_rps(dev, val / 50);
1399
1400	return cnt;
1401}
1402
1403static const struct file_operations i915_max_freq_fops = {
1404	.owner = THIS_MODULE,
1405	.open = i915_max_freq_open,
1406	.read = i915_max_freq_read,
1407	.write = i915_max_freq_write,
1408	.llseek = default_llseek,
1409};
1410
1411static int
1412i915_cache_sharing_open(struct inode *inode,
1413		   struct file *filp)
1414{
1415	filp->private_data = inode->i_private;
1416	return 0;
1417}
1418
1419static ssize_t
1420i915_cache_sharing_read(struct file *filp,
1421		   char __user *ubuf,
1422		   size_t max,
1423		   loff_t *ppos)
1424{
1425	struct drm_device *dev = filp->private_data;
1426	drm_i915_private_t *dev_priv = dev->dev_private;
1427	char buf[80];
1428	u32 snpcr;
1429	int len;
1430
1431	mutex_lock(&dev_priv->dev->struct_mutex);
1432	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
1433	mutex_unlock(&dev_priv->dev->struct_mutex);
1434
1435	len = snprintf(buf, sizeof (buf),
1436		       "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
1437		       GEN6_MBC_SNPCR_SHIFT);
1438
1439	if (len > sizeof (buf))
1440		len = sizeof (buf);
1441
1442	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1443}
1444
1445static ssize_t
1446i915_cache_sharing_write(struct file *filp,
1447		  const char __user *ubuf,
1448		  size_t cnt,
1449		  loff_t *ppos)
1450{
1451	struct drm_device *dev = filp->private_data;
1452	struct drm_i915_private *dev_priv = dev->dev_private;
1453	char buf[20];
1454	u32 snpcr;
1455	int val = 1;
1456
1457	if (cnt > 0) {
1458		if (cnt > sizeof (buf) - 1)
1459			return -EINVAL;
1460
1461		if (copy_from_user(buf, ubuf, cnt))
1462			return -EFAULT;
1463		buf[cnt] = 0;
1464
1465		val = simple_strtoul(buf, NULL, 0);
1466	}
1467
1468	if (val < 0 || val > 3)
1469		return -EINVAL;
1470
1471	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);
1472
1473	/* Update the cache sharing policy here as well */
1474	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
1475	snpcr &= ~GEN6_MBC_SNPCR_MASK;
1476	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
1477	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
1478
1479	return cnt;
1480}
1481
1482static const struct file_operations i915_cache_sharing_fops = {
1483	.owner = THIS_MODULE,
1484	.open = i915_cache_sharing_open,
1485	.read = i915_cache_sharing_read,
1486	.write = i915_cache_sharing_write,
1487	.llseek = default_llseek,
1488};
1489
1490/* As the drm_debugfs_init() routines are called before dev->dev_private is
1491 * allocated we need to hook into the minor for release. */
1492static int
1493drm_add_fake_info_node(struct drm_minor *minor,
1494		       struct dentry *ent,
1495		       const void *key)
1496{
1497	struct drm_info_node *node;
1498
1499	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
1500	if (node == NULL) {
1501		debugfs_remove(ent);
1502		return -ENOMEM;
1503	}
1504
1505	node->minor = minor;
1506	node->dent = ent;
1507	node->info_ent = (void *) key;
1508	list_add(&node->list, &minor->debugfs_nodes.list);
1509
1510	return 0;
1511}
1512
1513static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
1514{
1515	struct drm_device *dev = minor->dev;
1516	struct dentry *ent;
1517
1518	ent = debugfs_create_file("i915_wedged",
1519				  S_IRUGO | S_IWUSR,
1520				  root, dev,
1521				  &i915_wedged_fops);
1522	if (IS_ERR(ent))
1523		return PTR_ERR(ent);
1524
1525	return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
1526}
1527
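     /* i915_forcewake_user: holding this file open keeps a GT forcewake
      * reference (gen6 only); the reference is dropped on release. */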
1528static int i915_forcewake_open(struct inode *inode, struct file *file)
1529{
1530	struct drm_device *dev = inode->i_private;
1531	struct drm_i915_private *dev_priv = dev->dev_private;
1532	int ret;
1533
1534	if (!IS_GEN6(dev))
1535		return 0;
1536
1537	ret = mutex_lock_interruptible(&dev->struct_mutex);
1538	if (ret)
1539		return ret;
1540	gen6_gt_force_wake_get(dev_priv);
1541	mutex_unlock(&dev->struct_mutex);
1542
1543	return 0;
1544}
1545
1546int i915_forcewake_release(struct inode *inode, struct file *file)
1547{
1548	struct drm_device *dev = inode->i_private;
1549	struct drm_i915_private *dev_priv = dev->dev_private;
1550
1551	if (!IS_GEN6(dev))
1552		return 0;
1553
1554	/*
1555	 * It's bad that we can potentially hang userspace if struct_mutex gets
1556	 * forever stuck.  However, if we cannot acquire this lock it means that
 1557	 * almost certainly the driver has hung and is not unloadable. Therefore,
 1558	 * hanging here is probably a minor inconvenience that almost no user
 1559	 * will ever see.
1560	 */
1561	mutex_lock(&dev->struct_mutex);
1562	gen6_gt_force_wake_put(dev_priv);
1563	mutex_unlock(&dev->struct_mutex);
1564
1565	return 0;
1566}
1567
1568static const struct file_operations i915_forcewake_fops = {
1569	.owner = THIS_MODULE,
1570	.open = i915_forcewake_open,
1571	.release = i915_forcewake_release,
1572};
1573
1574static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
1575{
1576	struct drm_device *dev = minor->dev;
1577	struct dentry *ent;
1578
1579	ent = debugfs_create_file("i915_forcewake_user",
1580				  S_IRUSR,
1581				  root, dev,
1582				  &i915_forcewake_fops);
1583	if (IS_ERR(ent))
1584		return PTR_ERR(ent);
1585
1586	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
1587}
1588
1589static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor)
1590{
1591	struct drm_device *dev = minor->dev;
1592	struct dentry *ent;
1593
1594	ent = debugfs_create_file("i915_max_freq",
1595				  S_IRUGO | S_IWUSR,
1596				  root, dev,
1597				  &i915_max_freq_fops);
1598	if (IS_ERR(ent))
1599		return PTR_ERR(ent);
1600
1601	return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops);
1602}
1603
1604static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor)
1605{
1606	struct drm_device *dev = minor->dev;
1607	struct dentry *ent;
1608
1609	ent = debugfs_create_file("i915_cache_sharing",
1610				  S_IRUGO | S_IWUSR,
1611				  root, dev,
1612				  &i915_cache_sharing_fops);
1613	if (IS_ERR(ent))
1614		return PTR_ERR(ent);
1615
1616	return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops);
1617}
1618
1619static struct drm_info_list i915_debugfs_list[] = {
1620	{"i915_capabilities", i915_capabilities, 0},
1621	{"i915_gem_objects", i915_gem_object_info, 0},
1622	{"i915_gem_gtt", i915_gem_gtt_info, 0},
1623	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
1624	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
1625	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
1626	{"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
1627	{"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
1628	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
1629	{"i915_gem_request", i915_gem_request_info, 0},
1630	{"i915_gem_seqno", i915_gem_seqno_info, 0},
1631	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
1632	{"i915_gem_interrupt", i915_interrupt_info, 0},
1633	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
1634	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
1635	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
1636	{"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
1637	{"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
1638	{"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
1639	{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
1640	{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
1641	{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
1642	{"i915_batchbuffers", i915_batchbuffer_info, 0},
1643	{"i915_error_state", i915_error_state, 0},
1644	{"i915_rstdby_delays", i915_rstdby_delays, 0},
1645	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
1646	{"i915_delayfreq_table", i915_delayfreq_table, 0},
1647	{"i915_inttoext_table", i915_inttoext_table, 0},
1648	{"i915_drpc_info", i915_drpc_info, 0},
1649	{"i915_emon_status", i915_emon_status, 0},
1650	{"i915_ring_freq_table", i915_ring_freq_table, 0},
1651	{"i915_gfxec", i915_gfxec, 0},
1652	{"i915_fbc_status", i915_fbc_status, 0},
1653	{"i915_sr_status", i915_sr_status, 0},
1654	{"i915_opregion", i915_opregion, 0},
1655	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
1656	{"i915_context_status", i915_context_status, 0},
1657	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
1658};
1659#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
1660
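/*
 * The fops-backed files are created first, each wrapped in a fake info
 * node so that i915_debugfs_cleanup() can tear everything down through
 * drm_debugfs_remove_files().
 */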
1661int i915_debugfs_init(struct drm_minor *minor)
1662{
1663	int ret;
1664
1665	ret = i915_wedged_create(minor->debugfs_root, minor);
1666	if (ret)
1667		return ret;
1668
1669	ret = i915_forcewake_create(minor->debugfs_root, minor);
1670	if (ret)
1671		return ret;
1672	ret = i915_max_freq_create(minor->debugfs_root, minor);
1673	if (ret)
1674		return ret;
1675	ret = i915_cache_sharing_create(minor->debugfs_root, minor);
1676	if (ret)
1677		return ret;
1678
1679	return drm_debugfs_create_files(i915_debugfs_list,
1680					I915_DEBUGFS_ENTRIES,
1681					minor->debugfs_root, minor);
1682}
1683
1684void i915_debugfs_cleanup(struct drm_minor *minor)
1685{
1686	drm_debugfs_remove_files(i915_debugfs_list,
1687				 I915_DEBUGFS_ENTRIES, minor);
1688	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
1689				 1, minor);
1690	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
1691				 1, minor);
1692	drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
1693				 1, minor);
1694	drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
1695				 1, minor);
1696}
1697
1698#endif /* CONFIG_DEBUG_FS */
v5.4
  28
  29#include <linux/sched/mm.h>
  30#include <linux/sort.h>
  31
  32#include <drm/drm_debugfs.h>
  33#include <drm/drm_fourcc.h>
  34
  35#include "display/intel_display_types.h"
  36#include "display/intel_dp.h"
  37#include "display/intel_fbc.h"
  38#include "display/intel_hdcp.h"
  39#include "display/intel_hdmi.h"
  40#include "display/intel_psr.h"
  41
  42#include "gem/i915_gem_context.h"
  43#include "gt/intel_gt_pm.h"
  44#include "gt/intel_reset.h"
  45#include "gt/uc/intel_guc_submission.h"
  46
  47#include "i915_debugfs.h"
  48#include "i915_irq.h"
  49#include "i915_trace.h"
  50#include "intel_csr.h"
  51#include "intel_pm.h"
  52#include "intel_sideband.h"
  53
  54static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
  55{
  56	return to_i915(node->minor->dev);
  57}
  58
  59static int i915_capabilities(struct seq_file *m, void *data)
  60{
  61	struct drm_i915_private *dev_priv = node_to_i915(m->private);
  62	const struct intel_device_info *info = INTEL_INFO(dev_priv);
  63	struct drm_printer p = drm_seq_file_printer(m);
  64
  65	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
  66	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
  67	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
  68
  69	intel_device_info_dump_flags(info, &p);
  70	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
  71	intel_driver_caps_print(&dev_priv->caps, &p);
  72
  73	kernel_param_lock(THIS_MODULE);
  74	i915_params_dump(&i915_modparams, &p);
  75	kernel_param_unlock(THIS_MODULE);
  76
  77	return 0;
  78}
  79
  80static char get_pin_flag(struct drm_i915_gem_object *obj)
  81{
  82	return obj->pin_global ? 'p' : ' ';
  83}
  84
  85static char get_tiling_flag(struct drm_i915_gem_object *obj)
  86{
  87	switch (i915_gem_object_get_tiling(obj)) {
  88	default:
  89	case I915_TILING_NONE: return ' ';
  90	case I915_TILING_X: return 'X';
  91	case I915_TILING_Y: return 'Y';
  92	}
  93}
  94
  95static char get_global_flag(struct drm_i915_gem_object *obj)
  96{
  97	return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
  98}
  99
 100static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
 101{
 102	return obj->mm.mapping ? 'M' : ' ';
 103}
 104
 105static const char *
 106stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
 107{
 108	size_t x = 0;
 109
 110	switch (page_sizes) {
 111	case 0:
 112		return "";
 113	case I915_GTT_PAGE_SIZE_4K:
 114		return "4K";
 115	case I915_GTT_PAGE_SIZE_64K:
 116		return "64K";
 117	case I915_GTT_PAGE_SIZE_2M:
 118		return "2M";
 119	default:
 120		if (!buf)
 121			return "M";
 122
 123		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
 124			x += snprintf(buf + x, len - x, "2M, ");
 125		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
 126			x += snprintf(buf + x, len - x, "64K, ");
 127		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
 128			x += snprintf(buf + x, len - x, "4K, ");
 129		buf[x-2] = '\0';
 130
 131		return buf;
 132	}
 133}
 134
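/*
 * One-line object summary. The four flag characters are, in order:
 * 'p' (pinned for global use), 'X'/'Y' (tiling mode), 'g' (outstanding
 * GGTT userfaults) and 'M' (kernel mapping held); a space means clear.
 */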
 135static void
 136describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 137{
 138	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 139	struct intel_engine_cs *engine;
 140	struct i915_vma *vma;
 141	int pin_count = 0;
 142
 143	seq_printf(m, "%pK: %c%c%c%c %8zdKiB %02x %02x %s%s%s",
 144		   &obj->base,
 145		   get_pin_flag(obj),
 146		   get_tiling_flag(obj),
 147		   get_global_flag(obj),
 148		   get_pin_mapped_flag(obj),
 149		   obj->base.size / 1024,
 150		   obj->read_domains,
 151		   obj->write_domain,
 152		   i915_cache_level_str(dev_priv, obj->cache_level),
 153		   obj->mm.dirty ? " dirty" : "",
 154		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
 155	if (obj->base.name)
 156		seq_printf(m, " (name: %d)", obj->base.name);
 157
 158	spin_lock(&obj->vma.lock);
 159	list_for_each_entry(vma, &obj->vma.list, obj_link) {
 160		if (!drm_mm_node_allocated(&vma->node))
 161			continue;
 162
 163		spin_unlock(&obj->vma.lock);
 164
 165		if (i915_vma_is_pinned(vma))
 166			pin_count++;
 167
 168		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
 169			   i915_vma_is_ggtt(vma) ? "g" : "pp",
 170			   vma->node.start, vma->node.size,
 171			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
 172		if (i915_vma_is_ggtt(vma)) {
 173			switch (vma->ggtt_view.type) {
 174			case I915_GGTT_VIEW_NORMAL:
 175				seq_puts(m, ", normal");
 176				break;
 177
 178			case I915_GGTT_VIEW_PARTIAL:
 179				seq_printf(m, ", partial [%08llx+%x]",
 180					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
 181					   vma->ggtt_view.partial.size << PAGE_SHIFT);
 182				break;
 183
 184			case I915_GGTT_VIEW_ROTATED:
 185				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
 186					   vma->ggtt_view.rotated.plane[0].width,
 187					   vma->ggtt_view.rotated.plane[0].height,
 188					   vma->ggtt_view.rotated.plane[0].stride,
 189					   vma->ggtt_view.rotated.plane[0].offset,
 190					   vma->ggtt_view.rotated.plane[1].width,
 191					   vma->ggtt_view.rotated.plane[1].height,
 192					   vma->ggtt_view.rotated.plane[1].stride,
 193					   vma->ggtt_view.rotated.plane[1].offset);
 194				break;
 195
 196			case I915_GGTT_VIEW_REMAPPED:
 197				seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
 198					   vma->ggtt_view.remapped.plane[0].width,
 199					   vma->ggtt_view.remapped.plane[0].height,
 200					   vma->ggtt_view.remapped.plane[0].stride,
 201					   vma->ggtt_view.remapped.plane[0].offset,
 202					   vma->ggtt_view.remapped.plane[1].width,
 203					   vma->ggtt_view.remapped.plane[1].height,
 204					   vma->ggtt_view.remapped.plane[1].stride,
 205					   vma->ggtt_view.remapped.plane[1].offset);
 206				break;
 207
 208			default:
 209				MISSING_CASE(vma->ggtt_view.type);
 210				break;
 211			}
 212		}
 213		if (vma->fence)
 214			seq_printf(m, " , fence: %d", vma->fence->id);
 215		seq_puts(m, ")");
 216
 217		spin_lock(&obj->vma.lock);
 218	}
 219	spin_unlock(&obj->vma.lock);
 220
 221	seq_printf(m, " (pinned x %d)", pin_count);
 222	if (obj->stolen)
 223		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
 224	if (obj->pin_global)
 225		seq_printf(m, " (global)");
 226
 227	engine = i915_gem_object_last_write_engine(obj);
 228	if (engine)
 229		seq_printf(m, " (%s)", engine->name);
 230}
 231
 232struct file_stats {
 233	struct i915_address_space *vm;
 234	unsigned long count;
 235	u64 total, unbound;
 236	u64 active, inactive;
 237	u64 closed;
 238};
 239
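/*
 * Accumulate one object's residency into struct file_stats. Used both as
 * an idr_for_each() callback over a client's handles and called directly
 * for kernel-context objects. With stats->vm set, only the VMA bound into
 * that address space (looked up in the object's vma rb-tree) is counted;
 * otherwise every allocated GGTT VMA contributes.
 */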
 240static int per_file_stats(int id, void *ptr, void *data)
 241{
 242	struct drm_i915_gem_object *obj = ptr;
 243	struct file_stats *stats = data;
 244	struct i915_vma *vma;
 245
 246	stats->count++;
 247	stats->total += obj->base.size;
 248	if (!atomic_read(&obj->bind_count))
 249		stats->unbound += obj->base.size;
 250
 251	spin_lock(&obj->vma.lock);
 252	if (!stats->vm) {
 253		for_each_ggtt_vma(vma, obj) {
 254			if (!drm_mm_node_allocated(&vma->node))
 255				continue;
 256
 257			if (i915_vma_is_active(vma))
 258				stats->active += vma->node.size;
 259			else
 260				stats->inactive += vma->node.size;
 261
 262			if (i915_vma_is_closed(vma))
 263				stats->closed += vma->node.size;
 264		}
 265	} else {
 266		struct rb_node *p = obj->vma.tree.rb_node;
 267
 268		while (p) {
 269			long cmp;
 270
 271			vma = rb_entry(p, typeof(*vma), obj_node);
 272			cmp = i915_vma_compare(vma, stats->vm, NULL);
 273			if (cmp == 0) {
 274				if (drm_mm_node_allocated(&vma->node)) {
 275					if (i915_vma_is_active(vma))
 276						stats->active += vma->node.size;
 277					else
 278						stats->inactive += vma->node.size;
 279
 280					if (i915_vma_is_closed(vma))
 281						stats->closed += vma->node.size;
 282				}
 283				break;
 284			}
 285			if (cmp < 0)
 286				p = p->rb_right;
 287			else
 288				p = p->rb_left;
 289		}
 290	}
 291	spin_unlock(&obj->vma.lock);
 292
 293	return 0;
 294}
 295
 296#define print_file_stats(m, name, stats) do { \
 297	if (stats.count) \
 298		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
 299			   name, \
 300			   stats.count, \
 301			   stats.total, \
 302			   stats.active, \
 303			   stats.inactive, \
 304			   stats.unbound, \
 305			   stats.closed); \
 306} while (0)
 307
 308static void print_context_stats(struct seq_file *m,
 309				struct drm_i915_private *i915)
 310{
 311	struct file_stats kstats = {};
 312	struct i915_gem_context *ctx;
 313
 314	list_for_each_entry(ctx, &i915->contexts.list, link) {
 315		struct i915_gem_engines_iter it;
 316		struct intel_context *ce;
 317
 318		for_each_gem_engine(ce,
 319				    i915_gem_context_lock_engines(ctx), it) {
 320			intel_context_lock_pinned(ce);
 321			if (intel_context_is_pinned(ce)) {
 322				if (ce->state)
 323					per_file_stats(0,
 324						       ce->state->obj, &kstats);
 325				per_file_stats(0, ce->ring->vma->obj, &kstats);
 326			}
 327			intel_context_unlock_pinned(ce);
 328		}
 329		i915_gem_context_unlock_engines(ctx);
 330
 331		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
 332			struct file_stats stats = { .vm = ctx->vm, };
 333			struct drm_file *file = ctx->file_priv->file;
 334			struct task_struct *task;
 335			char name[80];
 336
 337			spin_lock(&file->table_lock);
 338			idr_for_each(&file->object_idr, per_file_stats, &stats);
 339			spin_unlock(&file->table_lock);
 340
 341			rcu_read_lock();
 342			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
 343			snprintf(name, sizeof(name), "%s",
 344				 task ? task->comm : "<unknown>");
 345			rcu_read_unlock();
 346
 347			print_file_stats(m, name, stats);
 348		}
 349	}
 350
 351	print_file_stats(m, "[k]contexts", kstats);
 352}
 353
 354static int i915_gem_object_info(struct seq_file *m, void *data)
 355{
 356	struct drm_i915_private *i915 = node_to_i915(m->private);
 357	int ret;
 358
 359	seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
 360		   i915->mm.shrink_count,
 361		   atomic_read(&i915->mm.free_count),
 362		   i915->mm.shrink_memory);
 363
 364	seq_putc(m, '\n');
 365
 366	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
 367	if (ret)
 368		return ret;
 369
 370	print_context_stats(m, i915);
 371	mutex_unlock(&i915->drm.struct_mutex);
 372
 373	return 0;
 374}
 375
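/*
 * Dump the display-engine pipe IMR/IIR/IER triplets, skipping any pipe
 * whose power well is down so that no power-gated register is touched.
 */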
 376static void gen8_display_interrupt_info(struct seq_file *m)
 377{
 378	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 379	int pipe;
 380
 381	for_each_pipe(dev_priv, pipe) {
 382		enum intel_display_power_domain power_domain;
 383		intel_wakeref_t wakeref;
 384
 385		power_domain = POWER_DOMAIN_PIPE(pipe);
 386		wakeref = intel_display_power_get_if_enabled(dev_priv,
 387							     power_domain);
 388		if (!wakeref) {
 389			seq_printf(m, "Pipe %c power disabled\n",
 390				   pipe_name(pipe));
 391			continue;
 392		}
 393		seq_printf(m, "Pipe %c IMR:\t%08x\n",
 394			   pipe_name(pipe),
 395			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
 396		seq_printf(m, "Pipe %c IIR:\t%08x\n",
 397			   pipe_name(pipe),
 398			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
 399		seq_printf(m, "Pipe %c IER:\t%08x\n",
 400			   pipe_name(pipe),
 401			   I915_READ(GEN8_DE_PIPE_IER(pipe)));
 402
 403		intel_display_power_put(dev_priv, power_domain, wakeref);
 404	}
 405
 406	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
 407		   I915_READ(GEN8_DE_PORT_IMR));
 408	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
 409		   I915_READ(GEN8_DE_PORT_IIR));
 410	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
 411		   I915_READ(GEN8_DE_PORT_IER));
 412
 413	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
 414		   I915_READ(GEN8_DE_MISC_IMR));
 415	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
 416		   I915_READ(GEN8_DE_MISC_IIR));
 417	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
 418		   I915_READ(GEN8_DE_MISC_IER));
 419
 420	seq_printf(m, "PCU interrupt mask:\t%08x\n",
 421		   I915_READ(GEN8_PCU_IMR));
 422	seq_printf(m, "PCU interrupt identity:\t%08x\n",
 423		   I915_READ(GEN8_PCU_IIR));
 424	seq_printf(m, "PCU interrupt enable:\t%08x\n",
 425		   I915_READ(GEN8_PCU_IER));
 426}
 427
 428static int i915_interrupt_info(struct seq_file *m, void *data)
 429{
 430	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 431	struct intel_engine_cs *engine;
 432	intel_wakeref_t wakeref;
 433	int i, pipe;
 434
 435	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 436
 437	if (IS_CHERRYVIEW(dev_priv)) {
 438		intel_wakeref_t pref;
 439
 440		seq_printf(m, "Master Interrupt Control:\t%08x\n",
 441			   I915_READ(GEN8_MASTER_IRQ));
 442
 443		seq_printf(m, "Display IER:\t%08x\n",
 444			   I915_READ(VLV_IER));
 445		seq_printf(m, "Display IIR:\t%08x\n",
 446			   I915_READ(VLV_IIR));
 447		seq_printf(m, "Display IIR_RW:\t%08x\n",
 448			   I915_READ(VLV_IIR_RW));
 449		seq_printf(m, "Display IMR:\t%08x\n",
 450			   I915_READ(VLV_IMR));
 451		for_each_pipe(dev_priv, pipe) {
 452			enum intel_display_power_domain power_domain;
 453
 454			power_domain = POWER_DOMAIN_PIPE(pipe);
 455			pref = intel_display_power_get_if_enabled(dev_priv,
 456								  power_domain);
 457			if (!pref) {
 458				seq_printf(m, "Pipe %c power disabled\n",
 459					   pipe_name(pipe));
 460				continue;
 461			}
 462
 463			seq_printf(m, "Pipe %c stat:\t%08x\n",
 464				   pipe_name(pipe),
 465				   I915_READ(PIPESTAT(pipe)));
 466
 467			intel_display_power_put(dev_priv, power_domain, pref);
 468		}
 469
 470		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 471		seq_printf(m, "Port hotplug:\t%08x\n",
 472			   I915_READ(PORT_HOTPLUG_EN));
 473		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
 474			   I915_READ(VLV_DPFLIPSTAT));
 475		seq_printf(m, "DPINVGTT:\t%08x\n",
 476			   I915_READ(DPINVGTT));
 477		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
 478
 479		for (i = 0; i < 4; i++) {
 480			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
 481				   i, I915_READ(GEN8_GT_IMR(i)));
 482			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
 483				   i, I915_READ(GEN8_GT_IIR(i)));
 484			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
 485				   i, I915_READ(GEN8_GT_IER(i)));
 486		}
 487
 488		seq_printf(m, "PCU interrupt mask:\t%08x\n",
 489			   I915_READ(GEN8_PCU_IMR));
 490		seq_printf(m, "PCU interrupt identity:\t%08x\n",
 491			   I915_READ(GEN8_PCU_IIR));
 492		seq_printf(m, "PCU interrupt enable:\t%08x\n",
 493			   I915_READ(GEN8_PCU_IER));
 494	} else if (INTEL_GEN(dev_priv) >= 11) {
 495		seq_printf(m, "Master Interrupt Control:  %08x\n",
 496			   I915_READ(GEN11_GFX_MSTR_IRQ));
 497
 498		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
 499			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
 500		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
 501			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
 502		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
 503			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
 504		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
 505			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
 506		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
 507			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
 508		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
 509			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
 510
 511		seq_printf(m, "Display Interrupt Control:\t%08x\n",
 512			   I915_READ(GEN11_DISPLAY_INT_CTL));
 513
 514		gen8_display_interrupt_info(m);
 515	} else if (INTEL_GEN(dev_priv) >= 8) {
 516		seq_printf(m, "Master Interrupt Control:\t%08x\n",
 517			   I915_READ(GEN8_MASTER_IRQ));
 518
 519		for (i = 0; i < 4; i++) {
 520			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
 521				   i, I915_READ(GEN8_GT_IMR(i)));
 522			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
 523				   i, I915_READ(GEN8_GT_IIR(i)));
 524			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
 525				   i, I915_READ(GEN8_GT_IER(i)));
 526		}
 527
 528		gen8_display_interrupt_info(m);
 529	} else if (IS_VALLEYVIEW(dev_priv)) {
 530		seq_printf(m, "Display IER:\t%08x\n",
 531			   I915_READ(VLV_IER));
 532		seq_printf(m, "Display IIR:\t%08x\n",
 533			   I915_READ(VLV_IIR));
 534		seq_printf(m, "Display IIR_RW:\t%08x\n",
 535			   I915_READ(VLV_IIR_RW));
 536		seq_printf(m, "Display IMR:\t%08x\n",
 537			   I915_READ(VLV_IMR));
 538		for_each_pipe(dev_priv, pipe) {
 539			enum intel_display_power_domain power_domain;
 540			intel_wakeref_t pref;
 541
 542			power_domain = POWER_DOMAIN_PIPE(pipe);
 543			pref = intel_display_power_get_if_enabled(dev_priv,
 544								  power_domain);
 545			if (!pref) {
 546				seq_printf(m, "Pipe %c power disabled\n",
 547					   pipe_name(pipe));
 548				continue;
 549			}
 550
 551			seq_printf(m, "Pipe %c stat:\t%08x\n",
 552				   pipe_name(pipe),
 553				   I915_READ(PIPESTAT(pipe)));
 554			intel_display_power_put(dev_priv, power_domain, pref);
 555		}
 556
 557		seq_printf(m, "Master IER:\t%08x\n",
 558			   I915_READ(VLV_MASTER_IER));
 559
 560		seq_printf(m, "Render IER:\t%08x\n",
 561			   I915_READ(GTIER));
 562		seq_printf(m, "Render IIR:\t%08x\n",
 563			   I915_READ(GTIIR));
 564		seq_printf(m, "Render IMR:\t%08x\n",
 565			   I915_READ(GTIMR));
 566
 567		seq_printf(m, "PM IER:\t\t%08x\n",
 568			   I915_READ(GEN6_PMIER));
 569		seq_printf(m, "PM IIR:\t\t%08x\n",
 570			   I915_READ(GEN6_PMIIR));
 571		seq_printf(m, "PM IMR:\t\t%08x\n",
 572			   I915_READ(GEN6_PMIMR));
 573
 574		seq_printf(m, "Port hotplug:\t%08x\n",
 575			   I915_READ(PORT_HOTPLUG_EN));
 576		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
 577			   I915_READ(VLV_DPFLIPSTAT));
 578		seq_printf(m, "DPINVGTT:\t%08x\n",
 579			   I915_READ(DPINVGTT));
 580
 581	} else if (!HAS_PCH_SPLIT(dev_priv)) {
 582		seq_printf(m, "Interrupt enable:    %08x\n",
 583			   I915_READ(GEN2_IER));
 584		seq_printf(m, "Interrupt identity:  %08x\n",
 585			   I915_READ(GEN2_IIR));
 586		seq_printf(m, "Interrupt mask:      %08x\n",
 587			   I915_READ(GEN2_IMR));
 588		for_each_pipe(dev_priv, pipe)
 589			seq_printf(m, "Pipe %c stat:         %08x\n",
 590				   pipe_name(pipe),
 591				   I915_READ(PIPESTAT(pipe)));
 592	} else {
 593		seq_printf(m, "North Display Interrupt enable:		%08x\n",
 594			   I915_READ(DEIER));
 595		seq_printf(m, "North Display Interrupt identity:	%08x\n",
 596			   I915_READ(DEIIR));
 597		seq_printf(m, "North Display Interrupt mask:		%08x\n",
 598			   I915_READ(DEIMR));
 599		seq_printf(m, "South Display Interrupt enable:		%08x\n",
 600			   I915_READ(SDEIER));
 601		seq_printf(m, "South Display Interrupt identity:	%08x\n",
 602			   I915_READ(SDEIIR));
 603		seq_printf(m, "South Display Interrupt mask:		%08x\n",
 604			   I915_READ(SDEIMR));
 605		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
 606			   I915_READ(GTIER));
 607		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
 608			   I915_READ(GTIIR));
 609		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
 610			   I915_READ(GTIMR));
 611	}
 612
 613	if (INTEL_GEN(dev_priv) >= 11) {
 614		seq_printf(m, "RCS Intr Mask:\t %08x\n",
 615			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
 616		seq_printf(m, "BCS Intr Mask:\t %08x\n",
 617			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
 618		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
 619			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
 620		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
 621			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
 622		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
 623			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
 624		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
 625			   I915_READ(GEN11_GUC_SG_INTR_MASK));
 626		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
 627			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
 628		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
 629			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
 630		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
 631			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
 632
 633	} else if (INTEL_GEN(dev_priv) >= 6) {
 634		for_each_uabi_engine(engine, dev_priv) {
 635			seq_printf(m,
 636				   "Graphics Interrupt mask (%s):	%08x\n",
 637				   engine->name, ENGINE_READ(engine, RING_IMR));
 638		}
 639	}
 640
 641	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 642
 643	return 0;
 644}
 645
 646static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 647{
 648	struct drm_i915_private *i915 = node_to_i915(m->private);
 649	unsigned int i;
 650
 651	seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);
 652
 653	rcu_read_lock();
 654	for (i = 0; i < i915->ggtt.num_fences; i++) {
 655		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
 656		struct i915_vma *vma = reg->vma;
 657
 658		seq_printf(m, "Fence %d, pin count = %d, object = ",
 659			   i, atomic_read(&reg->pin_count));
 660		if (!vma)
 661			seq_puts(m, "unused");
 662		else
 663			describe_obj(m, vma->obj);
 664		seq_putc(m, '\n');
 665	}
 666	rcu_read_unlock();
 667
 668	return 0;
 669}
 670
 671#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
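/*
 * Stream a previously captured GPU error state to userspace in
 * @count-sized chunks, bouncing through a kernel buffer so the
 * formatter never handles __user pointers directly.
 */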
 672static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
 673			      size_t count, loff_t *pos)
 674{
 675	struct i915_gpu_state *error;
 676	ssize_t ret;
 677	void *buf;
 678
 679	error = file->private_data;
 680	if (!error)
 681		return 0;
 682
 683	/* Bounce buffer required because of kernfs __user API convenience. */
 684	buf = kmalloc(count, GFP_KERNEL);
 685	if (!buf)
 686		return -ENOMEM;
 687
 688	ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
 689	if (ret <= 0)
 690		goto out;
 691
 692	if (!copy_to_user(ubuf, buf, ret))
 693		*pos += ret;
 694	else
 695		ret = -EFAULT;
 696
 697out:
 698	kfree(buf);
 699	return ret;
 700}
 701
 702static int gpu_state_release(struct inode *inode, struct file *file)
 703{
 704	i915_gpu_state_put(file->private_data);
 705	return 0;
 706}
 707
 708static int i915_gpu_info_open(struct inode *inode, struct file *file)
 709{
 710	struct drm_i915_private *i915 = inode->i_private;
 711	struct i915_gpu_state *gpu;
 712	intel_wakeref_t wakeref;
 713
 714	gpu = NULL;
 715	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
 716		gpu = i915_capture_gpu_state(i915);
 717	if (IS_ERR(gpu))
 718		return PTR_ERR(gpu);
 719
 720	file->private_data = gpu;
 721	return 0;
 722}
 723
 724static const struct file_operations i915_gpu_info_fops = {
 725	.owner = THIS_MODULE,
 726	.open = i915_gpu_info_open,
 727	.read = gpu_state_read,
 728	.llseek = default_llseek,
 729	.release = gpu_state_release,
 730};
 731
 732static ssize_t
 733i915_error_state_write(struct file *filp,
 734		       const char __user *ubuf,
 735		       size_t cnt,
 736		       loff_t *ppos)
 737{
 738	struct i915_gpu_state *error = filp->private_data;
 739
 740	if (!error)
 741		return 0;
 742
 743	DRM_DEBUG_DRIVER("Resetting error state\n");
 744	i915_reset_error_state(error->i915);
 745
 746	return cnt;
 747}
 748
 749static int i915_error_state_open(struct inode *inode, struct file *file)
 750{
 751	struct i915_gpu_state *error;
 752
 753	error = i915_first_error_state(inode->i_private);
 754	if (IS_ERR(error))
 755		return PTR_ERR(error);
 756
  757	file->private_data = error;
 758	return 0;
 759}
 760
 761static const struct file_operations i915_error_state_fops = {
 762	.owner = THIS_MODULE,
 763	.open = i915_error_state_open,
 764	.read = gpu_state_read,
 765	.write = i915_error_state_write,
 766	.llseek = default_llseek,
 767	.release = gpu_state_release,
 768};
 769#endif
 770
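/*
 * Three generational paths: Ironlake reads MEMSWCTL/MEMSTAT_ILK
 * directly, VLV/CHV query the Punit over sideband, and gen6+ decode
 * the full RPS register block (taking forcewake around RPSTAT1, which
 * lives in the GT power well).
 */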
 771static int i915_frequency_info(struct seq_file *m, void *unused)
 772{
 773	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 774	struct intel_uncore *uncore = &dev_priv->uncore;
 775	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 776	intel_wakeref_t wakeref;
 777	int ret = 0;
 778
 779	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 780
 781	if (IS_GEN(dev_priv, 5)) {
 782		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
 783		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
 784
 785		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
 786		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
 787		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
 788			   MEMSTAT_VID_SHIFT);
 789		seq_printf(m, "Current P-state: %d\n",
 790			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
 791	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 792		u32 rpmodectl, freq_sts;
 793
 794		rpmodectl = I915_READ(GEN6_RP_CONTROL);
 795		seq_printf(m, "Video Turbo Mode: %s\n",
 796			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
 797		seq_printf(m, "HW control enabled: %s\n",
 798			   yesno(rpmodectl & GEN6_RP_ENABLE));
 799		seq_printf(m, "SW control enabled: %s\n",
 800			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
 801				  GEN6_RP_MEDIA_SW_MODE));
 802
 803		vlv_punit_get(dev_priv);
 804		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 805		vlv_punit_put(dev_priv);
 806
 807		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
 808		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
 809
 810		seq_printf(m, "actual GPU freq: %d MHz\n",
 811			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
 812
 813		seq_printf(m, "current GPU freq: %d MHz\n",
 814			   intel_gpu_freq(dev_priv, rps->cur_freq));
 815
 816		seq_printf(m, "max GPU freq: %d MHz\n",
 817			   intel_gpu_freq(dev_priv, rps->max_freq));
 818
 819		seq_printf(m, "min GPU freq: %d MHz\n",
 820			   intel_gpu_freq(dev_priv, rps->min_freq));
 821
 822		seq_printf(m, "idle GPU freq: %d MHz\n",
 823			   intel_gpu_freq(dev_priv, rps->idle_freq));
 824
 825		seq_printf(m,
 826			   "efficient (RPe) frequency: %d MHz\n",
 827			   intel_gpu_freq(dev_priv, rps->efficient_freq));
 828	} else if (INTEL_GEN(dev_priv) >= 6) {
 829		u32 rp_state_limits;
 830		u32 gt_perf_status;
 831		u32 rp_state_cap;
 832		u32 rpmodectl, rpinclimit, rpdeclimit;
 833		u32 rpstat, cagf, reqf;
 834		u32 rpupei, rpcurup, rpprevup;
 835		u32 rpdownei, rpcurdown, rpprevdown;
 836		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
 837		int max_freq;
 838
 839		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
 840		if (IS_GEN9_LP(dev_priv)) {
 841			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
 842			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
 843		} else {
 844			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 845			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 846		}
 847
 848		/* RPSTAT1 is in the GT power well */
 849		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 850
 851		reqf = I915_READ(GEN6_RPNSWREQ);
 852		if (INTEL_GEN(dev_priv) >= 9)
 853			reqf >>= 23;
 854		else {
 855			reqf &= ~GEN6_TURBO_DISABLE;
 856			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 857				reqf >>= 24;
 858			else
 859				reqf >>= 25;
 860		}
 861		reqf = intel_gpu_freq(dev_priv, reqf);
 862
 863		rpmodectl = I915_READ(GEN6_RP_CONTROL);
 864		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
 865		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
 866
 867		rpstat = I915_READ(GEN6_RPSTAT1);
 868		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
 869		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
 870		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
 871		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
 872		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
 873		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
 874		cagf = intel_gpu_freq(dev_priv,
 875				      intel_get_cagf(dev_priv, rpstat));
 876
 877		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 878
 879		if (INTEL_GEN(dev_priv) >= 11) {
 880			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
 881			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
 882			/*
 883			 * The equivalent to the PM ISR & IIR cannot be read
 884			 * without affecting the current state of the system
 885			 */
 886			pm_isr = 0;
 887			pm_iir = 0;
 888		} else if (INTEL_GEN(dev_priv) >= 8) {
 889			pm_ier = I915_READ(GEN8_GT_IER(2));
 890			pm_imr = I915_READ(GEN8_GT_IMR(2));
 891			pm_isr = I915_READ(GEN8_GT_ISR(2));
 892			pm_iir = I915_READ(GEN8_GT_IIR(2));
 893		} else {
 894			pm_ier = I915_READ(GEN6_PMIER);
 895			pm_imr = I915_READ(GEN6_PMIMR);
 896			pm_isr = I915_READ(GEN6_PMISR);
 897			pm_iir = I915_READ(GEN6_PMIIR);
 898		}
 899		pm_mask = I915_READ(GEN6_PMINTRMSK);
 900
 901		seq_printf(m, "Video Turbo Mode: %s\n",
 902			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
 903		seq_printf(m, "HW control enabled: %s\n",
 904			   yesno(rpmodectl & GEN6_RP_ENABLE));
 905		seq_printf(m, "SW control enabled: %s\n",
 906			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
 907				  GEN6_RP_MEDIA_SW_MODE));
 908
 909		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
 910			   pm_ier, pm_imr, pm_mask);
 911		if (INTEL_GEN(dev_priv) <= 10)
 912			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
 913				   pm_isr, pm_iir);
 914		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
 915			   rps->pm_intrmsk_mbz);
 916		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
 917		seq_printf(m, "Render p-state ratio: %d\n",
 918			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
 919		seq_printf(m, "Render p-state VID: %d\n",
 920			   gt_perf_status & 0xff);
 921		seq_printf(m, "Render p-state limit: %d\n",
 922			   rp_state_limits & 0xff);
 923		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
 924		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
 925		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
 926		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
 927		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
 928		seq_printf(m, "CAGF: %dMHz\n", cagf);
 929		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
 930			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
 931		seq_printf(m, "RP CUR UP: %d (%dus)\n",
 932			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
 933		seq_printf(m, "RP PREV UP: %d (%dus)\n",
 934			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
 935		seq_printf(m, "Up threshold: %d%%\n",
 936			   rps->power.up_threshold);
 937
 938		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
 939			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
 940		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
 941			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
 942		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
 943			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
 944		seq_printf(m, "Down threshold: %d%%\n",
 945			   rps->power.down_threshold);
 946
 947		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
 948			    rp_state_cap >> 16) & 0xff;
 949		max_freq *= (IS_GEN9_BC(dev_priv) ||
 950			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
 951		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
 952			   intel_gpu_freq(dev_priv, max_freq));
 953
 954		max_freq = (rp_state_cap & 0xff00) >> 8;
 955		max_freq *= (IS_GEN9_BC(dev_priv) ||
 956			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
 957		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
 958			   intel_gpu_freq(dev_priv, max_freq));
 959
 960		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
 961			    rp_state_cap >> 0) & 0xff;
 962		max_freq *= (IS_GEN9_BC(dev_priv) ||
 963			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
 964		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 965			   intel_gpu_freq(dev_priv, max_freq));
 966		seq_printf(m, "Max overclocked frequency: %dMHz\n",
 967			   intel_gpu_freq(dev_priv, rps->max_freq));
 968
 969		seq_printf(m, "Current freq: %d MHz\n",
 970			   intel_gpu_freq(dev_priv, rps->cur_freq));
 971		seq_printf(m, "Actual freq: %d MHz\n", cagf);
 972		seq_printf(m, "Idle freq: %d MHz\n",
 973			   intel_gpu_freq(dev_priv, rps->idle_freq));
 974		seq_printf(m, "Min freq: %d MHz\n",
 975			   intel_gpu_freq(dev_priv, rps->min_freq));
 976		seq_printf(m, "Boost freq: %d MHz\n",
 977			   intel_gpu_freq(dev_priv, rps->boost_freq));
 978		seq_printf(m, "Max freq: %d MHz\n",
 979			   intel_gpu_freq(dev_priv, rps->max_freq));
 980		seq_printf(m,
 981			   "efficient (RPe) frequency: %d MHz\n",
 982			   intel_gpu_freq(dev_priv, rps->efficient_freq));
 983	} else {
 984		seq_puts(m, "no P-state info available\n");
 985	}
 986
 987	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
 988	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
 989	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
 990
 991	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 992	return ret;
 993}
 994
 995static void i915_instdone_info(struct drm_i915_private *dev_priv,
 996			       struct seq_file *m,
 997			       struct intel_instdone *instdone)
 998{
 999	int slice;
1000	int subslice;
1001
1002	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1003		   instdone->instdone);
1004
1005	if (INTEL_GEN(dev_priv) <= 3)
1006		return;
1007
1008	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1009		   instdone->slice_common);
1010
1011	if (INTEL_GEN(dev_priv) <= 6)
1012		return;
1013
1014	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1015		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1016			   slice, subslice, instdone->sampler[slice][subslice]);
1017
1018	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1019		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1020			   slice, subslice, instdone->row[slice][subslice]);
1021}
1022
1023static int i915_hangcheck_info(struct seq_file *m, void *unused)
1024{
1025	struct drm_i915_private *i915 = node_to_i915(m->private);
1026	struct intel_gt *gt = &i915->gt;
1027	struct intel_engine_cs *engine;
1028	intel_wakeref_t wakeref;
1029	enum intel_engine_id id;
1030
1031	seq_printf(m, "Reset flags: %lx\n", gt->reset.flags);
1032	if (test_bit(I915_WEDGED, &gt->reset.flags))
1033		seq_puts(m, "\tWedged\n");
1034	if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
1035		seq_puts(m, "\tDevice (global) reset in progress\n");
1036
1037	if (!i915_modparams.enable_hangcheck) {
1038		seq_puts(m, "Hangcheck disabled\n");
1039		return 0;
1040	}
1041
1042	if (timer_pending(&gt->hangcheck.work.timer))
1043		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
1044			   jiffies_to_msecs(gt->hangcheck.work.timer.expires -
1045					    jiffies));
1046	else if (delayed_work_pending(&gt->hangcheck.work))
1047		seq_puts(m, "Hangcheck active, work pending\n");
1048	else
1049		seq_puts(m, "Hangcheck inactive\n");
1050
1051	seq_printf(m, "GT active? %s\n", yesno(gt->awake));
1052
1053	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
1054		for_each_engine(engine, i915, id) {
1055			struct intel_instdone instdone;
1056
1057			seq_printf(m, "%s: %d ms ago\n",
1058				   engine->name,
1059				   jiffies_to_msecs(jiffies -
1060						    engine->hangcheck.action_timestamp));
1061
1062			seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
1063				   (long long)engine->hangcheck.acthd,
1064				   intel_engine_get_active_head(engine));
1065
1066			intel_engine_get_instdone(engine, &instdone);
1067
1068			seq_puts(m, "\tinstdone read =\n");
1069			i915_instdone_info(i915, m, &instdone);
1070
1071			seq_puts(m, "\tinstdone accu =\n");
1072			i915_instdone_info(i915, m,
1073					   &engine->hangcheck.instdone);
1074		}
1075	}
1076
1077	return 0;
1078}
1079
1080static int ironlake_drpc_info(struct seq_file *m)
1081{
1082	struct drm_i915_private *i915 = node_to_i915(m->private);
1083	struct intel_uncore *uncore = &i915->uncore;
1084	u32 rgvmodectl, rstdbyctl;
1085	u16 crstandvid;
1086
1087	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
1088	rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
1089	crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
1090
1091	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1092	seq_printf(m, "Boost freq: %d\n",
1093		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1094		   MEMMODE_BOOST_FREQ_SHIFT);
1095	seq_printf(m, "HW control enabled: %s\n",
1096		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1097	seq_printf(m, "SW control enabled: %s\n",
1098		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1099	seq_printf(m, "Gated voltage change: %s\n",
1100		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1101	seq_printf(m, "Starting frequency: P%d\n",
1102		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1103	seq_printf(m, "Max P-state: P%d\n",
1104		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1105	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1106	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1107	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1108	seq_printf(m, "Render standby enabled: %s\n",
1109		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
1110	seq_puts(m, "Current RS state: ");
1111	switch (rstdbyctl & RSX_STATUS_MASK) {
1112	case RSX_STATUS_ON:
1113		seq_puts(m, "on\n");
1114		break;
1115	case RSX_STATUS_RC1:
1116		seq_puts(m, "RC1\n");
1117		break;
1118	case RSX_STATUS_RC1E:
1119		seq_puts(m, "RC1E\n");
1120		break;
1121	case RSX_STATUS_RS1:
1122		seq_puts(m, "RS1\n");
1123		break;
1124	case RSX_STATUS_RS2:
1125		seq_puts(m, "RS2 (RC6)\n");
1126		break;
1127	case RSX_STATUS_RS3:
1128		seq_puts(m, "RC3 (RC6+)\n");
1129		break;
1130	default:
1131		seq_puts(m, "unknown\n");
1132		break;
1133	}
1134
1135	return 0;
1136}
1137
1138static int i915_forcewake_domains(struct seq_file *m, void *data)
1139{
1140	struct drm_i915_private *i915 = node_to_i915(m->private);
1141	struct intel_uncore *uncore = &i915->uncore;
1142	struct intel_uncore_forcewake_domain *fw_domain;
1143	unsigned int tmp;
1144
1145	seq_printf(m, "user.bypass_count = %u\n",
1146		   uncore->user_forcewake_count);
1147
1148	for_each_fw_domain(fw_domain, uncore, tmp)
1149		seq_printf(m, "%s.wake_count = %u\n",
1150			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
1151			   READ_ONCE(fw_domain->wake_count));
1152
1153	return 0;
1154}
1155
1156static void print_rc6_res(struct seq_file *m,
1157			  const char *title,
1158			  const i915_reg_t reg)
1159{
1160	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1161
1162	seq_printf(m, "%s %u (%llu us)\n",
1163		   title, I915_READ(reg),
1164		   intel_rc6_residency_us(dev_priv, reg));
1165}
1166
1167static int vlv_drpc_info(struct seq_file *m)
1168{
1169	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1170	u32 rcctl1, pw_status;
1171
1172	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1173	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1174
1175	seq_printf(m, "RC6 Enabled: %s\n",
1176		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1177					GEN6_RC_CTL_EI_MODE(1))));
1178	seq_printf(m, "Render Power Well: %s\n",
1179		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1180	seq_printf(m, "Media Power Well: %s\n",
1181		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1182
1183	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1184	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
1185
1186	return i915_forcewake_domains(m, NULL);
1187}
1188
1189static int gen6_drpc_info(struct seq_file *m)
1190{
1191	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1192	u32 gt_core_status, rcctl1, rc6vids = 0;
1193	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
1194
1195	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
1196	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1197
1198	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1199	if (INTEL_GEN(dev_priv) >= 9) {
1200		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1201		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1202	}
1203
1204	if (INTEL_GEN(dev_priv) <= 7)
1205		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
1206				       &rc6vids, NULL);
1207
1208	seq_printf(m, "RC1e Enabled: %s\n",
1209		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1210	seq_printf(m, "RC6 Enabled: %s\n",
1211		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1212	if (INTEL_GEN(dev_priv) >= 9) {
1213		seq_printf(m, "Render Well Gating Enabled: %s\n",
1214			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1215		seq_printf(m, "Media Well Gating Enabled: %s\n",
1216			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1217	}
1218	seq_printf(m, "Deep RC6 Enabled: %s\n",
1219		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1220	seq_printf(m, "Deepest RC6 Enabled: %s\n",
1221		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1222	seq_puts(m, "Current RC state: ");
1223	switch (gt_core_status & GEN6_RCn_MASK) {
1224	case GEN6_RC0:
1225		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1226			seq_puts(m, "Core Power Down\n");
1227		else
1228			seq_puts(m, "on\n");
1229		break;
1230	case GEN6_RC3:
1231		seq_puts(m, "RC3\n");
1232		break;
1233	case GEN6_RC6:
1234		seq_puts(m, "RC6\n");
1235		break;
1236	case GEN6_RC7:
1237		seq_puts(m, "RC7\n");
1238		break;
1239	default:
1240		seq_puts(m, "Unknown\n");
1241		break;
1242	}
1243
1244	seq_printf(m, "Core Power Down: %s\n",
1245		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1246	if (INTEL_GEN(dev_priv) >= 9) {
1247		seq_printf(m, "Render Power Well: %s\n",
1248			(gen9_powergate_status &
1249			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1250		seq_printf(m, "Media Power Well: %s\n",
1251			(gen9_powergate_status &
1252			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1253	}
1254
1255	/* Not exactly sure what this is */
1256	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1257		      GEN6_GT_GFX_RC6_LOCKED);
1258	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1259	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1260	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
1261
1262	if (INTEL_GEN(dev_priv) <= 7) {
1263		seq_printf(m, "RC6   voltage: %dmV\n",
1264			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1265		seq_printf(m, "RC6+  voltage: %dmV\n",
1266			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1267		seq_printf(m, "RC6++ voltage: %dmV\n",
1268			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1269	}
1270
1271	return i915_forcewake_domains(m, NULL);
1272}
1273
1274static int i915_drpc_info(struct seq_file *m, void *unused)
1275{
1276	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1277	intel_wakeref_t wakeref;
1278	int err = -ENODEV;
1279
1280	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1281		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1282			err = vlv_drpc_info(m);
1283		else if (INTEL_GEN(dev_priv) >= 6)
1284			err = gen6_drpc_info(m);
1285		else
1286			err = ironlake_drpc_info(m);
1287	}
1288
1289	return err;
1290}
1291
1292static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1293{
1294	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1295
1296	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1297		   dev_priv->fb_tracking.busy_bits);
1298
1299	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1300		   dev_priv->fb_tracking.flip_bits);
1301
1302	return 0;
1303}
1304
1305static int i915_fbc_status(struct seq_file *m, void *unused)
1306{
1307	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1308	struct intel_fbc *fbc = &dev_priv->fbc;
1309	intel_wakeref_t wakeref;
1310
1311	if (!HAS_FBC(dev_priv))
1312		return -ENODEV;
1313
1314	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1315	mutex_lock(&fbc->lock);
1316
1317	if (intel_fbc_is_active(dev_priv))
1318		seq_puts(m, "FBC enabled\n");
1319	else
1320		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1321
1322	if (intel_fbc_is_active(dev_priv)) {
1323		u32 mask;
1324
1325		if (INTEL_GEN(dev_priv) >= 8)
1326			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1327		else if (INTEL_GEN(dev_priv) >= 7)
1328			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1329		else if (INTEL_GEN(dev_priv) >= 5)
1330			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1331		else if (IS_G4X(dev_priv))
1332			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1333		else
1334			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1335							FBC_STAT_COMPRESSED);
1336
1337		seq_printf(m, "Compressing: %s\n", yesno(mask));
1338	}
1339
1340	mutex_unlock(&fbc->lock);
1341	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1342
1343	return 0;
1344}
1345
1346static int i915_fbc_false_color_get(void *data, u64 *val)
1347{
1348	struct drm_i915_private *dev_priv = data;
1349
1350	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1351		return -ENODEV;
1352
1353	*val = dev_priv->fbc.false_color;
1354
1355	return 0;
1356}
1357
1358static int i915_fbc_false_color_set(void *data, u64 val)
1359{
1360	struct drm_i915_private *dev_priv = data;
1361	u32 reg;
1362
1363	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1364		return -ENODEV;
1365
1366	mutex_lock(&dev_priv->fbc.lock);
1367
1368	reg = I915_READ(ILK_DPFC_CONTROL);
1369	dev_priv->fbc.false_color = val;
1370
1371	I915_WRITE(ILK_DPFC_CONTROL, val ?
1372		   (reg | FBC_CTL_FALSE_COLOR) :
1373		   (reg & ~FBC_CTL_FALSE_COLOR));
1374
1375	mutex_unlock(&dev_priv->fbc.lock);
1376	return 0;
1377}
1378
1379DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1380			i915_fbc_false_color_get, i915_fbc_false_color_set,
1381			"%llu\n");
1382
1383static int i915_ips_status(struct seq_file *m, void *unused)
1384{
1385	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1386	intel_wakeref_t wakeref;
1387
1388	if (!HAS_IPS(dev_priv))
1389		return -ENODEV;
1390
1391	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1392
1393	seq_printf(m, "Enabled by kernel parameter: %s\n",
1394		   yesno(i915_modparams.enable_ips));
1395
1396	if (INTEL_GEN(dev_priv) >= 8) {
1397		seq_puts(m, "Currently: unknown\n");
1398	} else {
1399		if (I915_READ(IPS_CTL) & IPS_ENABLE)
1400			seq_puts(m, "Currently: enabled\n");
1401		else
1402			seq_puts(m, "Currently: disabled\n");
1403	}
1404
1405	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1406
1407	return 0;
1408}
1409
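/*
 * The self-refresh enable bit lives in a different register on nearly
 * every pre-gen9 platform; gen9+ has no single global SR bit, so
 * sr_enabled is simply left false there.
 */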
1410static int i915_sr_status(struct seq_file *m, void *unused)
1411{
1412	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1413	intel_wakeref_t wakeref;
1414	bool sr_enabled = false;
1415
1416	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1417
1418	if (INTEL_GEN(dev_priv) >= 9)
1419		/* no global SR status; inspect per-plane WM */;
1420	else if (HAS_PCH_SPLIT(dev_priv))
1421		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1422	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
1423		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
1424		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1425	else if (IS_I915GM(dev_priv))
1426		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1427	else if (IS_PINEVIEW(dev_priv))
1428		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1429	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1430		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1431
1432	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
1433
1434	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
1435
1436	return 0;
1437}
1438
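/*
 * For each GPU frequency step, ask pcode (GEN6_PCODE_READ_MIN_FREQ_TABLE)
 * which effective CPU and ring frequencies it pairs with that step. Only
 * meaningful on parts that share the LLC, hence the HAS_LLC() gate.
 */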
1439static int i915_ring_freq_table(struct seq_file *m, void *unused)
1440{
1441	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1442	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1443	unsigned int max_gpu_freq, min_gpu_freq;
1444	intel_wakeref_t wakeref;
1445	int gpu_freq, ia_freq;
1446
1447	if (!HAS_LLC(dev_priv))
1448		return -ENODEV;
1449
1450	min_gpu_freq = rps->min_freq;
1451	max_gpu_freq = rps->max_freq;
1452	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
1453		/* Convert GT frequency to 50 HZ units */
1454		min_gpu_freq /= GEN9_FREQ_SCALER;
1455		max_gpu_freq /= GEN9_FREQ_SCALER;
1456	}
1457
1458	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1459
1460	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1461	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1462		ia_freq = gpu_freq;
1463		sandybridge_pcode_read(dev_priv,
1464				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
1465				       &ia_freq, NULL);
1466		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1467			   intel_gpu_freq(dev_priv, (gpu_freq *
1468						     (IS_GEN9_BC(dev_priv) ||
1469						      INTEL_GEN(dev_priv) >= 10 ?
1470						      GEN9_FREQ_SCALER : 1))),
1471			   ((ia_freq >> 0) & 0xff) * 100,
1472			   ((ia_freq >> 8) & 0xff) * 100);
1473	}
1474	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1475
1476	return 0;
1477}
1478
1479static int i915_opregion(struct seq_file *m, void *unused)
1480{
1481	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1482	struct drm_device *dev = &dev_priv->drm;
1483	struct intel_opregion *opregion = &dev_priv->opregion;
1484	int ret;
1485
1486	ret = mutex_lock_interruptible(&dev->struct_mutex);
1487	if (ret)
1488		goto out;
1489
1490	if (opregion->header)
1491		seq_write(m, opregion->header, OPREGION_SIZE);
1492
1493	mutex_unlock(&dev->struct_mutex);
1494
1495out:
1496	return 0;
1497}
1498
1499static int i915_vbt(struct seq_file *m, void *unused)
1500{
1501	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1502
1503	if (opregion->vbt)
1504		seq_write(m, opregion->vbt, opregion->vbt_size);
1505
1506	return 0;
1507}
1508
1509static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1510{
1511	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1512	struct drm_device *dev = &dev_priv->drm;
1513	struct intel_framebuffer *fbdev_fb = NULL;
1514	struct drm_framebuffer *drm_fb;
1515	int ret;
1516
1517	ret = mutex_lock_interruptible(&dev->struct_mutex);
1518	if (ret)
1519		return ret;
1520
1521#ifdef CONFIG_DRM_FBDEV_EMULATION
1522	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
1523		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
1524
1525		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1526			   fbdev_fb->base.width,
1527			   fbdev_fb->base.height,
1528			   fbdev_fb->base.format->depth,
1529			   fbdev_fb->base.format->cpp[0] * 8,
1530			   fbdev_fb->base.modifier,
1531			   drm_framebuffer_read_refcount(&fbdev_fb->base));
1532		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
1533		seq_putc(m, '\n');
1534	}
1535#endif
1536
1537	mutex_lock(&dev->mode_config.fb_lock);
1538	drm_for_each_fb(drm_fb, dev) {
1539		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1540		if (fb == fbdev_fb)
1541			continue;
1542
1543		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1544			   fb->base.width,
1545			   fb->base.height,
1546			   fb->base.format->depth,
1547			   fb->base.format->cpp[0] * 8,
1548			   fb->base.modifier,
1549			   drm_framebuffer_read_refcount(&fb->base));
1550		describe_obj(m, intel_fb_obj(&fb->base));
1551		seq_putc(m, '\n');
1552	}
1553	mutex_unlock(&dev->mode_config.fb_lock);
1554	mutex_unlock(&dev->struct_mutex);
1555
1556	return 0;
1557}
1558
1559static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1560{
1561	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1562		   ring->space, ring->head, ring->tail, ring->emit);
1563}
1564
1565static int i915_context_status(struct seq_file *m, void *unused)
1566{
1567	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1568	struct drm_device *dev = &dev_priv->drm;
1569	struct i915_gem_context *ctx;
1570	int ret;
1571
1572	ret = mutex_lock_interruptible(&dev->struct_mutex);
1573	if (ret)
1574		return ret;
1575
1576	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
1577		struct i915_gem_engines_iter it;
1578		struct intel_context *ce;
1579
1580		seq_puts(m, "HW context ");
1581		if (!list_empty(&ctx->hw_id_link))
1582			seq_printf(m, "%x [pin %u]", ctx->hw_id,
1583				   atomic_read(&ctx->hw_id_pin_count));
1584		if (ctx->pid) {
1585			struct task_struct *task;
1586
1587			task = get_pid_task(ctx->pid, PIDTYPE_PID);
1588			if (task) {
1589				seq_printf(m, "(%s [%d]) ",
1590					   task->comm, task->pid);
1591				put_task_struct(task);
1592			}
1593		} else if (IS_ERR(ctx->file_priv)) {
1594			seq_puts(m, "(deleted) ");
1595		} else {
1596			seq_puts(m, "(kernel) ");
1597		}
1598
1599		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1600		seq_putc(m, '\n');
1601
1602		for_each_gem_engine(ce,
1603				    i915_gem_context_lock_engines(ctx), it) {
1604			intel_context_lock_pinned(ce);
1605			if (intel_context_is_pinned(ce)) {
1606				seq_printf(m, "%s: ", ce->engine->name);
1607				if (ce->state)
1608					describe_obj(m, ce->state->obj);
1609				describe_ctx_ring(m, ce->ring);
1610				seq_putc(m, '\n');
1611			}
1612			intel_context_unlock_pinned(ce);
1613		}
1614		i915_gem_context_unlock_engines(ctx);
1615
1616		seq_putc(m, '\n');
1617	}
1618
1619	mutex_unlock(&dev->struct_mutex);
1620
1621	return 0;
1622}
1623
1624static const char *swizzle_string(unsigned swizzle)
1625{
1626	switch (swizzle) {
1627	case I915_BIT_6_SWIZZLE_NONE:
1628		return "none";
1629	case I915_BIT_6_SWIZZLE_9:
1630		return "bit9";
1631	case I915_BIT_6_SWIZZLE_9_10:
1632		return "bit9/bit10";
1633	case I915_BIT_6_SWIZZLE_9_11:
1634		return "bit9/bit11";
1635	case I915_BIT_6_SWIZZLE_9_10_11:
1636		return "bit9/bit10/bit11";
1637	case I915_BIT_6_SWIZZLE_9_17:
1638		return "bit9/bit17";
1639	case I915_BIT_6_SWIZZLE_9_10_17:
1640		return "bit9/bit10/bit17";
1641	case I915_BIT_6_SWIZZLE_UNKNOWN:
1642		return "unknown";
1643	}
1644
1645	return "bug";
1646}
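
/*
 * Editorial sketch, assuming the conventional reading of these modes: each
 * name lists the address bits that the memory controller XORs into bit 6 of
 * an object offset. For I915_BIT_6_SWIZZLE_9_10, for example:
 *
 *	static unsigned long swizzle_bit6_9_10(unsigned long addr)
 *	{
 *		unsigned long bit6 = (addr >> 6) ^ (addr >> 9) ^ (addr >> 10);
 *
 *		return (addr & ~(1ul << 6)) | ((bit6 & 1) << 6);
 *	}
 */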
1647
1648static int i915_swizzle_info(struct seq_file *m, void *data)
1649{
1650	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1651	struct intel_uncore *uncore = &dev_priv->uncore;
1652	intel_wakeref_t wakeref;
1653
1654	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1655
1656	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1657		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
1658	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1659		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
1660
1661	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
1662		seq_printf(m, "DCC = 0x%08x\n",
1663			   intel_uncore_read(uncore, DCC));
1664		seq_printf(m, "DCC2 = 0x%08x\n",
1665			   intel_uncore_read(uncore, DCC2));
1666		seq_printf(m, "C0DRB3 = 0x%04x\n",
1667			   intel_uncore_read16(uncore, C0DRB3));
1668		seq_printf(m, "C1DRB3 = 0x%04x\n",
1669			   intel_uncore_read16(uncore, C1DRB3));
1670	} else if (INTEL_GEN(dev_priv) >= 6) {
1671		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1672			   intel_uncore_read(uncore, MAD_DIMM_C0));
1673		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1674			   intel_uncore_read(uncore, MAD_DIMM_C1));
1675		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1676			   intel_uncore_read(uncore, MAD_DIMM_C2));
1677		seq_printf(m, "TILECTL = 0x%08x\n",
1678			   intel_uncore_read(uncore, TILECTL));
1679		if (INTEL_GEN(dev_priv) >= 8)
1680			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1681				   intel_uncore_read(uncore, GAMTARBMODE));
1682		else
1683			seq_printf(m, "ARB_MODE = 0x%08x\n",
1684				   intel_uncore_read(uncore, ARB_MODE));
1685		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1686			   intel_uncore_read(uncore, DISP_ARB_CTL));
1687	}
1688
1689	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
1690		seq_puts(m, "L-shaped memory detected\n");
1691
1692	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1693
1694	return 0;
1695}
1696
1697static const char *rps_power_to_str(unsigned int power)
1698{
1699	static const char * const strings[] = {
1700		[LOW_POWER] = "low power",
1701		[BETWEEN] = "mixed",
1702		[HIGH_POWER] = "high power",
1703	};
1704
1705	if (power >= ARRAY_SIZE(strings) || !strings[power])
1706		return "unknown";
1707
1708	return strings[power];
1709}
1710
1711static int i915_rps_boost_info(struct seq_file *m, void *data)
1712{
1713	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1714	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1715	u32 act_freq = rps->cur_freq;
1716	intel_wakeref_t wakeref;
1717
1718	with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, wakeref) {
1719		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1720			vlv_punit_get(dev_priv);
1721			act_freq = vlv_punit_read(dev_priv,
1722						  PUNIT_REG_GPU_FREQ_STS);
1723			vlv_punit_put(dev_priv);
1724			act_freq = (act_freq >> 8) & 0xff;
1725		} else {
1726			act_freq = intel_get_cagf(dev_priv,
1727						  I915_READ(GEN6_RPSTAT1));
1728		}
1729	}
1730
1731	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
1732	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
1733	seq_printf(m, "Boosts outstanding? %d\n",
1734		   atomic_read(&rps->num_waiters));
1735	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
1736	seq_printf(m, "Frequency requested %d, actual %d\n",
1737		   intel_gpu_freq(dev_priv, rps->cur_freq),
1738		   intel_gpu_freq(dev_priv, act_freq));
1739	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
1740		   intel_gpu_freq(dev_priv, rps->min_freq),
1741		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
1742		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
1743		   intel_gpu_freq(dev_priv, rps->max_freq));
1744	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
1745		   intel_gpu_freq(dev_priv, rps->idle_freq),
1746		   intel_gpu_freq(dev_priv, rps->efficient_freq),
1747		   intel_gpu_freq(dev_priv, rps->boost_freq));
1748
1749	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
1750
1751	if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
1752		u32 rpup, rpupei;
1753		u32 rpdown, rpdownei;
1754
1755		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
1756		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
1757		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
1758		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
1759		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
1760		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1761
1762		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
1763			   rps_power_to_str(rps->power.mode));
1764		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
1765			   rpup && rpupei ? 100 * rpup / rpupei : 0,
1766			   rps->power.up_threshold);
1767		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
1768			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
1769			   rps->power.down_threshold);
1770	} else {
1771		seq_puts(m, "\nRPS Autotuning inactive\n");
1772	}
1773
1774	return 0;
1775}
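
/*
 * Worked example (editorial): the "Avg. up" figure above is the ratio of
 * busy cycles to evaluation-interval cycles. With rpup = 300 and
 * rpupei = 400, 100 * 300 / 400 = 75, i.e. the GPU was busy for 75% of the
 * last up-evaluation window, to be compared against rps->power.up_threshold.
 */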
1776
1777static int i915_llc(struct seq_file *m, void *data)
1778{
1779	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1780	const bool edram = INTEL_GEN(dev_priv) > 8;
1781
1782	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
1783	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
1784		   dev_priv->edram_size_mb);
1785
1786	return 0;
1787}
1788
1789static int i915_huc_load_status_info(struct seq_file *m, void *data)
1790{
1791	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1792	intel_wakeref_t wakeref;
1793	struct drm_printer p;
1794
1795	if (!HAS_GT_UC(dev_priv))
1796		return -ENODEV;
1797
1798	p = drm_seq_file_printer(m);
1799	intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);
1800
1801	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
1802		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
1803
1804	return 0;
1805}
1806
1807static int i915_guc_load_status_info(struct seq_file *m, void *data)
1808{
1809	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1810	intel_wakeref_t wakeref;
1811	struct drm_printer p;
1812
1813	if (!HAS_GT_UC(dev_priv))
1814		return -ENODEV;
1815
1816	p = drm_seq_file_printer(m);
1817	intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);
1818
1819	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1820		u32 tmp = I915_READ(GUC_STATUS);
1821		u32 i;
1822
1823		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
1824		seq_printf(m, "\tBootrom status = 0x%x\n",
1825			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
1826		seq_printf(m, "\tuKernel status = 0x%x\n",
1827			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
1828		seq_printf(m, "\tMIA Core status = 0x%x\n",
1829			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
1830		seq_puts(m, "\nScratch registers:\n");
1831		for (i = 0; i < 16; i++) {
1832			seq_printf(m, "\t%2d: \t0x%x\n",
1833				   i, I915_READ(SOFT_SCRATCH(i)));
1834		}
1835	}
1836
1837	return 0;
1838}
1839
1840static const char *
1841stringify_guc_log_type(enum guc_log_buffer_type type)
1842{
1843	switch (type) {
1844	case GUC_ISR_LOG_BUFFER:
1845		return "ISR";
1846	case GUC_DPC_LOG_BUFFER:
1847		return "DPC";
1848	case GUC_CRASH_DUMP_LOG_BUFFER:
1849		return "CRASH";
1850	default:
1851		MISSING_CASE(type);
1852	}
1853
1854	return "";
1855}
1856
1857static void i915_guc_log_info(struct seq_file *m,
1858			      struct drm_i915_private *dev_priv)
1859{
1860	struct intel_guc_log *log = &dev_priv->gt.uc.guc.log;
1861	enum guc_log_buffer_type type;
1862
1863	if (!intel_guc_log_relay_enabled(log)) {
1864		seq_puts(m, "GuC log relay disabled\n");
1865		return;
1866	}
1867
1868	seq_puts(m, "GuC logging stats:\n");
1869
1870	seq_printf(m, "\tRelay full count: %u\n",
1871		   log->relay.full_count);
1872
1873	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
1874		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
1875			   stringify_guc_log_type(type),
1876			   log->stats[type].flush,
1877			   log->stats[type].sampled_overflow);
1878	}
1879}
1880
1881static int i915_guc_info(struct seq_file *m, void *data)
1882{
1883	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1884	const struct intel_guc *guc = &dev_priv->gt.uc.guc;
1885	struct intel_guc_client *client = guc->execbuf_client;
1886
1887	if (!USES_GUC(dev_priv))
1888		return -ENODEV;
1889
1890	i915_guc_log_info(m, dev_priv);
1891
1892	if (!USES_GUC_SUBMISSION(dev_priv))
1893		return 0;
1894
1895	GEM_BUG_ON(!guc->execbuf_client);
1896
1897	seq_printf(m, "\nDoorbell map:\n");
1898	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
1899	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
1900
1901	seq_printf(m, "\nGuC execbuf client @ %p:\n", client);
1902	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
1903		   client->priority,
1904		   client->stage_id,
1905		   client->proc_desc_offset);
1906	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
1907		   client->doorbell_id, client->doorbell_offset);
1908	/* Add more as required ... */
1909
1910	return 0;
1911}
1912
1913static int i915_guc_stage_pool(struct seq_file *m, void *data)
1914{
1915	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1916	const struct intel_guc *guc = &dev_priv->gt.uc.guc;
1917	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
1918	int index;
1919
1920	if (!USES_GUC_SUBMISSION(dev_priv))
1921		return -ENODEV;
1922
1923	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
1924		struct intel_engine_cs *engine;
1925
1926		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
1927			continue;
1928
1929		seq_printf(m, "GuC stage descriptor %u:\n", index);
1930		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
1931		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
1932		seq_printf(m, "\tPriority: %d\n", desc->priority);
1933		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
1934		seq_printf(m, "\tEngines used: 0x%x\n",
1935			   desc->engines_used);
1936		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
1937			   desc->db_trigger_phy,
1938			   desc->db_trigger_cpu,
1939			   desc->db_trigger_uk);
1940		seq_printf(m, "\tProcess descriptor: 0x%x\n",
1941			   desc->process_desc);
1942		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
1943			   desc->wq_addr, desc->wq_size);
1944		seq_putc(m, '\n');
1945
1946		for_each_uabi_engine(engine, dev_priv) {
1947			u32 guc_engine_id = engine->guc_id;
1948			struct guc_execlist_context *lrc =
1949						&desc->lrc[guc_engine_id];
1950
1951			seq_printf(m, "\t%s LRC:\n", engine->name);
1952			seq_printf(m, "\t\tContext desc: 0x%x\n",
1953				   lrc->context_desc);
1954			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
1955			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
1956			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
1957			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
1958			seq_putc(m, '\n');
1959		}
1960	}
1961
1962	return 0;
1963}
1964
1965static int i915_guc_log_dump(struct seq_file *m, void *data)
1966{
1967	struct drm_info_node *node = m->private;
1968	struct drm_i915_private *dev_priv = node_to_i915(node);
1969	bool dump_load_err = !!node->info_ent->data;
1970	struct drm_i915_gem_object *obj = NULL;
1971	u32 *log;
1972	int i = 0;
1973
1974	if (!HAS_GT_UC(dev_priv))
1975		return -ENODEV;
1976
1977	if (dump_load_err)
1978		obj = dev_priv->gt.uc.load_err_log;
1979	else if (dev_priv->gt.uc.guc.log.vma)
1980		obj = dev_priv->gt.uc.guc.log.vma->obj;
1981
1982	if (!obj)
1983		return 0;
1984
1985	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
1986	if (IS_ERR(log)) {
1987		DRM_DEBUG("Failed to pin object\n");
1988		seq_puts(m, "(log data inaccessible)\n");
1989		return PTR_ERR(log);
1990	}
1991
1992	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
1993		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
1994			   *(log + i), *(log + i + 1),
1995			   *(log + i + 2), *(log + i + 3));
1996
1997	seq_putc(m, '\n');
1998
1999	i915_gem_object_unpin_map(obj);
2000
2001	return 0;
2002}
2003
2004static int i915_guc_log_level_get(void *data, u64 *val)
2005{
2006	struct drm_i915_private *dev_priv = data;
2007
2008	if (!USES_GUC(dev_priv))
2009		return -ENODEV;
2010
2011	*val = intel_guc_log_get_level(&dev_priv->gt.uc.guc.log);
2012
2013	return 0;
2014}
2015
2016static int i915_guc_log_level_set(void *data, u64 val)
2017{
2018	struct drm_i915_private *dev_priv = data;
2019
2020	if (!USES_GUC(dev_priv))
2021		return -ENODEV;
2022
2023	return intel_guc_log_set_level(&dev_priv->gt.uc.guc.log, val);
2024}
2025
2026DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2027			i915_guc_log_level_get, i915_guc_log_level_set,
2028			"%lld\n");
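
/*
 * Usage note (editorial, debugfs path assumed): the attribute above reads
 * and writes the GuC log verbosity as a plain integer, e.g.:
 *
 *	# cat /sys/kernel/debug/dri/0/i915_guc_log_level
 *	# echo 2 > /sys/kernel/debug/dri/0/i915_guc_log_level
 */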
2029
2030static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2031{
2032	struct drm_i915_private *i915 = inode->i_private;
2033	struct intel_guc *guc = &i915->gt.uc.guc;
2034	struct intel_guc_log *log = &guc->log;
2035
2036	if (!intel_guc_is_running(guc))
2037		return -ENODEV;
2038
2039	file->private_data = log;
2040
2041	return intel_guc_log_relay_open(log);
2042}
2043
2044static ssize_t
2045i915_guc_log_relay_write(struct file *filp,
2046			 const char __user *ubuf,
2047			 size_t cnt,
2048			 loff_t *ppos)
2049{
2050	struct intel_guc_log *log = filp->private_data;
2051
2052	intel_guc_log_relay_flush(log);
2053	return cnt;
2054}
2055
2056static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2057{
2058	struct drm_i915_private *i915 = inode->i_private;
2059	struct intel_guc *guc = &i915->gt.uc.guc;
2060
2061	intel_guc_log_relay_close(&guc->log);
2062	return 0;
2063}
2064
2065static const struct file_operations i915_guc_log_relay_fops = {
2066	.owner = THIS_MODULE,
2067	.open = i915_guc_log_relay_open,
2068	.write = i915_guc_log_relay_write,
2069	.release = i915_guc_log_relay_release,
2070};
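
/*
 * Usage note (editorial, debugfs path assumed): per the fops above, the
 * relay lifecycle is tied to the file descriptor: open() starts relaying,
 * any write() forces a flush of pending log data, and the final close()
 * tears the relay down again:
 *
 *	# exec 3<> /sys/kernel/debug/dri/0/i915_guc_log_relay
 *	# echo flush >&3
 *	# exec 3>&-
 */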
2071
2072static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2073{
2074	u8 val;
2075	static const char * const sink_status[] = {
2076		"inactive",
2077		"transition to active, capture and display",
2078		"active, display from RFB",
2079		"active, capture and display on sink device timings",
2080		"transition to inactive, capture and display, timing re-sync",
2081		"reserved",
2082		"reserved",
2083		"sink internal error",
2084	};
2085	struct drm_connector *connector = m->private;
2086	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2087	struct intel_dp *intel_dp =
2088		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2089	int ret;
2090
2091	if (!CAN_PSR(dev_priv)) {
2092		seq_puts(m, "PSR Unsupported\n");
2093		return -ENODEV;
2094	}
2095
2096	if (connector->status != connector_status_connected)
2097		return -ENODEV;
2098
2099	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2100
2101	if (ret == 1) {
2102		const char *str = "unknown";
2103
2104		val &= DP_PSR_SINK_STATE_MASK;
2105		if (val < ARRAY_SIZE(sink_status))
2106			str = sink_status[val];
2107		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2108	} else {
2109		return ret;
2110	}
2111
2112	return 0;
2113}
2114DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
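
/*
 * Usage note (editorial, connector directory name assumed): this show-only
 * attribute hangs off the eDP connector's debugfs directory, so the sink
 * state can be sampled with a plain read:
 *
 *	# cat /sys/kernel/debug/dri/0/eDP-1/i915_psr_sink_status
 *	Sink PSR status: 0x2 [active, display from RFB]
 */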
2115
2116static void
2117psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2118{
2119	u32 val, status_val;
2120	const char *status = "unknown";
2121
2122	if (dev_priv->psr.psr2_enabled) {
2123		static const char * const live_status[] = {
2124			"IDLE",
2125			"CAPTURE",
2126			"CAPTURE_FS",
2127			"SLEEP",
2128			"BUFON_FW",
2129			"ML_UP",
2130			"SU_STANDBY",
2131			"FAST_SLEEP",
2132			"DEEP_SLEEP",
2133			"BUF_ON",
2134			"TG_ON"
2135		};
2136		val = I915_READ(EDP_PSR2_STATUS);
2137		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
2138			      EDP_PSR2_STATUS_STATE_SHIFT;
2139		if (status_val < ARRAY_SIZE(live_status))
2140			status = live_status[status_val];
2141	} else {
2142		static const char * const live_status[] = {
2143			"IDLE",
2144			"SRDONACK",
2145			"SRDENT",
2146			"BUFOFF",
2147			"BUFON",
2148			"AUXACK",
2149			"SRDOFFACK",
2150			"SRDENT_ON",
2151		};
2152		val = I915_READ(EDP_PSR_STATUS);
2153		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
2154			      EDP_PSR_STATUS_STATE_SHIFT;
2155		if (status_val < ARRAY_SIZE(live_status))
2156			status = live_status[status_val];
2157	}
2158
2159	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2160}
2161
2162static int i915_edp_psr_status(struct seq_file *m, void *data)
2163{
2164	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2165	struct i915_psr *psr = &dev_priv->psr;
2166	intel_wakeref_t wakeref;
2167	const char *status;
2168	bool enabled;
2169	u32 val;
2170
2171	if (!HAS_PSR(dev_priv))
2172		return -ENODEV;
2173
2174	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
2175	if (psr->dp)
2176		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
2177	seq_puts(m, "\n");
2178
2179	if (!psr->sink_support)
2180		return 0;
2181
2182	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2183	mutex_lock(&psr->lock);
2184
2185	if (psr->enabled)
2186		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2187	else
2188		status = "disabled";
2189	seq_printf(m, "PSR mode: %s\n", status);
2190
2191	if (!psr->enabled)
2192		goto unlock;
2193
2194	if (psr->psr2_enabled) {
2195		val = I915_READ(EDP_PSR2_CTL);
2196		enabled = val & EDP_PSR2_ENABLE;
2197	} else {
2198		val = I915_READ(EDP_PSR_CTL);
2199		enabled = val & EDP_PSR_ENABLE;
2200	}
2201	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2202		   enableddisabled(enabled), val);
2203	psr_source_status(dev_priv, m);
2204	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2205		   psr->busy_frontbuffer_bits);
2206
2207	/*
2208	 * SKL+ Perf counter is reset to 0 every time a DC state is entered
2209	 */
2210	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2211		val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
2212		seq_printf(m, "Performance counter: %u\n", val);
2213	}
2214
2215	if (psr->debug & I915_PSR_DEBUG_IRQ) {
2216		seq_printf(m, "Last attempted entry at: %lld\n",
2217			   psr->last_entry_attempt);
2218		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
2219	}
2220
2221	if (psr->psr2_enabled) {
2222		u32 su_frames_val[3];
2223		int frame;
2224
2225		/*
2226		 * Read all 3 registers beforehand to minimize the chance of
2227		 * crossing a frame boundary between register reads
2228		 */
2229		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
2230			su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));
2231
2232		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2233
2234		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2235			u32 su_blocks;
2236
2237			su_blocks = su_frames_val[frame / 3] &
2238				    PSR2_SU_STATUS_MASK(frame);
2239			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2240			seq_printf(m, "%d\t%d\n", frame, su_blocks);
2241		}
2242	}
2243
2244unlock:
2245	mutex_unlock(&psr->lock);
2246	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2247
2248	return 0;
2249}
2250
2251static int
2252i915_edp_psr_debug_set(void *data, u64 val)
2253{
2254	struct drm_i915_private *dev_priv = data;
2255	intel_wakeref_t wakeref;
2256	int ret;
2257
2258	if (!CAN_PSR(dev_priv))
2259		return -ENODEV;
2260
2261	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
2262
2263	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2264
2265	ret = intel_psr_debug_set(dev_priv, val);
2266
2267	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2268
2269	return ret;
2270}
2271
2272static int
2273i915_edp_psr_debug_get(void *data, u64 *val)
2274{
2275	struct drm_i915_private *dev_priv = data;
2276
2277	if (!CAN_PSR(dev_priv))
2278		return -ENODEV;
2279
2280	*val = READ_ONCE(dev_priv->psr.debug);
2281	return 0;
2282}
2283
2284DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2285			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2286			"%llu\n");
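
/*
 * Usage note (editorial, path and flag value assumed): the attribute above
 * takes a bitmask of I915_PSR_DEBUG_* flags; the IRQ flag is the one
 * consulted by i915_edp_psr_status() above. E.g.:
 *
 *	# echo 0x10 > /sys/kernel/debug/dri/0/i915_edp_psr_debug
 */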
2287
2288static int i915_energy_uJ(struct seq_file *m, void *data)
2289{
2290	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2291	unsigned long long power;
2292	intel_wakeref_t wakeref;
2293	u32 units;
2294
2295	if (INTEL_GEN(dev_priv) < 6)
2296		return -ENODEV;
2297
2298	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
2299		return -ENODEV;
2300
2301	units = (power & 0x1f00) >> 8;
2302	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
2303		power = I915_READ(MCH_SECP_NRG_STTS);
2304
2305	power = (1000000 * power) >> units; /* convert to uJ */
2306	seq_printf(m, "%llu", power);
2307
2308	return 0;
2309}
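
/*
 * Worked example (editorial): MSR_RAPL_POWER_UNIT bits 12:8 give the
 * energy-status unit as a power of two, so with the common value of 14 each
 * MCH_SECP_NRG_STTS count is 1/2^14 J, and the conversion above yields
 * (1000000 * count) >> 14, i.e. roughly 61 uJ per count.
 */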
2310
2311static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2312{
2313	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2314	struct pci_dev *pdev = dev_priv->drm.pdev;
2315
2316	if (!HAS_RUNTIME_PM(dev_priv))
2317		seq_puts(m, "Runtime power management not supported\n");
2318
2319	seq_printf(m, "Runtime power status: %s\n",
2320		   enableddisabled(!dev_priv->power_domains.wakeref));
2321
2322	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
2323	seq_printf(m, "IRQs disabled: %s\n",
2324		   yesno(!intel_irqs_enabled(dev_priv)));
2325#ifdef CONFIG_PM
2326	seq_printf(m, "Usage count: %d\n",
2327		   atomic_read(&dev_priv->drm.dev->power.usage_count));
2328#else
2329	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2330#endif
2331	seq_printf(m, "PCI device power state: %s [%d]\n",
2332		   pci_power_name(pdev->current_state),
2333		   pdev->current_state);
2334
2335	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2336		struct drm_printer p = drm_seq_file_printer(m);
2337
2338		print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
2339	}
2340
2341	return 0;
2342}
2343
2344static int i915_power_domain_info(struct seq_file *m, void *unused)
2345{
2346	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2347	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2348	int i;
2349
2350	mutex_lock(&power_domains->lock);
2351
2352	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2353	for (i = 0; i < power_domains->power_well_count; i++) {
2354		struct i915_power_well *power_well;
2355		enum intel_display_power_domain power_domain;
2356
2357		power_well = &power_domains->power_wells[i];
2358		seq_printf(m, "%-25s %d\n", power_well->desc->name,
2359			   power_well->count);
2360
2361		for_each_power_domain(power_domain, power_well->desc->domains)
2362			seq_printf(m, "  %-23s %d\n",
2363				 intel_display_power_domain_str(dev_priv,
2364								power_domain),
2365				 power_domains->domain_use_count[power_domain]);
2366	}
2367
2368	mutex_unlock(&power_domains->lock);
2369
2370	return 0;
2371}
2372
2373static int i915_dmc_info(struct seq_file *m, void *unused)
2374{
2375	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2376	intel_wakeref_t wakeref;
2377	struct intel_csr *csr;
2378	i915_reg_t dc5_reg, dc6_reg = {};
2379
2380	if (!HAS_CSR(dev_priv))
2381		return -ENODEV;
2382
2383	csr = &dev_priv->csr;
2384
2385	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2386
2387	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2388	seq_printf(m, "path: %s\n", csr->fw_path);
2389
2390	if (!csr->dmc_payload)
2391		goto out;
2392
2393	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2394		   CSR_VERSION_MINOR(csr->version));
2395
2396	if (INTEL_GEN(dev_priv) >= 12) {
2397		dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
2398		dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
2399	} else {
2400		dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
2401						 SKL_CSR_DC3_DC5_COUNT;
2402		if (!IS_GEN9_LP(dev_priv))
2403			dc6_reg = SKL_CSR_DC5_DC6_COUNT;
2404	}
2405
2406	seq_printf(m, "DC3 -> DC5 count: %d\n", I915_READ(dc5_reg));
2407	if (dc6_reg.reg)
2408		seq_printf(m, "DC5 -> DC6 count: %d\n", I915_READ(dc6_reg));
2409
2410out:
2411	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2412	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2413	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2414
2415	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2416
2417	return 0;
2418}
2419
2420static void intel_seq_print_mode(struct seq_file *m, int tabs,
2421				 struct drm_display_mode *mode)
2422{
2423	int i;
2424
2425	for (i = 0; i < tabs; i++)
2426		seq_putc(m, '\t');
2427
2428	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2429}
2430
2431static void intel_encoder_info(struct seq_file *m,
2432			       struct intel_crtc *intel_crtc,
2433			       struct intel_encoder *intel_encoder)
2434{
2435	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2436	struct drm_device *dev = &dev_priv->drm;
2437	struct drm_crtc *crtc = &intel_crtc->base;
2438	struct intel_connector *intel_connector;
2439	struct drm_encoder *encoder;
2440
2441	encoder = &intel_encoder->base;
2442	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2443		   encoder->base.id, encoder->name);
2444	for_each_connector_on_encoder(dev, encoder, intel_connector) {
2445		struct drm_connector *connector = &intel_connector->base;
2446		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2447			   connector->base.id,
2448			   connector->name,
2449			   drm_get_connector_status_name(connector->status));
2450		if (connector->status == connector_status_connected) {
2451			struct drm_display_mode *mode = &crtc->mode;
2452			seq_printf(m, ", mode:\n");
2453			intel_seq_print_mode(m, 2, mode);
2454		} else {
2455			seq_putc(m, '\n');
2456		}
2457	}
2458}
2459
2460static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2461{
2462	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2463	struct drm_device *dev = &dev_priv->drm;
2464	struct drm_crtc *crtc = &intel_crtc->base;
2465	struct intel_encoder *intel_encoder;
2466	struct drm_plane_state *plane_state = crtc->primary->state;
2467	struct drm_framebuffer *fb = plane_state->fb;
2468
2469	if (fb)
2470		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2471			   fb->base.id, plane_state->src_x >> 16,
2472			   plane_state->src_y >> 16, fb->width, fb->height);
2473	else
2474		seq_puts(m, "\tprimary plane disabled\n");
2475	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2476		intel_encoder_info(m, intel_crtc, intel_encoder);
2477}
2478
2479static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2480{
2481	struct drm_display_mode *mode = panel->fixed_mode;
2482
2483	seq_printf(m, "\tfixed mode:\n");
2484	intel_seq_print_mode(m, 2, mode);
2485}
2486
2487static void intel_hdcp_info(struct seq_file *m,
2488			    struct intel_connector *intel_connector)
2489{
2490	bool hdcp_cap, hdcp2_cap;
2491
2492	hdcp_cap = intel_hdcp_capable(intel_connector);
2493	hdcp2_cap = intel_hdcp2_capable(intel_connector);
2494
2495	if (hdcp_cap)
2496		seq_puts(m, "HDCP1.4 ");
2497	if (hdcp2_cap)
2498		seq_puts(m, "HDCP2.2 ");
2499
2500	if (!hdcp_cap && !hdcp2_cap)
2501		seq_puts(m, "None");
2502
2503	seq_puts(m, "\n");
2504}
2505
2506static void intel_dp_info(struct seq_file *m,
2507			  struct intel_connector *intel_connector)
2508{
2509	struct intel_encoder *intel_encoder = intel_connector->encoder;
2510	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2511
2512	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2513	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2514	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2515		intel_panel_info(m, &intel_connector->panel);
2516
2517	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2518				&intel_dp->aux);
2519	if (intel_connector->hdcp.shim) {
2520		seq_puts(m, "\tHDCP version: ");
2521		intel_hdcp_info(m, intel_connector);
2522	}
2523}
2524
2525static void intel_dp_mst_info(struct seq_file *m,
2526			  struct intel_connector *intel_connector)
2527{
2528	struct intel_encoder *intel_encoder = intel_connector->encoder;
2529	struct intel_dp_mst_encoder *intel_mst =
2530		enc_to_mst(&intel_encoder->base);
2531	struct intel_digital_port *intel_dig_port = intel_mst->primary;
2532	struct intel_dp *intel_dp = &intel_dig_port->dp;
2533	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2534					intel_connector->port);
2535
2536	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2537}
2538
2539static void intel_hdmi_info(struct seq_file *m,
2540			    struct intel_connector *intel_connector)
2541{
2542	struct intel_encoder *intel_encoder = intel_connector->encoder;
2543	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2544
2545	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2546	if (intel_connector->hdcp.shim) {
2547		seq_puts(m, "\tHDCP version: ");
2548		intel_hdcp_info(m, intel_connector);
2549	}
2550}
2551
2552static void intel_lvds_info(struct seq_file *m,
2553			    struct intel_connector *intel_connector)
2554{
2555	intel_panel_info(m, &intel_connector->panel);
2556}
2557
2558static void intel_connector_info(struct seq_file *m,
2559				 struct drm_connector *connector)
2560{
2561	struct intel_connector *intel_connector = to_intel_connector(connector);
2562	struct intel_encoder *intel_encoder = intel_connector->encoder;
2563	struct drm_display_mode *mode;
2564
2565	seq_printf(m, "connector %d: type %s, status: %s\n",
2566		   connector->base.id, connector->name,
2567		   drm_get_connector_status_name(connector->status));
2568
2569	if (connector->status == connector_status_disconnected)
2570		return;
2571
2572	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2573		   connector->display_info.width_mm,
2574		   connector->display_info.height_mm);
2575	seq_printf(m, "\tsubpixel order: %s\n",
2576		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2577	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
2578
2579	if (!intel_encoder)
2580		return;
2581
2582	switch (connector->connector_type) {
2583	case DRM_MODE_CONNECTOR_DisplayPort:
2584	case DRM_MODE_CONNECTOR_eDP:
2585		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2586			intel_dp_mst_info(m, intel_connector);
2587		else
2588			intel_dp_info(m, intel_connector);
2589		break;
2590	case DRM_MODE_CONNECTOR_LVDS:
2591		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2592			intel_lvds_info(m, intel_connector);
2593		break;
2594	case DRM_MODE_CONNECTOR_HDMIA:
2595		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
2596		    intel_encoder->type == INTEL_OUTPUT_DDI)
2597			intel_hdmi_info(m, intel_connector);
2598		break;
2599	default:
2600		break;
2601	}
2602
2603	seq_printf(m, "\tmodes:\n");
2604	list_for_each_entry(mode, &connector->modes, head)
2605		intel_seq_print_mode(m, 2, mode);
2606}
2607
2608static const char *plane_type(enum drm_plane_type type)
2609{
2610	switch (type) {
2611	case DRM_PLANE_TYPE_OVERLAY:
2612		return "OVL";
2613	case DRM_PLANE_TYPE_PRIMARY:
2614		return "PRI";
2615	case DRM_PLANE_TYPE_CURSOR:
2616		return "CUR";
2617	/*
2618	 * Deliberately omitting default: to generate compiler warnings
2619	 * when a new drm_plane_type gets added.
2620	 */
2621	}
2622
2623	return "unknown";
2624}
2625
2626static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
2627{
2628	/*
2629	 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
2630	 * but print them all so that misused values are easy to spot
2631	 */
2632	snprintf(buf, bufsize,
2633		 "%s%s%s%s%s%s(0x%08x)",
2634		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
2635		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
2636		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
2637		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
2638		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
2639		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
2640		 rotation);
2641}
2642
2643static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2644{
2645	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2646	struct drm_device *dev = &dev_priv->drm;
2647	struct intel_plane *intel_plane;
2648
2649	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2650		struct drm_plane_state *state;
2651		struct drm_plane *plane = &intel_plane->base;
2652		struct drm_format_name_buf format_name;
2653		char rot_str[48];
2654
2655		if (!plane->state) {
2656			seq_puts(m, "plane->state is NULL!\n");
2657			continue;
2658		}
2659
2660		state = plane->state;
2661
2662		if (state->fb) {
2663			drm_get_format_name(state->fb->format->format,
2664					    &format_name);
2665		} else {
2666			sprintf(format_name.str, "N/A");
2667		}
2668
2669		plane_rotation(rot_str, sizeof(rot_str), state->rotation);
2670
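		/*
		 * Editorial note: src_* are 16.16 fixed point; the
		 * ((x & 0xffff) * 15625) >> 10 terms below expand the
		 * fraction as frac * 10^6 / 2^16, so e.g. 0x8000 prints
		 * as 500000, i.e. one half.
		 */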
2671		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
2672			   plane->base.id,
2673			   plane_type(intel_plane->base.type),
2674			   state->crtc_x, state->crtc_y,
2675			   state->crtc_w, state->crtc_h,
2676			   (state->src_x >> 16),
2677			   ((state->src_x & 0xffff) * 15625) >> 10,
2678			   (state->src_y >> 16),
2679			   ((state->src_y & 0xffff) * 15625) >> 10,
2680			   (state->src_w >> 16),
2681			   ((state->src_w & 0xffff) * 15625) >> 10,
2682			   (state->src_h >> 16),
2683			   ((state->src_h & 0xffff) * 15625) >> 10,
2684			   format_name.str,
2685			   rot_str);
2686	}
2687}
2688
2689static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2690{
2691	struct intel_crtc_state *pipe_config;
2692	int num_scalers = intel_crtc->num_scalers;
2693	int i;
2694
2695	pipe_config = to_intel_crtc_state(intel_crtc->base.state);
2696
2697	/* Not all platforms have a scaler */
2698	if (num_scalers) {
2699		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
2700			   num_scalers,
2701			   pipe_config->scaler_state.scaler_users,
2702			   pipe_config->scaler_state.scaler_id);
2703
2704		for (i = 0; i < num_scalers; i++) {
2705			struct intel_scaler *sc =
2706					&pipe_config->scaler_state.scalers[i];
2707
2708			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
2709				   i, yesno(sc->in_use), sc->mode);
2710		}
2711		seq_puts(m, "\n");
2712	} else {
2713		seq_puts(m, "\tNo scalers available on this platform\n");
2714	}
2715}
2716
2717static int i915_display_info(struct seq_file *m, void *unused)
2718{
2719	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2720	struct drm_device *dev = &dev_priv->drm;
2721	struct intel_crtc *crtc;
2722	struct drm_connector *connector;
2723	struct drm_connector_list_iter conn_iter;
2724	intel_wakeref_t wakeref;
2725
2726	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2727
2728	seq_printf(m, "CRTC info\n");
2729	seq_printf(m, "---------\n");
2730	for_each_intel_crtc(dev, crtc) {
2731		struct intel_crtc_state *pipe_config;
2732
2733		drm_modeset_lock(&crtc->base.mutex, NULL);
2734		pipe_config = to_intel_crtc_state(crtc->base.state);
2735
2736		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
2737			   crtc->base.base.id, pipe_name(crtc->pipe),
2738			   yesno(pipe_config->base.active),
2739			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
2740			   yesno(pipe_config->dither), pipe_config->pipe_bpp);
2741
2742		if (pipe_config->base.active) {
2743			struct intel_plane *cursor =
2744				to_intel_plane(crtc->base.cursor);
2745
2746			intel_crtc_info(m, crtc);
2747
2748			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
2749				   yesno(cursor->base.state->visible),
2750				   cursor->base.state->crtc_x,
2751				   cursor->base.state->crtc_y,
2752				   cursor->base.state->crtc_w,
2753				   cursor->base.state->crtc_h,
2754				   cursor->cursor.base);
2755			intel_scaler_info(m, crtc);
2756			intel_plane_info(m, crtc);
2757		}
2758
2759		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
2760			   yesno(!crtc->cpu_fifo_underrun_disabled),
2761			   yesno(!crtc->pch_fifo_underrun_disabled));
2762		drm_modeset_unlock(&crtc->base.mutex);
2763	}
2764
2765	seq_printf(m, "\n");
2766	seq_printf(m, "Connector info\n");
2767	seq_printf(m, "--------------\n");
2768	mutex_lock(&dev->mode_config.mutex);
2769	drm_connector_list_iter_begin(dev, &conn_iter);
2770	drm_for_each_connector_iter(connector, &conn_iter)
2771		intel_connector_info(m, connector);
2772	drm_connector_list_iter_end(&conn_iter);
2773	mutex_unlock(&dev->mode_config.mutex);
2774
2775	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2776
2777	return 0;
2778}
2779
2780static int i915_engine_info(struct seq_file *m, void *unused)
2781{
2782	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2783	struct intel_engine_cs *engine;
2784	intel_wakeref_t wakeref;
2785	struct drm_printer p;
2786
2787	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2788
2789	seq_printf(m, "GT awake? %s [%d]\n",
2790		   yesno(dev_priv->gt.awake),
2791		   atomic_read(&dev_priv->gt.wakeref.count));
2792	seq_printf(m, "CS timestamp frequency: %u kHz\n",
2793		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
2794
2795	p = drm_seq_file_printer(m);
2796	for_each_uabi_engine(engine, dev_priv)
2797		intel_engine_dump(engine, &p, "%s\n", engine->name);
2798
2799	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2800
2801	return 0;
2802}
2803
2804static int i915_rcs_topology(struct seq_file *m, void *unused)
2805{
2806	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2807	struct drm_printer p = drm_seq_file_printer(m);
2808
2809	intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
2810
2811	return 0;
2812}
2813
2814static int i915_shrinker_info(struct seq_file *m, void *unused)
2815{
2816	struct drm_i915_private *i915 = node_to_i915(m->private);
2817
2818	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
2819	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
2820
2821	return 0;
2822}
2823
2824static int i915_shared_dplls_info(struct seq_file *m, void *unused)
2825{
2826	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2827	struct drm_device *dev = &dev_priv->drm;
2828	int i;
2829
2830	drm_modeset_lock_all(dev);
2831	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
2832		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
2833
2834		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
2835			   pll->info->id);
2836		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
2837			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
2838		seq_printf(m, " tracked hardware state:\n");
2839		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
2840		seq_printf(m, " dpll_md: 0x%08x\n",
2841			   pll->state.hw_state.dpll_md);
2842		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
2843		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
2844		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
2845		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
2846		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
2847		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
2848			   pll->state.hw_state.mg_refclkin_ctl);
2849		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
2850			   pll->state.hw_state.mg_clktop2_coreclkctl1);
2851		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
2852			   pll->state.hw_state.mg_clktop2_hsclkctl);
2853		seq_printf(m, " mg_pll_div0:  0x%08x\n",
2854			   pll->state.hw_state.mg_pll_div0);
2855		seq_printf(m, " mg_pll_div1:  0x%08x\n",
2856			   pll->state.hw_state.mg_pll_div1);
2857		seq_printf(m, " mg_pll_lf:    0x%08x\n",
2858			   pll->state.hw_state.mg_pll_lf);
2859		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
2860			   pll->state.hw_state.mg_pll_frac_lock);
2861		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
2862			   pll->state.hw_state.mg_pll_ssc);
2863		seq_printf(m, " mg_pll_bias:  0x%08x\n",
2864			   pll->state.hw_state.mg_pll_bias);
2865		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
2866			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
2867	}
2868	drm_modeset_unlock_all(dev);
2869
2870	return 0;
2871}
2872
2873static int i915_wa_registers(struct seq_file *m, void *unused)
2874{
2875	struct drm_i915_private *i915 = node_to_i915(m->private);
2876	struct intel_engine_cs *engine;
2877
2878	for_each_uabi_engine(engine, i915) {
2879		const struct i915_wa_list *wal = &engine->ctx_wa_list;
2880		const struct i915_wa *wa;
2881		unsigned int count;
2882
2883		count = wal->count;
2884		if (!count)
2885			continue;
2886
2887		seq_printf(m, "%s: Workarounds applied: %u\n",
2888			   engine->name, count);
2889
2890		for (wa = wal->list; count--; wa++)
2891			seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
2892				   i915_mmio_reg_offset(wa->reg),
2893				   wa->val, wa->mask);
2894
2895		seq_printf(m, "\n");
2896	}
2897
2898	return 0;
2899}
2900
2901static int i915_ipc_status_show(struct seq_file *m, void *data)
2902{
2903	struct drm_i915_private *dev_priv = m->private;
2904
2905	seq_printf(m, "Isochronous Priority Control: %s\n",
2906			yesno(dev_priv->ipc_enabled));
2907	return 0;
2908}
2909
2910static int i915_ipc_status_open(struct inode *inode, struct file *file)
2911{
2912	struct drm_i915_private *dev_priv = inode->i_private;
2913
2914	if (!HAS_IPC(dev_priv))
2915		return -ENODEV;
2916
2917	return single_open(file, i915_ipc_status_show, dev_priv);
2918}
2919
2920static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
2921				     size_t len, loff_t *offp)
2922{
2923	struct seq_file *m = file->private_data;
2924	struct drm_i915_private *dev_priv = m->private;
2925	intel_wakeref_t wakeref;
2926	bool enable;
2927	int ret;
2928
2929	ret = kstrtobool_from_user(ubuf, len, &enable);
2930	if (ret < 0)
2931		return ret;
2932
2933	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
2934		if (!dev_priv->ipc_enabled && enable)
2935			DRM_INFO("Enabling IPC: watermarks will be correct only after the next commit\n");
2936		dev_priv->wm.distrust_bios_wm = true;
2937		dev_priv->ipc_enabled = enable;
2938		intel_enable_ipc(dev_priv);
2939	}
2940
2941	return len;
2942}
2943
2944static const struct file_operations i915_ipc_status_fops = {
2945	.owner = THIS_MODULE,
2946	.open = i915_ipc_status_open,
2947	.read = seq_read,
2948	.llseek = seq_lseek,
2949	.release = single_release,
2950	.write = i915_ipc_status_write
2951};
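
/*
 * Usage note (editorial, debugfs path assumed): kstrtobool_from_user()
 * means the write side accepts the usual boolean spellings:
 *
 *	# cat /sys/kernel/debug/dri/0/i915_ipc_status
 *	# echo 1 > /sys/kernel/debug/dri/0/i915_ipc_status
 */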
2952
2953static int i915_ddb_info(struct seq_file *m, void *unused)
2954{
2955	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2956	struct drm_device *dev = &dev_priv->drm;
2957	struct skl_ddb_entry *entry;
2958	struct intel_crtc *crtc;
2959
2960	if (INTEL_GEN(dev_priv) < 9)
2961		return -ENODEV;
2962
2963	drm_modeset_lock_all(dev);
2964
2965	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
2966
2967	for_each_intel_crtc(&dev_priv->drm, crtc) {
2968		struct intel_crtc_state *crtc_state =
2969			to_intel_crtc_state(crtc->base.state);
2970		enum pipe pipe = crtc->pipe;
2971		enum plane_id plane_id;
2972
2973		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
2974
2975		for_each_plane_id_on_crtc(crtc, plane_id) {
2976			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
2977			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
2978				   entry->start, entry->end,
2979				   skl_ddb_entry_size(entry));
2980		}
2981
2982		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
2983		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
2984			   entry->end, skl_ddb_entry_size(entry));
2985	}
2986
2987	drm_modeset_unlock_all(dev);
2988
2989	return 0;
2990}
2991
2992static void drrs_status_per_crtc(struct seq_file *m,
2993				 struct drm_device *dev,
2994				 struct intel_crtc *intel_crtc)
2995{
2996	struct drm_i915_private *dev_priv = to_i915(dev);
2997	struct i915_drrs *drrs = &dev_priv->drrs;
2998	int vrefresh = 0;
2999	struct drm_connector *connector;
3000	struct drm_connector_list_iter conn_iter;
3001
3002	drm_connector_list_iter_begin(dev, &conn_iter);
3003	drm_for_each_connector_iter(connector, &conn_iter) {
3004		if (connector->state->crtc != &intel_crtc->base)
3005			continue;
3006
3007		seq_printf(m, "%s:\n", connector->name);
3008	}
3009	drm_connector_list_iter_end(&conn_iter);
3010
3011	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3012		seq_puts(m, "\tVBT: DRRS_type: Static");
3013	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3014		seq_puts(m, "\tVBT: DRRS_type: Seamless");
3015	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3016		seq_puts(m, "\tVBT: DRRS_type: None");
3017	else
3018		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3019
3020	seq_puts(m, "\n\n");
3021
3022	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
3023		struct intel_panel *panel;
3024
3025		mutex_lock(&drrs->mutex);
3026		/* DRRS Supported */
3027		seq_puts(m, "\tDRRS Supported: Yes\n");
3028
3029		/* disable_drrs() will make drrs->dp NULL */
3030		if (!drrs->dp) {
3031			seq_puts(m, "Idleness DRRS: Disabled\n");
3032			if (dev_priv->psr.enabled)
3033				seq_puts(m,
3034				"\tAs PSR is enabled, DRRS is not enabled\n");
3035			mutex_unlock(&drrs->mutex);
3036			return;
3037		}
3038
3039		panel = &drrs->dp->attached_connector->panel;
3040		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3041					drrs->busy_frontbuffer_bits);
3042
3043		seq_puts(m, "\n\t\t");
3044		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3045			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3046			vrefresh = panel->fixed_mode->vrefresh;
3047		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3048			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3049			vrefresh = panel->downclock_mode->vrefresh;
3050		} else {
3051			seq_printf(m, "DRRS_State: Unknown(%d)\n",
3052						drrs->refresh_rate_type);
3053			mutex_unlock(&drrs->mutex);
3054			return;
3055		}
3056		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3057
3058		seq_puts(m, "\n\t\t");
3059		mutex_unlock(&drrs->mutex);
3060	} else {
3061		/* DRRS not supported. Print the VBT parameter */
3062		seq_puts(m, "\tDRRS Supported: No");
3063	}
3064	seq_puts(m, "\n");
3065}
3066
3067static int i915_drrs_status(struct seq_file *m, void *unused)
3068{
3069	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3070	struct drm_device *dev = &dev_priv->drm;
3071	struct intel_crtc *intel_crtc;
3072	int active_crtc_cnt = 0;
3073
3074	drm_modeset_lock_all(dev);
3075	for_each_intel_crtc(dev, intel_crtc) {
3076		if (intel_crtc->base.state->active) {
3077			active_crtc_cnt++;
3078			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3079
3080			drrs_status_per_crtc(m, dev, intel_crtc);
3081		}
3082	}
3083	drm_modeset_unlock_all(dev);
3084
3085	if (!active_crtc_cnt)
3086		seq_puts(m, "No active crtc found\n");
3087
3088	return 0;
3089}
3090
3091static int i915_dp_mst_info(struct seq_file *m, void *unused)
3092{
3093	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3094	struct drm_device *dev = &dev_priv->drm;
3095	struct intel_encoder *intel_encoder;
3096	struct intel_digital_port *intel_dig_port;
3097	struct drm_connector *connector;
3098	struct drm_connector_list_iter conn_iter;
3099
3100	drm_connector_list_iter_begin(dev, &conn_iter);
3101	drm_for_each_connector_iter(connector, &conn_iter) {
3102		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3103			continue;
3104
3105		intel_encoder = intel_attached_encoder(connector);
3106		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3107			continue;
3108
3109		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3110		if (!intel_dig_port->dp.can_mst)
3111			continue;
3112
3113		seq_printf(m, "MST Source Port %c\n",
3114			   port_name(intel_dig_port->base.port));
3115		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3116	}
3117	drm_connector_list_iter_end(&conn_iter);
3118
3119	return 0;
3120}
3121
3122static ssize_t i915_displayport_test_active_write(struct file *file,
3123						  const char __user *ubuf,
3124						  size_t len, loff_t *offp)
3125{
3126	char *input_buffer;
3127	int status = 0;
3128	struct drm_i915_private *dev_priv;
	struct drm_device *dev;
3129	struct drm_connector *connector;
3130	struct drm_connector_list_iter conn_iter;
3131	struct intel_dp *intel_dp;
3132	int val = 0;
3133
3134	dev_priv = ((struct seq_file *)file->private_data)->private;
	dev = &dev_priv->drm;
3135
3136	if (len == 0)
3137		return 0;
3138
3139	input_buffer = memdup_user_nul(ubuf, len);
3140	if (IS_ERR(input_buffer))
3141		return PTR_ERR(input_buffer);
3142
3143	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3144
3145	drm_connector_list_iter_begin(dev, &conn_iter);
3146	drm_for_each_connector_iter(connector, &conn_iter) {
3147		struct intel_encoder *encoder;
3148
3149		if (connector->connector_type !=
3150		    DRM_MODE_CONNECTOR_DisplayPort)
3151			continue;
3152
3153		encoder = to_intel_encoder(connector->encoder);
3154		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3155			continue;
3156
3157		if (encoder && connector->status == connector_status_connected) {
3158			intel_dp = enc_to_intel_dp(&encoder->base);
3159			status = kstrtoint(input_buffer, 10, &val);
3160			if (status < 0)
3161				break;
3162			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3163			/* To prevent erroneous activation of the compliance
3164			 * testing code, only accept an actual value of 1 here
3165			 */
3166			if (val == 1)
3167				intel_dp->compliance.test_active = 1;
3168			else
3169				intel_dp->compliance.test_active = 0;
3170		}
3171	}
3172	drm_connector_list_iter_end(&conn_iter);
3173	kfree(input_buffer);
3174	if (status < 0)
3175		return status;
3176
3177	*offp += len;
3178	return len;
3179}
3180
3181static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3182{
3183	struct drm_i915_private *dev_priv = m->private;
3184	struct drm_device *dev = &dev_priv->drm;
3185	struct drm_connector *connector;
3186	struct drm_connector_list_iter conn_iter;
3187	struct intel_dp *intel_dp;
3188
3189	drm_connector_list_iter_begin(dev, &conn_iter);
3190	drm_for_each_connector_iter(connector, &conn_iter) {
3191		struct intel_encoder *encoder;
3192
3193		if (connector->connector_type !=
3194		    DRM_MODE_CONNECTOR_DisplayPort)
3195			continue;
3196
3197		encoder = to_intel_encoder(connector->encoder);
3198		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3199			continue;
3200
3201		if (encoder && connector->status == connector_status_connected) {
3202			intel_dp = enc_to_intel_dp(&encoder->base);
3203			if (intel_dp->compliance.test_active)
3204				seq_puts(m, "1");
3205			else
3206				seq_puts(m, "0");
3207		} else
3208			seq_puts(m, "0");
3209	}
3210	drm_connector_list_iter_end(&conn_iter);
3211
3212	return 0;
3213}
3214
3215static int i915_displayport_test_active_open(struct inode *inode,
3216					     struct file *file)
3217{
3218	return single_open(file, i915_displayport_test_active_show,
3219			   inode->i_private);
3220}
3221
3222static const struct file_operations i915_displayport_test_active_fops = {
3223	.owner = THIS_MODULE,
3224	.open = i915_displayport_test_active_open,
3225	.read = seq_read,
3226	.llseek = seq_lseek,
3227	.release = single_release,
3228	.write = i915_displayport_test_active_write
3229};
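
/*
 * Usage note (editorial, debugfs path assumed): per the write handler
 * above, only a literal "1" arms compliance testing; anything else
 * disarms it:
 *
 *	# echo 1 > /sys/kernel/debug/dri/0/i915_dp_test_active
 *	# cat /sys/kernel/debug/dri/0/i915_dp_test_active
 */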
3230
3231static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3232{
3233	struct drm_i915_private *dev_priv = m->private;
3234	struct drm_device *dev = &dev_priv->drm;
3235	struct drm_connector *connector;
3236	struct drm_connector_list_iter conn_iter;
3237	struct intel_dp *intel_dp;
3238
3239	drm_connector_list_iter_begin(dev, &conn_iter);
3240	drm_for_each_connector_iter(connector, &conn_iter) {
3241		struct intel_encoder *encoder;
3242
3243		if (connector->connector_type !=
3244		    DRM_MODE_CONNECTOR_DisplayPort)
3245			continue;
3246
3247		encoder = to_intel_encoder(connector->encoder);
3248		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3249			continue;
3250
3251		if (encoder && connector->status == connector_status_connected) {
3252			intel_dp = enc_to_intel_dp(&encoder->base);
3253			if (intel_dp->compliance.test_type ==
3254			    DP_TEST_LINK_EDID_READ)
3255				seq_printf(m, "%lx",
3256					   intel_dp->compliance.test_data.edid);
3257			else if (intel_dp->compliance.test_type ==
3258				 DP_TEST_LINK_VIDEO_PATTERN) {
3259				seq_printf(m, "hdisplay: %d\n",
3260					   intel_dp->compliance.test_data.hdisplay);
3261				seq_printf(m, "vdisplay: %d\n",
3262					   intel_dp->compliance.test_data.vdisplay);
3263				seq_printf(m, "bpc: %u\n",
3264					   intel_dp->compliance.test_data.bpc);
3265			}
3266		} else
3267			seq_puts(m, "0");
3268	}
3269	drm_connector_list_iter_end(&conn_iter);
3270
3271	return 0;
3272}
3273DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3274
3275static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3276{
3277	struct drm_i915_private *dev_priv = m->private;
3278	struct drm_device *dev = &dev_priv->drm;
3279	struct drm_connector *connector;
3280	struct drm_connector_list_iter conn_iter;
3281	struct intel_dp *intel_dp;
3282
3283	drm_connector_list_iter_begin(dev, &conn_iter);
3284	drm_for_each_connector_iter(connector, &conn_iter) {
3285		struct intel_encoder *encoder;
3286
3287		if (connector->connector_type !=
3288		    DRM_MODE_CONNECTOR_DisplayPort)
3289			continue;
3290
3291		encoder = to_intel_encoder(connector->encoder);
3292		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3293			continue;
3294
3295		if (encoder && connector->status == connector_status_connected) {
3296			intel_dp = enc_to_intel_dp(&encoder->base);
3297			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3298		} else
3299			seq_puts(m, "0");
3300	}
3301	drm_connector_list_iter_end(&conn_iter);
3302
3303	return 0;
3304}
3305DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3306
3307static void wm_latency_show(struct seq_file *m, const u16 wm[8])
3308{
3309	struct drm_i915_private *dev_priv = m->private;
3310	struct drm_device *dev = &dev_priv->drm;
3311	int level;
3312	int num_levels;
3313
3314	if (IS_CHERRYVIEW(dev_priv))
3315		num_levels = 3;
3316	else if (IS_VALLEYVIEW(dev_priv))
3317		num_levels = 1;
3318	else if (IS_G4X(dev_priv))
3319		num_levels = 3;
3320	else
3321		num_levels = ilk_wm_max_level(dev_priv) + 1;
3322
3323	drm_modeset_lock_all(dev);
3324
3325	for (level = 0; level < num_levels; level++) {
3326		unsigned int latency = wm[level];
3327
3328		/*
3329		 * - WM1+ latency values in 0.5us units
3330		 * - latencies are in us on gen9/vlv/chv/g4x
3331		 */
3332		if (INTEL_GEN(dev_priv) >= 9 ||
3333		    IS_VALLEYVIEW(dev_priv) ||
3334		    IS_CHERRYVIEW(dev_priv) ||
3335		    IS_G4X(dev_priv))
3336			latency *= 10;
3337		else if (level > 0)
3338			latency *= 5;
3339
3340		seq_printf(m, "WM%d %u (%u.%u usec)\n",
3341			   level, wm[level], latency / 10, latency % 10);
3342	}
3343
3344	drm_modeset_unlock_all(dev);
3345}
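
/*
 * Illustrative output: each line shows the raw latency field followed by
 * the decoded value, e.g. on gen9+ where the raw unit is a whole
 * microsecond a value of 2 prints as
 *
 *   WM0 2 (2.0 usec)
 *
 * The *10 / *5 scaling normalizes the per-platform units to tenths of a
 * microsecond so one decimal digit falls out of latency / 10 and
 * latency % 10.
 */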
3346
3347static int pri_wm_latency_show(struct seq_file *m, void *data)
3348{
3349	struct drm_i915_private *dev_priv = m->private;
3350	const u16 *latencies;
3351
3352	if (INTEL_GEN(dev_priv) >= 9)
3353		latencies = dev_priv->wm.skl_latency;
3354	else
3355		latencies = dev_priv->wm.pri_latency;
3356
3357	wm_latency_show(m, latencies);
3358
3359	return 0;
3360}
3361
3362static int spr_wm_latency_show(struct seq_file *m, void *data)
3363{
3364	struct drm_i915_private *dev_priv = m->private;
3365	const u16 *latencies;
3366
3367	if (INTEL_GEN(dev_priv) >= 9)
3368		latencies = dev_priv->wm.skl_latency;
3369	else
3370		latencies = dev_priv->wm.spr_latency;
3371
3372	wm_latency_show(m, latencies);
3373
3374	return 0;
3375}
3376
3377static int cur_wm_latency_show(struct seq_file *m, void *data)
3378{
3379	struct drm_i915_private *dev_priv = m->private;
3380	const u16 *latencies;
3381
3382	if (INTEL_GEN(dev_priv) >= 9)
3383		latencies = dev_priv->wm.skl_latency;
3384	else
3385		latencies = dev_priv->wm.cur_latency;
3386
3387	wm_latency_show(m, latencies);
3388
3389	return 0;
3390}
3391
3392static int pri_wm_latency_open(struct inode *inode, struct file *file)
3393{
3394	struct drm_i915_private *dev_priv = inode->i_private;
3395
3396	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3397		return -ENODEV;
3398
3399	return single_open(file, pri_wm_latency_show, dev_priv);
3400}
3401
3402static int spr_wm_latency_open(struct inode *inode, struct file *file)
3403{
3404	struct drm_i915_private *dev_priv = inode->i_private;
3405
3406	if (HAS_GMCH(dev_priv))
3407		return -ENODEV;
3408
3409	return single_open(file, spr_wm_latency_show, dev_priv);
3410}
3411
3412static int cur_wm_latency_open(struct inode *inode, struct file *file)
3413{
3414	struct drm_i915_private *dev_priv = inode->i_private;
3415
3416	if (HAS_GMCH(dev_priv))
3417		return -ENODEV;
3418
3419	return single_open(file, cur_wm_latency_show, dev_priv);
3420}
3421
3422static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3423				size_t len, loff_t *offp, u16 wm[8])
3424{
3425	struct seq_file *m = file->private_data;
3426	struct drm_i915_private *dev_priv = m->private;
3427	struct drm_device *dev = &dev_priv->drm;
3428	u16 new[8] = { 0 };
3429	int num_levels;
3430	int level;
3431	int ret;
3432	char tmp[32];
3433
3434	if (IS_CHERRYVIEW(dev_priv))
3435		num_levels = 3;
3436	else if (IS_VALLEYVIEW(dev_priv))
3437		num_levels = 1;
3438	else if (IS_G4X(dev_priv))
3439		num_levels = 3;
3440	else
3441		num_levels = ilk_wm_max_level(dev_priv) + 1;
3442
3443	if (len >= sizeof(tmp))
3444		return -EINVAL;
3445
3446	if (copy_from_user(tmp, ubuf, len))
3447		return -EFAULT;
3448
3449	tmp[len] = '\0';
3450
3451	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3452		     &new[0], &new[1], &new[2], &new[3],
3453		     &new[4], &new[5], &new[6], &new[7]);
3454	if (ret != num_levels)
3455		return -EINVAL;
3456
3457	drm_modeset_lock_all(dev);
3458
3459	for (level = 0; level < num_levels; level++)
3460		wm[level] = new[level];
3461
3462	drm_modeset_unlock_all(dev);
3463
3464	return len;
3465}
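
/*
 * Usage sketch (illustrative values; path assumed): write exactly one
 * raw latency value per watermark level, space separated. sscanf() must
 * match num_levels conversions or the write fails with -EINVAL, e.g. for
 * a platform with five levels:
 *
 *   echo "2 4 6 8 10" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 */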
3466
3467
3468static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3469				    size_t len, loff_t *offp)
3470{
3471	struct seq_file *m = file->private_data;
3472	struct drm_i915_private *dev_priv = m->private;
3473	u16 *latencies;
3474
3475	if (INTEL_GEN(dev_priv) >= 9)
3476		latencies = dev_priv->wm.skl_latency;
3477	else
3478		latencies = dev_priv->wm.pri_latency;
3479
3480	return wm_latency_write(file, ubuf, len, offp, latencies);
3481}
3482
3483static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3484				    size_t len, loff_t *offp)
3485{
3486	struct seq_file *m = file->private_data;
3487	struct drm_i915_private *dev_priv = m->private;
3488	u16 *latencies;
3489
3490	if (INTEL_GEN(dev_priv) >= 9)
3491		latencies = dev_priv->wm.skl_latency;
3492	else
3493		latencies = dev_priv->wm.spr_latency;
3494
3495	return wm_latency_write(file, ubuf, len, offp, latencies);
3496}
3497
3498static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3499				    size_t len, loff_t *offp)
3500{
3501	struct seq_file *m = file->private_data;
3502	struct drm_i915_private *dev_priv = m->private;
3503	u16 *latencies;
3504
3505	if (INTEL_GEN(dev_priv) >= 9)
3506		latencies = dev_priv->wm.skl_latency;
3507	else
3508		latencies = dev_priv->wm.cur_latency;
3509
3510	return wm_latency_write(file, ubuf, len, offp, latencies);
3511}
3512
3513static const struct file_operations i915_pri_wm_latency_fops = {
3514	.owner = THIS_MODULE,
3515	.open = pri_wm_latency_open,
3516	.read = seq_read,
3517	.llseek = seq_lseek,
3518	.release = single_release,
3519	.write = pri_wm_latency_write
3520};
3521
3522static const struct file_operations i915_spr_wm_latency_fops = {
3523	.owner = THIS_MODULE,
3524	.open = spr_wm_latency_open,
3525	.read = seq_read,
3526	.llseek = seq_lseek,
3527	.release = single_release,
3528	.write = spr_wm_latency_write
3529};
3530
3531static const struct file_operations i915_cur_wm_latency_fops = {
3532	.owner = THIS_MODULE,
3533	.open = cur_wm_latency_open,
3534	.read = seq_read,
3535	.llseek = seq_lseek,
3536	.release = single_release,
3537	.write = cur_wm_latency_write
3538};
3539
3540static int
3541i915_wedged_get(void *data, u64 *val)
3542{
3543	struct drm_i915_private *i915 = data;
3544	int ret = intel_gt_terminally_wedged(&i915->gt);
3545
3546	switch (ret) {
3547	case -EIO:
3548		*val = 1;
3549		return 0;
3550	case 0:
3551		*val = 0;
3552		return 0;
3553	default:
3554		return ret;
3555	}
3556}
3557
3558static int
3559i915_wedged_set(void *data, u64 val)
3560{
3561	struct drm_i915_private *i915 = data;
3562
3563	/* Flush any previous reset before applying for a new one */
3564	wait_event(i915->gt.reset.queue,
3565		   !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));
3566
3567	intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
3568			      "Manually set wedged engine mask = %llx", val);
3569	return 0;
3570}
3571
3572DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
3573			i915_wedged_get, i915_wedged_set,
3574			"%llu\n");
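
/*
 * Usage sketch (path assumed): reading reports 1 if the GT is terminally
 * wedged and 0 otherwise; the written value is treated as an engine mask
 * and injects a hang with full error capture, so an all-ones mask hits
 * every engine:
 *
 *   cat /sys/kernel/debug/dri/0/i915_wedged
 *   echo 0xffffffff > /sys/kernel/debug/dri/0/i915_wedged
 */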
3575
3576#define DROP_UNBOUND	BIT(0)
3577#define DROP_BOUND	BIT(1)
3578#define DROP_RETIRE	BIT(2)
3579#define DROP_ACTIVE	BIT(3)
3580#define DROP_FREED	BIT(4)
3581#define DROP_SHRINK_ALL	BIT(5)
3582#define DROP_IDLE	BIT(6)
3583#define DROP_RESET_ACTIVE	BIT(7)
3584#define DROP_RESET_SEQNO	BIT(8)
3585#define DROP_ALL (DROP_UNBOUND	| \
3586		  DROP_BOUND	| \
3587		  DROP_RETIRE	| \
3588		  DROP_ACTIVE	| \
3589		  DROP_FREED	| \
3590		  DROP_SHRINK_ALL |\
3591		  DROP_IDLE	| \
3592		  DROP_RESET_ACTIVE | \
3593		  DROP_RESET_SEQNO)
3594static int
3595i915_drop_caches_get(void *data, u64 *val)
3596{
3597	*val = DROP_ALL;
3598
3599	return 0;
3600}
3601
3602static int
3603i915_drop_caches_set(void *data, u64 val)
3604{
3605	struct drm_i915_private *i915 = data;
3606
3607	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
3608		  val, val & DROP_ALL);
3609
3610	if (val & DROP_RESET_ACTIVE &&
3611	    wait_for(intel_engines_are_idle(&i915->gt),
3612		     I915_IDLE_ENGINES_TIMEOUT))
3613		intel_gt_set_wedged(&i915->gt);
3614
3615	/* No need to check and wait for GPU resets; only libdrm auto-restarts
3616	 * ioctls on -EAGAIN. */
3617	if (val & (DROP_ACTIVE | DROP_IDLE | DROP_RETIRE | DROP_RESET_SEQNO)) {
3618		int ret;
3619
3620		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
3621		if (ret)
3622			return ret;
3623
3624		/*
3625		 * To finish the flush of the idle_worker, we must complete
3626		 * the switch-to-kernel-context, which requires a double
3627		 * pass through wait_for_idle: first queues the switch,
3628		 * second waits for the switch.
3629		 */
3630		if (ret == 0 && val & (DROP_IDLE | DROP_ACTIVE))
3631			ret = i915_gem_wait_for_idle(i915,
3632						     I915_WAIT_INTERRUPTIBLE |
3633						     I915_WAIT_LOCKED,
3634						     MAX_SCHEDULE_TIMEOUT);
3635
3636		if (ret == 0 && val & DROP_IDLE)
3637			ret = i915_gem_wait_for_idle(i915,
3638						     I915_WAIT_INTERRUPTIBLE |
3639						     I915_WAIT_LOCKED,
3640						     MAX_SCHEDULE_TIMEOUT);
3641
3642		if (val & DROP_RETIRE)
3643			i915_retire_requests(i915);
3644
3645		mutex_unlock(&i915->drm.struct_mutex);
3646
3647		if (ret == 0 && val & DROP_IDLE)
3648			ret = intel_gt_pm_wait_for_idle(&i915->gt);
3649	}
3650
3651	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(&i915->gt))
3652		intel_gt_handle_error(&i915->gt, ALL_ENGINES, 0, NULL);
3653
3654	fs_reclaim_acquire(GFP_KERNEL);
3655	if (val & DROP_BOUND)
3656		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
3657
3658	if (val & DROP_UNBOUND)
3659		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
3660
3661	if (val & DROP_SHRINK_ALL)
3662		i915_gem_shrink_all(i915);
3663	fs_reclaim_release(GFP_KERNEL);
3664
3665	if (val & DROP_IDLE) {
3666		flush_delayed_work(&i915->gem.retire_work);
3667		flush_work(&i915->gem.idle_work);
3668	}
3669
3670	if (val & DROP_FREED)
3671		i915_gem_drain_freed_objects(i915);
3672
3673	return 0;
3674}
3675
3676DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
3677			i915_drop_caches_get, i915_drop_caches_set,
3678			"0x%08llx\n");
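
/*
 * Usage sketch (path assumed): reads return DROP_ALL (0x000001ff with the
 * nine bits above), so a full flush is simply the value echoed back;
 * individual DROP_* bits can be combined as needed:
 *
 *   cat /sys/kernel/debug/dri/0/i915_gem_drop_caches
 *   echo 0x1ff > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 */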
3679
3680static int
3681i915_cache_sharing_get(void *data, u64 *val)
3682{
3683	struct drm_i915_private *dev_priv = data;
3684	intel_wakeref_t wakeref;
3685	u32 snpcr = 0;
3686
3687	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
3688		return -ENODEV;
3689
3690	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
3691		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3692
3693	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
3694
3695	return 0;
3696}
3697
3698static int
3699i915_cache_sharing_set(void *data, u64 val)
3700{
3701	struct drm_i915_private *dev_priv = data;
3702	intel_wakeref_t wakeref;
3703
3704	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
3705		return -ENODEV;
3706
3707	if (val > 3)
3708		return -EINVAL;
3709
3710	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
3711	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
3712		u32 snpcr;
3713
3714		/* Update the cache sharing policy here as well */
3715		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3716		snpcr &= ~GEN6_MBC_SNPCR_MASK;
3717		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
3718		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
3719	}
3720
3721	return 0;
3722}
3723
3724DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
3725			i915_cache_sharing_get, i915_cache_sharing_set,
3726			"%llu\n");
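
/*
 * Usage sketch (gen6/gen7 only; path assumed): the value lands in the
 * 2-bit MBCUNIT snoop policy field, so only 0-3 are accepted:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_cache_sharing
 */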
3727
3728static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
3729					  struct sseu_dev_info *sseu)
3730{
3731#define SS_MAX 2
3732	const int ss_max = SS_MAX;
3733	u32 sig1[SS_MAX], sig2[SS_MAX];
3734	int ss;
3735
3736	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
3737	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
3738	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
3739	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
3740
3741	for (ss = 0; ss < ss_max; ss++) {
3742		unsigned int eu_cnt;
3743
3744		if (sig1[ss] & CHV_SS_PG_ENABLE)
3745			/* skip disabled subslice */
3746			continue;
3747
3748		sseu->slice_mask = BIT(0);
3749		sseu->subslice_mask[0] |= BIT(ss);
3750		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
3751			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
3752			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
3753			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
3754		sseu->eu_total += eu_cnt;
3755		sseu->eu_per_subslice = max_t(unsigned int,
3756					      sseu->eu_per_subslice, eu_cnt);
3757	}
3758#undef SS_MAX
3759}
3760
3761static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
3762				     struct sseu_dev_info *sseu)
3763{
3764#define SS_MAX 6
3765	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
3766	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
3767	int s, ss;
3768
3769	for (s = 0; s < info->sseu.max_slices; s++) {
3770		/*
3771		 * FIXME: Valid SS Mask respects the spec and reads
3772		 * only the valid bits for those registers, excluding
3773		 * reserved bits, although this seems wrong because it
3774		 * would leave many subslices without an ACK.
3775		 */
3776		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
3777			GEN10_PGCTL_VALID_SS_MASK(s);
3778		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
3779		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
3780	}
3781
3782	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
3783		     GEN9_PGCTL_SSA_EU19_ACK |
3784		     GEN9_PGCTL_SSA_EU210_ACK |
3785		     GEN9_PGCTL_SSA_EU311_ACK;
3786	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
3787		     GEN9_PGCTL_SSB_EU19_ACK |
3788		     GEN9_PGCTL_SSB_EU210_ACK |
3789		     GEN9_PGCTL_SSB_EU311_ACK;
3790
3791	for (s = 0; s < info->sseu.max_slices; s++) {
3792		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
3793			/* skip disabled slice */
3794			continue;
3795
3796		sseu->slice_mask |= BIT(s);
3797		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
3798
3799		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
3800			unsigned int eu_cnt;
3801
3802			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
3803				/* skip disabled subslice */
3804				continue;
3805
3806			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
3807					       eu_mask[ss % 2]);
3808			sseu->eu_total += eu_cnt;
3809			sseu->eu_per_subslice = max_t(unsigned int,
3810						      sseu->eu_per_subslice,
3811						      eu_cnt);
3812		}
3813	}
3814#undef SS_MAX
3815}
3816
3817static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
3818				    struct sseu_dev_info *sseu)
3819{
3820#define SS_MAX 3
3821	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
3822	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
3823	int s, ss;
3824
3825	for (s = 0; s < info->sseu.max_slices; s++) {
3826		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
3827		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
3828		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
3829	}
3830
3831	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
3832		     GEN9_PGCTL_SSA_EU19_ACK |
3833		     GEN9_PGCTL_SSA_EU210_ACK |
3834		     GEN9_PGCTL_SSA_EU311_ACK;
3835	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
3836		     GEN9_PGCTL_SSB_EU19_ACK |
3837		     GEN9_PGCTL_SSB_EU210_ACK |
3838		     GEN9_PGCTL_SSB_EU311_ACK;
3839
3840	for (s = 0; s < info->sseu.max_slices; s++) {
3841		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
3842			/* skip disabled slice */
3843			continue;
3844
3845		sseu->slice_mask |= BIT(s);
3846
3847		if (IS_GEN9_BC(dev_priv))
3848			sseu->subslice_mask[s] =
3849				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
3850
3851		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
3852			unsigned int eu_cnt;
3853
3854			if (IS_GEN9_LP(dev_priv)) {
3855				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
3856					/* skip disabled subslice */
3857					continue;
3858
3859				sseu->subslice_mask[s] |= BIT(ss);
3860			}
3861
3862			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
3863					       eu_mask[ss%2]);
3864			sseu->eu_total += eu_cnt;
3865			sseu->eu_per_subslice = max_t(unsigned int,
3866						      sseu->eu_per_subslice,
3867						      eu_cnt);
3868		}
3869	}
3870#undef SS_MAX
3871}
3872
3873static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
3874					 struct sseu_dev_info *sseu)
3875{
3876	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
3877	int s;
3878
3879	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
3880
3881	if (sseu->slice_mask) {
3882		sseu->eu_per_subslice =
3883			RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
3884		for (s = 0; s < fls(sseu->slice_mask); s++) {
3885			sseu->subslice_mask[s] =
3886				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
3887		}
3888		sseu->eu_total = sseu->eu_per_subslice *
3889				 intel_sseu_subslice_total(sseu);
3890
3891		/* subtract fused off EU(s) from enabled slice(s) */
3892		for (s = 0; s < fls(sseu->slice_mask); s++) {
3893			u8 subslice_7eu =
3894				RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
3895
3896			sseu->eu_total -= hweight8(subslice_7eu);
3897		}
3898	}
3899}
3900
3901static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
3902				 const struct sseu_dev_info *sseu)
3903{
3904	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3905	const char *type = is_available_info ? "Available" : "Enabled";
3906	int s;
3907
3908	seq_printf(m, "  %s Slice Mask: %04x\n", type,
3909		   sseu->slice_mask);
3910	seq_printf(m, "  %s Slice Total: %u\n", type,
3911		   hweight8(sseu->slice_mask));
3912	seq_printf(m, "  %s Subslice Total: %u\n", type,
3913		   intel_sseu_subslice_total(sseu));
3914	for (s = 0; s < fls(sseu->slice_mask); s++) {
3915		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
3916			   s, intel_sseu_subslices_per_slice(sseu, s));
3917	}
3918	seq_printf(m, "  %s EU Total: %u\n", type,
3919		   sseu->eu_total);
3920	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
3921		   sseu->eu_per_subslice);
3922
3923	if (!is_available_info)
3924		return;
3925
3926	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
3927	if (HAS_POOLED_EU(dev_priv))
3928		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
3929
3930	seq_printf(m, "  Has Slice Power Gating: %s\n",
3931		   yesno(sseu->has_slice_pg));
3932	seq_printf(m, "  Has Subslice Power Gating: %s\n",
3933		   yesno(sseu->has_subslice_pg));
3934	seq_printf(m, "  Has EU Power Gating: %s\n",
3935		   yesno(sseu->has_eu_pg));
3936}
3937
3938static int i915_sseu_status(struct seq_file *m, void *unused)
3939{
3940	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3941	struct sseu_dev_info sseu;
3942	intel_wakeref_t wakeref;
3943
3944	if (INTEL_GEN(dev_priv) < 8)
3945		return -ENODEV;
3946
3947	seq_puts(m, "SSEU Device Info\n");
3948	i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
3949
3950	seq_puts(m, "SSEU Device Status\n");
3951	memset(&sseu, 0, sizeof(sseu));
3952	sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
3953	sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
3954	sseu.max_eus_per_subslice =
3955		RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
3956
3957	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
3958		if (IS_CHERRYVIEW(dev_priv))
3959			cherryview_sseu_device_status(dev_priv, &sseu);
3960		else if (IS_BROADWELL(dev_priv))
3961			broadwell_sseu_device_status(dev_priv, &sseu);
3962		else if (IS_GEN(dev_priv, 9))
3963			gen9_sseu_device_status(dev_priv, &sseu);
3964		else if (INTEL_GEN(dev_priv) >= 10)
3965			gen10_sseu_device_status(dev_priv, &sseu);
3966	}
3967
3968	i915_print_sseu_info(m, false, &sseu);
3969
3970	return 0;
3971}
3972
3973static int i915_forcewake_open(struct inode *inode, struct file *file)
3974{
3975	struct drm_i915_private *i915 = inode->i_private;
3976
3977	if (INTEL_GEN(i915) < 6)
3978		return 0;
3979
3980	file->private_data =
3981		(void *)(uintptr_t)intel_runtime_pm_get(&i915->runtime_pm);
3982	intel_uncore_forcewake_user_get(&i915->uncore);
3983
3984	return 0;
3985}
3986
3987static int i915_forcewake_release(struct inode *inode, struct file *file)
3988{
3989	struct drm_i915_private *i915 = inode->i_private;
3990
3991	if (INTEL_GEN(i915) < 6)
3992		return 0;
3993
3994	intel_uncore_forcewake_user_put(&i915->uncore);
3995	intel_runtime_pm_put(&i915->runtime_pm,
3996			     (intel_wakeref_t)(uintptr_t)file->private_data);
3997
3998	return 0;
3999}
4000
4001static const struct file_operations i915_forcewake_fops = {
4002	.owner = THIS_MODULE,
4003	.open = i915_forcewake_open,
4004	.release = i915_forcewake_release,
4005};
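
/*
 * i915_forcewake_user holds a runtime-PM wakeref plus user forcewake for
 * as long as the file is kept open, keeping GT registers awake for
 * external pokes. A shell sketch (path assumed):
 *
 *   exec 3</sys/kernel/debug/dri/0/i915_forcewake_user   # acquire
 *   ... access registers ...
 *   exec 3<&-                                            # release
 */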
4006
4007static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4008{
4009	struct drm_i915_private *dev_priv = m->private;
4010	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4011
4012	/* Synchronize with everything first in case there's been an HPD
4013	 * storm, but we haven't finished handling it in the kernel yet
4014	 */
4015	intel_synchronize_irq(dev_priv);
4016	flush_work(&dev_priv->hotplug.dig_port_work);
4017	flush_delayed_work(&dev_priv->hotplug.hotplug_work);
4018
4019	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4020	seq_printf(m, "Detected: %s\n",
4021		   yesno(delayed_work_pending(&hotplug->reenable_work)));
4022
4023	return 0;
4024}
4025
4026static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4027					const char __user *ubuf, size_t len,
4028					loff_t *offp)
4029{
4030	struct seq_file *m = file->private_data;
4031	struct drm_i915_private *dev_priv = m->private;
4032	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4033	unsigned int new_threshold;
4034	int i;
4035	char *newline;
4036	char tmp[16];
4037
4038	if (len >= sizeof(tmp))
4039		return -EINVAL;
4040
4041	if (copy_from_user(tmp, ubuf, len))
4042		return -EFAULT;
4043
4044	tmp[len] = '\0';
4045
4046	/* Strip newline, if any */
4047	newline = strchr(tmp, '\n');
4048	if (newline)
4049		*newline = '\0';
4050
4051	if (strcmp(tmp, "reset") == 0)
4052		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4053	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4054		return -EINVAL;
4055
4056	if (new_threshold > 0)
4057		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4058			      new_threshold);
4059	else
4060		DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4061
4062	spin_lock_irq(&dev_priv->irq_lock);
4063	hotplug->hpd_storm_threshold = new_threshold;
4064	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
4065	for_each_hpd_pin(i)
4066		hotplug->stats[i].count = 0;
4067	spin_unlock_irq(&dev_priv->irq_lock);
4068
4069	/* Re-enable hpd immediately if we were in an irq storm */
4070	flush_delayed_work(&dev_priv->hotplug.reenable_work);
4071
4072	return len;
4073}
4074
4075static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4076{
4077	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4078}
4079
4080static const struct file_operations i915_hpd_storm_ctl_fops = {
4081	.owner = THIS_MODULE,
4082	.open = i915_hpd_storm_ctl_open,
4083	.read = seq_read,
4084	.llseek = seq_lseek,
4085	.release = single_release,
4086	.write = i915_hpd_storm_ctl_write
4087};
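
/*
 * Usage sketch (path assumed): write a decimal threshold to tune storm
 * detection, 0 to disable it, or "reset" to restore
 * HPD_STORM_DEFAULT_THRESHOLD:
 *
 *   echo 5 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *   echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 */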
4088
4089static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4090{
4091	struct drm_i915_private *dev_priv = m->private;
4092
4093	seq_printf(m, "Enabled: %s\n",
4094		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4095
4096	return 0;
4097}
4098
4099static int
4100i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
4101{
4102	return single_open(file, i915_hpd_short_storm_ctl_show,
4103			   inode->i_private);
4104}
4105
4106static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4107					      const char __user *ubuf,
4108					      size_t len, loff_t *offp)
4109{
4110	struct seq_file *m = file->private_data;
4111	struct drm_i915_private *dev_priv = m->private;
4112	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4113	char *newline;
4114	char tmp[16];
4115	int i;
4116	bool new_state;
4117
4118	if (len >= sizeof(tmp))
4119		return -EINVAL;
4120
4121	if (copy_from_user(tmp, ubuf, len))
4122		return -EFAULT;
4123
4124	tmp[len] = '\0';
4125
4126	/* Strip newline, if any */
4127	newline = strchr(tmp, '\n');
4128	if (newline)
4129		*newline = '\0';
4130
4131	/* Reset to the "default" state for this system */
4132	if (strcmp(tmp, "reset") == 0)
4133		new_state = !HAS_DP_MST(dev_priv);
4134	else if (kstrtobool(tmp, &new_state) != 0)
4135		return -EINVAL;
4136
4137	DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4138		      new_state ? "En" : "Dis");
4139
4140	spin_lock_irq(&dev_priv->irq_lock);
4141	hotplug->hpd_short_storm_enabled = new_state;
4142	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
4143	for_each_hpd_pin(i)
4144		hotplug->stats[i].count = 0;
4145	spin_unlock_irq(&dev_priv->irq_lock);
4146
4147	/* Re-enable hpd immediately if we were in an irq storm */
4148	flush_delayed_work(&dev_priv->hotplug.reenable_work);
4149
4150	return len;
4151}
4152
4153static const struct file_operations i915_hpd_short_storm_ctl_fops = {
4154	.owner = THIS_MODULE,
4155	.open = i915_hpd_short_storm_ctl_open,
4156	.read = seq_read,
4157	.llseek = seq_lseek,
4158	.release = single_release,
4159	.write = i915_hpd_short_storm_ctl_write,
4160};
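
/*
 * Usage sketch (path assumed): any kstrtobool() spelling toggles short
 * storm detection, and "reset" restores the default, which is enabled
 * only on hardware without DP MST:
 *
 *   echo on > /sys/kernel/debug/dri/0/i915_hpd_short_storm_ctl
 *   echo reset > /sys/kernel/debug/dri/0/i915_hpd_short_storm_ctl
 */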
4161
4162static int i915_drrs_ctl_set(void *data, u64 val)
4163{
4164	struct drm_i915_private *dev_priv = data;
4165	struct drm_device *dev = &dev_priv->drm;
4166	struct intel_crtc *crtc;
4167
4168	if (INTEL_GEN(dev_priv) < 7)
4169		return -ENODEV;
4170
4171	for_each_intel_crtc(dev, crtc) {
4172		struct drm_connector_list_iter conn_iter;
4173		struct intel_crtc_state *crtc_state;
4174		struct drm_connector *connector;
4175		struct drm_crtc_commit *commit;
4176		int ret;
4177
4178		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
4179		if (ret)
4180			return ret;
4181
4182		crtc_state = to_intel_crtc_state(crtc->base.state);
4183
4184		if (!crtc_state->base.active ||
4185		    !crtc_state->has_drrs)
4186			goto out;
4187
4188		commit = crtc_state->base.commit;
4189		if (commit) {
4190			ret = wait_for_completion_interruptible(&commit->hw_done);
4191			if (ret)
4192				goto out;
4193		}
4194
4195		drm_connector_list_iter_begin(dev, &conn_iter);
4196		drm_for_each_connector_iter(connector, &conn_iter) {
4197			struct intel_encoder *encoder;
4198			struct intel_dp *intel_dp;
4199
4200			if (!(crtc_state->base.connector_mask &
4201			      drm_connector_mask(connector)))
4202				continue;
4203
4204			encoder = intel_attached_encoder(connector);
4205			if (encoder->type != INTEL_OUTPUT_EDP)
4206				continue;
4207
4208			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4209						val ? "en" : "dis", val);
4210
4211			intel_dp = enc_to_intel_dp(&encoder->base);
4212			if (val)
4213				intel_edp_drrs_enable(intel_dp,
4214						      crtc_state);
4215			else
4216				intel_edp_drrs_disable(intel_dp,
4217						       crtc_state);
4218		}
4219		drm_connector_list_iter_end(&conn_iter);
4220
4221out:
4222		drm_modeset_unlock(&crtc->base.mutex);
4223		if (ret)
4224			return ret;
4225	}
4226
4227	return 0;
4228}
4229
4230DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
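
/*
 * Usage sketch (gen7+; path assumed): a non-zero write force-enables DRRS
 * on every active eDP pipe that supports it, zero disables it again:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_drrs_ctl
 */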
4231
4232static ssize_t
4233i915_fifo_underrun_reset_write(struct file *filp,
4234			       const char __user *ubuf,
4235			       size_t cnt, loff_t *ppos)
4236{
4237	struct drm_i915_private *dev_priv = filp->private_data;
4238	struct intel_crtc *intel_crtc;
4239	struct drm_device *dev = &dev_priv->drm;
4240	int ret;
4241	bool reset;
4242
4243	ret = kstrtobool_from_user(ubuf, cnt, &reset);
4244	if (ret)
4245		return ret;
4246
4247	if (!reset)
4248		return cnt;
4249
4250	for_each_intel_crtc(dev, intel_crtc) {
4251		struct drm_crtc_commit *commit;
4252		struct intel_crtc_state *crtc_state;
4253
4254		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
4255		if (ret)
4256			return ret;
4257
4258		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
4259		commit = crtc_state->base.commit;
4260		if (commit) {
4261			ret = wait_for_completion_interruptible(&commit->hw_done);
4262			if (!ret)
4263				ret = wait_for_completion_interruptible(&commit->flip_done);
4264		}
4265
4266		if (!ret && crtc_state->base.active) {
4267			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
4268				      pipe_name(intel_crtc->pipe));
4269
4270			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
4271		}
4272
4273		drm_modeset_unlock(&intel_crtc->base.mutex);
4274
4275		if (ret)
4276			return ret;
4277	}
4278
4279	ret = intel_fbc_reset_underrun(dev_priv);
4280	if (ret)
4281		return ret;
4282
4283	return cnt;
4284}
4285
4286static const struct file_operations i915_fifo_underrun_reset_ops = {
4287	.owner = THIS_MODULE,
4288	.open = simple_open,
4289	.write = i915_fifo_underrun_reset_write,
4290	.llseek = default_llseek,
4291};
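
/*
 * Usage sketch (path assumed): any kstrtobool() true value waits for
 * pending commits and re-arms the one-shot FIFO underrun reporting on
 * all pipes (plus FBC); false values are accepted but do nothing:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_fifo_underrun_reset
 */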
4292
4293static const struct drm_info_list i915_debugfs_list[] = {
4294	{"i915_capabilities", i915_capabilities, 0},
4295	{"i915_gem_objects", i915_gem_object_info, 0},
4296	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4297	{"i915_gem_interrupt", i915_interrupt_info, 0},
4298	{"i915_guc_info", i915_guc_info, 0},
4299	{"i915_guc_load_status", i915_guc_load_status_info, 0},
4300	{"i915_guc_log_dump", i915_guc_log_dump, 0},
4301	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
4302	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
4303	{"i915_huc_load_status", i915_huc_load_status_info, 0},
4304	{"i915_frequency_info", i915_frequency_info, 0},
4305	{"i915_hangcheck_info", i915_hangcheck_info, 0},
4306	{"i915_drpc_info", i915_drpc_info, 0},
4307	{"i915_ring_freq_table", i915_ring_freq_table, 0},
4308	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
4309	{"i915_fbc_status", i915_fbc_status, 0},
4310	{"i915_ips_status", i915_ips_status, 0},
4311	{"i915_sr_status", i915_sr_status, 0},
4312	{"i915_opregion", i915_opregion, 0},
4313	{"i915_vbt", i915_vbt, 0},
4314	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4315	{"i915_context_status", i915_context_status, 0},
4316	{"i915_forcewake_domains", i915_forcewake_domains, 0},
4317	{"i915_swizzle_info", i915_swizzle_info, 0},
4318	{"i915_llc", i915_llc, 0},
4319	{"i915_edp_psr_status", i915_edp_psr_status, 0},
4320	{"i915_energy_uJ", i915_energy_uJ, 0},
4321	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
4322	{"i915_power_domain_info", i915_power_domain_info, 0},
4323	{"i915_dmc_info", i915_dmc_info, 0},
4324	{"i915_display_info", i915_display_info, 0},
4325	{"i915_engine_info", i915_engine_info, 0},
4326	{"i915_rcs_topology", i915_rcs_topology, 0},
4327	{"i915_shrinker_info", i915_shrinker_info, 0},
4328	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4329	{"i915_dp_mst_info", i915_dp_mst_info, 0},
4330	{"i915_wa_registers", i915_wa_registers, 0},
4331	{"i915_ddb_info", i915_ddb_info, 0},
4332	{"i915_sseu_status", i915_sseu_status, 0},
4333	{"i915_drrs_status", i915_drrs_status, 0},
4334	{"i915_rps_boost_info", i915_rps_boost_info, 0},
4335};
4336#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4337
4338static const struct i915_debugfs_files {
4339	const char *name;
4340	const struct file_operations *fops;
4341} i915_debugfs_files[] = {
4342	{"i915_wedged", &i915_wedged_fops},
4343	{"i915_cache_sharing", &i915_cache_sharing_fops},
4344	{"i915_gem_drop_caches", &i915_drop_caches_fops},
4345#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
4346	{"i915_error_state", &i915_error_state_fops},
4347	{"i915_gpu_info", &i915_gpu_info_fops},
4348#endif
4349	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
4350	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4351	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4352	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4353	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
4354	{"i915_dp_test_data", &i915_displayport_test_data_fops},
4355	{"i915_dp_test_type", &i915_displayport_test_type_fops},
4356	{"i915_dp_test_active", &i915_displayport_test_active_fops},
4357	{"i915_guc_log_level", &i915_guc_log_level_fops},
4358	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
4359	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
4360	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
4361	{"i915_ipc_status", &i915_ipc_status_fops},
4362	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
4363	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
4364};
4365
4366int i915_debugfs_register(struct drm_i915_private *dev_priv)
4367{
4368	struct drm_minor *minor = dev_priv->drm.primary;
4369	int i;
4370
4371	debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
4372			    to_i915(minor->dev), &i915_forcewake_fops);
4373
4374	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4375		debugfs_create_file(i915_debugfs_files[i].name,
4376				    S_IRUGO | S_IWUSR,
4377				    minor->debugfs_root,
4378				    to_i915(minor->dev),
4379				    i915_debugfs_files[i].fops);
4380	}
4381
4382	return drm_debugfs_create_files(i915_debugfs_list,
4383					I915_DEBUGFS_ENTRIES,
4384					minor->debugfs_root, minor);
4385}
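
/*
 * Everything registered above appears under the per-minor debugfs root,
 * typically /sys/kernel/debug/dri/<minor>/: the drm_info_list entries
 * become read-only seq_file nodes, while the i915_debugfs_files entries
 * are created 0644 (S_IRUGO | S_IWUSR) so root may also write to them.
 */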
4386
4387struct dpcd_block {
4388	/* DPCD dump start address. */
4389	unsigned int offset;
4390	/* DPCD dump end address, inclusive. If unset, .size will be used. */
4391	unsigned int end;
4392	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
4393	size_t size;
4394	/* Only valid for eDP. */
4395	bool edp;
4396};
4397
4398static const struct dpcd_block i915_dpcd_debug[] = {
4399	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
4400	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
4401	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
4402	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
4403	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
4404	{ .offset = DP_SET_POWER },
4405	{ .offset = DP_EDP_DPCD_REV },
4406	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
4407	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
4408	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
4409};
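
/*
 * For each block the dump length is end - offset + 1 when .end is set,
 * otherwise .size, otherwise a single byte; e.g. { .offset = DP_SET_POWER }
 * above reads exactly one byte at DPCD address 0x600.
 */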
4410
4411static int i915_dpcd_show(struct seq_file *m, void *data)
4412{
4413	struct drm_connector *connector = m->private;
4414	struct intel_dp *intel_dp =
4415		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4416	u8 buf[16];
4417	ssize_t err;
4418	int i;
4419
4420	if (connector->status != connector_status_connected)
4421		return -ENODEV;
4422
4423	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4424		const struct dpcd_block *b = &i915_dpcd_debug[i];
4425		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4426
4427		if (b->edp &&
4428		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4429			continue;
4430
4431		/* low tech for now */
4432		if (WARN_ON(size > sizeof(buf)))
4433			continue;
4434
4435		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4436		if (err < 0)
4437			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
4438		else
4439			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
4440	}
4441
4442	return 0;
4443}
4444DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
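
/*
 * Illustrative output line: "%*ph" renders the bytes actually read as a
 * space-separated hex dump, so a receiver-caps block might print as
 *
 *   0000: 12 14 c4 81 01 01 01 01 02 02 06 00 00 00 00
 */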
4445
4446static int i915_panel_show(struct seq_file *m, void *data)
4447{
4448	struct drm_connector *connector = m->private;
4449	struct intel_dp *intel_dp =
4450		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4451
4452	if (connector->status != connector_status_connected)
4453		return -ENODEV;
4454
4455	seq_printf(m, "Panel power up delay: %d\n",
4456		   intel_dp->panel_power_up_delay);
4457	seq_printf(m, "Panel power down delay: %d\n",
4458		   intel_dp->panel_power_down_delay);
4459	seq_printf(m, "Backlight on delay: %d\n",
4460		   intel_dp->backlight_on_delay);
4461	seq_printf(m, "Backlight off delay: %d\n",
4462		   intel_dp->backlight_off_delay);
4463
4464	return 0;
4465}
4466DEFINE_SHOW_ATTRIBUTE(i915_panel);
4467
4468static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
4469{
4470	struct drm_connector *connector = m->private;
4471	struct intel_connector *intel_connector = to_intel_connector(connector);
4472
4473	if (connector->status != connector_status_connected)
4474		return -ENODEV;
4475
4476	/* HDCP is supported by the connector only if a shim is registered */
4477	if (!intel_connector->hdcp.shim)
4478		return -EINVAL;
4479
4480	seq_printf(m, "%s:%d HDCP version: ", connector->name,
4481		   connector->base.id);
4482	intel_hdcp_info(m, intel_connector);
4483
4484	return 0;
4485}
4486DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4487
4488static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
4489{
4490	struct drm_connector *connector = m->private;
4491	struct drm_device *dev = connector->dev;
4492	struct drm_crtc *crtc;
4493	struct intel_dp *intel_dp;
4494	struct drm_modeset_acquire_ctx ctx;
4495	struct intel_crtc_state *crtc_state = NULL;
4496	int ret = 0;
4497	bool try_again = false;
4498
4499	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
4500
4501	do {
4502		try_again = false;
4503		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
4504				       &ctx);
4505		if (ret) {
4506			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
4507				try_again = true;
4508				continue;
4509			}
4510			break;
4511		}
4512		crtc = connector->state->crtc;
4513		if (connector->status != connector_status_connected || !crtc) {
4514			ret = -ENODEV;
4515			break;
4516		}
4517		ret = drm_modeset_lock(&crtc->mutex, &ctx);
4518		if (ret == -EDEADLK) {
4519			ret = drm_modeset_backoff(&ctx);
4520			if (!ret) {
4521				try_again = true;
4522				continue;
4523			}
4524			break;
4525		} else if (ret) {
4526			break;
4527		}
4528		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4529		crtc_state = to_intel_crtc_state(crtc->state);
4530		seq_printf(m, "DSC_Enabled: %s\n",
4531			   yesno(crtc_state->dsc_params.compression_enable));
4532		seq_printf(m, "DSC_Sink_Support: %s\n",
4533			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
4534		seq_printf(m, "Force_DSC_Enable: %s\n",
4535			   yesno(intel_dp->force_dsc_en));
4536		if (!intel_dp_is_edp(intel_dp))
4537			seq_printf(m, "FEC_Sink_Support: %s\n",
4538				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
4539	} while (try_again);
4540
4541	drm_modeset_drop_locks(&ctx);
4542	drm_modeset_acquire_fini(&ctx);
4543
4544	return ret;
4545}
4546
4547static ssize_t i915_dsc_fec_support_write(struct file *file,
4548					  const char __user *ubuf,
4549					  size_t len, loff_t *offp)
4550{
4551	bool dsc_enable = false;
4552	int ret;
4553	struct drm_connector *connector =
4554		((struct seq_file *)file->private_data)->private;
4555	struct intel_encoder *encoder = intel_attached_encoder(connector);
4556	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4557
4558	if (len == 0)
4559		return 0;
4560
4561	DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
4562			 len);
4563
4564	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
4565	if (ret < 0)
4566		return ret;
4567
4568	DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
4569			 (dsc_enable) ? "true" : "false");
4570	intel_dp->force_dsc_en = dsc_enable;
4571
4572	*offp += len;
4573	return len;
4574}
4575
4576static int i915_dsc_fec_support_open(struct inode *inode,
4577				     struct file *file)
4578{
4579	return single_open(file, i915_dsc_fec_support_show,
4580			   inode->i_private);
4581}
4582
4583static const struct file_operations i915_dsc_fec_support_fops = {
4584	.owner = THIS_MODULE,
4585	.open = i915_dsc_fec_support_open,
4586	.read = seq_read,
4587	.llseek = seq_lseek,
4588	.release = single_release,
4589	.write = i915_dsc_fec_support_write
4590};
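
/*
 * Usage sketch (gen10+ DP/eDP connectors; connector directory name
 * assumed): writing a kstrtobool() true value forces DSC on the next
 * modeset of that connector, and reads report DSC/FEC state:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 *   cat /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 */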
4591
4592/**
4593 * i915_debugfs_connector_add - add i915 specific connector debugfs files
4594 * @connector: pointer to a registered drm_connector
4595 *
4596 * Cleanup will be done by drm_connector_unregister() through a call to
4597 * drm_debugfs_connector_remove().
4598 *
4599 * Returns 0 on success, negative error codes on error.
4600 */
4601int i915_debugfs_connector_add(struct drm_connector *connector)
4602{
4603	struct dentry *root = connector->debugfs_entry;
4604	struct drm_i915_private *dev_priv = to_i915(connector->dev);
4605
4606	/* The connector must have been registered beforehand. */
4607	if (!root)
4608		return -ENODEV;
4609
4610	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4611	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4612		debugfs_create_file("i915_dpcd", S_IRUGO, root,
4613				    connector, &i915_dpcd_fops);
4614
4615	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
4616		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4617				    connector, &i915_panel_fops);
4618		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4619				    connector, &i915_psr_sink_status_fops);
4620	}
4621
4622	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4623	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4624	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
4625		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
4626				    connector, &i915_hdcp_sink_capability_fops);
4627	}
4628
4629	if (INTEL_GEN(dev_priv) >= 10 &&
4630	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4631	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
4632		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
4633				    connector, &i915_dsc_fec_support_fops);
4634
4635	return 0;
4636}