   1/*
   2 * Copyright © 2008 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *    Eric Anholt <eric@anholt.net>
  25 *    Keith Packard <keithp@keithp.com>
  26 *
  27 */
  28
  29#include <linux/seq_file.h>
  30#include <linux/circ_buf.h>
  31#include <linux/ctype.h>
  32#include <linux/debugfs.h>
  33#include <linux/slab.h>
  34#include <linux/export.h>
  35#include <linux/list_sort.h>
  36#include <asm/msr-index.h>
  37#include <drm/drmP.h>
  38#include "intel_drv.h"
  39#include "intel_ringbuffer.h"
  40#include <drm/i915_drm.h>
  41#include "i915_drv.h"
  42
  43enum {
  44	ACTIVE_LIST,
  45	INACTIVE_LIST,
  46	PINNED_LIST,
  47};
  48
   49/* As the drm_debugfs_init() routines are called before dev->dev_private is
   50 * allocated, we need to hook into the minor for release. */
  51static int
  52drm_add_fake_info_node(struct drm_minor *minor,
  53		       struct dentry *ent,
  54		       const void *key)
  55{
  56	struct drm_info_node *node;
  57
  58	node = kmalloc(sizeof(*node), GFP_KERNEL);
  59	if (node == NULL) {
  60		debugfs_remove(ent);
  61		return -ENOMEM;
  62	}
  63
  64	node->minor = minor;
  65	node->dent = ent;
  66	node->info_ent = (void *) key;
  67
  68	mutex_lock(&minor->debugfs_lock);
  69	list_add(&node->list, &minor->debugfs_list);
  70	mutex_unlock(&minor->debugfs_lock);
  71
  72	return 0;
  73}
  74
  75static int i915_capabilities(struct seq_file *m, void *data)
  76{
  77	struct drm_info_node *node = m->private;
  78	struct drm_device *dev = node->minor->dev;
  79	const struct intel_device_info *info = INTEL_INFO(dev);
  80
  81	seq_printf(m, "gen: %d\n", info->gen);
  82	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
  83#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
  84#define SEP_SEMICOLON ;
  85	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
  86#undef PRINT_FLAG
  87#undef SEP_SEMICOLON
  88
  89	return 0;
  90}
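/*
 * Example usage (assuming the usual debugfs mount and dri minor 0):
 *   cat /sys/kernel/debug/dri/0/i915_capabilities
 * prints the gen, PCH type, and one yes/no line per device-info flag.
 */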
  91
  92static const char *get_pin_flag(struct drm_i915_gem_object *obj)
  93{
  94	if (obj->pin_display)
  95		return "p";
  96	else
  97		return " ";
  98}
  99
 100static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
 101{
 102	switch (obj->tiling_mode) {
 103	default:
 104	case I915_TILING_NONE: return " ";
 105	case I915_TILING_X: return "X";
 106	case I915_TILING_Y: return "Y";
 107	}
 108}
 109
 110static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
 111{
 112	return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
 113}
 114
 115static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
 116{
 117	u64 size = 0;
 118	struct i915_vma *vma;
 119
 120	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 121		if (vma->is_ggtt && drm_mm_node_allocated(&vma->node))
 122			size += vma->node.size;
 123	}
 124
 125	return size;
 126}
 127
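/*
 * Flag legend for describe_obj() below: '*' = active, 'p' = pinned for
 * display, 'X'/'Y' = tiling mode, 'g' = has a GGTT (global GTT) vma.
 */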
 128static void
 129describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 130{
 131	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 132	struct intel_engine_cs *ring;
 133	struct i915_vma *vma;
 134	int pin_count = 0;
 135	int i;
 136
 137	seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
 138		   &obj->base,
 139		   obj->active ? "*" : " ",
 140		   get_pin_flag(obj),
 141		   get_tiling_flag(obj),
 142		   get_global_flag(obj),
 143		   obj->base.size / 1024,
 144		   obj->base.read_domains,
 145		   obj->base.write_domain);
 146	for_each_ring(ring, dev_priv, i)
 147		seq_printf(m, "%x ",
 148				i915_gem_request_get_seqno(obj->last_read_req[i]));
 149	seq_printf(m, "] %x %x%s%s%s",
 150		   i915_gem_request_get_seqno(obj->last_write_req),
 151		   i915_gem_request_get_seqno(obj->last_fenced_req),
 152		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
 153		   obj->dirty ? " dirty" : "",
 154		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 155	if (obj->base.name)
 156		seq_printf(m, " (name: %d)", obj->base.name);
 157	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 158		if (vma->pin_count > 0)
 159			pin_count++;
 160	}
 161	seq_printf(m, " (pinned x %d)", pin_count);
 162	if (obj->pin_display)
 163		seq_printf(m, " (display)");
 164	if (obj->fence_reg != I915_FENCE_REG_NONE)
 165		seq_printf(m, " (fence: %d)", obj->fence_reg);
 166	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 167		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
 168			   vma->is_ggtt ? "g" : "pp",
 169			   vma->node.start, vma->node.size);
 170		if (vma->is_ggtt)
 171			seq_printf(m, ", type: %u", vma->ggtt_view.type);
 172		seq_puts(m, ")");
 173	}
 174	if (obj->stolen)
 175		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
 176	if (obj->pin_display || obj->fault_mappable) {
 177		char s[3], *t = s;
 178		if (obj->pin_display)
 179			*t++ = 'p';
 180		if (obj->fault_mappable)
 181			*t++ = 'f';
 182		*t = '\0';
 183		seq_printf(m, " (%s mappable)", s);
 184	}
 185	if (obj->last_write_req != NULL)
 186		seq_printf(m, " (%s)",
 187			   i915_gem_request_get_ring(obj->last_write_req)->name);
 188	if (obj->frontbuffer_bits)
 189		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
 190}
 191
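/*
 * Context flags: 'I'/'i' = legacy hw context initialized or not,
 * 'R'/'r' = remap_slice set or clear.
 */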
 192static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
 193{
 194	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
 195	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
 196	seq_putc(m, ' ');
 197}
 198
 199static int i915_gem_object_list_info(struct seq_file *m, void *data)
 200{
 201	struct drm_info_node *node = m->private;
 202	uintptr_t list = (uintptr_t) node->info_ent->data;
 203	struct list_head *head;
 204	struct drm_device *dev = node->minor->dev;
 205	struct drm_i915_private *dev_priv = dev->dev_private;
 206	struct i915_address_space *vm = &dev_priv->gtt.base;
 207	struct i915_vma *vma;
 208	u64 total_obj_size, total_gtt_size;
 209	int count, ret;
 210
 211	ret = mutex_lock_interruptible(&dev->struct_mutex);
 212	if (ret)
 213		return ret;
 214
 215	/* FIXME: the user of this interface might want more than just GGTT */
 216	switch (list) {
 217	case ACTIVE_LIST:
 218		seq_puts(m, "Active:\n");
 219		head = &vm->active_list;
 220		break;
 221	case INACTIVE_LIST:
 222		seq_puts(m, "Inactive:\n");
 223		head = &vm->inactive_list;
 224		break;
 225	default:
 226		mutex_unlock(&dev->struct_mutex);
 227		return -EINVAL;
 228	}
 229
 230	total_obj_size = total_gtt_size = count = 0;
 231	list_for_each_entry(vma, head, vm_link) {
 232		seq_printf(m, "   ");
 233		describe_obj(m, vma->obj);
 234		seq_printf(m, "\n");
 235		total_obj_size += vma->obj->base.size;
 236		total_gtt_size += vma->node.size;
 237		count++;
 238	}
 239	mutex_unlock(&dev->struct_mutex);
 240
 241	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
 242		   count, total_obj_size, total_gtt_size);
 243	return 0;
 244}
 245
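/*
 * list_sort() comparator: orders objects by their start offset within
 * stolen memory, ascending.
 */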
 246static int obj_rank_by_stolen(void *priv,
 247			      struct list_head *A, struct list_head *B)
 248{
 249	struct drm_i915_gem_object *a =
 250		container_of(A, struct drm_i915_gem_object, obj_exec_link);
 251	struct drm_i915_gem_object *b =
 252		container_of(B, struct drm_i915_gem_object, obj_exec_link);
 253
 254	if (a->stolen->start < b->stolen->start)
 255		return -1;
 256	if (a->stolen->start > b->stolen->start)
 257		return 1;
 258	return 0;
 259}
 260
 261static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 262{
 263	struct drm_info_node *node = m->private;
 264	struct drm_device *dev = node->minor->dev;
 265	struct drm_i915_private *dev_priv = dev->dev_private;
 266	struct drm_i915_gem_object *obj;
 267	u64 total_obj_size, total_gtt_size;
 268	LIST_HEAD(stolen);
 269	int count, ret;
 270
 271	ret = mutex_lock_interruptible(&dev->struct_mutex);
 272	if (ret)
 273		return ret;
 274
 275	total_obj_size = total_gtt_size = count = 0;
 276	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 277		if (obj->stolen == NULL)
 278			continue;
 279
 280		list_add(&obj->obj_exec_link, &stolen);
 281
 282		total_obj_size += obj->base.size;
 283		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
 284		count++;
 285	}
 286	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
 287		if (obj->stolen == NULL)
 288			continue;
 289
 290		list_add(&obj->obj_exec_link, &stolen);
 291
 292		total_obj_size += obj->base.size;
 293		count++;
 294	}
 295	list_sort(NULL, &stolen, obj_rank_by_stolen);
 296	seq_puts(m, "Stolen:\n");
 297	while (!list_empty(&stolen)) {
 298		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
 299		seq_puts(m, "   ");
 300		describe_obj(m, obj);
 301		seq_putc(m, '\n');
 302		list_del_init(&obj->obj_exec_link);
 303	}
 304	mutex_unlock(&dev->struct_mutex);
 305
 306	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
 307		   count, total_obj_size, total_gtt_size);
 308	return 0;
 309}
 310
 311#define count_objects(list, member) do { \
 312	list_for_each_entry(obj, list, member) { \
 313		size += i915_gem_obj_total_ggtt_size(obj); \
 314		++count; \
 315		if (obj->map_and_fenceable) { \
 316			mappable_size += i915_gem_obj_ggtt_size(obj); \
 317			++mappable_count; \
 318		} \
 319	} \
 320} while (0)
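/*
 * Note: count_objects() expands in the caller's scope and accumulates
 * into the caller's local size/count/mappable_size/mappable_count
 * variables, using the caller's obj as loop cursor.
 */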
 321
 322struct file_stats {
 323	struct drm_i915_file_private *file_priv;
 324	unsigned long count;
 325	u64 total, unbound;
 326	u64 global, shared;
 327	u64 active, inactive;
 328};
 329
 330static int per_file_stats(int id, void *ptr, void *data)
 331{
 332	struct drm_i915_gem_object *obj = ptr;
 333	struct file_stats *stats = data;
 334	struct i915_vma *vma;
 335
 336	stats->count++;
 337	stats->total += obj->base.size;
 338
 339	if (obj->base.name || obj->base.dma_buf)
 340		stats->shared += obj->base.size;
 341
 342	if (USES_FULL_PPGTT(obj->base.dev)) {
 343		list_for_each_entry(vma, &obj->vma_list, obj_link) {
 344			struct i915_hw_ppgtt *ppgtt;
 345
 346			if (!drm_mm_node_allocated(&vma->node))
 347				continue;
 348
 349			if (vma->is_ggtt) {
 350				stats->global += obj->base.size;
 351				continue;
 352			}
 353
 354			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
 355			if (ppgtt->file_priv != stats->file_priv)
 356				continue;
 357
 358			if (obj->active) /* XXX per-vma statistic */
 359				stats->active += obj->base.size;
 360			else
 361				stats->inactive += obj->base.size;
 362
 363			return 0;
 364		}
 365	} else {
 366		if (i915_gem_obj_ggtt_bound(obj)) {
 367			stats->global += obj->base.size;
 368			if (obj->active)
 369				stats->active += obj->base.size;
 370			else
 371				stats->inactive += obj->base.size;
 372			return 0;
 373		}
 374	}
 375
 376	if (!list_empty(&obj->global_list))
 377		stats->unbound += obj->base.size;
 378
 379	return 0;
 380}
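/*
 * per_file_stats() matches the int (*fn)(int id, void *p, void *data)
 * callback signature of idr_for_each(), so it can be run over a client's
 * object_idr; the id is unused, which is why print_batch_pool_stats()
 * below can also call it directly with id == 0.
 */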
 381
 382#define print_file_stats(m, name, stats) do { \
 383	if (stats.count) \
 384		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
 385			   name, \
 386			   stats.count, \
 387			   stats.total, \
 388			   stats.active, \
 389			   stats.inactive, \
 390			   stats.global, \
 391			   stats.shared, \
 392			   stats.unbound); \
 393} while (0)
 394
 395static void print_batch_pool_stats(struct seq_file *m,
 396				   struct drm_i915_private *dev_priv)
 397{
 398	struct drm_i915_gem_object *obj;
 399	struct file_stats stats;
 400	struct intel_engine_cs *ring;
 401	int i, j;
 402
 403	memset(&stats, 0, sizeof(stats));
 404
 405	for_each_ring(ring, dev_priv, i) {
 406		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
 407			list_for_each_entry(obj,
 408					    &ring->batch_pool.cache_list[j],
 409					    batch_pool_link)
 410				per_file_stats(0, obj, &stats);
 411		}
 412	}
 413
 414	print_file_stats(m, "[k]batch pool", stats);
 415}
 416
 417#define count_vmas(list, member) do { \
 418	list_for_each_entry(vma, list, member) { \
 419		size += i915_gem_obj_total_ggtt_size(vma->obj); \
 420		++count; \
 421		if (vma->obj->map_and_fenceable) { \
 422			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
 423			++mappable_count; \
 424		} \
 425	} \
 426} while (0)
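/* Like count_objects(), but walks a vma list and accounts via vma->obj. */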
 427
  428static int i915_gem_object_info(struct seq_file *m, void *data)
 429{
 430	struct drm_info_node *node = m->private;
 431	struct drm_device *dev = node->minor->dev;
 432	struct drm_i915_private *dev_priv = dev->dev_private;
 433	u32 count, mappable_count, purgeable_count;
 434	u64 size, mappable_size, purgeable_size;
 435	struct drm_i915_gem_object *obj;
 436	struct i915_address_space *vm = &dev_priv->gtt.base;
 437	struct drm_file *file;
 438	struct i915_vma *vma;
 439	int ret;
 440
 441	ret = mutex_lock_interruptible(&dev->struct_mutex);
 442	if (ret)
 443		return ret;
 444
 445	seq_printf(m, "%u objects, %zu bytes\n",
 446		   dev_priv->mm.object_count,
 447		   dev_priv->mm.object_memory);
 448
 449	size = count = mappable_size = mappable_count = 0;
 450	count_objects(&dev_priv->mm.bound_list, global_list);
 451	seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n",
 452		   count, mappable_count, size, mappable_size);
 453
 454	size = count = mappable_size = mappable_count = 0;
 455	count_vmas(&vm->active_list, vm_link);
 456	seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
 457		   count, mappable_count, size, mappable_size);
 458
 459	size = count = mappable_size = mappable_count = 0;
 460	count_vmas(&vm->inactive_list, vm_link);
 461	seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
 462		   count, mappable_count, size, mappable_size);
 463
 464	size = count = purgeable_size = purgeable_count = 0;
 465	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
 466		size += obj->base.size, ++count;
 467		if (obj->madv == I915_MADV_DONTNEED)
 468			purgeable_size += obj->base.size, ++purgeable_count;
 469	}
 470	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
 471
 472	size = count = mappable_size = mappable_count = 0;
 473	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 474		if (obj->fault_mappable) {
 475			size += i915_gem_obj_ggtt_size(obj);
 476			++count;
 477		}
 478		if (obj->pin_display) {
 479			mappable_size += i915_gem_obj_ggtt_size(obj);
 480			++mappable_count;
 481		}
 482		if (obj->madv == I915_MADV_DONTNEED) {
 483			purgeable_size += obj->base.size;
 484			++purgeable_count;
 485		}
 486	}
 487	seq_printf(m, "%u purgeable objects, %llu bytes\n",
 488		   purgeable_count, purgeable_size);
 489	seq_printf(m, "%u pinned mappable objects, %llu bytes\n",
 490		   mappable_count, mappable_size);
 491	seq_printf(m, "%u fault mappable objects, %llu bytes\n",
 492		   count, size);
 493
 494	seq_printf(m, "%llu [%llu] gtt total\n",
 495		   dev_priv->gtt.base.total,
 496		   (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
 497
 498	seq_putc(m, '\n');
 499	print_batch_pool_stats(m, dev_priv);
 500	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 501		struct file_stats stats;
 502		struct task_struct *task;
 503
 504		memset(&stats, 0, sizeof(stats));
 505		stats.file_priv = file->driver_priv;
 506		spin_lock(&file->table_lock);
 507		idr_for_each(&file->object_idr, per_file_stats, &stats);
 508		spin_unlock(&file->table_lock);
 509		/*
 510		 * Although we have a valid reference on file->pid, that does
 511		 * not guarantee that the task_struct who called get_pid() is
 512		 * still alive (e.g. get_pid(current) => fork() => exit()).
 513		 * Therefore, we need to protect this ->comm access using RCU.
 514		 */
 515		rcu_read_lock();
 516		task = pid_task(file->pid, PIDTYPE_PID);
 517		print_file_stats(m, task ? task->comm : "<unknown>", stats);
 518		rcu_read_unlock();
 519	}
 520
 521	mutex_unlock(&dev->struct_mutex);
 522
 523	return 0;
 524}
 525
 526static int i915_gem_gtt_info(struct seq_file *m, void *data)
 527{
 528	struct drm_info_node *node = m->private;
 529	struct drm_device *dev = node->minor->dev;
 530	uintptr_t list = (uintptr_t) node->info_ent->data;
 531	struct drm_i915_private *dev_priv = dev->dev_private;
 532	struct drm_i915_gem_object *obj;
 533	u64 total_obj_size, total_gtt_size;
 534	int count, ret;
 535
 536	ret = mutex_lock_interruptible(&dev->struct_mutex);
 537	if (ret)
 538		return ret;
 539
 540	total_obj_size = total_gtt_size = count = 0;
 541	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 542		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
 543			continue;
 544
 545		seq_puts(m, "   ");
 546		describe_obj(m, obj);
 547		seq_putc(m, '\n');
 548		total_obj_size += obj->base.size;
 549		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
 550		count++;
 551	}
 552
 553	mutex_unlock(&dev->struct_mutex);
 554
 555	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
 556		   count, total_obj_size, total_gtt_size);
 557
 558	return 0;
 559}
 560
 561static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 562{
 563	struct drm_info_node *node = m->private;
 564	struct drm_device *dev = node->minor->dev;
 565	struct drm_i915_private *dev_priv = dev->dev_private;
 566	struct intel_crtc *crtc;
 567	int ret;
 568
 569	ret = mutex_lock_interruptible(&dev->struct_mutex);
 570	if (ret)
 571		return ret;
 572
 573	for_each_intel_crtc(dev, crtc) {
 574		const char pipe = pipe_name(crtc->pipe);
 575		const char plane = plane_name(crtc->plane);
 576		struct intel_unpin_work *work;
 577
 578		spin_lock_irq(&dev->event_lock);
 579		work = crtc->unpin_work;
 580		if (work == NULL) {
 581			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
 582				   pipe, plane);
 583		} else {
 584			u32 addr;
 585
 586			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
 587				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
 588					   pipe, plane);
 589			} else {
 590				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
 591					   pipe, plane);
 592			}
 593			if (work->flip_queued_req) {
 594				struct intel_engine_cs *ring =
 595					i915_gem_request_get_ring(work->flip_queued_req);
 596
 597				seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
 598					   ring->name,
 599					   i915_gem_request_get_seqno(work->flip_queued_req),
 600					   dev_priv->next_seqno,
 601					   ring->get_seqno(ring, true),
 602					   i915_gem_request_completed(work->flip_queued_req, true));
 603			} else
 604				seq_printf(m, "Flip not associated with any ring\n");
 605			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
 606				   work->flip_queued_vblank,
 607				   work->flip_ready_vblank,
 608				   drm_crtc_vblank_count(&crtc->base));
 609			if (work->enable_stall_check)
 610				seq_puts(m, "Stall check enabled, ");
 611			else
 612				seq_puts(m, "Stall check waiting for page flip ioctl, ");
 613			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
 614
 615			if (INTEL_INFO(dev)->gen >= 4)
 616				addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
 617			else
 618				addr = I915_READ(DSPADDR(crtc->plane));
 619			seq_printf(m, "Current scanout address 0x%08x\n", addr);
 620
 621			if (work->pending_flip_obj) {
 622				seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
 623				seq_printf(m, "MMIO update completed? %d\n",  addr == work->gtt_offset);
 624			}
 625		}
 626		spin_unlock_irq(&dev->event_lock);
 627	}
 628
 629	mutex_unlock(&dev->struct_mutex);
 630
 631	return 0;
 632}
 633
 634static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 635{
 636	struct drm_info_node *node = m->private;
 637	struct drm_device *dev = node->minor->dev;
 638	struct drm_i915_private *dev_priv = dev->dev_private;
 639	struct drm_i915_gem_object *obj;
 640	struct intel_engine_cs *ring;
 641	int total = 0;
 642	int ret, i, j;
 643
 644	ret = mutex_lock_interruptible(&dev->struct_mutex);
 645	if (ret)
 646		return ret;
 647
 648	for_each_ring(ring, dev_priv, i) {
 649		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
 650			int count;
 651
 652			count = 0;
 653			list_for_each_entry(obj,
 654					    &ring->batch_pool.cache_list[j],
 655					    batch_pool_link)
 656				count++;
 657			seq_printf(m, "%s cache[%d]: %d objects\n",
 658				   ring->name, j, count);
 659
 660			list_for_each_entry(obj,
 661					    &ring->batch_pool.cache_list[j],
 662					    batch_pool_link) {
 663				seq_puts(m, "   ");
 664				describe_obj(m, obj);
 665				seq_putc(m, '\n');
 666			}
 667
 668			total += count;
 669		}
 670	}
 671
 672	seq_printf(m, "total: %d\n", total);
 673
 674	mutex_unlock(&dev->struct_mutex);
 675
 676	return 0;
 677}
 678
 679static int i915_gem_request_info(struct seq_file *m, void *data)
 680{
 681	struct drm_info_node *node = m->private;
 682	struct drm_device *dev = node->minor->dev;
 683	struct drm_i915_private *dev_priv = dev->dev_private;
 684	struct intel_engine_cs *ring;
 685	struct drm_i915_gem_request *req;
 686	int ret, any, i;
 687
 688	ret = mutex_lock_interruptible(&dev->struct_mutex);
 689	if (ret)
 690		return ret;
 691
 692	any = 0;
 693	for_each_ring(ring, dev_priv, i) {
 694		int count;
 695
 696		count = 0;
 697		list_for_each_entry(req, &ring->request_list, list)
 698			count++;
 699		if (count == 0)
 700			continue;
 701
 702		seq_printf(m, "%s requests: %d\n", ring->name, count);
 703		list_for_each_entry(req, &ring->request_list, list) {
 704			struct task_struct *task;
 705
 706			rcu_read_lock();
 707			task = NULL;
 708			if (req->pid)
 709				task = pid_task(req->pid, PIDTYPE_PID);
 710			seq_printf(m, "    %x @ %d: %s [%d]\n",
 711				   req->seqno,
 712				   (int) (jiffies - req->emitted_jiffies),
 713				   task ? task->comm : "<unknown>",
 714				   task ? task->pid : -1);
 715			rcu_read_unlock();
 716		}
 717
 718		any++;
 719	}
 720	mutex_unlock(&dev->struct_mutex);
 721
 722	if (any == 0)
 723		seq_puts(m, "No requests\n");
 724
 725	return 0;
 726}
 727
 728static void i915_ring_seqno_info(struct seq_file *m,
 729				 struct intel_engine_cs *ring)
 730{
 731	if (ring->get_seqno) {
 732		seq_printf(m, "Current sequence (%s): %x\n",
 733			   ring->name, ring->get_seqno(ring, false));
 734	}
 735}
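/*
 * The second get_seqno() argument is the lazy_coherency flag; false
 * requests a fully coherent read of the seqno from the status page
 * rather than a potentially stale cached value.
 */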
 736
 737static int i915_gem_seqno_info(struct seq_file *m, void *data)
 738{
 739	struct drm_info_node *node = m->private;
 740	struct drm_device *dev = node->minor->dev;
 741	struct drm_i915_private *dev_priv = dev->dev_private;
 742	struct intel_engine_cs *ring;
 743	int ret, i;
 744
 745	ret = mutex_lock_interruptible(&dev->struct_mutex);
 746	if (ret)
 747		return ret;
 748	intel_runtime_pm_get(dev_priv);
 749
 750	for_each_ring(ring, dev_priv, i)
 751		i915_ring_seqno_info(m, ring);
 752
 753	intel_runtime_pm_put(dev_priv);
 754	mutex_unlock(&dev->struct_mutex);
 755
 756	return 0;
 757}
 758
 759
 760static int i915_interrupt_info(struct seq_file *m, void *data)
 761{
 762	struct drm_info_node *node = m->private;
 763	struct drm_device *dev = node->minor->dev;
 764	struct drm_i915_private *dev_priv = dev->dev_private;
 765	struct intel_engine_cs *ring;
 766	int ret, i, pipe;
 767
 768	ret = mutex_lock_interruptible(&dev->struct_mutex);
 769	if (ret)
 770		return ret;
 771	intel_runtime_pm_get(dev_priv);
 772
 773	if (IS_CHERRYVIEW(dev)) {
 774		seq_printf(m, "Master Interrupt Control:\t%08x\n",
 775			   I915_READ(GEN8_MASTER_IRQ));
 776
 777		seq_printf(m, "Display IER:\t%08x\n",
 778			   I915_READ(VLV_IER));
 779		seq_printf(m, "Display IIR:\t%08x\n",
 780			   I915_READ(VLV_IIR));
 781		seq_printf(m, "Display IIR_RW:\t%08x\n",
 782			   I915_READ(VLV_IIR_RW));
 783		seq_printf(m, "Display IMR:\t%08x\n",
 784			   I915_READ(VLV_IMR));
 785		for_each_pipe(dev_priv, pipe)
 786			seq_printf(m, "Pipe %c stat:\t%08x\n",
 787				   pipe_name(pipe),
 788				   I915_READ(PIPESTAT(pipe)));
 789
 790		seq_printf(m, "Port hotplug:\t%08x\n",
 791			   I915_READ(PORT_HOTPLUG_EN));
 792		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
 793			   I915_READ(VLV_DPFLIPSTAT));
 794		seq_printf(m, "DPINVGTT:\t%08x\n",
 795			   I915_READ(DPINVGTT));
 796
 797		for (i = 0; i < 4; i++) {
 798			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
 799				   i, I915_READ(GEN8_GT_IMR(i)));
 800			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
 801				   i, I915_READ(GEN8_GT_IIR(i)));
 802			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
 803				   i, I915_READ(GEN8_GT_IER(i)));
 804		}
 805
 806		seq_printf(m, "PCU interrupt mask:\t%08x\n",
 807			   I915_READ(GEN8_PCU_IMR));
 808		seq_printf(m, "PCU interrupt identity:\t%08x\n",
 809			   I915_READ(GEN8_PCU_IIR));
 810		seq_printf(m, "PCU interrupt enable:\t%08x\n",
 811			   I915_READ(GEN8_PCU_IER));
 812	} else if (INTEL_INFO(dev)->gen >= 8) {
 813		seq_printf(m, "Master Interrupt Control:\t%08x\n",
 814			   I915_READ(GEN8_MASTER_IRQ));
 815
 816		for (i = 0; i < 4; i++) {
 817			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
 818				   i, I915_READ(GEN8_GT_IMR(i)));
 819			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
 820				   i, I915_READ(GEN8_GT_IIR(i)));
 821			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
 822				   i, I915_READ(GEN8_GT_IER(i)));
 823		}
 824
 825		for_each_pipe(dev_priv, pipe) {
 826			enum intel_display_power_domain power_domain;
 827
 828			power_domain = POWER_DOMAIN_PIPE(pipe);
 829			if (!intel_display_power_get_if_enabled(dev_priv,
 830								power_domain)) {
 831				seq_printf(m, "Pipe %c power disabled\n",
 832					   pipe_name(pipe));
 833				continue;
 834			}
 835			seq_printf(m, "Pipe %c IMR:\t%08x\n",
 836				   pipe_name(pipe),
 837				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
 838			seq_printf(m, "Pipe %c IIR:\t%08x\n",
 839				   pipe_name(pipe),
 840				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
 841			seq_printf(m, "Pipe %c IER:\t%08x\n",
 842				   pipe_name(pipe),
 843				   I915_READ(GEN8_DE_PIPE_IER(pipe)));
 844
 845			intel_display_power_put(dev_priv, power_domain);
 846		}
 847
 848		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
 849			   I915_READ(GEN8_DE_PORT_IMR));
 850		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
 851			   I915_READ(GEN8_DE_PORT_IIR));
 852		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
 853			   I915_READ(GEN8_DE_PORT_IER));
 854
 855		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
 856			   I915_READ(GEN8_DE_MISC_IMR));
 857		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
 858			   I915_READ(GEN8_DE_MISC_IIR));
 859		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
 860			   I915_READ(GEN8_DE_MISC_IER));
 861
 862		seq_printf(m, "PCU interrupt mask:\t%08x\n",
 863			   I915_READ(GEN8_PCU_IMR));
 864		seq_printf(m, "PCU interrupt identity:\t%08x\n",
 865			   I915_READ(GEN8_PCU_IIR));
 866		seq_printf(m, "PCU interrupt enable:\t%08x\n",
 867			   I915_READ(GEN8_PCU_IER));
 868	} else if (IS_VALLEYVIEW(dev)) {
 869		seq_printf(m, "Display IER:\t%08x\n",
 870			   I915_READ(VLV_IER));
 871		seq_printf(m, "Display IIR:\t%08x\n",
 872			   I915_READ(VLV_IIR));
 873		seq_printf(m, "Display IIR_RW:\t%08x\n",
 874			   I915_READ(VLV_IIR_RW));
 875		seq_printf(m, "Display IMR:\t%08x\n",
 876			   I915_READ(VLV_IMR));
 877		for_each_pipe(dev_priv, pipe)
 878			seq_printf(m, "Pipe %c stat:\t%08x\n",
 879				   pipe_name(pipe),
 880				   I915_READ(PIPESTAT(pipe)));
 881
 882		seq_printf(m, "Master IER:\t%08x\n",
 883			   I915_READ(VLV_MASTER_IER));
 884
 885		seq_printf(m, "Render IER:\t%08x\n",
 886			   I915_READ(GTIER));
 887		seq_printf(m, "Render IIR:\t%08x\n",
 888			   I915_READ(GTIIR));
 889		seq_printf(m, "Render IMR:\t%08x\n",
 890			   I915_READ(GTIMR));
 891
 892		seq_printf(m, "PM IER:\t\t%08x\n",
 893			   I915_READ(GEN6_PMIER));
 894		seq_printf(m, "PM IIR:\t\t%08x\n",
 895			   I915_READ(GEN6_PMIIR));
 896		seq_printf(m, "PM IMR:\t\t%08x\n",
 897			   I915_READ(GEN6_PMIMR));
 898
 899		seq_printf(m, "Port hotplug:\t%08x\n",
 900			   I915_READ(PORT_HOTPLUG_EN));
 901		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
 902			   I915_READ(VLV_DPFLIPSTAT));
 903		seq_printf(m, "DPINVGTT:\t%08x\n",
 904			   I915_READ(DPINVGTT));
 905
 906	} else if (!HAS_PCH_SPLIT(dev)) {
 907		seq_printf(m, "Interrupt enable:    %08x\n",
 908			   I915_READ(IER));
 909		seq_printf(m, "Interrupt identity:  %08x\n",
 910			   I915_READ(IIR));
 911		seq_printf(m, "Interrupt mask:      %08x\n",
 912			   I915_READ(IMR));
 913		for_each_pipe(dev_priv, pipe)
 914			seq_printf(m, "Pipe %c stat:         %08x\n",
 915				   pipe_name(pipe),
 916				   I915_READ(PIPESTAT(pipe)));
 917	} else {
 918		seq_printf(m, "North Display Interrupt enable:		%08x\n",
 919			   I915_READ(DEIER));
 920		seq_printf(m, "North Display Interrupt identity:	%08x\n",
 921			   I915_READ(DEIIR));
 922		seq_printf(m, "North Display Interrupt mask:		%08x\n",
 923			   I915_READ(DEIMR));
 924		seq_printf(m, "South Display Interrupt enable:		%08x\n",
 925			   I915_READ(SDEIER));
 926		seq_printf(m, "South Display Interrupt identity:	%08x\n",
 927			   I915_READ(SDEIIR));
 928		seq_printf(m, "South Display Interrupt mask:		%08x\n",
 929			   I915_READ(SDEIMR));
 930		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
 931			   I915_READ(GTIER));
 932		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
 933			   I915_READ(GTIIR));
 934		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
 935			   I915_READ(GTIMR));
 936	}
 937	for_each_ring(ring, dev_priv, i) {
 938		if (INTEL_INFO(dev)->gen >= 6) {
 939			seq_printf(m,
 940				   "Graphics Interrupt mask (%s):	%08x\n",
 941				   ring->name, I915_READ_IMR(ring));
 942		}
 943		i915_ring_seqno_info(m, ring);
 944	}
 945	intel_runtime_pm_put(dev_priv);
 946	mutex_unlock(&dev->struct_mutex);
 947
 948	return 0;
 949}
 950
 951static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 952{
 953	struct drm_info_node *node = m->private;
 954	struct drm_device *dev = node->minor->dev;
 955	struct drm_i915_private *dev_priv = dev->dev_private;
 956	int i, ret;
 957
 958	ret = mutex_lock_interruptible(&dev->struct_mutex);
 959	if (ret)
 960		return ret;
 961
 962	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
 963	for (i = 0; i < dev_priv->num_fence_regs; i++) {
 964		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
 965
 966		seq_printf(m, "Fence %d, pin count = %d, object = ",
 967			   i, dev_priv->fence_regs[i].pin_count);
 968		if (obj == NULL)
 969			seq_puts(m, "unused");
 970		else
 971			describe_obj(m, obj);
 972		seq_putc(m, '\n');
 973	}
 974
 975	mutex_unlock(&dev->struct_mutex);
 976	return 0;
 977}
 978
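/*
 * Dumps the first 1 KiB (256 dwords) of the selected ring's hardware
 * status page, four dwords per line, prefixed with the byte offset.
 */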
 979static int i915_hws_info(struct seq_file *m, void *data)
 980{
 981	struct drm_info_node *node = m->private;
 982	struct drm_device *dev = node->minor->dev;
 983	struct drm_i915_private *dev_priv = dev->dev_private;
 984	struct intel_engine_cs *ring;
 985	const u32 *hws;
 986	int i;
 987
 988	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
 989	hws = ring->status_page.page_addr;
 990	if (hws == NULL)
 991		return 0;
 992
 993	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
 994		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
 995			   i * 4,
 996			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
 997	}
 998	return 0;
 999}
1000
1001static ssize_t
1002i915_error_state_write(struct file *filp,
1003		       const char __user *ubuf,
1004		       size_t cnt,
1005		       loff_t *ppos)
1006{
1007	struct i915_error_state_file_priv *error_priv = filp->private_data;
1008	struct drm_device *dev = error_priv->dev;
1009	int ret;
1010
1011	DRM_DEBUG_DRIVER("Resetting error state\n");
1012
1013	ret = mutex_lock_interruptible(&dev->struct_mutex);
1014	if (ret)
1015		return ret;
1016
1017	i915_destroy_error_state(dev);
1018	mutex_unlock(&dev->struct_mutex);
1019
1020	return cnt;
1021}
1022
1023static int i915_error_state_open(struct inode *inode, struct file *file)
1024{
1025	struct drm_device *dev = inode->i_private;
1026	struct i915_error_state_file_priv *error_priv;
1027
1028	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
1029	if (!error_priv)
1030		return -ENOMEM;
1031
1032	error_priv->dev = dev;
1033
1034	i915_error_state_get(dev, error_priv);
1035
1036	file->private_data = error_priv;
1037
1038	return 0;
1039}
1040
1041static int i915_error_state_release(struct inode *inode, struct file *file)
1042{
1043	struct i915_error_state_file_priv *error_priv = file->private_data;
1044
1045	i915_error_state_put(error_priv);
1046	kfree(error_priv);
1047
1048	return 0;
1049}
1050
1051static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
1052				     size_t count, loff_t *pos)
1053{
1054	struct i915_error_state_file_priv *error_priv = file->private_data;
1055	struct drm_i915_error_state_buf error_str;
1056	loff_t tmp_pos = 0;
1057	ssize_t ret_count = 0;
1058	int ret;
1059
1060	ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
1061	if (ret)
1062		return ret;
1063
1064	ret = i915_error_state_to_str(&error_str, error_priv);
1065	if (ret)
1066		goto out;
1067
1068	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
1069					    error_str.buf,
1070					    error_str.bytes);
1071
1072	if (ret_count < 0)
1073		ret = ret_count;
1074	else
1075		*pos = error_str.start + ret_count;
1076out:
1077	i915_error_state_buf_release(&error_str);
1078	return ret ?: ret_count;
1079}
1080
1081static const struct file_operations i915_error_state_fops = {
1082	.owner = THIS_MODULE,
1083	.open = i915_error_state_open,
1084	.read = i915_error_state_read,
1085	.write = i915_error_state_write,
1086	.llseek = default_llseek,
1087	.release = i915_error_state_release,
1088};
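/*
 * Example usage: reading i915_error_state from the dri debugfs directory
 * dumps the last captured GPU error state, while writing anything to it,
 * e.g. "echo 1 > /sys/kernel/debug/dri/0/i915_error_state", clears the
 * saved state via i915_error_state_write() above.
 */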
1089
1090static int
1091i915_next_seqno_get(void *data, u64 *val)
1092{
1093	struct drm_device *dev = data;
1094	struct drm_i915_private *dev_priv = dev->dev_private;
1095	int ret;
1096
1097	ret = mutex_lock_interruptible(&dev->struct_mutex);
1098	if (ret)
1099		return ret;
1100
1101	*val = dev_priv->next_seqno;
1102	mutex_unlock(&dev->struct_mutex);
1103
1104	return 0;
1105}
1106
1107static int
1108i915_next_seqno_set(void *data, u64 val)
1109{
1110	struct drm_device *dev = data;
1111	int ret;
1112
1113	ret = mutex_lock_interruptible(&dev->struct_mutex);
1114	if (ret)
1115		return ret;
1116
1117	ret = i915_gem_set_seqno(dev, val);
1118	mutex_unlock(&dev->struct_mutex);
1119
1120	return ret;
1121}
1122
1123DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
1124			i915_next_seqno_get, i915_next_seqno_set,
1125			"0x%llx\n");
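/*
 * i915_next_seqno is a read/write attribute: reads report
 * dev_priv->next_seqno in "0x%llx" format, writes feed the value to
 * i915_gem_set_seqno().
 */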
1126
1127static int i915_frequency_info(struct seq_file *m, void *unused)
1128{
1129	struct drm_info_node *node = m->private;
1130	struct drm_device *dev = node->minor->dev;
1131	struct drm_i915_private *dev_priv = dev->dev_private;
1132	int ret = 0;
1133
1134	intel_runtime_pm_get(dev_priv);
1135
1136	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1137
1138	if (IS_GEN5(dev)) {
1139		u16 rgvswctl = I915_READ16(MEMSWCTL);
1140		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1141
1142		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1143		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1144		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1145			   MEMSTAT_VID_SHIFT);
1146		seq_printf(m, "Current P-state: %d\n",
1147			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
1148	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
1149		u32 freq_sts;
1150
1151		mutex_lock(&dev_priv->rps.hw_lock);
1152		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1153		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1154		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1155
1156		seq_printf(m, "actual GPU freq: %d MHz\n",
1157			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1158
1159		seq_printf(m, "current GPU freq: %d MHz\n",
1160			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
1161
1162		seq_printf(m, "max GPU freq: %d MHz\n",
1163			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
1164
1165		seq_printf(m, "min GPU freq: %d MHz\n",
1166			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
1167
1168		seq_printf(m, "idle GPU freq: %d MHz\n",
1169			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
1170
1171		seq_printf(m,
1172			   "efficient (RPe) frequency: %d MHz\n",
1173			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
1174		mutex_unlock(&dev_priv->rps.hw_lock);
1175	} else if (INTEL_INFO(dev)->gen >= 6) {
1176		u32 rp_state_limits;
1177		u32 gt_perf_status;
1178		u32 rp_state_cap;
1179		u32 rpmodectl, rpinclimit, rpdeclimit;
1180		u32 rpstat, cagf, reqf;
1181		u32 rpupei, rpcurup, rpprevup;
1182		u32 rpdownei, rpcurdown, rpprevdown;
1183		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
1184		int max_freq;
1185
1186		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
1187		if (IS_BROXTON(dev)) {
1188			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1189			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1190		} else {
1191			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1192			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1193		}
1194
1195		/* RPSTAT1 is in the GT power well */
1196		ret = mutex_lock_interruptible(&dev->struct_mutex);
1197		if (ret)
1198			goto out;
1199
1200		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1201
1202		reqf = I915_READ(GEN6_RPNSWREQ);
1203		if (IS_GEN9(dev))
1204			reqf >>= 23;
1205		else {
1206			reqf &= ~GEN6_TURBO_DISABLE;
1207			if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1208				reqf >>= 24;
1209			else
1210				reqf >>= 25;
1211		}
1212		reqf = intel_gpu_freq(dev_priv, reqf);
1213
1214		rpmodectl = I915_READ(GEN6_RP_CONTROL);
1215		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1216		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1217
1218		rpstat = I915_READ(GEN6_RPSTAT1);
1219		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
1220		rpcurup = I915_READ(GEN6_RP_CUR_UP);
1221		rpprevup = I915_READ(GEN6_RP_PREV_UP);
1222		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
1223		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
1224		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
1225		if (IS_GEN9(dev))
1226			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
1227		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1228			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
1229		else
1230			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
1231		cagf = intel_gpu_freq(dev_priv, cagf);
1232
1233		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1234		mutex_unlock(&dev->struct_mutex);
1235
1236		if (IS_GEN6(dev) || IS_GEN7(dev)) {
1237			pm_ier = I915_READ(GEN6_PMIER);
1238			pm_imr = I915_READ(GEN6_PMIMR);
1239			pm_isr = I915_READ(GEN6_PMISR);
1240			pm_iir = I915_READ(GEN6_PMIIR);
1241			pm_mask = I915_READ(GEN6_PMINTRMSK);
1242		} else {
1243			pm_ier = I915_READ(GEN8_GT_IER(2));
1244			pm_imr = I915_READ(GEN8_GT_IMR(2));
1245			pm_isr = I915_READ(GEN8_GT_ISR(2));
1246			pm_iir = I915_READ(GEN8_GT_IIR(2));
1247			pm_mask = I915_READ(GEN6_PMINTRMSK);
1248		}
1249		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
1250			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
1251		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
1252		seq_printf(m, "Render p-state ratio: %d\n",
1253			   (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
1254		seq_printf(m, "Render p-state VID: %d\n",
1255			   gt_perf_status & 0xff);
1256		seq_printf(m, "Render p-state limit: %d\n",
1257			   rp_state_limits & 0xff);
1258		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1259		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1260		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1261		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
1262		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
1263		seq_printf(m, "CAGF: %dMHz\n", cagf);
1264		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
1265			   GEN6_CURICONT_MASK);
1266		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
1267			   GEN6_CURBSYTAVG_MASK);
1268		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
1269			   GEN6_CURBSYTAVG_MASK);
1270		seq_printf(m, "Up threshold: %d%%\n",
1271			   dev_priv->rps.up_threshold);
1272
1273		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
1274			   GEN6_CURIAVG_MASK);
1275		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
1276			   GEN6_CURBSYTAVG_MASK);
1277		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
1278			   GEN6_CURBSYTAVG_MASK);
1279		seq_printf(m, "Down threshold: %d%%\n",
1280			   dev_priv->rps.down_threshold);
1281
1282		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
1283			    rp_state_cap >> 16) & 0xff;
1284		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
1285			     GEN9_FREQ_SCALER : 1);
1286		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
1287			   intel_gpu_freq(dev_priv, max_freq));
1288
1289		max_freq = (rp_state_cap & 0xff00) >> 8;
1290		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
1291			     GEN9_FREQ_SCALER : 1);
1292		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
1293			   intel_gpu_freq(dev_priv, max_freq));
1294
1295		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
1296			    rp_state_cap >> 0) & 0xff;
1297		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
1298			     GEN9_FREQ_SCALER : 1);
1299		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
1300			   intel_gpu_freq(dev_priv, max_freq));
1301		seq_printf(m, "Max overclocked frequency: %dMHz\n",
1302			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
1303
1304		seq_printf(m, "Current freq: %d MHz\n",
1305			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
1306		seq_printf(m, "Actual freq: %d MHz\n", cagf);
1307		seq_printf(m, "Idle freq: %d MHz\n",
1308			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
1309		seq_printf(m, "Min freq: %d MHz\n",
1310			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
1311		seq_printf(m, "Max freq: %d MHz\n",
1312			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
1313		seq_printf(m,
1314			   "efficient (RPe) frequency: %d MHz\n",
1315			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
1316	} else {
1317		seq_puts(m, "no P-state info available\n");
1318	}
1319
1320	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk_freq);
1321	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1322	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1323
1324out:
1325	intel_runtime_pm_put(dev_priv);
1326	return ret;
1327}
1328
1329static int i915_hangcheck_info(struct seq_file *m, void *unused)
1330{
1331	struct drm_info_node *node = m->private;
1332	struct drm_device *dev = node->minor->dev;
1333	struct drm_i915_private *dev_priv = dev->dev_private;
1334	struct intel_engine_cs *ring;
1335	u64 acthd[I915_NUM_RINGS];
1336	u32 seqno[I915_NUM_RINGS];
1337	u32 instdone[I915_NUM_INSTDONE_REG];
1338	int i, j;
1339
1340	if (!i915.enable_hangcheck) {
1341		seq_printf(m, "Hangcheck disabled\n");
1342		return 0;
1343	}
1344
1345	intel_runtime_pm_get(dev_priv);
1346
1347	for_each_ring(ring, dev_priv, i) {
1348		seqno[i] = ring->get_seqno(ring, false);
1349		acthd[i] = intel_ring_get_active_head(ring);
1350	}
1351
1352	i915_get_extra_instdone(dev, instdone);
1353
1354	intel_runtime_pm_put(dev_priv);
1355
1356	if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
1357		seq_printf(m, "Hangcheck active, fires in %dms\n",
1358			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1359					    jiffies));
1360	} else
1361		seq_printf(m, "Hangcheck inactive\n");
1362
1363	for_each_ring(ring, dev_priv, i) {
1364		seq_printf(m, "%s:\n", ring->name);
1365		seq_printf(m, "\tseqno = %x [current %x]\n",
1366			   ring->hangcheck.seqno, seqno[i]);
1367		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
1368			   (long long)ring->hangcheck.acthd,
1369			   (long long)acthd[i]);
1370		seq_printf(m, "\tmax ACTHD = 0x%08llx\n",
1371			   (long long)ring->hangcheck.max_acthd);
1372		seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
1373		seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
1374
1375		if (ring->id == RCS) {
1376			seq_puts(m, "\tinstdone read =");
1377
1378			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
1379				seq_printf(m, " 0x%08x", instdone[j]);
1380
1381			seq_puts(m, "\n\tinstdone accu =");
1382
1383			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
1384				seq_printf(m, " 0x%08x",
1385					   ring->hangcheck.instdone[j]);
1386
1387			seq_puts(m, "\n");
1388		}
1389	}
1390
1391	return 0;
1392}
1393
1394static int ironlake_drpc_info(struct seq_file *m)
1395{
1396	struct drm_info_node *node = m->private;
1397	struct drm_device *dev = node->minor->dev;
1398	struct drm_i915_private *dev_priv = dev->dev_private;
1399	u32 rgvmodectl, rstdbyctl;
1400	u16 crstandvid;
1401	int ret;
1402
1403	ret = mutex_lock_interruptible(&dev->struct_mutex);
1404	if (ret)
1405		return ret;
1406	intel_runtime_pm_get(dev_priv);
1407
1408	rgvmodectl = I915_READ(MEMMODECTL);
1409	rstdbyctl = I915_READ(RSTDBYCTL);
1410	crstandvid = I915_READ16(CRSTANDVID);
1411
1412	intel_runtime_pm_put(dev_priv);
1413	mutex_unlock(&dev->struct_mutex);
1414
1415	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1416	seq_printf(m, "Boost freq: %d\n",
1417		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1418		   MEMMODE_BOOST_FREQ_SHIFT);
1419	seq_printf(m, "HW control enabled: %s\n",
1420		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1421	seq_printf(m, "SW control enabled: %s\n",
1422		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1423	seq_printf(m, "Gated voltage change: %s\n",
1424		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1425	seq_printf(m, "Starting frequency: P%d\n",
1426		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1427	seq_printf(m, "Max P-state: P%d\n",
1428		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1429	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1430	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1431	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1432	seq_printf(m, "Render standby enabled: %s\n",
1433		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
1434	seq_puts(m, "Current RS state: ");
1435	switch (rstdbyctl & RSX_STATUS_MASK) {
1436	case RSX_STATUS_ON:
1437		seq_puts(m, "on\n");
1438		break;
1439	case RSX_STATUS_RC1:
1440		seq_puts(m, "RC1\n");
1441		break;
1442	case RSX_STATUS_RC1E:
1443		seq_puts(m, "RC1E\n");
1444		break;
1445	case RSX_STATUS_RS1:
1446		seq_puts(m, "RS1\n");
1447		break;
1448	case RSX_STATUS_RS2:
1449		seq_puts(m, "RS2 (RC6)\n");
1450		break;
1451	case RSX_STATUS_RS3:
1452		seq_puts(m, "RC3 (RC6+)\n");
1453		break;
1454	default:
1455		seq_puts(m, "unknown\n");
1456		break;
1457	}
1458
1459	return 0;
1460}
1461
1462static int i915_forcewake_domains(struct seq_file *m, void *data)
1463{
1464	struct drm_info_node *node = m->private;
1465	struct drm_device *dev = node->minor->dev;
1466	struct drm_i915_private *dev_priv = dev->dev_private;
1467	struct intel_uncore_forcewake_domain *fw_domain;
1468	int i;
1469
1470	spin_lock_irq(&dev_priv->uncore.lock);
1471	for_each_fw_domain(fw_domain, dev_priv, i) {
1472		seq_printf(m, "%s.wake_count = %u\n",
1473			   intel_uncore_forcewake_domain_to_str(i),
1474			   fw_domain->wake_count);
1475	}
1476	spin_unlock_irq(&dev_priv->uncore.lock);
1477
1478	return 0;
1479}
1480
1481static int vlv_drpc_info(struct seq_file *m)
1482{
1483	struct drm_info_node *node = m->private;
1484	struct drm_device *dev = node->minor->dev;
1485	struct drm_i915_private *dev_priv = dev->dev_private;
1486	u32 rpmodectl1, rcctl1, pw_status;
1487
1488	intel_runtime_pm_get(dev_priv);
1489
1490	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1491	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1492	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1493
1494	intel_runtime_pm_put(dev_priv);
1495
1496	seq_printf(m, "Video Turbo Mode: %s\n",
1497		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1498	seq_printf(m, "Turbo enabled: %s\n",
1499		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
1500	seq_printf(m, "HW control enabled: %s\n",
1501		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
1502	seq_printf(m, "SW control enabled: %s\n",
1503		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1504			  GEN6_RP_MEDIA_SW_MODE));
1505	seq_printf(m, "RC6 Enabled: %s\n",
1506		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1507					GEN6_RC_CTL_EI_MODE(1))));
1508	seq_printf(m, "Render Power Well: %s\n",
1509		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1510	seq_printf(m, "Media Power Well: %s\n",
1511		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1512
1513	seq_printf(m, "Render RC6 residency since boot: %u\n",
1514		   I915_READ(VLV_GT_RENDER_RC6));
1515	seq_printf(m, "Media RC6 residency since boot: %u\n",
1516		   I915_READ(VLV_GT_MEDIA_RC6));
1517
1518	return i915_forcewake_domains(m, NULL);
1519}
1520
1521static int gen6_drpc_info(struct seq_file *m)
1522{
1523	struct drm_info_node *node = m->private;
1524	struct drm_device *dev = node->minor->dev;
1525	struct drm_i915_private *dev_priv = dev->dev_private;
1526	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
1527	unsigned forcewake_count;
1528	int count = 0, ret;
1529
1530	ret = mutex_lock_interruptible(&dev->struct_mutex);
1531	if (ret)
1532		return ret;
1533	intel_runtime_pm_get(dev_priv);
1534
1535	spin_lock_irq(&dev_priv->uncore.lock);
1536	forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
1537	spin_unlock_irq(&dev_priv->uncore.lock);
1538
1539	if (forcewake_count) {
 1540		seq_puts(m, "RC information inaccurate because somebody "
 1541			    "holds a forcewake reference\n");
1542	} else {
1543		/* NB: we cannot use forcewake, else we read the wrong values */
1544		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
1545			udelay(10);
1546		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
1547	}
1548
1549	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
1550	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1551
1552	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1553	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1554	mutex_unlock(&dev->struct_mutex);
1555	mutex_lock(&dev_priv->rps.hw_lock);
1556	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
1557	mutex_unlock(&dev_priv->rps.hw_lock);
1558
1559	intel_runtime_pm_put(dev_priv);
1560
1561	seq_printf(m, "Video Turbo Mode: %s\n",
1562		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1563	seq_printf(m, "HW control enabled: %s\n",
1564		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
1565	seq_printf(m, "SW control enabled: %s\n",
1566		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1567			  GEN6_RP_MEDIA_SW_MODE));
1568	seq_printf(m, "RC1e Enabled: %s\n",
1569		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1570	seq_printf(m, "RC6 Enabled: %s\n",
1571		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1572	seq_printf(m, "Deep RC6 Enabled: %s\n",
1573		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1574	seq_printf(m, "Deepest RC6 Enabled: %s\n",
1575		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1576	seq_puts(m, "Current RC state: ");
1577	switch (gt_core_status & GEN6_RCn_MASK) {
1578	case GEN6_RC0:
1579		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1580			seq_puts(m, "Core Power Down\n");
1581		else
1582			seq_puts(m, "on\n");
1583		break;
1584	case GEN6_RC3:
1585		seq_puts(m, "RC3\n");
1586		break;
1587	case GEN6_RC6:
1588		seq_puts(m, "RC6\n");
1589		break;
1590	case GEN6_RC7:
1591		seq_puts(m, "RC7\n");
1592		break;
1593	default:
1594		seq_puts(m, "Unknown\n");
1595		break;
1596	}
1597
1598	seq_printf(m, "Core Power Down: %s\n",
1599		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1600
1601	/* Not exactly sure what this is */
1602	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
1603		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
1604	seq_printf(m, "RC6 residency since boot: %u\n",
1605		   I915_READ(GEN6_GT_GFX_RC6));
1606	seq_printf(m, "RC6+ residency since boot: %u\n",
1607		   I915_READ(GEN6_GT_GFX_RC6p));
1608	seq_printf(m, "RC6++ residency since boot: %u\n",
1609		   I915_READ(GEN6_GT_GFX_RC6pp));
1610
1611	seq_printf(m, "RC6   voltage: %dmV\n",
1612		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1613	seq_printf(m, "RC6+  voltage: %dmV\n",
1614		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1615	seq_printf(m, "RC6++ voltage: %dmV\n",
1616		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1617	return 0;
1618}
1619
1620static int i915_drpc_info(struct seq_file *m, void *unused)
1621{
1622	struct drm_info_node *node = m->private;
1623	struct drm_device *dev = node->minor->dev;
1624
1625	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
1626		return vlv_drpc_info(m);
1627	else if (INTEL_INFO(dev)->gen >= 6)
1628		return gen6_drpc_info(m);
1629	else
1630		return ironlake_drpc_info(m);
1631}
1632
1633static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1634{
1635	struct drm_info_node *node = m->private;
1636	struct drm_device *dev = node->minor->dev;
1637	struct drm_i915_private *dev_priv = dev->dev_private;
1638
1639	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1640		   dev_priv->fb_tracking.busy_bits);
1641
1642	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1643		   dev_priv->fb_tracking.flip_bits);
1644
1645	return 0;
1646}
1647
1648static int i915_fbc_status(struct seq_file *m, void *unused)
1649{
1650	struct drm_info_node *node = m->private;
1651	struct drm_device *dev = node->minor->dev;
1652	struct drm_i915_private *dev_priv = dev->dev_private;
1653
1654	if (!HAS_FBC(dev)) {
1655		seq_puts(m, "FBC unsupported on this chipset\n");
1656		return 0;
1657	}
1658
1659	intel_runtime_pm_get(dev_priv);
1660	mutex_lock(&dev_priv->fbc.lock);
1661
1662	if (intel_fbc_is_active(dev_priv))
1663		seq_puts(m, "FBC enabled\n");
1664	else
1665		seq_printf(m, "FBC disabled: %s\n",
1666			   dev_priv->fbc.no_fbc_reason);
1667
1668	if (INTEL_INFO(dev_priv)->gen >= 7)
1669		seq_printf(m, "Compressing: %s\n",
1670			   yesno(I915_READ(FBC_STATUS2) &
1671				 FBC_COMPRESSION_MASK));
1672
1673	mutex_unlock(&dev_priv->fbc.lock);
1674	intel_runtime_pm_put(dev_priv);
1675
1676	return 0;
1677}
1678
1679static int i915_fbc_fc_get(void *data, u64 *val)
1680{
1681	struct drm_device *dev = data;
1682	struct drm_i915_private *dev_priv = dev->dev_private;
1683
1684	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
1685		return -ENODEV;
1686
1687	*val = dev_priv->fbc.false_color;
1688
1689	return 0;
1690}
1691
1692static int i915_fbc_fc_set(void *data, u64 val)
1693{
1694	struct drm_device *dev = data;
1695	struct drm_i915_private *dev_priv = dev->dev_private;
1696	u32 reg;
1697
1698	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
1699		return -ENODEV;
1700
1701	mutex_lock(&dev_priv->fbc.lock);
1702
1703	reg = I915_READ(ILK_DPFC_CONTROL);
1704	dev_priv->fbc.false_color = val;
1705
1706	I915_WRITE(ILK_DPFC_CONTROL, val ?
1707		   (reg | FBC_CTL_FALSE_COLOR) :
1708		   (reg & ~FBC_CTL_FALSE_COLOR));
1709
1710	mutex_unlock(&dev_priv->fbc.lock);
1711	return 0;
1712}
1713
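/*
 * Expose the two helpers above as a single debugfs attribute. With
 * CONFIG_DEBUG_FS this is usually reachable under the DRM debugfs
 * directory (exact path may vary per system), e.g.:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_fbc_false_color
 *   cat /sys/kernel/debug/dri/0/i915_fbc_false_color
 */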
1714DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
1715			i915_fbc_fc_get, i915_fbc_fc_set,
1716			"%llu\n");
1717
1718static int i915_ips_status(struct seq_file *m, void *unused)
1719{
1720	struct drm_info_node *node = m->private;
1721	struct drm_device *dev = node->minor->dev;
1722	struct drm_i915_private *dev_priv = dev->dev_private;
1723
1724	if (!HAS_IPS(dev)) {
1725		seq_puts(m, "not supported\n");
1726		return 0;
1727	}
1728
1729	intel_runtime_pm_get(dev_priv);
1730
1731	seq_printf(m, "Enabled by kernel parameter: %s\n",
1732		   yesno(i915.enable_ips));
1733
1734	if (INTEL_INFO(dev)->gen >= 8) {
1735		seq_puts(m, "Currently: unknown\n");
1736	} else {
1737		if (I915_READ(IPS_CTL) & IPS_ENABLE)
1738			seq_puts(m, "Currently: enabled\n");
1739		else
1740			seq_puts(m, "Currently: disabled\n");
1741	}
1742
1743	intel_runtime_pm_put(dev_priv);
1744
1745	return 0;
1746}
1747
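/*
 * The self-refresh enable bit lives in a different register on each
 * platform family, so probe the one that matches and report a single
 * yes/no answer.
 */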
1748static int i915_sr_status(struct seq_file *m, void *unused)
1749{
1750	struct drm_info_node *node = m->private;
1751	struct drm_device *dev = node->minor->dev;
1752	struct drm_i915_private *dev_priv = dev->dev_private;
1753	bool sr_enabled = false;
1754
1755	intel_runtime_pm_get(dev_priv);
1756
1757	if (HAS_PCH_SPLIT(dev))
1758		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1759	else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
1760		 IS_I945G(dev) || IS_I945GM(dev))
1761		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1762	else if (IS_I915GM(dev))
1763		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1764	else if (IS_PINEVIEW(dev))
1765		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1766	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
1767		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1768
1769	intel_runtime_pm_put(dev_priv);
1770
1771	seq_printf(m, "self-refresh: %s\n",
1772		   sr_enabled ? "enabled" : "disabled");
1773
1774	return 0;
1775}
1776
1777static int i915_emon_status(struct seq_file *m, void *unused)
1778{
1779	struct drm_info_node *node = m->private;
1780	struct drm_device *dev = node->minor->dev;
1781	struct drm_i915_private *dev_priv = dev->dev_private;
1782	unsigned long temp, chipset, gfx;
1783	int ret;
1784
1785	if (!IS_GEN5(dev))
1786		return -ENODEV;
1787
1788	ret = mutex_lock_interruptible(&dev->struct_mutex);
1789	if (ret)
1790		return ret;
1791
1792	temp = i915_mch_val(dev_priv);
1793	chipset = i915_chipset_val(dev_priv);
1794	gfx = i915_gfx_val(dev_priv);
1795	mutex_unlock(&dev->struct_mutex);
1796
1797	seq_printf(m, "GMCH temp: %ld\n", temp);
1798	seq_printf(m, "Chipset power: %ld\n", chipset);
1799	seq_printf(m, "GFX power: %ld\n", gfx);
1800	seq_printf(m, "Total power: %ld\n", chipset + gfx);
1801
1802	return 0;
1803}
1804
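/*
 * Print the GPU-frequency to ring/IA-frequency mapping maintained by
 * the pcode firmware. Each GEN6_PCODE_READ_MIN_FREQ_TABLE mailbox read
 * returns the effective CPU and ring frequencies packed into one word,
 * decoded below in 100 MHz units.
 */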
1805static int i915_ring_freq_table(struct seq_file *m, void *unused)
1806{
1807	struct drm_info_node *node = m->private;
1808	struct drm_device *dev = node->minor->dev;
1809	struct drm_i915_private *dev_priv = dev->dev_private;
1810	int ret = 0;
1811	int gpu_freq, ia_freq;
1812	unsigned int max_gpu_freq, min_gpu_freq;
1813
1814	if (!HAS_CORE_RING_FREQ(dev)) {
1815		seq_puts(m, "unsupported on this chipset\n");
1816		return 0;
1817	}
1818
1819	intel_runtime_pm_get(dev_priv);
1820
1821	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1822
1823	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1824	if (ret)
1825		goto out;
1826
1827	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1828		/* Convert GT frequency to 50 MHz units */
1829		min_gpu_freq =
1830			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
1831		max_gpu_freq =
1832			dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
1833	} else {
1834		min_gpu_freq = dev_priv->rps.min_freq_softlimit;
1835		max_gpu_freq = dev_priv->rps.max_freq_softlimit;
1836	}
1837
1838	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1839
1840	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1841		ia_freq = gpu_freq;
1842		sandybridge_pcode_read(dev_priv,
1843				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
1844				       &ia_freq);
1845		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1846			   intel_gpu_freq(dev_priv, (gpu_freq *
1847				(IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
1848				 GEN9_FREQ_SCALER : 1))),
1849			   ((ia_freq >> 0) & 0xff) * 100,
1850			   ((ia_freq >> 8) & 0xff) * 100);
1851	}
1852
1853	mutex_unlock(&dev_priv->rps.hw_lock);
1854
1855out:
1856	intel_runtime_pm_put(dev_priv);
1857	return ret;
1858}
1859
1860static int i915_opregion(struct seq_file *m, void *unused)
1861{
1862	struct drm_info_node *node = m->private;
1863	struct drm_device *dev = node->minor->dev;
1864	struct drm_i915_private *dev_priv = dev->dev_private;
1865	struct intel_opregion *opregion = &dev_priv->opregion;
1866	int ret;
1867
1868	ret = mutex_lock_interruptible(&dev->struct_mutex);
1869	if (ret)
1870		goto out;
1871
1872	if (opregion->header)
1873		seq_write(m, opregion->header, OPREGION_SIZE);
1874
1875	mutex_unlock(&dev->struct_mutex);
1876
1877out:
1878	return 0;
1879}
1880
1881static int i915_vbt(struct seq_file *m, void *unused)
1882{
1883	struct drm_info_node *node = m->private;
1884	struct drm_device *dev = node->minor->dev;
1885	struct drm_i915_private *dev_priv = dev->dev_private;
1886	struct intel_opregion *opregion = &dev_priv->opregion;
1887
1888	if (opregion->vbt)
1889		seq_write(m, opregion->vbt, opregion->vbt_size);
1890
1891	return 0;
1892}
1893
1894static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1895{
1896	struct drm_info_node *node = m->private;
1897	struct drm_device *dev = node->minor->dev;
1898	struct intel_framebuffer *fbdev_fb = NULL;
1899	struct drm_framebuffer *drm_fb;
1900
1901#ifdef CONFIG_DRM_FBDEV_EMULATION
1902	if (to_i915(dev)->fbdev) {
1903		fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
1904
1905		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1906			   fbdev_fb->base.width,
1907			   fbdev_fb->base.height,
1908			   fbdev_fb->base.depth,
1909			   fbdev_fb->base.bits_per_pixel,
1910			   fbdev_fb->base.modifier[0],
1911			   atomic_read(&fbdev_fb->base.refcount.refcount));
1912		describe_obj(m, fbdev_fb->obj);
1913		seq_putc(m, '\n');
1914	}
1915#endif
1916
1917	mutex_lock(&dev->mode_config.fb_lock);
1918	drm_for_each_fb(drm_fb, dev) {
1919		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1920		if (fb == fbdev_fb)
1921			continue;
1922
1923		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1924			   fb->base.width,
1925			   fb->base.height,
1926			   fb->base.depth,
1927			   fb->base.bits_per_pixel,
1928			   fb->base.modifier[0],
1929			   atomic_read(&fb->base.refcount.refcount));
1930		describe_obj(m, fb->obj);
1931		seq_putc(m, '\n');
1932	}
1933	mutex_unlock(&dev->mode_config.fb_lock);
1934
1935	return 0;
1936}
1937
1938static void describe_ctx_ringbuf(struct seq_file *m,
1939				 struct intel_ringbuffer *ringbuf)
1940{
1941	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
1942		   ringbuf->space, ringbuf->head, ringbuf->tail,
1943		   ringbuf->last_retired_head);
1944}
1945
1946static int i915_context_status(struct seq_file *m, void *unused)
1947{
1948	struct drm_info_node *node = m->private;
1949	struct drm_device *dev = node->minor->dev;
1950	struct drm_i915_private *dev_priv = dev->dev_private;
1951	struct intel_engine_cs *ring;
1952	struct intel_context *ctx;
1953	int ret, i;
1954
1955	ret = mutex_lock_interruptible(&dev->struct_mutex);
1956	if (ret)
1957		return ret;
1958
1959	list_for_each_entry(ctx, &dev_priv->context_list, link) {
1960		if (!i915.enable_execlists &&
1961		    ctx->legacy_hw_ctx.rcs_state == NULL)
1962			continue;
1963
1964		seq_puts(m, "HW context ");
1965		describe_ctx(m, ctx);
1966		if (ctx == dev_priv->kernel_context)
1967			seq_puts(m, "(kernel context) ");
1968
1969		if (i915.enable_execlists) {
1970			seq_putc(m, '\n');
1971			for_each_ring(ring, dev_priv, i) {
1972				struct drm_i915_gem_object *ctx_obj =
1973					ctx->engine[i].state;
1974				struct intel_ringbuffer *ringbuf =
1975					ctx->engine[i].ringbuf;
1976
1977				seq_printf(m, "%s: ", ring->name);
1978				if (ctx_obj)
1979					describe_obj(m, ctx_obj);
1980				if (ringbuf)
1981					describe_ctx_ringbuf(m, ringbuf);
1982				seq_putc(m, '\n');
1983			}
1984		} else {
1985			describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
1986		}
1987
1988		seq_putc(m, '\n');
1989	}
1990
1991	mutex_unlock(&dev->struct_mutex);
1992
1993	return 0;
1994}
1995
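/*
 * Dump the register-state page of a logical ring context. The register
 * state lives at page LRC_STATE_PN of the context image, one page past
 * the start, hence the 4096-byte offset added to the printed GGTT
 * addresses below.
 */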
1996static void i915_dump_lrc_obj(struct seq_file *m,
1997			      struct intel_context *ctx,
1998			      struct intel_engine_cs *ring)
1999{
2000	struct page *page;
2001	uint32_t *reg_state;
2002	int j;
2003	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
2004	unsigned long ggtt_offset = 0;
2005
2006	if (ctx_obj == NULL) {
2007		seq_printf(m, "Context on %s with no gem object\n",
2008			   ring->name);
2009		return;
2010	}
2011
2012	seq_printf(m, "CONTEXT: %s %u\n", ring->name,
2013		   intel_execlists_ctx_id(ctx, ring));
2014
2015	if (!i915_gem_obj_ggtt_bound(ctx_obj))
2016		seq_puts(m, "\tNot bound in GGTT\n");
2017	else
2018		ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
2019
2020	if (i915_gem_object_get_pages(ctx_obj)) {
2021		seq_puts(m, "\tFailed to get pages for context object\n");
2022		return;
2023	}
2024
2025	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
2026	if (!WARN_ON(page == NULL)) {
2027		reg_state = kmap_atomic(page);
2028
2029		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
2030			seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
2031				   ggtt_offset + 4096 + (j * 4),
2032				   reg_state[j], reg_state[j + 1],
2033				   reg_state[j + 2], reg_state[j + 3]);
2034		}
2035		kunmap_atomic(reg_state);
2036	}
2037
2038	seq_putc(m, '\n');
2039}
2040
2041static int i915_dump_lrc(struct seq_file *m, void *unused)
2042{
2043	struct drm_info_node *node = (struct drm_info_node *) m->private;
2044	struct drm_device *dev = node->minor->dev;
2045	struct drm_i915_private *dev_priv = dev->dev_private;
2046	struct intel_engine_cs *ring;
2047	struct intel_context *ctx;
2048	int ret, i;
2049
2050	if (!i915.enable_execlists) {
2051		seq_puts(m, "Logical Ring Contexts are disabled\n");
2052		return 0;
2053	}
2054
2055	ret = mutex_lock_interruptible(&dev->struct_mutex);
2056	if (ret)
2057		return ret;
2058
2059	list_for_each_entry(ctx, &dev_priv->context_list, link)
2060		if (ctx != dev_priv->kernel_context)
2061			for_each_ring(ring, dev_priv, i)
2062				i915_dump_lrc_obj(m, ctx, ring);
2063
2064	mutex_unlock(&dev->struct_mutex);
2065
2066	return 0;
2067}
2068
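/*
 * Snapshot the execlist submission state for every engine: the
 * EXECLIST_STATUS registers, the context-status-buffer (CSB) read/write
 * pointers, the raw CSB entries and the software submission queue.
 */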
2069static int i915_execlists(struct seq_file *m, void *data)
2070{
2071	struct drm_info_node *node = (struct drm_info_node *)m->private;
2072	struct drm_device *dev = node->minor->dev;
2073	struct drm_i915_private *dev_priv = dev->dev_private;
2074	struct intel_engine_cs *ring;
2075	u32 status_pointer;
2076	u8 read_pointer;
2077	u8 write_pointer;
2078	u32 status;
2079	u32 ctx_id;
2080	struct list_head *cursor;
2081	int ring_id, i;
2082	int ret;
2083
2084	if (!i915.enable_execlists) {
2085		seq_puts(m, "Logical Ring Contexts are disabled\n");
2086		return 0;
2087	}
2088
2089	ret = mutex_lock_interruptible(&dev->struct_mutex);
2090	if (ret)
2091		return ret;
2092
2093	intel_runtime_pm_get(dev_priv);
2094
2095	for_each_ring(ring, dev_priv, ring_id) {
2096		struct drm_i915_gem_request *head_req = NULL;
2097		int count = 0;
2098		unsigned long flags;
2099
2100		seq_printf(m, "%s\n", ring->name);
2101
2102		status = I915_READ(RING_EXECLIST_STATUS_LO(ring));
2103		ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(ring));
2104		seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
2105			   status, ctx_id);
2106
2107		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
2108		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
2109
2110		read_pointer = ring->next_context_status_buffer;
2111		write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
2112		if (read_pointer > write_pointer)
2113			write_pointer += GEN8_CSB_ENTRIES;
2114		seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
2115			   read_pointer, write_pointer);
2116
2117		for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
2118			status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
2119			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));
2120
2121			seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
2122				   i, status, ctx_id);
2123		}
2124
2125		spin_lock_irqsave(&ring->execlist_lock, flags);
2126		list_for_each(cursor, &ring->execlist_queue)
2127			count++;
2128		head_req = list_first_entry_or_null(&ring->execlist_queue,
2129				struct drm_i915_gem_request, execlist_link);
2130		spin_unlock_irqrestore(&ring->execlist_lock, flags);
2131
2132		seq_printf(m, "\t%d requests in queue\n", count);
2133		if (head_req) {
2134			seq_printf(m, "\tHead request id: %u\n",
2135				   intel_execlists_ctx_id(head_req->ctx, ring));
2136			seq_printf(m, "\tHead request tail: %u\n",
2137				   head_req->tail);
2138		}
2139
2140		seq_putc(m, '\n');
2141	}
2142
2143	intel_runtime_pm_put(dev_priv);
2144	mutex_unlock(&dev->struct_mutex);
2145
2146	return 0;
2147}
2148
2149static const char *swizzle_string(unsigned swizzle)
2150{
2151	switch (swizzle) {
2152	case I915_BIT_6_SWIZZLE_NONE:
2153		return "none";
2154	case I915_BIT_6_SWIZZLE_9:
2155		return "bit9";
2156	case I915_BIT_6_SWIZZLE_9_10:
2157		return "bit9/bit10";
2158	case I915_BIT_6_SWIZZLE_9_11:
2159		return "bit9/bit11";
2160	case I915_BIT_6_SWIZZLE_9_10_11:
2161		return "bit9/bit10/bit11";
2162	case I915_BIT_6_SWIZZLE_9_17:
2163		return "bit9/bit17";
2164	case I915_BIT_6_SWIZZLE_9_10_17:
2165		return "bit9/bit10/bit17";
2166	case I915_BIT_6_SWIZZLE_UNKNOWN:
2167		return "unknown";
2168	}
2169
2170	return "bug";
2171}
2172
2173static int i915_swizzle_info(struct seq_file *m, void *data)
2174{
2175	struct drm_info_node *node = m->private;
2176	struct drm_device *dev = node->minor->dev;
2177	struct drm_i915_private *dev_priv = dev->dev_private;
2178	int ret;
2179
2180	ret = mutex_lock_interruptible(&dev->struct_mutex);
2181	if (ret)
2182		return ret;
2183	intel_runtime_pm_get(dev_priv);
2184
2185	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
2186		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
2187	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
2188		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
2189
2190	if (IS_GEN3(dev) || IS_GEN4(dev)) {
2191		seq_printf(m, "DCC = 0x%08x\n",
2192			   I915_READ(DCC));
2193		seq_printf(m, "DCC2 = 0x%08x\n",
2194			   I915_READ(DCC2));
2195		seq_printf(m, "C0DRB3 = 0x%04x\n",
2196			   I915_READ16(C0DRB3));
2197		seq_printf(m, "C1DRB3 = 0x%04x\n",
2198			   I915_READ16(C1DRB3));
2199	} else if (INTEL_INFO(dev)->gen >= 6) {
2200		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
2201			   I915_READ(MAD_DIMM_C0));
2202		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
2203			   I915_READ(MAD_DIMM_C1));
2204		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
2205			   I915_READ(MAD_DIMM_C2));
2206		seq_printf(m, "TILECTL = 0x%08x\n",
2207			   I915_READ(TILECTL));
2208		if (INTEL_INFO(dev)->gen >= 8)
2209			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
2210				   I915_READ(GAMTARBMODE));
2211		else
2212			seq_printf(m, "ARB_MODE = 0x%08x\n",
2213				   I915_READ(ARB_MODE));
2214		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
2215			   I915_READ(DISP_ARB_CTL));
2216	}
2217
2218	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2219		seq_puts(m, "L-shaped memory detected\n");
2220
2221	intel_runtime_pm_put(dev_priv);
2222	mutex_unlock(&dev->struct_mutex);
2223
2224	return 0;
2225}
2226
2227static int per_file_ctx(int id, void *ptr, void *data)
2228{
2229	struct intel_context *ctx = ptr;
2230	struct seq_file *m = data;
2231	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2232
2233	if (!ppgtt) {
2234		seq_printf(m, "  no ppgtt for context %d\n",
2235			   ctx->user_handle);
2236		return 0;
2237	}
2238
2239	if (i915_gem_context_is_default(ctx))
2240		seq_puts(m, "  default context:\n");
2241	else
2242		seq_printf(m, "  context %d:\n", ctx->user_handle);
2243	ppgtt->debug_dump(ppgtt, m);
2244
2245	return 0;
2246}
2247
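/*
 * On gen8+ each engine has four PDP registers; the high and low dwords
 * are combined below into the 64-bit page-directory-pointer addresses.
 */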
2248static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2249{
2250	struct drm_i915_private *dev_priv = dev->dev_private;
2251	struct intel_engine_cs *ring;
2252	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2253	int unused, i;
2254
2255	if (!ppgtt)
2256		return;
2257
2258	for_each_ring(ring, dev_priv, unused) {
2259		seq_printf(m, "%s\n", ring->name);
2260		for (i = 0; i < 4; i++) {
2261			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(ring, i));
2262			pdp <<= 32;
2263			pdp |= I915_READ(GEN8_RING_PDP_LDW(ring, i));
2264			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2265		}
2266	}
2267}
2268
2269static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2270{
2271	struct drm_i915_private *dev_priv = dev->dev_private;
2272	struct intel_engine_cs *ring;
2273	int i;
2274
2275	if (INTEL_INFO(dev)->gen == 6)
2276		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2277
2278	for_each_ring(ring, dev_priv, i) {
2279		seq_printf(m, "%s\n", ring->name);
2280		if (INTEL_INFO(dev)->gen == 7)
2281			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
2282		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
2283		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
2284		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
2285	}
2286	if (dev_priv->mm.aliasing_ppgtt) {
2287		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2288
2289		seq_puts(m, "aliasing PPGTT:\n");
2290		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
2291
2292		ppgtt->debug_dump(ppgtt, m);
2293	}
2294
2295	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
2296}
2297
2298static int i915_ppgtt_info(struct seq_file *m, void *data)
2299{
2300	struct drm_info_node *node = m->private;
2301	struct drm_device *dev = node->minor->dev;
2302	struct drm_i915_private *dev_priv = dev->dev_private;
2303	struct drm_file *file;
2304
2305	int ret = mutex_lock_interruptible(&dev->struct_mutex);
2306	if (ret)
2307		return ret;
2308	intel_runtime_pm_get(dev_priv);
2309
2310	if (INTEL_INFO(dev)->gen >= 8)
2311		gen8_ppgtt_info(m, dev);
2312	else if (INTEL_INFO(dev)->gen >= 6)
2313		gen6_ppgtt_info(m, dev);
2314
2315	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2316		struct drm_i915_file_private *file_priv = file->driver_priv;
2317		struct task_struct *task;
2318
2319		task = get_pid_task(file->pid, PIDTYPE_PID);
2320		if (!task) {
2321			ret = -ESRCH;
2322			goto out_put;
2323		}
2324		seq_printf(m, "\nproc: %s\n", task->comm);
2325		put_task_struct(task);
2326		idr_for_each(&file_priv->context_idr, per_file_ctx,
2327			     (void *)(unsigned long)m);
2328	}
2329
2330out_put:
2331	intel_runtime_pm_put(dev_priv);
2332	mutex_unlock(&dev->struct_mutex);
2333
2334	return ret;
2335}
2336
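/* Sum the per-engine user-interrupt refcounts, i.e. how many waiters exist. */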
2337static int count_irq_waiters(struct drm_i915_private *i915)
2338{
2339	struct intel_engine_cs *ring;
2340	int count = 0;
2341	int i;
2342
2343	for_each_ring(ring, i915, i)
2344		count += ring->irq_refcount;
2345
2346	return count;
2347}
2348
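/*
 * Summarize RPS (turbo) state: the current/min/max frequencies plus the
 * per-client, semaphore, mmio-flip and kernel boost counters.
 */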
2349static int i915_rps_boost_info(struct seq_file *m, void *data)
2350{
2351	struct drm_info_node *node = m->private;
2352	struct drm_device *dev = node->minor->dev;
2353	struct drm_i915_private *dev_priv = dev->dev_private;
2354	struct drm_file *file;
2355
2356	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
2357	seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy);
2358	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
2359	seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
2360		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
2361		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
2362		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
2363		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
2364		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
2365	spin_lock(&dev_priv->rps.client_lock);
2366	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2367		struct drm_i915_file_private *file_priv = file->driver_priv;
2368		struct task_struct *task;
2369
2370		rcu_read_lock();
2371		task = pid_task(file->pid, PIDTYPE_PID);
2372		seq_printf(m, "%s [%d]: %d boosts%s\n",
2373			   task ? task->comm : "<unknown>",
2374			   task ? task->pid : -1,
2375			   file_priv->rps.boosts,
2376			   list_empty(&file_priv->rps.link) ? "" : ", active");
2377		rcu_read_unlock();
2378	}
2379	seq_printf(m, "Semaphore boosts: %d%s\n",
2380		   dev_priv->rps.semaphores.boosts,
2381		   list_empty(&dev_priv->rps.semaphores.link) ? "" : ", active");
2382	seq_printf(m, "MMIO flip boosts: %d%s\n",
2383		   dev_priv->rps.mmioflips.boosts,
2384		   list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active");
2385	seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
2386	spin_unlock(&dev_priv->rps.client_lock);
2387
2388	return 0;
2389}
2390
2391static int i915_llc(struct seq_file *m, void *data)
2392{
2393	struct drm_info_node *node = m->private;
2394	struct drm_device *dev = node->minor->dev;
2395	struct drm_i915_private *dev_priv = dev->dev_private;
2396
2397	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
2398	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
2399	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
2400
2401	return 0;
2402}
2403
2404static int i915_guc_load_status_info(struct seq_file *m, void *data)
2405{
2406	struct drm_info_node *node = m->private;
2407	struct drm_i915_private *dev_priv = node->minor->dev->dev_private;
2408	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
2409	u32 tmp, i;
2410
2411	if (!HAS_GUC_UCODE(dev_priv->dev))
2412		return 0;
2413
2414	seq_printf(m, "GuC firmware status:\n");
2415	seq_printf(m, "\tpath: %s\n",
2416		guc_fw->guc_fw_path);
2417	seq_printf(m, "\tfetch: %s\n",
2418		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
2419	seq_printf(m, "\tload: %s\n",
2420		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
2421	seq_printf(m, "\tversion wanted: %d.%d\n",
2422		guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
2423	seq_printf(m, "\tversion found: %d.%d\n",
2424		guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
2425	seq_printf(m, "\theader: offset is %d; size = %d\n",
2426		guc_fw->header_offset, guc_fw->header_size);
2427	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
2428		guc_fw->ucode_offset, guc_fw->ucode_size);
2429	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
2430		guc_fw->rsa_offset, guc_fw->rsa_size);
2431
2432	tmp = I915_READ(GUC_STATUS);
2433
2434	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2435	seq_printf(m, "\tBootrom status = 0x%x\n",
2436		(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2437	seq_printf(m, "\tuKernel status = 0x%x\n",
2438		(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2439	seq_printf(m, "\tMIA Core status = 0x%x\n",
2440		(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2441	seq_puts(m, "\nScratch registers:\n");
2442	for (i = 0; i < 16; i++)
2443		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
2444
2445	return 0;
2446}
2447
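/*
 * Pretty-print one GuC client: its doorbell and work-queue bookkeeping
 * plus a per-engine submission count indexed by the GuC engine id.
 */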
2448static void i915_guc_client_info(struct seq_file *m,
2449				 struct drm_i915_private *dev_priv,
2450				 struct i915_guc_client *client)
2451{
2452	struct intel_engine_cs *ring;
2453	uint64_t tot = 0;
2454	uint32_t i;
2455
2456	seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
2457		client->priority, client->ctx_index, client->proc_desc_offset);
2458	seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
2459		client->doorbell_id, client->doorbell_offset, client->cookie);
2460	seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
2461		client->wq_size, client->wq_offset, client->wq_tail);
2462
2463	seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
2464	seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
2465	seq_printf(m, "\tLast submission result: %d\n", client->retcode);
2466
2467	for_each_ring(ring, dev_priv, i) {
2468		seq_printf(m, "\tSubmissions: %llu %s\n",
2469				client->submissions[ring->guc_id],
2470				ring->name);
2471		tot += client->submissions[ring->guc_id];
2472	}
2473	seq_printf(m, "\tTotal: %llu\n", tot);
2474}
2475
2476static int i915_guc_info(struct seq_file *m, void *data)
2477{
2478	struct drm_info_node *node = m->private;
2479	struct drm_device *dev = node->minor->dev;
2480	struct drm_i915_private *dev_priv = dev->dev_private;
2481	struct intel_guc guc;
2482	struct i915_guc_client client = {};
2483	struct intel_engine_cs *ring;
2484	enum intel_ring_id i;
2485	u64 total = 0;
2486
2487	if (!HAS_GUC_SCHED(dev_priv->dev))
2488		return 0;
2489
2490	if (mutex_lock_interruptible(&dev->struct_mutex))
2491		return 0;
2492
2493	/* Take a local copy of the GuC data, so we can dump it at leisure */
2494	guc = dev_priv->guc;
2495	if (guc.execbuf_client)
2496		client = *guc.execbuf_client;
2497
2498	mutex_unlock(&dev->struct_mutex);
2499
2500	seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
2501	seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
2502	seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
2503	seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status);
2504	seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
2505
2506	seq_printf(m, "\nGuC submissions:\n");
2507	for_each_ring(ring, dev_priv, i) {
2508		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
2509			ring->name, guc.submissions[ring->guc_id],
2510			guc.last_seqno[ring->guc_id]);
2511		total += guc.submissions[ring->guc_id];
2512	}
2513	seq_printf(m, "\tTotal: %llu\n", total);
2514
2515	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
2516	i915_guc_client_info(m, dev_priv, &client);
2517
2518	/* Add more as required ... */
2519
2520	return 0;
2521}
2522
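/*
 * Raw hex dump of the GuC log buffer object, four dwords per line.
 */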
2523static int i915_guc_log_dump(struct seq_file *m, void *data)
2524{
2525	struct drm_info_node *node = m->private;
2526	struct drm_device *dev = node->minor->dev;
2527	struct drm_i915_private *dev_priv = dev->dev_private;
2528	struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
2529	u32 *log;
2530	int i = 0, pg;
2531
2532	if (!log_obj)
2533		return 0;
2534
2535	for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) {
2536		log = kmap_atomic(i915_gem_object_get_page(log_obj, pg));
2537
2538		for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
2539			seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2540				   *(log + i), *(log + i + 1),
2541				   *(log + i + 2), *(log + i + 3));
2542
2543		kunmap_atomic(log);
2544	}
2545
2546	seq_putc(m, '\n');
2547
2548	return 0;
2549}
2550
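/*
 * PSR (panel self refresh) status. DDI platforms expose a single
 * EDP_PSR_CTL enable bit; VLV/CHV instead track a per-pipe state
 * machine, which is why the per-pipe VLV_PSRSTAT values are sampled
 * below.
 */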
2551static int i915_edp_psr_status(struct seq_file *m, void *data)
2552{
2553	struct drm_info_node *node = m->private;
2554	struct drm_device *dev = node->minor->dev;
2555	struct drm_i915_private *dev_priv = dev->dev_private;
2556	u32 psrperf = 0;
2557	u32 stat[3];
2558	enum pipe pipe;
2559	bool enabled = false;
2560
2561	if (!HAS_PSR(dev)) {
2562		seq_puts(m, "PSR not supported\n");
2563		return 0;
2564	}
2565
2566	intel_runtime_pm_get(dev_priv);
2567
2568	mutex_lock(&dev_priv->psr.lock);
2569	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
2570	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
2571	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
2572	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
2573	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2574		   dev_priv->psr.busy_frontbuffer_bits);
2575	seq_printf(m, "Re-enable work scheduled: %s\n",
2576		   yesno(work_busy(&dev_priv->psr.work.work)));
2577
2578	if (HAS_DDI(dev))
2579		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
2580	else {
2581		for_each_pipe(dev_priv, pipe) {
2582			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
2583				VLV_EDP_PSR_CURR_STATE_MASK;
2584			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2585			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2586				enabled = true;
2587		}
2588	}
2589
2590	seq_printf(m, "Main link in standby mode: %s\n",
2591		   yesno(dev_priv->psr.link_standby));
2592
2593	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
2594
2595	if (!HAS_DDI(dev))
2596		for_each_pipe(dev_priv, pipe) {
2597			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2598			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2599				seq_printf(m, " pipe %c", pipe_name(pipe));
2600		}
2601	seq_puts(m, "\n");
2602
2603	/*
2604	 * VLV/CHV PSR has no performance counter.
2605	 * On SKL+ the perf counter is reset to 0 every time a DC state is entered.
2606	 */
2607	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2608		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
2609			EDP_PSR_PERF_CNT_MASK;
2610
2611		seq_printf(m, "Performance_Counter: %u\n", psrperf);
2612	}
2613	mutex_unlock(&dev_priv->psr.lock);
2614
2615	intel_runtime_pm_put(dev_priv);
2616	return 0;
2617}
2618
2619static int i915_sink_crc(struct seq_file *m, void *data)
2620{
2621	struct drm_info_node *node = m->private;
2622	struct drm_device *dev = node->minor->dev;
2623	struct intel_encoder *encoder;
2624	struct intel_connector *connector;
2625	struct intel_dp *intel_dp = NULL;
2626	int ret;
2627	u8 crc[6];
2628
2629	drm_modeset_lock_all(dev);
2630	for_each_intel_connector(dev, connector) {
2631
2632		if (connector->base.dpms != DRM_MODE_DPMS_ON)
2633			continue;
2634
2635		if (!connector->base.encoder)
2636			continue;
2637
2638		encoder = to_intel_encoder(connector->base.encoder);
2639		if (encoder->type != INTEL_OUTPUT_EDP)
2640			continue;
2641
2642		intel_dp = enc_to_intel_dp(&encoder->base);
2643
2644		ret = intel_dp_sink_crc(intel_dp, crc);
2645		if (ret)
2646			goto out;
2647
2648		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
2649			   crc[0], crc[1], crc[2],
2650			   crc[3], crc[4], crc[5]);
2651		goto out;
2652	}
2653	ret = -ENODEV;
2654out:
2655	drm_modeset_unlock_all(dev);
2656	return ret;
2657}
2658
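/*
 * Report the cumulative GPU energy counter in microjoules: the RAPL
 * power-unit MSR supplies the scale, MCH_SECP_NRG_STTS the raw count.
 */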
2659static int i915_energy_uJ(struct seq_file *m, void *data)
2660{
2661	struct drm_info_node *node = m->private;
2662	struct drm_device *dev = node->minor->dev;
2663	struct drm_i915_private *dev_priv = dev->dev_private;
2664	u64 power;
2665	u32 units;
2666
2667	if (INTEL_INFO(dev)->gen < 6)
2668		return -ENODEV;
2669
2670	intel_runtime_pm_get(dev_priv);
2671
2672	rdmsrl(MSR_RAPL_POWER_UNIT, power);
2673	power = (power & 0x1f00) >> 8;
2674	units = 1000000 / (1 << power); /* convert to uJ */
2675	power = I915_READ(MCH_SECP_NRG_STTS);
2676	power *= units;
2677
2678	intel_runtime_pm_put(dev_priv);
2679
2680	seq_printf(m, "%llu", (unsigned long long)power);
2681
2682	return 0;
2683}
2684
2685static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2686{
2687	struct drm_info_node *node = m->private;
2688	struct drm_device *dev = node->minor->dev;
2689	struct drm_i915_private *dev_priv = dev->dev_private;
2690
2691	if (!HAS_RUNTIME_PM(dev)) {
2692		seq_puts(m, "not supported\n");
2693		return 0;
2694	}
2695
2696	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
2697	seq_printf(m, "IRQs disabled: %s\n",
2698		   yesno(!intel_irqs_enabled(dev_priv)));
2699#ifdef CONFIG_PM
2700	seq_printf(m, "Usage count: %d\n",
2701		   atomic_read(&dev->dev->power.usage_count));
2702#else
2703	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2704#endif
2705
2706	return 0;
2707}
2708
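/*
 * Walk every power well and print its reference count together with the
 * per-domain use counts of the display power domains it serves.
 */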
2709static int i915_power_domain_info(struct seq_file *m, void *unused)
2710{
2711	struct drm_info_node *node = m->private;
2712	struct drm_device *dev = node->minor->dev;
2713	struct drm_i915_private *dev_priv = dev->dev_private;
2714	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2715	int i;
2716
2717	mutex_lock(&power_domains->lock);
2718
2719	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2720	for (i = 0; i < power_domains->power_well_count; i++) {
2721		struct i915_power_well *power_well;
2722		enum intel_display_power_domain power_domain;
2723
2724		power_well = &power_domains->power_wells[i];
2725		seq_printf(m, "%-25s %d\n", power_well->name,
2726			   power_well->count);
2727
2728		for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
2729		     power_domain++) {
2730			if (!(BIT(power_domain) & power_well->domains))
2731				continue;
2732
2733			seq_printf(m, "  %-23s %d\n",
2734				 intel_display_power_domain_str(power_domain),
2735				 power_domains->domain_use_count[power_domain]);
2736		}
2737	}
2738
2739	mutex_unlock(&power_domains->lock);
2740
2741	return 0;
2742}
2743
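/*
 * DMC/CSR firmware status, including the DC-state transition counters
 * exposed by sufficiently new firmware on SKL and BXT.
 */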
2744static int i915_dmc_info(struct seq_file *m, void *unused)
2745{
2746	struct drm_info_node *node = m->private;
2747	struct drm_device *dev = node->minor->dev;
2748	struct drm_i915_private *dev_priv = dev->dev_private;
2749	struct intel_csr *csr;
2750
2751	if (!HAS_CSR(dev)) {
2752		seq_puts(m, "not supported\n");
2753		return 0;
2754	}
2755
2756	csr = &dev_priv->csr;
2757
2758	intel_runtime_pm_get(dev_priv);
2759
2760	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2761	seq_printf(m, "path: %s\n", csr->fw_path);
2762
2763	if (!csr->dmc_payload)
2764		goto out;
2765
2766	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2767		   CSR_VERSION_MINOR(csr->version));
2768
2769	if (IS_SKYLAKE(dev) && csr->version >= CSR_VERSION(1, 6)) {
2770		seq_printf(m, "DC3 -> DC5 count: %d\n",
2771			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
2772		seq_printf(m, "DC5 -> DC6 count: %d\n",
2773			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
2774	} else if (IS_BROXTON(dev) && csr->version >= CSR_VERSION(1, 4)) {
2775		seq_printf(m, "DC3 -> DC5 count: %d\n",
2776			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
2777	}
2778
2779out:
2780	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2781	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2782	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2783
2784	intel_runtime_pm_put(dev_priv);
2785
2786	return 0;
2787}
2788
2789static void intel_seq_print_mode(struct seq_file *m, int tabs,
2790				 struct drm_display_mode *mode)
2791{
2792	int i;
2793
2794	for (i = 0; i < tabs; i++)
2795		seq_putc(m, '\t');
2796
2797	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2798		   mode->base.id, mode->name,
2799		   mode->vrefresh, mode->clock,
2800		   mode->hdisplay, mode->hsync_start,
2801		   mode->hsync_end, mode->htotal,
2802		   mode->vdisplay, mode->vsync_start,
2803		   mode->vsync_end, mode->vtotal,
2804		   mode->type, mode->flags);
2805}
2806
2807static void intel_encoder_info(struct seq_file *m,
2808			       struct intel_crtc *intel_crtc,
2809			       struct intel_encoder *intel_encoder)
2810{
2811	struct drm_info_node *node = m->private;
2812	struct drm_device *dev = node->minor->dev;
2813	struct drm_crtc *crtc = &intel_crtc->base;
2814	struct intel_connector *intel_connector;
2815	struct drm_encoder *encoder;
2816
2817	encoder = &intel_encoder->base;
2818	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2819		   encoder->base.id, encoder->name);
2820	for_each_connector_on_encoder(dev, encoder, intel_connector) {
2821		struct drm_connector *connector = &intel_connector->base;
2822		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2823			   connector->base.id,
2824			   connector->name,
2825			   drm_get_connector_status_name(connector->status));
2826		if (connector->status == connector_status_connected) {
2827			struct drm_display_mode *mode = &crtc->mode;
2828			seq_printf(m, ", mode:\n");
2829			intel_seq_print_mode(m, 2, mode);
2830		} else {
2831			seq_putc(m, '\n');
2832		}
2833	}
2834}
2835
2836static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2837{
2838	struct drm_info_node *node = m->private;
2839	struct drm_device *dev = node->minor->dev;
2840	struct drm_crtc *crtc = &intel_crtc->base;
2841	struct intel_encoder *intel_encoder;
2842	struct drm_plane_state *plane_state = crtc->primary->state;
2843	struct drm_framebuffer *fb = plane_state->fb;
2844
2845	if (fb)
2846		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2847			   fb->base.id, plane_state->src_x >> 16,
2848			   plane_state->src_y >> 16, fb->width, fb->height);
2849	else
2850		seq_puts(m, "\tprimary plane disabled\n");
2851	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2852		intel_encoder_info(m, intel_crtc, intel_encoder);
2853}
2854
2855static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2856{
2857	struct drm_display_mode *mode = panel->fixed_mode;
2858
2859	seq_printf(m, "\tfixed mode:\n");
2860	intel_seq_print_mode(m, 2, mode);
2861}
2862
2863static void intel_dp_info(struct seq_file *m,
2864			  struct intel_connector *intel_connector)
2865{
2866	struct intel_encoder *intel_encoder = intel_connector->encoder;
2867	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2868
2869	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2870	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2871	if (intel_encoder->type == INTEL_OUTPUT_EDP)
2872		intel_panel_info(m, &intel_connector->panel);
2873}
2874
2875static void intel_hdmi_info(struct seq_file *m,
2876			    struct intel_connector *intel_connector)
2877{
2878	struct intel_encoder *intel_encoder = intel_connector->encoder;
2879	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2880
2881	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2882}
2883
2884static void intel_lvds_info(struct seq_file *m,
2885			    struct intel_connector *intel_connector)
2886{
2887	intel_panel_info(m, &intel_connector->panel);
2888}
2889
2890static void intel_connector_info(struct seq_file *m,
2891				 struct drm_connector *connector)
2892{
2893	struct intel_connector *intel_connector = to_intel_connector(connector);
2894	struct intel_encoder *intel_encoder = intel_connector->encoder;
2895	struct drm_display_mode *mode;
2896
2897	seq_printf(m, "connector %d: type %s, status: %s\n",
2898		   connector->base.id, connector->name,
2899		   drm_get_connector_status_name(connector->status));
2900	if (connector->status == connector_status_connected) {
2901		seq_printf(m, "\tname: %s\n", connector->display_info.name);
2902		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2903			   connector->display_info.width_mm,
2904			   connector->display_info.height_mm);
2905		seq_printf(m, "\tsubpixel order: %s\n",
2906			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2907		seq_printf(m, "\tCEA rev: %d\n",
2908			   connector->display_info.cea_rev);
2909	}
2910	if (intel_encoder) {
2911		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
2912		    intel_encoder->type == INTEL_OUTPUT_EDP)
2913			intel_dp_info(m, intel_connector);
2914		else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
2915			intel_hdmi_info(m, intel_connector);
2916		else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2917			intel_lvds_info(m, intel_connector);
2918	}
2919
2920	seq_printf(m, "\tmodes:\n");
2921	list_for_each_entry(mode, &connector->modes, head)
2922		intel_seq_print_mode(m, 2, mode);
2923}
2924
2925static bool cursor_active(struct drm_device *dev, int pipe)
2926{
2927	struct drm_i915_private *dev_priv = dev->dev_private;
2928	u32 state;
2929
2930	if (IS_845G(dev) || IS_I865G(dev))
2931		state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
2932	else
2933		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
2934
2935	return state;
2936}
2937
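/*
 * CURPOS stores the cursor coordinates as sign-magnitude values: a
 * magnitude in CURSOR_POS_MASK plus a separate CURSOR_POS_SIGN bit,
 * which is why the decoded x/y are negated below rather than
 * sign-extended.
 */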
2938static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
2939{
2940	struct drm_i915_private *dev_priv = dev->dev_private;
2941	u32 pos;
2942
2943	pos = I915_READ(CURPOS(pipe));
2944
2945	*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
2946	if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
2947		*x = -*x;
2948
2949	*y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
2950	if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
2951		*y = -*y;
2952
2953	return cursor_active(dev, pipe);
2954}
2955
2956static const char *plane_type(enum drm_plane_type type)
2957{
2958	switch (type) {
2959	case DRM_PLANE_TYPE_OVERLAY:
2960		return "OVL";
2961	case DRM_PLANE_TYPE_PRIMARY:
2962		return "PRI";
2963	case DRM_PLANE_TYPE_CURSOR:
2964		return "CUR";
2965	/*
2966	 * Deliberately omitting default: to generate compiler warnings
2967	 * when a new drm_plane_type gets added.
2968	 */
2969	}
2970
2971	return "unknown";
2972}
2973
2974static const char *plane_rotation(unsigned int rotation)
2975{
2976	static char buf[48];
2977	/*
2978	 * According to the docs only one DRM_ROTATE_ value is allowed at a
2979	 * time, but print them all to make any misuse of the field visible.
2980	 */
2981	snprintf(buf, sizeof(buf),
2982		 "%s%s%s%s%s%s(0x%08x)",
2983		 (rotation & BIT(DRM_ROTATE_0)) ? "0 " : "",
2984		 (rotation & BIT(DRM_ROTATE_90)) ? "90 " : "",
2985		 (rotation & BIT(DRM_ROTATE_180)) ? "180 " : "",
2986		 (rotation & BIT(DRM_ROTATE_270)) ? "270 " : "",
2987		 (rotation & BIT(DRM_REFLECT_X)) ? "FLIPX " : "",
2988		 (rotation & BIT(DRM_REFLECT_Y)) ? "FLIPY " : "",
2989		 rotation);
2990
2991	return buf;
2992}
2993
2994static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2995{
2996	struct drm_info_node *node = m->private;
2997	struct drm_device *dev = node->minor->dev;
2998	struct intel_plane *intel_plane;
2999
3000	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3001		struct drm_plane_state *state;
3002		struct drm_plane *plane = &intel_plane->base;
3003
3004		if (!plane->state) {
3005			seq_puts(m, "plane->state is NULL!\n");
3006			continue;
3007		}
3008
3009		state = plane->state;
3010
3011		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3012			   plane->base.id,
3013			   plane_type(intel_plane->base.type),
3014			   state->crtc_x, state->crtc_y,
3015			   state->crtc_w, state->crtc_h,
3016			   (state->src_x >> 16),
3017			   ((state->src_x & 0xffff) * 15625) >> 10,
3018			   (state->src_y >> 16),
3019			   ((state->src_y & 0xffff) * 15625) >> 10,
3020			   (state->src_w >> 16),
3021			   ((state->src_w & 0xffff) * 15625) >> 10,
3022			   (state->src_h >> 16),
3023			   ((state->src_h & 0xffff) * 15625) >> 10,
3024			   state->fb ? drm_get_format_name(state->fb->pixel_format) : "N/A",
3025			   plane_rotation(state->rotation));
3026	}
3027}
3028
3029static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3030{
3031	struct intel_crtc_state *pipe_config;
3032	int num_scalers = intel_crtc->num_scalers;
3033	int i;
3034
3035	pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3036
3037	/* Not all platforms have a scaler */
3038	if (num_scalers) {
3039		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3040			   num_scalers,
3041			   pipe_config->scaler_state.scaler_users,
3042			   pipe_config->scaler_state.scaler_id);
3043
3044		for (i = 0; i < SKL_NUM_SCALERS; i++) {
3045			struct intel_scaler *sc =
3046					&pipe_config->scaler_state.scalers[i];
3047
3048			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3049				   i, yesno(sc->in_use), sc->mode);
3050		}
3051		seq_puts(m, "\n");
3052	} else {
3053		seq_puts(m, "\tNo scalers available on this platform\n");
3054	}
3055}
3056
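/*
 * One-stop display topology dump: every CRTC with its cursor, scaler
 * and plane state, followed by all connectors.
 */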
3057static int i915_display_info(struct seq_file *m, void *unused)
3058{
3059	struct drm_info_node *node = m->private;
3060	struct drm_device *dev = node->minor->dev;
3061	struct drm_i915_private *dev_priv = dev->dev_private;
3062	struct intel_crtc *crtc;
3063	struct drm_connector *connector;
3064
3065	intel_runtime_pm_get(dev_priv);
3066	drm_modeset_lock_all(dev);
3067	seq_printf(m, "CRTC info\n");
3068	seq_printf(m, "---------\n");
3069	for_each_intel_crtc(dev, crtc) {
3070		bool active;
3071		struct intel_crtc_state *pipe_config;
3072		int x, y;
3073
3074		pipe_config = to_intel_crtc_state(crtc->base.state);
3075
3076		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3077			   crtc->base.base.id, pipe_name(crtc->pipe),
3078			   yesno(pipe_config->base.active),
3079			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3080			   yesno(pipe_config->dither), pipe_config->pipe_bpp);
3081
3082		if (pipe_config->base.active) {
3083			intel_crtc_info(m, crtc);
3084
3085			active = cursor_position(dev, crtc->pipe, &x, &y);
3086			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
3087				   yesno(crtc->cursor_base),
3088				   x, y, crtc->base.cursor->state->crtc_w,
3089				   crtc->base.cursor->state->crtc_h,
3090				   crtc->cursor_addr, yesno(active));
3091			intel_scaler_info(m, crtc);
3092			intel_plane_info(m, crtc);
3093		}
3094
3095		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
3096			   yesno(!crtc->cpu_fifo_underrun_disabled),
3097			   yesno(!crtc->pch_fifo_underrun_disabled));
3098	}
3099
3100	seq_printf(m, "\n");
3101	seq_printf(m, "Connector info\n");
3102	seq_printf(m, "--------------\n");
3103	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3104		intel_connector_info(m, connector);
3105	}
3106	drm_modeset_unlock_all(dev);
3107	intel_runtime_pm_put(dev_priv);
3108
3109	return 0;
3110}
3111
3112static int i915_semaphore_status(struct seq_file *m, void *unused)
3113{
3114	struct drm_info_node *node = (struct drm_info_node *) m->private;
3115	struct drm_device *dev = node->minor->dev;
3116	struct drm_i915_private *dev_priv = dev->dev_private;
3117	struct intel_engine_cs *ring;
3118	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
3119	int i, j, ret;
3120
3121	if (!i915_semaphore_is_enabled(dev)) {
3122		seq_puts(m, "Semaphores are disabled\n");
3123		return 0;
3124	}
3125
3126	ret = mutex_lock_interruptible(&dev->struct_mutex);
3127	if (ret)
3128		return ret;
3129	intel_runtime_pm_get(dev_priv);
3130
3131	if (IS_BROADWELL(dev)) {
3132		struct page *page;
3133		uint64_t *seqno;
3134
3135		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
3136
3137		seqno = (uint64_t *)kmap_atomic(page);
3138		for_each_ring(ring, dev_priv, i) {
3139			uint64_t offset;
3140
3141			seq_printf(m, "%s\n", ring->name);
3142
3143			seq_puts(m, "  Last signal:");
3144			for (j = 0; j < num_rings; j++) {
3145				offset = i * I915_NUM_RINGS + j;
3146				seq_printf(m, "0x%08llx (0x%02llx) ",
3147					   seqno[offset], offset * 8);
3148			}
3149			seq_putc(m, '\n');
3150
3151			seq_puts(m, "  Last wait:  ");
3152			for (j = 0; j < num_rings; j++) {
3153				offset = i + (j * I915_NUM_RINGS);
3154				seq_printf(m, "0x%08llx (0x%02llx) ",
3155					   seqno[offset], offset * 8);
3156			}
3157			seq_putc(m, '\n');
3158
3159		}
3160		kunmap_atomic(seqno);
3161	} else {
3162		seq_puts(m, "  Last signal:");
3163		for_each_ring(ring, dev_priv, i)
3164			for (j = 0; j < num_rings; j++)
3165				seq_printf(m, "0x%08x\n",
3166					   I915_READ(ring->semaphore.mbox.signal[j]));
3167		seq_putc(m, '\n');
3168	}
3169
3170	seq_puts(m, "\nSync seqno:\n");
3171	for_each_ring(ring, dev_priv, i) {
3172		for (j = 0; j < num_rings; j++) {
3173			seq_printf(m, "  0x%08x ", ring->semaphore.sync_seqno[j]);
3174		}
3175		seq_putc(m, '\n');
3176	}
3177	seq_putc(m, '\n');
3178
3179	intel_runtime_pm_put(dev_priv);
3180	mutex_unlock(&dev->struct_mutex);
3181	return 0;
3182}
3183
3184static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3185{
3186	struct drm_info_node *node = (struct drm_info_node *) m->private;
3187	struct drm_device *dev = node->minor->dev;
3188	struct drm_i915_private *dev_priv = dev->dev_private;
3189	int i;
3190
3191	drm_modeset_lock_all(dev);
3192	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3193		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3194
3195		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
3196		seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n",
3197			   pll->config.crtc_mask, pll->active, yesno(pll->on));
3198		seq_printf(m, " tracked hardware state:\n");
3199		seq_printf(m, " dpll:    0x%08x\n", pll->config.hw_state.dpll);
3200		seq_printf(m, " dpll_md: 0x%08x\n",
3201			   pll->config.hw_state.dpll_md);
3202		seq_printf(m, " fp0:     0x%08x\n", pll->config.hw_state.fp0);
3203		seq_printf(m, " fp1:     0x%08x\n", pll->config.hw_state.fp1);
3204		seq_printf(m, " wrpll:   0x%08x\n", pll->config.hw_state.wrpll);
3205	}
3206	drm_modeset_unlock_all(dev);
3207
3208	return 0;
3209}
3210
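/*
 * Verify the workaround registers: re-read each one and compare the
 * masked value against what the driver programmed, flagging mismatches.
 */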
3211static int i915_wa_registers(struct seq_file *m, void *unused)
3212{
3213	int i;
3214	int ret;
3215	struct intel_engine_cs *ring;
3216	struct drm_info_node *node = (struct drm_info_node *) m->private;
3217	struct drm_device *dev = node->minor->dev;
3218	struct drm_i915_private *dev_priv = dev->dev_private;
3219	struct i915_workarounds *workarounds = &dev_priv->workarounds;
3220
3221	ret = mutex_lock_interruptible(&dev->struct_mutex);
3222	if (ret)
3223		return ret;
3224
3225	intel_runtime_pm_get(dev_priv);
3226
3227	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
3228	for_each_ring(ring, dev_priv, i)
3229		seq_printf(m, "HW whitelist count for %s: %d\n",
3230			   ring->name, workarounds->hw_whitelist_count[i]);
3231	for (i = 0; i < workarounds->count; ++i) {
3232		i915_reg_t addr;
3233		u32 mask, value, read;
3234		bool ok;
3235
3236		addr = workarounds->reg[i].addr;
3237		mask = workarounds->reg[i].mask;
3238		value = workarounds->reg[i].value;
3239		read = I915_READ(addr);
3240		ok = (value & mask) == (read & mask);
3241		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
3242			   i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
3243	}
3244
3245	intel_runtime_pm_put(dev_priv);
3246	mutex_unlock(&dev->struct_mutex);
3247
3248	return 0;
3249}
3250
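/*
 * Dump the SKL+ display data-buffer (DDB) allocation: the block range
 * assigned to each plane and to the cursor on every pipe.
 */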
3251static int i915_ddb_info(struct seq_file *m, void *unused)
3252{
3253	struct drm_info_node *node = m->private;
3254	struct drm_device *dev = node->minor->dev;
3255	struct drm_i915_private *dev_priv = dev->dev_private;
3256	struct skl_ddb_allocation *ddb;
3257	struct skl_ddb_entry *entry;
3258	enum pipe pipe;
3259	int plane;
3260
3261	if (INTEL_INFO(dev)->gen < 9)
3262		return 0;
3263
3264	drm_modeset_lock_all(dev);
3265
3266	ddb = &dev_priv->wm.skl_hw.ddb;
3267
3268	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3269
3270	for_each_pipe(dev_priv, pipe) {
3271		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3272
3273		for_each_plane(dev_priv, pipe, plane) {
3274			entry = &ddb->plane[pipe][plane];
3275			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
3276				   entry->start, entry->end,
3277				   skl_ddb_entry_size(entry));
3278		}
3279
3280		entry = &ddb->plane[pipe][PLANE_CURSOR];
3281		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
3282			   entry->end, skl_ddb_entry_size(entry));
3283	}
3284
3285	drm_modeset_unlock_all(dev);
3286
3287	return 0;
3288}
3289
3290static void drrs_status_per_crtc(struct seq_file *m,
3291		struct drm_device *dev, struct intel_crtc *intel_crtc)
3292{
3293	struct intel_encoder *intel_encoder;
3294	struct drm_i915_private *dev_priv = dev->dev_private;
3295	struct i915_drrs *drrs = &dev_priv->drrs;
3296	int vrefresh = 0;
3297
3298	for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) {
3299		/* Encoder connected on this CRTC */
3300		switch (intel_encoder->type) {
3301		case INTEL_OUTPUT_EDP:
3302			seq_puts(m, "eDP:\n");
3303			break;
3304		case INTEL_OUTPUT_DSI:
3305			seq_puts(m, "DSI:\n");
3306			break;
3307		case INTEL_OUTPUT_HDMI:
3308			seq_puts(m, "HDMI:\n");
3309			break;
3310		case INTEL_OUTPUT_DISPLAYPORT:
3311			seq_puts(m, "DP:\n");
3312			break;
3313		default:
3314			seq_printf(m, "Other encoder (type=%d).\n",
3315						intel_encoder->type);
3316			return;
3317		}
3318	}
3319
3320	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3321		seq_puts(m, "\tVBT: DRRS_type: Static");
3322	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3323		seq_puts(m, "\tVBT: DRRS_type: Seamless");
3324	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3325		seq_puts(m, "\tVBT: DRRS_type: None");
3326	else
3327		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3328
3329	seq_puts(m, "\n\n");
3330
3331	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
3332		struct intel_panel *panel;
3333
3334		mutex_lock(&drrs->mutex);
3335		/* DRRS Supported */
3336		seq_puts(m, "\tDRRS Supported: Yes\n");
3337
3338		/* disable_drrs() will make drrs->dp NULL */
3339		if (!drrs->dp) {
3340			seq_puts(m, "Idleness DRRS: Disabled");
3341			mutex_unlock(&drrs->mutex);
3342			return;
3343		}
3344
3345		panel = &drrs->dp->attached_connector->panel;
3346		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3347					drrs->busy_frontbuffer_bits);
3348
3349		seq_puts(m, "\n\t\t");
3350		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3351			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3352			vrefresh = panel->fixed_mode->vrefresh;
3353		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3354			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3355			vrefresh = panel->downclock_mode->vrefresh;
3356		} else {
3357			seq_printf(m, "DRRS_State: Unknown(%d)\n",
3358						drrs->refresh_rate_type);
3359			mutex_unlock(&drrs->mutex);
3360			return;
3361		}
3362		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3363
3364		seq_puts(m, "\n\t\t");
3365		mutex_unlock(&drrs->mutex);
3366	} else {
3367		/* DRRS not supported. Print the VBT parameter. */
3368		seq_puts(m, "\tDRRS Supported: No");
3369	}
3370	seq_puts(m, "\n");
3371}
3372
3373static int i915_drrs_status(struct seq_file *m, void *unused)
3374{
3375	struct drm_info_node *node = m->private;
3376	struct drm_device *dev = node->minor->dev;
3377	struct intel_crtc *intel_crtc;
3378	int active_crtc_cnt = 0;
3379
3380	for_each_intel_crtc(dev, intel_crtc) {
3381		drm_modeset_lock(&intel_crtc->base.mutex, NULL);
3382
3383		if (intel_crtc->base.state->active) {
3384			active_crtc_cnt++;
3385			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3386
3387			drrs_status_per_crtc(m, dev, intel_crtc);
3388		}
3389
3390		drm_modeset_unlock(&intel_crtc->base.mutex);
3391	}
3392
3393	if (!active_crtc_cnt)
3394		seq_puts(m, "No active crtc found\n");
3395
3396	return 0;
3397}
3398
3399struct pipe_crc_info {
3400	const char *name;
3401	struct drm_device *dev;
3402	enum pipe pipe;
3403};
3404
3405static int i915_dp_mst_info(struct seq_file *m, void *unused)
3406{
3407	struct drm_info_node *node = (struct drm_info_node *) m->private;
3408	struct drm_device *dev = node->minor->dev;
3409	struct drm_encoder *encoder;
3410	struct intel_encoder *intel_encoder;
3411	struct intel_digital_port *intel_dig_port;
3412	drm_modeset_lock_all(dev);
3413	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3414		intel_encoder = to_intel_encoder(encoder);
3415		if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
3416			continue;
3417		intel_dig_port = enc_to_dig_port(encoder);
3418		if (!intel_dig_port->dp.can_mst)
3419			continue;
3420
3421		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3422	}
3423	drm_modeset_unlock_all(dev);
3424	return 0;
3425}
3426
3427static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
3428{
3429	struct pipe_crc_info *info = inode->i_private;
3430	struct drm_i915_private *dev_priv = info->dev->dev_private;
3431	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3432
3433	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
3434		return -ENODEV;
3435
3436	spin_lock_irq(&pipe_crc->lock);
3437
3438	if (pipe_crc->opened) {
3439		spin_unlock_irq(&pipe_crc->lock);
3440		return -EBUSY; /* already open */
3441	}
3442
3443	pipe_crc->opened = true;
3444	filep->private_data = inode->i_private;
3445
3446	spin_unlock_irq(&pipe_crc->lock);
3447
3448	return 0;
3449}
3450
3451static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
3452{
3453	struct pipe_crc_info *info = inode->i_private;
3454	struct drm_i915_private *dev_priv = info->dev->dev_private;
3455	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3456
3457	spin_lock_irq(&pipe_crc->lock);
3458	pipe_crc->opened = false;
3459	spin_unlock_irq(&pipe_crc->lock);
3460
3461	return 0;
3462}
3463
3464/* (6 fields, 8 chars each, space separated (5) + '\n') */
3465#define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
3466/* account for the terminating '\0' */
3467#define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)
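
/*
 * Illustrative sample of one entry as formatted by i915_pipe_crc_read()
 * below (frame counter plus five CRC words; the values are made up):
 *
 *     4919 89ab12cd 00000000 00000000 00000000 00000000
 */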
3468
3469static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
3470{
3471	assert_spin_locked(&pipe_crc->lock);
3472	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
3473			INTEL_PIPE_CRC_ENTRIES_NR);
3474}
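
/*
 * A quick worked example of the CIRC_CNT() math above (illustrative
 * numbers): with head == 5 and tail == 2 there are 3 entries pending;
 * the BUILD_BUG_ON_NOT_POWER_OF_2() check in i915_pipe_crc_read()
 * guarantees the masked arithmetic in <linux/circ_buf.h> is valid.
 */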
3475
3476static ssize_t
3477i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
3478		   loff_t *pos)
3479{
3480	struct pipe_crc_info *info = filep->private_data;
3481	struct drm_device *dev = info->dev;
3482	struct drm_i915_private *dev_priv = dev->dev_private;
3483	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3484	char buf[PIPE_CRC_BUFFER_LEN];
3485	int n_entries;
3486	ssize_t bytes_read;
3487
3488	/*
3489	 * Don't allow user space to provide buffers not big enough to hold
3490	 * a line of data.
3491	 */
3492	if (count < PIPE_CRC_LINE_LEN)
3493		return -EINVAL;
3494
3495	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
3496		return 0;
3497
3498	/* nothing to read */
3499	spin_lock_irq(&pipe_crc->lock);
3500	while (pipe_crc_data_count(pipe_crc) == 0) {
3501		int ret;
3502
3503		if (filep->f_flags & O_NONBLOCK) {
3504			spin_unlock_irq(&pipe_crc->lock);
3505			return -EAGAIN;
3506		}
3507
3508		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
3509				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
3510		if (ret) {
3511			spin_unlock_irq(&pipe_crc->lock);
3512			return ret;
3513		}
3514	}
3515
3516	/* We now have one or more entries to read */
3517	n_entries = count / PIPE_CRC_LINE_LEN;
3518
3519	bytes_read = 0;
3520	while (n_entries > 0) {
3521		struct intel_pipe_crc_entry *entry =
3522			&pipe_crc->entries[pipe_crc->tail];
3523		int ret;
3524
3525		if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
3526			     INTEL_PIPE_CRC_ENTRIES_NR) < 1)
3527			break;
3528
3529		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
3530		pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
3531
3532		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
3533				       "%8u %8x %8x %8x %8x %8x\n",
3534				       entry->frame, entry->crc[0],
3535				       entry->crc[1], entry->crc[2],
3536				       entry->crc[3], entry->crc[4]);
3537
3538		spin_unlock_irq(&pipe_crc->lock);
3539
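		/* copy_to_user() returns the number of bytes left uncopied */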
3540		ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN);
3541		if (ret == PIPE_CRC_LINE_LEN)
3542			return -EFAULT;
3543
3544		user_buf += PIPE_CRC_LINE_LEN;
3545		n_entries--;
3546
3547		spin_lock_irq(&pipe_crc->lock);
3548	}
3549
3550	spin_unlock_irq(&pipe_crc->lock);
3551
3552	return bytes_read;
3553}
3554
3555static const struct file_operations i915_pipe_crc_fops = {
3556	.owner = THIS_MODULE,
3557	.open = i915_pipe_crc_open,
3558	.read = i915_pipe_crc_read,
3559	.release = i915_pipe_crc_release,
3560};
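
/*
 * Minimal user-space sketch (illustrative only, not driver code) of
 * consuming the interface above; it assumes debugfs is mounted at
 * /sys/kernel/debug and that a CRC source was armed first via
 * i915_display_crc_ctl:
 *
 *	char line[64];
 *	ssize_t n;
 *	int fd = open("/sys/kernel/debug/dri/0/i915_pipe_A_crc", O_RDONLY);
 *
 *	while ((n = read(fd, line, sizeof(line))) > 0)
 *		fwrite(line, 1, n, stdout);	// one entry per line
 *	close(fd);
 */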
3561
3562static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
3563	{
3564		.name = "i915_pipe_A_crc",
3565		.pipe = PIPE_A,
3566	},
3567	{
3568		.name = "i915_pipe_B_crc",
3569		.pipe = PIPE_B,
3570	},
3571	{
3572		.name = "i915_pipe_C_crc",
3573		.pipe = PIPE_C,
3574	},
3575};
3576
3577static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
3578				enum pipe pipe)
3579{
3580	struct drm_device *dev = minor->dev;
3581	struct dentry *ent;
3582	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
3583
3584	info->dev = dev;
3585	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
3586				  &i915_pipe_crc_fops);
3587	if (!ent)
3588		return -ENOMEM;
3589
3590	return drm_add_fake_info_node(minor, ent, info);
3591}
3592
3593static const char * const pipe_crc_sources[] = {
3594	"none",
3595	"plane1",
3596	"plane2",
3597	"pf",
3598	"pipe",
3599	"TV",
3600	"DP-B",
3601	"DP-C",
3602	"DP-D",
3603	"auto",
3604};
3605
3606static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
3607{
3608	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
3609	return pipe_crc_sources[source];
3610}
3611
3612static int display_crc_ctl_show(struct seq_file *m, void *data)
3613{
3614	struct drm_device *dev = m->private;
3615	struct drm_i915_private *dev_priv = dev->dev_private;
3616	int i;
3617
3618	for (i = 0; i < I915_MAX_PIPES; i++)
3619		seq_printf(m, "%c %s\n", pipe_name(i),
3620			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));
3621
3622	return 0;
3623}
3624
3625static int display_crc_ctl_open(struct inode *inode, struct file *file)
3626{
3627	struct drm_device *dev = inode->i_private;
3628
3629	return single_open(file, display_crc_ctl_show, dev);
3630}
3631
3632static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
3633				 uint32_t *val)
3634{
3635	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3636		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
3637
3638	switch (*source) {
3639	case INTEL_PIPE_CRC_SOURCE_PIPE:
3640		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
3641		break;
3642	case INTEL_PIPE_CRC_SOURCE_NONE:
3643		*val = 0;
3644		break;
3645	default:
3646		return -EINVAL;
3647	}
3648
3649	return 0;
3650}
3651
3652static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
3653				     enum intel_pipe_crc_source *source)
3654{
3655	struct intel_encoder *encoder;
3656	struct intel_crtc *crtc;
3657	struct intel_digital_port *dig_port;
3658	int ret = 0;
3659
3660	*source = INTEL_PIPE_CRC_SOURCE_PIPE;
3661
3662	drm_modeset_lock_all(dev);
3663	for_each_intel_encoder(dev, encoder) {
3664		if (!encoder->base.crtc)
3665			continue;
3666
3667		crtc = to_intel_crtc(encoder->base.crtc);
3668
3669		if (crtc->pipe != pipe)
3670			continue;
3671
3672		switch (encoder->type) {
3673		case INTEL_OUTPUT_TVOUT:
3674			*source = INTEL_PIPE_CRC_SOURCE_TV;
3675			break;
3676		case INTEL_OUTPUT_DISPLAYPORT:
3677		case INTEL_OUTPUT_EDP:
3678			dig_port = enc_to_dig_port(&encoder->base);
3679			switch (dig_port->port) {
3680			case PORT_B:
3681				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
3682				break;
3683			case PORT_C:
3684				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
3685				break;
3686			case PORT_D:
3687				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
3688				break;
3689			default:
3690				WARN(1, "nonexistent DP port %c\n",
3691				     port_name(dig_port->port));
3692				break;
3693			}
3694			break;
3695		default:
3696			break;
3697		}
3698	}
3699	drm_modeset_unlock_all(dev);
3700
3701	return ret;
3702}
3703
3704static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
3705				enum pipe pipe,
3706				enum intel_pipe_crc_source *source,
3707				uint32_t *val)
3708{
3709	struct drm_i915_private *dev_priv = dev->dev_private;
3710	bool need_stable_symbols = false;
3711
3712	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
3713		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
3714		if (ret)
3715			return ret;
3716	}
3717
3718	switch (*source) {
3719	case INTEL_PIPE_CRC_SOURCE_PIPE:
3720		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
3721		break;
3722	case INTEL_PIPE_CRC_SOURCE_DP_B:
3723		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
3724		need_stable_symbols = true;
3725		break;
3726	case INTEL_PIPE_CRC_SOURCE_DP_C:
3727		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
3728		need_stable_symbols = true;
3729		break;
3730	case INTEL_PIPE_CRC_SOURCE_DP_D:
3731		if (!IS_CHERRYVIEW(dev))
3732			return -EINVAL;
3733		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
3734		need_stable_symbols = true;
3735		break;
3736	case INTEL_PIPE_CRC_SOURCE_NONE:
3737		*val = 0;
3738		break;
3739	default:
3740		return -EINVAL;
3741	}
3742
3743	/*
3744	 * When the pipe CRC tap point is after the transcoders we need
3745	 * to tweak symbol-level features to produce a deterministic series of
3746	 * symbols for a given frame. We need to reset those features only once
3747	 * a frame (instead of every nth symbol):
3748	 *   - DC-balance: used to ensure a better clock recovery from the data
3749	 *     link (SDVO)
3750	 *   - DisplayPort scrambling: used for EMI reduction
3751	 */
3752	if (need_stable_symbols) {
3753		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3754
3755		tmp |= DC_BALANCE_RESET_VLV;
3756		switch (pipe) {
3757		case PIPE_A:
3758			tmp |= PIPE_A_SCRAMBLE_RESET;
3759			break;
3760		case PIPE_B:
3761			tmp |= PIPE_B_SCRAMBLE_RESET;
3762			break;
3763		case PIPE_C:
3764			tmp |= PIPE_C_SCRAMBLE_RESET;
3765			break;
3766		default:
3767			return -EINVAL;
3768		}
3769		I915_WRITE(PORT_DFT2_G4X, tmp);
3770	}
3771
3772	return 0;
3773}
3774
3775static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
3776				 enum pipe pipe,
3777				 enum intel_pipe_crc_source *source,
3778				 uint32_t *val)
3779{
3780	struct drm_i915_private *dev_priv = dev->dev_private;
3781	bool need_stable_symbols = false;
3782
3783	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
3784		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
3785		if (ret)
3786			return ret;
3787	}
3788
3789	switch (*source) {
3790	case INTEL_PIPE_CRC_SOURCE_PIPE:
3791		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
3792		break;
3793	case INTEL_PIPE_CRC_SOURCE_TV:
3794		if (!SUPPORTS_TV(dev))
3795			return -EINVAL;
3796		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
3797		break;
3798	case INTEL_PIPE_CRC_SOURCE_DP_B:
3799		if (!IS_G4X(dev))
3800			return -EINVAL;
3801		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
3802		need_stable_symbols = true;
3803		break;
3804	case INTEL_PIPE_CRC_SOURCE_DP_C:
3805		if (!IS_G4X(dev))
3806			return -EINVAL;
3807		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
3808		need_stable_symbols = true;
3809		break;
3810	case INTEL_PIPE_CRC_SOURCE_DP_D:
3811		if (!IS_G4X(dev))
3812			return -EINVAL;
3813		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
3814		need_stable_symbols = true;
3815		break;
3816	case INTEL_PIPE_CRC_SOURCE_NONE:
3817		*val = 0;
3818		break;
3819	default:
3820		return -EINVAL;
3821	}
3822
3823	/*
3824	 * When the pipe CRC tap point is after the transcoders we need
3825	 * to tweak symbol-level features to produce a deterministic series of
3826	 * symbols for a given frame. We need to reset those features only once
3827	 * a frame (instead of every nth symbol):
3828	 *   - DC-balance: used to ensure a better clock recovery from the data
3829	 *     link (SDVO)
3830	 *   - DisplayPort scrambling: used for EMI reduction
3831	 */
3832	if (need_stable_symbols) {
3833		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3834
3835		WARN_ON(!IS_G4X(dev));
3836
3837		I915_WRITE(PORT_DFT_I9XX,
3838			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
3839
3840		if (pipe == PIPE_A)
3841			tmp |= PIPE_A_SCRAMBLE_RESET;
3842		else
3843			tmp |= PIPE_B_SCRAMBLE_RESET;
3844
3845		I915_WRITE(PORT_DFT2_G4X, tmp);
3846	}
3847
3848	return 0;
3849}
3850
3851static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
3852					 enum pipe pipe)
3853{
3854	struct drm_i915_private *dev_priv = dev->dev_private;
3855	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3856
3857	switch (pipe) {
3858	case PIPE_A:
3859		tmp &= ~PIPE_A_SCRAMBLE_RESET;
3860		break;
3861	case PIPE_B:
3862		tmp &= ~PIPE_B_SCRAMBLE_RESET;
3863		break;
3864	case PIPE_C:
3865		tmp &= ~PIPE_C_SCRAMBLE_RESET;
3866		break;
3867	default:
3868		return;
3869	}
3870	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
3871		tmp &= ~DC_BALANCE_RESET_VLV;
3872	I915_WRITE(PORT_DFT2_G4X, tmp);
3873
3874}
3875
3876static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
3877					 enum pipe pipe)
3878{
3879	struct drm_i915_private *dev_priv = dev->dev_private;
3880	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3881
3882	if (pipe == PIPE_A)
3883		tmp &= ~PIPE_A_SCRAMBLE_RESET;
3884	else
3885		tmp &= ~PIPE_B_SCRAMBLE_RESET;
3886	I915_WRITE(PORT_DFT2_G4X, tmp);
3887
3888	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
3889		I915_WRITE(PORT_DFT_I9XX,
3890			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
3891	}
3892}
3893
3894static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
3895				uint32_t *val)
3896{
3897	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3898		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
3899
3900	switch (*source) {
3901	case INTEL_PIPE_CRC_SOURCE_PLANE1:
3902		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
3903		break;
3904	case INTEL_PIPE_CRC_SOURCE_PLANE2:
3905		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
3906		break;
3907	case INTEL_PIPE_CRC_SOURCE_PIPE:
3908		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
3909		break;
3910	case INTEL_PIPE_CRC_SOURCE_NONE:
3911		*val = 0;
3912		break;
3913	default:
3914		return -EINVAL;
3915	}
3916
3917	return 0;
3918}
3919
3920static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
3921{
3922	struct drm_i915_private *dev_priv = dev->dev_private;
3923	struct intel_crtc *crtc =
3924		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
3925	struct intel_crtc_state *pipe_config;
3926	struct drm_atomic_state *state;
3927	int ret = 0;
3928
3929	drm_modeset_lock_all(dev);
3930	state = drm_atomic_state_alloc(dev);
3931	if (!state) {
3932		ret = -ENOMEM;
3933		goto out;
3934	}
3935
3936	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
3937	pipe_config = intel_atomic_get_crtc_state(state, crtc);
3938	if (IS_ERR(pipe_config)) {
3939		ret = PTR_ERR(pipe_config);
3940		goto out;
3941	}
3942
3943	pipe_config->pch_pfit.force_thru = enable;
3944	if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
3945	    pipe_config->pch_pfit.enabled != enable)
3946		pipe_config->base.connectors_changed = true;
3947
3948	ret = drm_atomic_commit(state);
3949out:
3950	drm_modeset_unlock_all(dev);
3951	WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
3952	if (ret)
3953		drm_atomic_state_free(state);
3954}
3955
3956static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
3957				enum pipe pipe,
3958				enum intel_pipe_crc_source *source,
3959				uint32_t *val)
3960{
3961	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3962		*source = INTEL_PIPE_CRC_SOURCE_PF;
3963
3964	switch (*source) {
3965	case INTEL_PIPE_CRC_SOURCE_PLANE1:
3966		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
3967		break;
3968	case INTEL_PIPE_CRC_SOURCE_PLANE2:
3969		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
3970		break;
3971	case INTEL_PIPE_CRC_SOURCE_PF:
3972		if (IS_HASWELL(dev) && pipe == PIPE_A)
3973			hsw_trans_edp_pipe_A_crc_wa(dev, true);
3974
3975		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
3976		break;
3977	case INTEL_PIPE_CRC_SOURCE_NONE:
3978		*val = 0;
3979		break;
3980	default:
3981		return -EINVAL;
3982	}
3983
3984	return 0;
3985}
3986
3987static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
3988			       enum intel_pipe_crc_source source)
3989{
3990	struct drm_i915_private *dev_priv = dev->dev_private;
3991	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
3992	struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
3993									pipe));
3994	enum intel_display_power_domain power_domain;
3995	u32 val = 0; /* shut up gcc */
3996	int ret;
3997
3998	if (pipe_crc->source == source)
3999		return 0;
4000
4001	/* forbid changing the source without going back to 'none' */
4002	if (pipe_crc->source && source)
4003		return -EINVAL;
4004
4005	power_domain = POWER_DOMAIN_PIPE(pipe);
4006	if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
4007		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
4008		return -EIO;
4009	}
4010
4011	if (IS_GEN2(dev))
4012		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
4013	else if (INTEL_INFO(dev)->gen < 5)
4014		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
4015	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
4016		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
4017	else if (IS_GEN5(dev) || IS_GEN6(dev))
4018		ret = ilk_pipe_crc_ctl_reg(&source, &val);
4019	else
4020		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
4021
4022	if (ret != 0)
4023		goto out;
4024
4025	/* none -> real source transition */
4026	if (source) {
4027		struct intel_pipe_crc_entry *entries;
4028
4029		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
4030				 pipe_name(pipe), pipe_crc_source_name(source));
4031
4032		entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
4033				  sizeof(pipe_crc->entries[0]),
4034				  GFP_KERNEL);
4035		if (!entries) {
4036			ret = -ENOMEM;
4037			goto out;
4038		}
4039
4040		/*
4041		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
4042		 * enabled and disabled dynamically based on package C states,
4043		 * user space can't make reliable use of the CRCs, so let's just
4044		 * completely disable it.
4045		 */
4046		hsw_disable_ips(crtc);
4047
4048		spin_lock_irq(&pipe_crc->lock);
4049		kfree(pipe_crc->entries);
4050		pipe_crc->entries = entries;
4051		pipe_crc->head = 0;
4052		pipe_crc->tail = 0;
4053		spin_unlock_irq(&pipe_crc->lock);
4054	}
4055
4056	pipe_crc->source = source;
4057
4058	I915_WRITE(PIPE_CRC_CTL(pipe), val);
4059	POSTING_READ(PIPE_CRC_CTL(pipe));
4060
4061	/* real source -> none transition */
4062	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
4063		struct intel_pipe_crc_entry *entries;
4064		struct intel_crtc *crtc =
4065			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
4066
4067		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
4068				 pipe_name(pipe));
4069
4070		drm_modeset_lock(&crtc->base.mutex, NULL);
4071		if (crtc->base.state->active)
4072			intel_wait_for_vblank(dev, pipe);
4073		drm_modeset_unlock(&crtc->base.mutex);
4074
4075		spin_lock_irq(&pipe_crc->lock);
4076		entries = pipe_crc->entries;
4077		pipe_crc->entries = NULL;
4078		pipe_crc->head = 0;
4079		pipe_crc->tail = 0;
4080		spin_unlock_irq(&pipe_crc->lock);
4081
4082		kfree(entries);
4083
4084		if (IS_G4X(dev))
4085			g4x_undo_pipe_scramble_reset(dev, pipe);
4086		else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
4087			vlv_undo_pipe_scramble_reset(dev, pipe);
4088		else if (IS_HASWELL(dev) && pipe == PIPE_A)
4089			hsw_trans_edp_pipe_A_crc_wa(dev, false);
4090
4091		hsw_enable_ips(crtc);
4092	}
4093
4094	ret = 0;
4095
4096out:
4097	intel_display_power_put(dev_priv, power_domain);
4098
4099	return ret;
4100}
4101
4102/*
4103 * Parse pipe CRC command strings:
4104 *   command: wsp* object wsp+ name wsp+ source wsp*
4105 *   object: 'pipe'
4106 *   name: (A | B | C)
4107 *   source: (none | plane1 | plane2 | pf)
4108 *   wsp: (#0x20 | #0x9 | #0xA)+
4109 *
4110 * e.g.:
4111 *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
4112 *  "pipe A none"    ->  Stop CRC
4113 */
4114static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
4115{
4116	int n_words = 0;
4117
4118	while (*buf) {
4119		char *end;
4120
4121		/* skip leading white space */
4122		buf = skip_spaces(buf);
4123		if (!*buf)
4124			break;	/* end of buffer */
4125
4126		/* find end of word */
4127		for (end = buf; *end && !isspace(*end); end++)
4128			;
4129
4130		if (n_words == max_words) {
4131			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
4132					 max_words);
4133			return -EINVAL;	/* ran out of words[] before bytes */
4134		}
4135
4136		if (*end)
4137			*end++ = '\0';
4138		words[n_words++] = buf;
4139		buf = end;
4140	}
4141
4142	return n_words;
4143}
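
/*
 * For example (illustrative): given buf = "  pipe A none\n" and
 * max_words = 3, the buffer is split in place into "pipe", "A" and
 * "none", words[] points at those substrings, and 3 is returned.
 */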
4144
4145enum intel_pipe_crc_object {
4146	PIPE_CRC_OBJECT_PIPE,
4147};
4148
4149static const char * const pipe_crc_objects[] = {
4150	"pipe",
4151};
4152
4153static int
4154display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
4155{
4156	int i;
4157
4158	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
4159		if (!strcmp(buf, pipe_crc_objects[i])) {
4160			*o = i;
4161			return 0;
4162		}
4163
4164	return -EINVAL;
4165}
4166
4167static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
4168{
4169	const char name = buf[0];
4170
4171	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
4172		return -EINVAL;
4173
4174	*pipe = name - 'A';
4175
4176	return 0;
4177}
4178
4179static int
4180display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
4181{
4182	int i;
4183
4184	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
4185		if (!strcmp(buf, pipe_crc_sources[i])) {
4186			*s = i;
4187			return 0;
4188		}
4189
4190	return -EINVAL;
4191}
4192
4193static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
4194{
4195#define N_WORDS 3
4196	int n_words;
4197	char *words[N_WORDS];
4198	enum pipe pipe;
4199	enum intel_pipe_crc_object object;
4200	enum intel_pipe_crc_source source;
4201
4202	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
4203	if (n_words != N_WORDS) {
4204		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
4205				 N_WORDS);
4206		return -EINVAL;
4207	}
4208
4209	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
4210		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
4211		return -EINVAL;
4212	}
4213
4214	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
4215		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
4216		return -EINVAL;
4217	}
4218
4219	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
4220		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
4221		return -EINVAL;
4222	}
4223
4224	return pipe_crc_set_source(dev, pipe, source);
4225}
4226
4227static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
4228				     size_t len, loff_t *offp)
4229{
4230	struct seq_file *m = file->private_data;
4231	struct drm_device *dev = m->private;
4232	char *tmpbuf;
4233	int ret;
4234
4235	if (len == 0)
4236		return 0;
4237
4238	if (len > PAGE_SIZE - 1) {
4239		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
4240				 PAGE_SIZE);
4241		return -E2BIG;
4242	}
4243
4244	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
4245	if (!tmpbuf)
4246		return -ENOMEM;
4247
4248	if (copy_from_user(tmpbuf, ubuf, len)) {
4249		ret = -EFAULT;
4250		goto out;
4251	}
4252	tmpbuf[len] = '\0';
4253
4254	ret = display_crc_ctl_parse(dev, tmpbuf, len);
4255
4256out:
4257	kfree(tmpbuf);
4258	if (ret < 0)
4259		return ret;
4260
4261	*offp += len;
4262	return len;
4263}
4264
4265static const struct file_operations i915_display_crc_ctl_fops = {
4266	.owner = THIS_MODULE,
4267	.open = display_crc_ctl_open,
4268	.read = seq_read,
4269	.llseek = seq_lseek,
4270	.release = single_release,
4271	.write = display_crc_ctl_write
4272};
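
/*
 * End-to-end sketch of driving the control file from user space
 * (illustrative only; error handling omitted):
 *
 *	int ctl = open("/sys/kernel/debug/dri/0/i915_display_crc_ctl",
 *		       O_WRONLY);
 *
 *	write(ctl, "pipe A plane1", 13);	// none -> plane1: arm
 *	// ... read i915_pipe_A_crc entries as sketched above ...
 *	write(ctl, "pipe A none", 11);		// plane1 -> none: disarm
 *	close(ctl);
 *
 * Note that pipe_crc_set_source() refuses to switch directly between
 * two real sources, hence the explicit trip through "none".
 */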
4273
4274static ssize_t i915_displayport_test_active_write(struct file *file,
4275					    const char __user *ubuf,
4276					    size_t len, loff_t *offp)
4277{
4278	char *input_buffer;
4279	int status = 0;
4280	struct drm_device *dev;
4281	struct drm_connector *connector;
4282	struct list_head *connector_list;
4283	struct intel_dp *intel_dp;
4284	int val = 0;
4285
4286	dev = ((struct seq_file *)file->private_data)->private;
4287
4288	connector_list = &dev->mode_config.connector_list;
4289
4290	if (len == 0)
4291		return 0;
4292
4293	input_buffer = kmalloc(len + 1, GFP_KERNEL);
4294	if (!input_buffer)
4295		return -ENOMEM;
4296
4297	if (copy_from_user(input_buffer, ubuf, len)) {
4298		status = -EFAULT;
4299		goto out;
4300	}
4301
4302	input_buffer[len] = '\0';
4303	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
4304
4305	list_for_each_entry(connector, connector_list, head) {
4306
4307		if (connector->connector_type !=
4308		    DRM_MODE_CONNECTOR_DisplayPort)
4309			continue;
4310
4311		if (connector->status == connector_status_connected &&
4312		    connector->encoder != NULL) {
4313			intel_dp = enc_to_intel_dp(connector->encoder);
4314			status = kstrtoint(input_buffer, 10, &val);
4315			if (status < 0)
4316				goto out;
4317			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
4318			/* To prevent erroneous activation of the compliance
4319			 * testing code, only accept an actual value of 1 here
4320			 */
4321			if (val == 1)
4322				intel_dp->compliance_test_active = 1;
4323			else
4324				intel_dp->compliance_test_active = 0;
4325		}
4326	}
4327out:
4328	kfree(input_buffer);
4329	if (status < 0)
4330		return status;
4331
4332	*offp += len;
4333	return len;
4334}
4335
4336static int i915_displayport_test_active_show(struct seq_file *m, void *data)
4337{
4338	struct drm_device *dev = m->private;
4339	struct drm_connector *connector;
4340	struct list_head *connector_list = &dev->mode_config.connector_list;
4341	struct intel_dp *intel_dp;
4342
4343	list_for_each_entry(connector, connector_list, head) {
4344
4345		if (connector->connector_type !=
4346		    DRM_MODE_CONNECTOR_DisplayPort)
4347			continue;
4348
4349		if (connector->status == connector_status_connected &&
4350		    connector->encoder != NULL) {
4351			intel_dp = enc_to_intel_dp(connector->encoder);
4352			if (intel_dp->compliance_test_active)
4353				seq_puts(m, "1");
4354			else
4355				seq_puts(m, "0");
4356		} else
4357			seq_puts(m, "0");
4358	}
4359
4360	return 0;
4361}
4362
4363static int i915_displayport_test_active_open(struct inode *inode,
4364				       struct file *file)
4365{
4366	struct drm_device *dev = inode->i_private;
4367
4368	return single_open(file, i915_displayport_test_active_show, dev);
4369}
4370
4371static const struct file_operations i915_displayport_test_active_fops = {
4372	.owner = THIS_MODULE,
4373	.open = i915_displayport_test_active_open,
4374	.read = seq_read,
4375	.llseek = seq_lseek,
4376	.release = single_release,
4377	.write = i915_displayport_test_active_write
4378};
4379
4380static int i915_displayport_test_data_show(struct seq_file *m, void *data)
4381{
4382	struct drm_device *dev = m->private;
4383	struct drm_connector *connector;
4384	struct list_head *connector_list = &dev->mode_config.connector_list;
4385	struct intel_dp *intel_dp;
4386
4387	list_for_each_entry(connector, connector_list, head) {
4388
4389		if (connector->connector_type !=
4390		    DRM_MODE_CONNECTOR_DisplayPort)
4391			continue;
4392
4393		if (connector->status == connector_status_connected &&
4394		    connector->encoder != NULL) {
4395			intel_dp = enc_to_intel_dp(connector->encoder);
4396			seq_printf(m, "%lx", intel_dp->compliance_test_data);
4397		} else
4398			seq_puts(m, "0");
4399	}
4400
4401	return 0;
4402}
4403static int i915_displayport_test_data_open(struct inode *inode,
4404				       struct file *file)
4405{
4406	struct drm_device *dev = inode->i_private;
4407
4408	return single_open(file, i915_displayport_test_data_show, dev);
4409}
4410
4411static const struct file_operations i915_displayport_test_data_fops = {
4412	.owner = THIS_MODULE,
4413	.open = i915_displayport_test_data_open,
4414	.read = seq_read,
4415	.llseek = seq_lseek,
4416	.release = single_release
4417};
4418
4419static int i915_displayport_test_type_show(struct seq_file *m, void *data)
4420{
4421	struct drm_device *dev = m->private;
4422	struct drm_connector *connector;
4423	struct list_head *connector_list = &dev->mode_config.connector_list;
4424	struct intel_dp *intel_dp;
4425
4426	list_for_each_entry(connector, connector_list, head) {
4427
4428		if (connector->connector_type !=
4429		    DRM_MODE_CONNECTOR_DisplayPort)
4430			continue;
4431
4432		if (connector->status == connector_status_connected &&
4433		    connector->encoder != NULL) {
4434			intel_dp = enc_to_intel_dp(connector->encoder);
4435			seq_printf(m, "%02lx", intel_dp->compliance_test_type);
4436		} else
4437			seq_puts(m, "0");
4438	}
4439
4440	return 0;
4441}
4442
4443static int i915_displayport_test_type_open(struct inode *inode,
4444				       struct file *file)
4445{
4446	struct drm_device *dev = inode->i_private;
4447
4448	return single_open(file, i915_displayport_test_type_show, dev);
4449}
4450
4451static const struct file_operations i915_displayport_test_type_fops = {
4452	.owner = THIS_MODULE,
4453	.open = i915_displayport_test_type_open,
4454	.read = seq_read,
4455	.llseek = seq_lseek,
4456	.release = single_release
4457};
4458
4459static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
4460{
4461	struct drm_device *dev = m->private;
4462	int level;
4463	int num_levels;
4464
4465	if (IS_CHERRYVIEW(dev))
4466		num_levels = 3;
4467	else if (IS_VALLEYVIEW(dev))
4468		num_levels = 1;
4469	else
4470		num_levels = ilk_wm_max_level(dev) + 1;
4471
4472	drm_modeset_lock_all(dev);
4473
4474	for (level = 0; level < num_levels; level++) {
4475		unsigned int latency = wm[level];
4476
4477		/*
4478		 * - WM1+ latency values in 0.5us units
4479		 * - latencies are in us on gen9/vlv/chv
4480		 */
4481		if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev) ||
4482		    IS_CHERRYVIEW(dev))
4483			latency *= 10;
4484		else if (level > 0)
4485			latency *= 5;
4486
4487		seq_printf(m, "WM%d %u (%u.%u usec)\n",
4488			   level, wm[level], latency / 10, latency % 10);
4489	}
4490
4491	drm_modeset_unlock_all(dev);
4492}
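
/*
 * Worked example of the scaling above (illustrative numbers): on an
 * ILK-class platform a raw WM2 value of 12 is in 0.5us units, so
 * latency becomes 12 * 5 = 60 tenths of a microsecond and is printed
 * as "WM2 12 (6.0 usec)".
 */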
4493
4494static int pri_wm_latency_show(struct seq_file *m, void *data)
4495{
4496	struct drm_device *dev = m->private;
4497	struct drm_i915_private *dev_priv = dev->dev_private;
4498	const uint16_t *latencies;
4499
4500	if (INTEL_INFO(dev)->gen >= 9)
4501		latencies = dev_priv->wm.skl_latency;
4502	else
4503		latencies = to_i915(dev)->wm.pri_latency;
4504
4505	wm_latency_show(m, latencies);
4506
4507	return 0;
4508}
4509
4510static int spr_wm_latency_show(struct seq_file *m, void *data)
4511{
4512	struct drm_device *dev = m->private;
4513	struct drm_i915_private *dev_priv = dev->dev_private;
4514	const uint16_t *latencies;
4515
4516	if (INTEL_INFO(dev)->gen >= 9)
4517		latencies = dev_priv->wm.skl_latency;
4518	else
4519		latencies = to_i915(dev)->wm.spr_latency;
4520
4521	wm_latency_show(m, latencies);
4522
4523	return 0;
4524}
4525
4526static int cur_wm_latency_show(struct seq_file *m, void *data)
4527{
4528	struct drm_device *dev = m->private;
4529	struct drm_i915_private *dev_priv = dev->dev_private;
4530	const uint16_t *latencies;
4531
4532	if (INTEL_INFO(dev)->gen >= 9)
4533		latencies = dev_priv->wm.skl_latency;
4534	else
4535		latencies = to_i915(dev)->wm.cur_latency;
4536
4537	wm_latency_show(m, latencies);
4538
4539	return 0;
4540}
4541
4542static int pri_wm_latency_open(struct inode *inode, struct file *file)
4543{
4544	struct drm_device *dev = inode->i_private;
4545
4546	if (INTEL_INFO(dev)->gen < 5)
4547		return -ENODEV;
4548
4549	return single_open(file, pri_wm_latency_show, dev);
4550}
4551
4552static int spr_wm_latency_open(struct inode *inode, struct file *file)
4553{
4554	struct drm_device *dev = inode->i_private;
4555
4556	if (HAS_GMCH_DISPLAY(dev))
4557		return -ENODEV;
4558
4559	return single_open(file, spr_wm_latency_show, dev);
4560}
4561
4562static int cur_wm_latency_open(struct inode *inode, struct file *file)
4563{
4564	struct drm_device *dev = inode->i_private;
4565
4566	if (HAS_GMCH_DISPLAY(dev))
4567		return -ENODEV;
4568
4569	return single_open(file, cur_wm_latency_show, dev);
4570}
4571
4572static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
4573				size_t len, loff_t *offp, uint16_t wm[8])
4574{
4575	struct seq_file *m = file->private_data;
4576	struct drm_device *dev = m->private;
4577	uint16_t new[8] = { 0 };
4578	int num_levels;
4579	int level;
4580	int ret;
4581	char tmp[32];
4582
4583	if (IS_CHERRYVIEW(dev))
4584		num_levels = 3;
4585	else if (IS_VALLEYVIEW(dev))
4586		num_levels = 1;
4587	else
4588		num_levels = ilk_wm_max_level(dev) + 1;
4589
4590	if (len >= sizeof(tmp))
4591		return -EINVAL;
4592
4593	if (copy_from_user(tmp, ubuf, len))
4594		return -EFAULT;
4595
4596	tmp[len] = '\0';
4597
4598	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
4599		     &new[0], &new[1], &new[2], &new[3],
4600		     &new[4], &new[5], &new[6], &new[7]);
4601	if (ret != num_levels)
4602		return -EINVAL;
4603
4604	drm_modeset_lock_all(dev);
4605
4606	for (level = 0; level < num_levels; level++)
4607		wm[level] = new[level];
4608
4609	drm_modeset_unlock_all(dev);
4610
4611	return len;
4612}
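
/*
 * For example (illustrative): on CHV (num_levels == 3) writing the
 * string "3 9 9" updates wm[0..2], while writing only "3 9" makes the
 * sscanf() match count fall short of num_levels and returns -EINVAL.
 */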
4613
4614
4615static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
4616				    size_t len, loff_t *offp)
4617{
4618	struct seq_file *m = file->private_data;
4619	struct drm_device *dev = m->private;
4620	struct drm_i915_private *dev_priv = dev->dev_private;
4621	uint16_t *latencies;
4622
4623	if (INTEL_INFO(dev)->gen >= 9)
4624		latencies = dev_priv->wm.skl_latency;
4625	else
4626		latencies = to_i915(dev)->wm.pri_latency;
4627
4628	return wm_latency_write(file, ubuf, len, offp, latencies);
4629}
4630
4631static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
4632				    size_t len, loff_t *offp)
4633{
4634	struct seq_file *m = file->private_data;
4635	struct drm_device *dev = m->private;
4636	struct drm_i915_private *dev_priv = dev->dev_private;
4637	uint16_t *latencies;
4638
4639	if (INTEL_INFO(dev)->gen >= 9)
4640		latencies = dev_priv->wm.skl_latency;
4641	else
4642		latencies = to_i915(dev)->wm.spr_latency;
4643
4644	return wm_latency_write(file, ubuf, len, offp, latencies);
4645}
4646
4647static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
4648				    size_t len, loff_t *offp)
4649{
4650	struct seq_file *m = file->private_data;
4651	struct drm_device *dev = m->private;
4652	struct drm_i915_private *dev_priv = dev->dev_private;
4653	uint16_t *latencies;
4654
4655	if (INTEL_INFO(dev)->gen >= 9)
4656		latencies = dev_priv->wm.skl_latency;
4657	else
4658		latencies = to_i915(dev)->wm.cur_latency;
4659
4660	return wm_latency_write(file, ubuf, len, offp, latencies);
4661}
4662
4663static const struct file_operations i915_pri_wm_latency_fops = {
4664	.owner = THIS_MODULE,
4665	.open = pri_wm_latency_open,
4666	.read = seq_read,
4667	.llseek = seq_lseek,
4668	.release = single_release,
4669	.write = pri_wm_latency_write
4670};
4671
4672static const struct file_operations i915_spr_wm_latency_fops = {
4673	.owner = THIS_MODULE,
4674	.open = spr_wm_latency_open,
4675	.read = seq_read,
4676	.llseek = seq_lseek,
4677	.release = single_release,
4678	.write = spr_wm_latency_write
4679};
4680
4681static const struct file_operations i915_cur_wm_latency_fops = {
4682	.owner = THIS_MODULE,
4683	.open = cur_wm_latency_open,
4684	.read = seq_read,
4685	.llseek = seq_lseek,
4686	.release = single_release,
4687	.write = cur_wm_latency_write
4688};
4689
4690static int
4691i915_wedged_get(void *data, u64 *val)
4692{
4693	struct drm_device *dev = data;
4694	struct drm_i915_private *dev_priv = dev->dev_private;
4695
4696	*val = atomic_read(&dev_priv->gpu_error.reset_counter);
4697
4698	return 0;
4699}
4700
4701static int
4702i915_wedged_set(void *data, u64 val)
4703{
4704	struct drm_device *dev = data;
4705	struct drm_i915_private *dev_priv = dev->dev_private;
4706
4707	/*
4708	 * There is no safeguard against this debugfs entry colliding
4709	 * with the hangcheck calling same i915_handle_error() in
4710	 * parallel, causing an explosion. For now we assume that the
4711	 * test harness is responsible enough not to inject gpu hangs
4712	 * while it is writing to 'i915_wedged'
4713	 */
4714
4715	if (i915_reset_in_progress(&dev_priv->gpu_error))
4716		return -EAGAIN;
4717
4718	intel_runtime_pm_get(dev_priv);
4719
4720	i915_handle_error(dev, val,
4721			  "Manually setting wedged to %llu", val);
4722
4723	intel_runtime_pm_put(dev_priv);
4724
4725	return 0;
4726}
4727
4728DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4729			i915_wedged_get, i915_wedged_set,
4730			"%llu\n");
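
/*
 * Illustrative use from a test harness: writing a non-zero value to
 * i915_wedged injects a hang via i915_handle_error(), while reading
 * back reports the current value of the gpu_error reset counter.
 */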
4731
4732static int
4733i915_ring_stop_get(void *data, u64 *val)
4734{
4735	struct drm_device *dev = data;
4736	struct drm_i915_private *dev_priv = dev->dev_private;
4737
4738	*val = dev_priv->gpu_error.stop_rings;
4739
4740	return 0;
4741}
4742
4743static int
4744i915_ring_stop_set(void *data, u64 val)
4745{
4746	struct drm_device *dev = data;
4747	struct drm_i915_private *dev_priv = dev->dev_private;
4748	int ret;
4749
4750	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
4751
4752	ret = mutex_lock_interruptible(&dev->struct_mutex);
4753	if (ret)
4754		return ret;
4755
4756	dev_priv->gpu_error.stop_rings = val;
4757	mutex_unlock(&dev->struct_mutex);
4758
4759	return 0;
4760}
4761
4762DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
4763			i915_ring_stop_get, i915_ring_stop_set,
4764			"0x%08llx\n");
4765
4766static int
4767i915_ring_missed_irq_get(void *data, u64 *val)
4768{
4769	struct drm_device *dev = data;
4770	struct drm_i915_private *dev_priv = dev->dev_private;
4771
4772	*val = dev_priv->gpu_error.missed_irq_rings;
4773	return 0;
4774}
4775
4776static int
4777i915_ring_missed_irq_set(void *data, u64 val)
4778{
4779	struct drm_device *dev = data;
4780	struct drm_i915_private *dev_priv = dev->dev_private;
4781	int ret;
4782
4783	/* Lock against concurrent debugfs callers */
4784	ret = mutex_lock_interruptible(&dev->struct_mutex);
4785	if (ret)
4786		return ret;
4787	dev_priv->gpu_error.missed_irq_rings = val;
4788	mutex_unlock(&dev->struct_mutex);
4789
4790	return 0;
4791}
4792
4793DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4794			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4795			"0x%08llx\n");
4796
4797static int
4798i915_ring_test_irq_get(void *data, u64 *val)
4799{
4800	struct drm_device *dev = data;
4801	struct drm_i915_private *dev_priv = dev->dev_private;
4802
4803	*val = dev_priv->gpu_error.test_irq_rings;
4804
4805	return 0;
4806}
4807
4808static int
4809i915_ring_test_irq_set(void *data, u64 val)
4810{
4811	struct drm_device *dev = data;
4812	struct drm_i915_private *dev_priv = dev->dev_private;
4813	int ret;
4814
4815	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
4816
4817	/* Lock against concurrent debugfs callers */
4818	ret = mutex_lock_interruptible(&dev->struct_mutex);
4819	if (ret)
4820		return ret;
4821
4822	dev_priv->gpu_error.test_irq_rings = val;
4823	mutex_unlock(&dev->struct_mutex);
4824
4825	return 0;
4826}
4827
4828DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4829			i915_ring_test_irq_get, i915_ring_test_irq_set,
4830			"0x%08llx\n");
4831
4832#define DROP_UNBOUND 0x1
4833#define DROP_BOUND 0x2
4834#define DROP_RETIRE 0x4
4835#define DROP_ACTIVE 0x8
4836#define DROP_ALL (DROP_UNBOUND | \
4837		  DROP_BOUND | \
4838		  DROP_RETIRE | \
4839		  DROP_ACTIVE)
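
/*
 * For example (illustrative): writing 0x3 to i915_gem_drop_caches drops
 * only unbound and bound objects, while DROP_ALL (0xf) additionally
 * retires outstanding requests and idles the GPU first.
 */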
4840static int
4841i915_drop_caches_get(void *data, u64 *val)
4842{
4843	*val = DROP_ALL;
4844
4845	return 0;
4846}
4847
4848static int
4849i915_drop_caches_set(void *data, u64 val)
4850{
4851	struct drm_device *dev = data;
4852	struct drm_i915_private *dev_priv = dev->dev_private;
4853	int ret;
4854
4855	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
4856
4857	/* No need to check and wait for gpu resets; libdrm only auto-restarts
4858	 * ioctls on -EAGAIN. */
4859	ret = mutex_lock_interruptible(&dev->struct_mutex);
4860	if (ret)
4861		return ret;
4862
4863	if (val & DROP_ACTIVE) {
4864		ret = i915_gpu_idle(dev);
4865		if (ret)
4866			goto unlock;
4867	}
4868
4869	if (val & (DROP_RETIRE | DROP_ACTIVE))
4870		i915_gem_retire_requests(dev);
4871
4872	if (val & DROP_BOUND)
4873		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
4874
4875	if (val & DROP_UNBOUND)
4876		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
4877
4878unlock:
4879	mutex_unlock(&dev->struct_mutex);
4880
4881	return ret;
4882}
4883
4884DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4885			i915_drop_caches_get, i915_drop_caches_set,
4886			"0x%08llx\n");
4887
4888static int
4889i915_max_freq_get(void *data, u64 *val)
4890{
4891	struct drm_device *dev = data;
4892	struct drm_i915_private *dev_priv = dev->dev_private;
4893	int ret;
4894
4895	if (INTEL_INFO(dev)->gen < 6)
4896		return -ENODEV;
4897
4898	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4899
4900	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4901	if (ret)
4902		return ret;
4903
4904	*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
4905	mutex_unlock(&dev_priv->rps.hw_lock);
4906
4907	return 0;
4908}
4909
4910static int
4911i915_max_freq_set(void *data, u64 val)
4912{
4913	struct drm_device *dev = data;
4914	struct drm_i915_private *dev_priv = dev->dev_private;
4915	u32 hw_max, hw_min;
4916	int ret;
4917
4918	if (INTEL_INFO(dev)->gen < 6)
4919		return -ENODEV;
4920
4921	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4922
4923	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
4924
4925	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4926	if (ret)
4927		return ret;
4928
4929	/*
4930	 * Turbo will still be enabled, but won't go above the set value.
4931	 */
4932	val = intel_freq_opcode(dev_priv, val);
4933
4934	hw_max = dev_priv->rps.max_freq;
4935	hw_min = dev_priv->rps.min_freq;
4936
4937	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
4938		mutex_unlock(&dev_priv->rps.hw_lock);
4939		return -EINVAL;
4940	}
4941
4942	dev_priv->rps.max_freq_softlimit = val;
4943
4944	intel_set_rps(dev, val);
4945
4946	mutex_unlock(&dev_priv->rps.hw_lock);
4947
4948	return 0;
4949}
4950
4951DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
4952			i915_max_freq_get, i915_max_freq_set,
4953			"%llu\n");
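
/*
 * For example (illustrative): writing 500 to i915_max_freq asks for a
 * 500 MHz softlimit; intel_freq_opcode() translates the MHz value into
 * the platform's native RPS units before it is range-checked against
 * rps.min_freq/rps.max_freq.
 */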
4954
4955static int
4956i915_min_freq_get(void *data, u64 *val)
4957{
4958	struct drm_device *dev = data;
4959	struct drm_i915_private *dev_priv = dev->dev_private;
4960	int ret;
4961
4962	if (INTEL_INFO(dev)->gen < 6)
4963		return -ENODEV;
4964
4965	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4966
4967	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4968	if (ret)
4969		return ret;
4970
4971	*val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
4972	mutex_unlock(&dev_priv->rps.hw_lock);
4973
4974	return 0;
4975}
4976
4977static int
4978i915_min_freq_set(void *data, u64 val)
4979{
4980	struct drm_device *dev = data;
4981	struct drm_i915_private *dev_priv = dev->dev_private;
4982	u32 hw_max, hw_min;
4983	int ret;
4984
4985	if (INTEL_INFO(dev)->gen < 6)
4986		return -ENODEV;
4987
4988	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4989
4990	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
4991
4992	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4993	if (ret)
4994		return ret;
4995
4996	/*
4997	 * Turbo will still be enabled, but won't go below the set value.
4998	 */
4999	val = intel_freq_opcode(dev_priv, val);
5000
5001	hw_max = dev_priv->rps.max_freq;
5002	hw_min = dev_priv->rps.min_freq;
5003
5004	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
5005		mutex_unlock(&dev_priv->rps.hw_lock);
5006		return -EINVAL;
5007	}
5008
5009	dev_priv->rps.min_freq_softlimit = val;
5010
5011	intel_set_rps(dev, val);
5012
5013	mutex_unlock(&dev_priv->rps.hw_lock);
5014
5015	return 0;
5016}
5017
5018DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
5019			i915_min_freq_get, i915_min_freq_set,
5020			"%llu\n");
5021
5022static int
5023i915_cache_sharing_get(void *data, u64 *val)
5024{
5025	struct drm_device *dev = data;
5026	struct drm_i915_private *dev_priv = dev->dev_private;
5027	u32 snpcr;
5028	int ret;
5029
5030	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
5031		return -ENODEV;
5032
5033	ret = mutex_lock_interruptible(&dev->struct_mutex);
5034	if (ret)
5035		return ret;
5036	intel_runtime_pm_get(dev_priv);
5037
5038	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
5039
5040	intel_runtime_pm_put(dev_priv);
5041	mutex_unlock(&dev->struct_mutex);
5042
5043	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
5044
5045	return 0;
5046}
5047
5048static int
5049i915_cache_sharing_set(void *data, u64 val)
5050{
5051	struct drm_device *dev = data;
5052	struct drm_i915_private *dev_priv = dev->dev_private;
5053	u32 snpcr;
5054
5055	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
5056		return -ENODEV;
5057
5058	if (val > 3)
5059		return -EINVAL;
5060
5061	intel_runtime_pm_get(dev_priv);
5062	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
5063
5064	/* Update the cache sharing policy here as well */
5065	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
5066	snpcr &= ~GEN6_MBC_SNPCR_MASK;
5067	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
5068	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
5069
5070	intel_runtime_pm_put(dev_priv);
5071	return 0;
5072}
5073
5074DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
5075			i915_cache_sharing_get, i915_cache_sharing_set,
5076			"%llu\n");
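
/*
 * For example (illustrative): writing 3 to i915_cache_sharing programs
 * the largest valid value into the GEN6_MBC_SNPCR field; anything above
 * 3 is rejected with -EINVAL by i915_cache_sharing_set().
 */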
5077
5078struct sseu_dev_status {
5079	unsigned int slice_total;
5080	unsigned int subslice_total;
5081	unsigned int subslice_per_slice;
5082	unsigned int eu_total;
5083	unsigned int eu_per_subslice;
5084};
5085
5086static void cherryview_sseu_device_status(struct drm_device *dev,
5087					  struct sseu_dev_status *stat)
5088{
5089	struct drm_i915_private *dev_priv = dev->dev_private;
5090	int ss_max = 2;
5091	int ss;
5092	u32 sig1[ss_max], sig2[ss_max];
5093
5094	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
5095	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
5096	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
5097	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
5098
5099	for (ss = 0; ss < ss_max; ss++) {
5100		unsigned int eu_cnt;
5101
5102		if (sig1[ss] & CHV_SS_PG_ENABLE)
5103			/* skip disabled subslice */
5104			continue;
5105
5106		stat->slice_total = 1;
5107		stat->subslice_per_slice++;
5108		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
5109			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
5110			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
5111			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
5112		stat->eu_total += eu_cnt;
5113		stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt);
5114	}
5115	stat->subslice_total = stat->subslice_per_slice;
5116}
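
/*
 * Worked example for the counting above (illustrative): a CHV subslice
 * with none of the four CHV_EU*_PG_ENABLE bits set contributes
 * 2 + 2 + 2 + 2 = 8 EUs; each set bit power-gates one pair of EUs.
 */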
5117
5118static void gen9_sseu_device_status(struct drm_device *dev,
5119				    struct sseu_dev_status *stat)
5120{
5121	struct drm_i915_private *dev_priv = dev->dev_private;
5122	int s_max = 3, ss_max = 4;
5123	int s, ss;
5124	u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
5125
5126	/* BXT has a single slice and at most 3 subslices. */
5127	if (IS_BROXTON(dev)) {
5128		s_max = 1;
5129		ss_max = 3;
5130	}
5131
5132	for (s = 0; s < s_max; s++) {
5133		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
5134		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
5135		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
5136	}
5137
5138	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
5139		     GEN9_PGCTL_SSA_EU19_ACK |
5140		     GEN9_PGCTL_SSA_EU210_ACK |
5141		     GEN9_PGCTL_SSA_EU311_ACK;
5142	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
5143		     GEN9_PGCTL_SSB_EU19_ACK |
5144		     GEN9_PGCTL_SSB_EU210_ACK |
5145		     GEN9_PGCTL_SSB_EU311_ACK;
5146
5147	for (s = 0; s < s_max; s++) {
5148		unsigned int ss_cnt = 0;
5149
5150		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
5151			/* skip disabled slice */
5152			continue;
5153
5154		stat->slice_total++;
5155
5156		if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
5157			ss_cnt = INTEL_INFO(dev)->subslice_per_slice;
5158
5159		for (ss = 0; ss < ss_max; ss++) {
5160			unsigned int eu_cnt;
5161
5162			if (IS_BROXTON(dev) &&
5163			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
5164				/* skip disabled subslice */
5165				continue;
5166
5167			if (IS_BROXTON(dev))
5168				ss_cnt++;
5169
5170			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
5171					       eu_mask[ss%2]);
5172			stat->eu_total += eu_cnt;
5173			stat->eu_per_subslice = max(stat->eu_per_subslice,
5174						    eu_cnt);
5175		}
5176
5177		stat->subslice_total += ss_cnt;
5178		stat->subslice_per_slice = max(stat->subslice_per_slice,
5179					       ss_cnt);
5180	}
5181}
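
/*
 * Worked example for the gen9 loop above (illustrative): each ack bit
 * selected by eu_mask[] represents a pair of EUs, so a subslice with
 * all four mask bits acked yields eu_cnt = 2 * hweight32(...) = 8.
 */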
5182
5183static void broadwell_sseu_device_status(struct drm_device *dev,
5184					 struct sseu_dev_status *stat)
5185{
5186	struct drm_i915_private *dev_priv = dev->dev_private;
5187	int s;
5188	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
5189
5190	stat->slice_total = hweight32(slice_info & GEN8_LSLICESTAT_MASK);
5191
5192	if (stat->slice_total) {
5193		stat->subslice_per_slice = INTEL_INFO(dev)->subslice_per_slice;
5194		stat->subslice_total = stat->slice_total *
5195				       stat->subslice_per_slice;
5196		stat->eu_per_subslice = INTEL_INFO(dev)->eu_per_subslice;
5197		stat->eu_total = stat->eu_per_subslice * stat->subslice_total;
5198
5199		/* subtract fused off EU(s) from enabled slice(s) */
5200		for (s = 0; s < stat->slice_total; s++) {
5201			u8 subslice_7eu = INTEL_INFO(dev)->subslice_7eu[s];
5202
5203			stat->eu_total -= hweight8(subslice_7eu);
5204		}
5205	}
5206}
5207
5208static int i915_sseu_status(struct seq_file *m, void *unused)
5209{
5210	struct drm_info_node *node = m->private;
5211	struct drm_device *dev = node->minor->dev;
5212	struct sseu_dev_status stat;
5213
5214	if (INTEL_INFO(dev)->gen < 8)
5215		return -ENODEV;
5216
5217	seq_puts(m, "SSEU Device Info\n");
5218	seq_printf(m, "  Available Slice Total: %u\n",
5219		   INTEL_INFO(dev)->slice_total);
5220	seq_printf(m, "  Available Subslice Total: %u\n",
5221		   INTEL_INFO(dev)->subslice_total);
5222	seq_printf(m, "  Available Subslice Per Slice: %u\n",
5223		   INTEL_INFO(dev)->subslice_per_slice);
5224	seq_printf(m, "  Available EU Total: %u\n",
5225		   INTEL_INFO(dev)->eu_total);
5226	seq_printf(m, "  Available EU Per Subslice: %u\n",
5227		   INTEL_INFO(dev)->eu_per_subslice);
5228	seq_printf(m, "  Has Slice Power Gating: %s\n",
5229		   yesno(INTEL_INFO(dev)->has_slice_pg));
5230	seq_printf(m, "  Has Subslice Power Gating: %s\n",
5231		   yesno(INTEL_INFO(dev)->has_subslice_pg));
5232	seq_printf(m, "  Has EU Power Gating: %s\n",
5233		   yesno(INTEL_INFO(dev)->has_eu_pg));
5234
5235	seq_puts(m, "SSEU Device Status\n");
5236	memset(&stat, 0, sizeof(stat));
5237	if (IS_CHERRYVIEW(dev)) {
5238		cherryview_sseu_device_status(dev, &stat);
5239	} else if (IS_BROADWELL(dev)) {
5240		broadwell_sseu_device_status(dev, &stat);
5241	} else if (INTEL_INFO(dev)->gen >= 9) {
5242		gen9_sseu_device_status(dev, &stat);
5243	}
5244	seq_printf(m, "  Enabled Slice Total: %u\n",
5245		   stat.slice_total);
5246	seq_printf(m, "  Enabled Subslice Total: %u\n",
5247		   stat.subslice_total);
5248	seq_printf(m, "  Enabled Subslice Per Slice: %u\n",
5249		   stat.subslice_per_slice);
5250	seq_printf(m, "  Enabled EU Total: %u\n",
5251		   stat.eu_total);
5252	seq_printf(m, "  Enabled EU Per Subslice: %u\n",
5253		   stat.eu_per_subslice);
5254
5255	return 0;
5256}
5257
5258static int i915_forcewake_open(struct inode *inode, struct file *file)
5259{
5260	struct drm_device *dev = inode->i_private;
5261	struct drm_i915_private *dev_priv = dev->dev_private;
5262
5263	if (INTEL_INFO(dev)->gen < 6)
5264		return 0;
5265
5266	intel_runtime_pm_get(dev_priv);
5267	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5268
5269	return 0;
5270}
5271
5272static int i915_forcewake_release(struct inode *inode, struct file *file)
5273{
5274	struct drm_device *dev = inode->i_private;
5275	struct drm_i915_private *dev_priv = dev->dev_private;
5276
5277	if (INTEL_INFO(dev)->gen < 6)
5278		return 0;
5279
5280	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5281	intel_runtime_pm_put(dev_priv);
5282
5283	return 0;
5284}
5285
5286static const struct file_operations i915_forcewake_fops = {
5287	.owner = THIS_MODULE,
5288	.open = i915_forcewake_open,
5289	.release = i915_forcewake_release,
5290};
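
/*
 * Illustrative use: holding /sys/kernel/debug/dri/0/i915_forcewake_user
 * open pins forcewake (and a runtime-pm reference) for the lifetime of
 * the file descriptor, e.g. while inspecting registers externally.
 */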
5291
5292static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
5293{
5294	struct drm_device *dev = minor->dev;
5295	struct dentry *ent;
5296
5297	ent = debugfs_create_file("i915_forcewake_user",
5298				  S_IRUSR,
5299				  root, dev,
5300				  &i915_forcewake_fops);
5301	if (!ent)
5302		return -ENOMEM;
5303
5304	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
5305}
5306
5307static int i915_debugfs_create(struct dentry *root,
5308			       struct drm_minor *minor,
5309			       const char *name,
5310			       const struct file_operations *fops)
5311{
5312	struct drm_device *dev = minor->dev;
5313	struct dentry *ent;
5314
5315	ent = debugfs_create_file(name,
5316				  S_IRUGO | S_IWUSR,
5317				  root, dev,
5318				  fops);
5319	if (!ent)
5320		return -ENOMEM;
5321
5322	return drm_add_fake_info_node(minor, ent, fops);
5323}
5324
5325static const struct drm_info_list i915_debugfs_list[] = {
5326	{"i915_capabilities", i915_capabilities, 0},
5327	{"i915_gem_objects", i915_gem_object_info, 0},
5328	{"i915_gem_gtt", i915_gem_gtt_info, 0},
5329	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
5330	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
5331	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
5332	{"i915_gem_stolen", i915_gem_stolen_list_info },
5333	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
5334	{"i915_gem_request", i915_gem_request_info, 0},
5335	{"i915_gem_seqno", i915_gem_seqno_info, 0},
5336	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
5337	{"i915_gem_interrupt", i915_interrupt_info, 0},
5338	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
5339	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
5340	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
5341	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
5342	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
5343	{"i915_guc_info", i915_guc_info, 0},
5344	{"i915_guc_load_status", i915_guc_load_status_info, 0},
5345	{"i915_guc_log_dump", i915_guc_log_dump, 0},
5346	{"i915_frequency_info", i915_frequency_info, 0},
5347	{"i915_hangcheck_info", i915_hangcheck_info, 0},
5348	{"i915_drpc_info", i915_drpc_info, 0},
5349	{"i915_emon_status", i915_emon_status, 0},
5350	{"i915_ring_freq_table", i915_ring_freq_table, 0},
5351	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
5352	{"i915_fbc_status", i915_fbc_status, 0},
5353	{"i915_ips_status", i915_ips_status, 0},
5354	{"i915_sr_status", i915_sr_status, 0},
5355	{"i915_opregion", i915_opregion, 0},
5356	{"i915_vbt", i915_vbt, 0},
5357	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
5358	{"i915_context_status", i915_context_status, 0},
5359	{"i915_dump_lrc", i915_dump_lrc, 0},
5360	{"i915_execlists", i915_execlists, 0},
5361	{"i915_forcewake_domains", i915_forcewake_domains, 0},
5362	{"i915_swizzle_info", i915_swizzle_info, 0},
5363	{"i915_ppgtt_info", i915_ppgtt_info, 0},
5364	{"i915_llc", i915_llc, 0},
5365	{"i915_edp_psr_status", i915_edp_psr_status, 0},
5366	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
5367	{"i915_energy_uJ", i915_energy_uJ, 0},
5368	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
5369	{"i915_power_domain_info", i915_power_domain_info, 0},
5370	{"i915_dmc_info", i915_dmc_info, 0},
5371	{"i915_display_info", i915_display_info, 0},
5372	{"i915_semaphore_status", i915_semaphore_status, 0},
5373	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
5374	{"i915_dp_mst_info", i915_dp_mst_info, 0},
5375	{"i915_wa_registers", i915_wa_registers, 0},
5376	{"i915_ddb_info", i915_ddb_info, 0},
5377	{"i915_sseu_status", i915_sseu_status, 0},
5378	{"i915_drrs_status", i915_drrs_status, 0},
5379	{"i915_rps_boost_info", i915_rps_boost_info, 0},
5380};
5381#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
5382
5383static const struct i915_debugfs_files {
5384	const char *name;
5385	const struct file_operations *fops;
5386} i915_debugfs_files[] = {
5387	{"i915_wedged", &i915_wedged_fops},
5388	{"i915_max_freq", &i915_max_freq_fops},
5389	{"i915_min_freq", &i915_min_freq_fops},
5390	{"i915_cache_sharing", &i915_cache_sharing_fops},
5391	{"i915_ring_stop", &i915_ring_stop_fops},
5392	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
5393	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
5394	{"i915_gem_drop_caches", &i915_drop_caches_fops},
5395	{"i915_error_state", &i915_error_state_fops},
5396	{"i915_next_seqno", &i915_next_seqno_fops},
5397	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
5398	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
5399	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
5400	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
5401	{"i915_fbc_false_color", &i915_fbc_fc_fops},
5402	{"i915_dp_test_data", &i915_displayport_test_data_fops},
5403	{"i915_dp_test_type", &i915_displayport_test_type_fops},
5404	{"i915_dp_test_active", &i915_displayport_test_active_fops}
5405};
5406
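/*
 * Two registration tables: i915_debugfs_list (above) holds the
 * read-only seq_file nodes registered through
 * drm_debugfs_create_files(), while i915_debugfs_files pairs each
 * name with custom file_operations for entries that also accept
 * writes. Assuming debugfs is mounted in the usual place, both show
 * up under /sys/kernel/debug/dri/<minor>/, e.g.:
 *
 *   cat /sys/kernel/debug/dri/0/i915_frequency_info
 *   echo 1 > /sys/kernel/debug/dri/0/i915_wedged   # trigger GPU error handling
 */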
5407void intel_display_crc_init(struct drm_device *dev)
5408{
5409	struct drm_i915_private *dev_priv = dev->dev_private;
5410	enum pipe pipe;
5411
5412	for_each_pipe(dev_priv, pipe) {
5413		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
5414
5415		pipe_crc->opened = false;
5416		spin_lock_init(&pipe_crc->lock);
5417		init_waitqueue_head(&pipe_crc->wq);
5418	}
5419}
5420
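/*
 * Registration happens in four groups: the forcewake user file, one
 * CRC control file per pipe, the writable files from the table above,
 * and finally the read-only info nodes. The first failure aborts
 * registration and its errno is propagated; i915_debugfs_cleanup()
 * below unwinds the same groups.
 */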
5421int i915_debugfs_init(struct drm_minor *minor)
5422{
 
 
5423	int ret, i;
5424
5425	ret = i915_forcewake_create(minor->debugfs_root, minor);
5426	if (ret)
5427		return ret;
5428
5429	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
5430		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
5431		if (ret)
5432			return ret;
5433	}
5434
5435	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
5436		ret = i915_debugfs_create(minor->debugfs_root, minor,
5437					  i915_debugfs_files[i].name,
 
 
5438					  i915_debugfs_files[i].fops);
5439		if (ret)
5440			return ret;
5441	}
5442
5443	return drm_debugfs_create_files(i915_debugfs_list,
5444					I915_DEBUGFS_ENTRIES,
5445					minor->debugfs_root, minor);
5446}
5447
5448void i915_debugfs_cleanup(struct drm_minor *minor)
5449{
5450	int i;
5451
5452	drm_debugfs_remove_files(i915_debugfs_list,
5453				 I915_DEBUGFS_ENTRIES, minor);
5454
5455	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
5456				 1, minor);
5457
5458	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
5459		struct drm_info_list *info_list =
5460			(struct drm_info_list *)&i915_pipe_crc_data[i];
5461
5462		drm_debugfs_remove_files(info_list, 1, minor);
5463	}
5464
5465	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
5466		struct drm_info_list *info_list =
5467			(struct drm_info_list *) i915_debugfs_files[i].fops;
5468
5469		drm_debugfs_remove_files(info_list, 1, minor);
5470	}
5471}
5472
5473struct dpcd_block {
5474	/* DPCD dump start address. */
5475	unsigned int offset;
5476	/* DPCD dump end address, inclusive. If unset, .size will be used. */
5477	unsigned int end;
5478	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
5479	size_t size;
5480	/* Only valid for eDP. */
5481	bool edp;
5482};
5483
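/*
 * Each entry below names one DPCD range to dump: an inclusive
 * [offset, end] span, an explicit size, or (with both unset) a single
 * byte. For example, { .offset = DP_SET_POWER } dumps exactly one
 * byte at DP_SET_POWER.
 */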
5484static const struct dpcd_block i915_dpcd_debug[] = {
5485	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
5486	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
5487	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
5488	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
5489	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
5490	{ .offset = DP_SET_POWER },
5491	{ .offset = DP_EDP_DPCD_REV },
5492	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
5493	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
5494	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
5495};
5496
5497static int i915_dpcd_show(struct seq_file *m, void *data)
5498{
5499	struct drm_connector *connector = m->private;
5500	struct intel_dp *intel_dp =
5501		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
5502	uint8_t buf[16];
5503	ssize_t err;
5504	int i;
5505
5506	if (connector->status != connector_status_connected)
5507		return -ENODEV;
5508
5509	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
5510		const struct dpcd_block *b = &i915_dpcd_debug[i];
5511		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
5512
5513		if (b->edp &&
5514		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
5515			continue;
5516
5517		/* low tech for now */
5518		if (WARN_ON(size > sizeof(buf)))
5519			continue;
5520
5521		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
5522		if (err <= 0) {
5523			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
5524				  size, b->offset, err);
5525			continue;
5526		}
5527
5528		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
5529	}
5530
5531	return 0;
5532}
5533
5534static int i915_dpcd_open(struct inode *inode, struct file *file)
5535{
5536	return single_open(file, i915_dpcd_show, inode->i_private);
5537}
5538
5539static const struct file_operations i915_dpcd_fops = {
5540	.owner = THIS_MODULE,
5541	.open = i915_dpcd_open,
5542	.read = seq_read,
5543	.llseek = seq_lseek,
5544	.release = single_release,
5545};
5546
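/*
 * The dump is exposed per connector by i915_debugfs_connector_add()
 * below; assuming the default debugfs mount and a connector named
 * DP-1, something like
 *
 *   cat /sys/kernel/debug/dri/0/DP-1/i915_dpcd
 *
 * prints one "offset: bytes" line per block while a sink is
 * connected (and returns -ENODEV otherwise).
 */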
5547/**
5548 * i915_debugfs_connector_add - add i915 specific connector debugfs files
5549 * @connector: pointer to a registered drm_connector
5550 *
5551 * Cleanup will be done by drm_connector_unregister() through a call to
5552 * drm_debugfs_connector_remove().
5553 *
5554 * Returns 0 on success, negative error codes on error.
5555 */
5556int i915_debugfs_connector_add(struct drm_connector *connector)
5557{
5558	struct dentry *root = connector->debugfs_entry;
5559
5560	/* The connector must have been registered beforehand. */
5561	if (!root)
5562		return -ENODEV;
5563
5564	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5565	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5566		debugfs_create_file("i915_dpcd", S_IRUGO, root, connector,
5567				    &i915_dpcd_fops);
5568
5569	return 0;
5570}
v4.17
 
  29#include <linux/debugfs.h>
  30#include <linux/sort.h>
  31#include <linux/sched/mm.h>
  32#include "intel_drv.h"
  33#include "intel_guc_submission.h"
  34
  35static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
  36{
  37	return to_i915(node->minor->dev);
  38}
  39
  40static int i915_capabilities(struct seq_file *m, void *data)
  41{
  42	struct drm_i915_private *dev_priv = node_to_i915(m->private);
  43	const struct intel_device_info *info = INTEL_INFO(dev_priv);
  44	struct drm_printer p = drm_seq_file_printer(m);
  45
  46	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
  47	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
  48	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
  49
  50	intel_device_info_dump_flags(info, &p);
  51	intel_device_info_dump_runtime(info, &p);
  52	intel_driver_caps_print(&dev_priv->caps, &p);
  53
  54	kernel_param_lock(THIS_MODULE);
  55	i915_params_dump(&i915_modparams, &p);
  56	kernel_param_unlock(THIS_MODULE);
  57
  58	return 0;
  59}
  60
  61static char get_active_flag(struct drm_i915_gem_object *obj)
  62{
  63	return i915_gem_object_is_active(obj) ? '*' : ' ';
  64}
  65
  66static char get_pin_flag(struct drm_i915_gem_object *obj)
  67{
  68	return obj->pin_global ? 'p' : ' ';
  69}
  70
  71static char get_tiling_flag(struct drm_i915_gem_object *obj)
  72{
  73	switch (i915_gem_object_get_tiling(obj)) {
  74	default:
  75	case I915_TILING_NONE: return ' ';
  76	case I915_TILING_X: return 'X';
  77	case I915_TILING_Y: return 'Y';
  78	}
  79}
  80
  81static char get_global_flag(struct drm_i915_gem_object *obj)
  82{
  83	return obj->userfault_count ? 'g' : ' ';
  84}
  85
  86static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
  87{
  88	return obj->mm.mapping ? 'M' : ' ';
  89}
  90
  91static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
  92{
  93	u64 size = 0;
  94	struct i915_vma *vma;
  95
  96	for_each_ggtt_vma(vma, obj) {
  97		if (drm_mm_node_allocated(&vma->node))
  98			size += vma->node.size;
  99	}
 100
 101	return size;
 102}
 103
 104static const char *
 105stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
 106{
 107	size_t x = 0;
 108
 109	switch (page_sizes) {
 110	case 0:
 111		return "";
 112	case I915_GTT_PAGE_SIZE_4K:
 113		return "4K";
 114	case I915_GTT_PAGE_SIZE_64K:
 115		return "64K";
 116	case I915_GTT_PAGE_SIZE_2M:
 117		return "2M";
 118	default:
 119		if (!buf)
 120			return "M";
 121
 122		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
 123			x += snprintf(buf + x, len - x, "2M, ");
 124		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
 125			x += snprintf(buf + x, len - x, "64K, ");
 126		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
 127			x += snprintf(buf + x, len - x, "4K, ");
 128		buf[x-2] = '\0';
 129
 130		return buf;
 131	}
 132}
 133
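/*
 * A mask with more than one bit set takes the snprintf path above,
 * so e.g. I915_GTT_PAGE_SIZE_2M | I915_GTT_PAGE_SIZE_4K comes back
 * as "2M, 4K" (the buf[x-2] write clips the trailing ", ").
 */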
 134static void
 135describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 136{
 137	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 138	struct intel_engine_cs *engine;
 139	struct i915_vma *vma;
 140	unsigned int frontbuffer_bits;
 141	int pin_count = 0;
 
 142
 143	lockdep_assert_held(&obj->base.dev->struct_mutex);
 144
 145	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
 146		   &obj->base,
 147		   get_active_flag(obj),
 148		   get_pin_flag(obj),
 149		   get_tiling_flag(obj),
 150		   get_global_flag(obj),
 151		   get_pin_mapped_flag(obj),
 152		   obj->base.size / 1024,
 153		   obj->read_domains,
 154		   obj->write_domain,
 155		   i915_cache_level_str(dev_priv, obj->cache_level),
 156		   obj->mm.dirty ? " dirty" : "",
 157		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
 158	if (obj->base.name)
 159		seq_printf(m, " (name: %d)", obj->base.name);
 160	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 161		if (i915_vma_is_pinned(vma))
 162			pin_count++;
 163	}
 164	seq_printf(m, " (pinned x %d)", pin_count);
 165	if (obj->pin_global)
 166		seq_printf(m, " (global)");
 
 
 167	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 168		if (!drm_mm_node_allocated(&vma->node))
 169			continue;
 170
 171		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
 172			   i915_vma_is_ggtt(vma) ? "g" : "pp",
 173			   vma->node.start, vma->node.size,
 174			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
 175		if (i915_vma_is_ggtt(vma)) {
 176			switch (vma->ggtt_view.type) {
 177			case I915_GGTT_VIEW_NORMAL:
 178				seq_puts(m, ", normal");
 179				break;
 
 
 180
 181			case I915_GGTT_VIEW_PARTIAL:
 182				seq_printf(m, ", partial [%08llx+%x]",
 183					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
 184					   vma->ggtt_view.partial.size << PAGE_SHIFT);
 185				break;
 186
 187			case I915_GGTT_VIEW_ROTATED:
 188				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
 189					   vma->ggtt_view.rotated.plane[0].width,
 190					   vma->ggtt_view.rotated.plane[0].height,
 191					   vma->ggtt_view.rotated.plane[0].stride,
 192					   vma->ggtt_view.rotated.plane[0].offset,
 193					   vma->ggtt_view.rotated.plane[1].width,
 194					   vma->ggtt_view.rotated.plane[1].height,
 195					   vma->ggtt_view.rotated.plane[1].stride,
 196					   vma->ggtt_view.rotated.plane[1].offset);
 197				break;
 198
 199			default:
 200				MISSING_CASE(vma->ggtt_view.type);
 201				break;
 202			}
 203		}
 204		if (vma->fence)
 205			seq_printf(m, " , fence: %d%s",
 206				   vma->fence->id,
 207				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
 208		seq_puts(m, ")");
 209	}
 210	if (obj->stolen)
 211		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
 212
 213	engine = i915_gem_object_last_write_engine(obj);
 214	if (engine)
 215		seq_printf(m, " (%s)", engine->name);
 216
 217	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
 218	if (frontbuffer_bits)
 219		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
 220}
 221
 222static int obj_rank_by_stolen(const void *A, const void *B)
 
 223{
 224	const struct drm_i915_gem_object *a =
 225		*(const struct drm_i915_gem_object **)A;
 226	const struct drm_i915_gem_object *b =
 227		*(const struct drm_i915_gem_object **)B;
 228
 229	if (a->stolen->start < b->stolen->start)
 230		return -1;
 231	if (a->stolen->start > b->stolen->start)
 232		return 1;
 233	return 0;
 234}
 235
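/*
 * sort() comparator: orders the snapshot by start offset inside
 * stolen memory. The dumper below collects matching objects under
 * mm.obj_lock and only describes them after dropping it, keeping the
 * spinlock hold time short.
 */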
 236static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 237{
 238	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 239	struct drm_device *dev = &dev_priv->drm;
 240	struct drm_i915_gem_object **objects;
 241	struct drm_i915_gem_object *obj;
 242	u64 total_obj_size, total_gtt_size;
 243	unsigned long total, count, n;
 244	int ret;
 245
 246	total = READ_ONCE(dev_priv->mm.object_count);
 247	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
 248	if (!objects)
 249		return -ENOMEM;
 250
 251	ret = mutex_lock_interruptible(&dev->struct_mutex);
 252	if (ret)
 253		goto out;
 254
 255	total_obj_size = total_gtt_size = count = 0;
 256
 257	spin_lock(&dev_priv->mm.obj_lock);
 258	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
 259		if (count == total)
 260			break;
 261
 262		if (obj->stolen == NULL)
 263			continue;
 264
 265		objects[count++] = obj;
 
 266		total_obj_size += obj->base.size;
 267		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
 268
 269	}
 270	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
 271		if (count == total)
 272			break;
 273
 274		if (obj->stolen == NULL)
 275			continue;
 276
 277		objects[count++] = obj;
 
 278		total_obj_size += obj->base.size;
 
 279	}
 280	spin_unlock(&dev_priv->mm.obj_lock);
 281
 282	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
 283
 284	seq_puts(m, "Stolen:\n");
 285	for (n = 0; n < count; n++) {
 
 286		seq_puts(m, "   ");
 287		describe_obj(m, objects[n]);
 288		seq_putc(m, '\n');
 
 289	}
 290	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
 
 
 291		   count, total_obj_size, total_gtt_size);
 
 
 292
 293	mutex_unlock(&dev->struct_mutex);
 294out:
 295	kvfree(objects);
 296	return ret;
 297}
 298
 299struct file_stats {
 300	struct drm_i915_file_private *file_priv;
 301	unsigned long count;
 302	u64 total, unbound;
 303	u64 global, shared;
 304	u64 active, inactive;
 305};
 306
 307static int per_file_stats(int id, void *ptr, void *data)
 308{
 309	struct drm_i915_gem_object *obj = ptr;
 310	struct file_stats *stats = data;
 311	struct i915_vma *vma;
 312
 313	lockdep_assert_held(&obj->base.dev->struct_mutex);
 314
 315	stats->count++;
 316	stats->total += obj->base.size;
 317	if (!obj->bind_count)
 318		stats->unbound += obj->base.size;
 319	if (obj->base.name || obj->base.dma_buf)
 320		stats->shared += obj->base.size;
 321
 322	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 323		if (!drm_mm_node_allocated(&vma->node))
 324			continue;
 325
 326		if (i915_vma_is_ggtt(vma)) {
 327			stats->global += vma->node.size;
 328		} else {
 329			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
 330
 331			if (ppgtt->base.file != stats->file_priv)
 332				continue;
 333		}
 
 334
 335		if (i915_vma_is_active(vma))
 336			stats->active += vma->node.size;
 337		else
 338			stats->inactive += vma->node.size;
 339	}
 340
 341	return 0;
 342}
 343
 344#define print_file_stats(m, name, stats) do { \
 345	if (stats.count) \
 346		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
 347			   name, \
 348			   stats.count, \
 349			   stats.total, \
 350			   stats.active, \
 351			   stats.inactive, \
 352			   stats.global, \
 353			   stats.shared, \
 354			   stats.unbound); \
 355} while (0)
 356
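/*
 * Kept as a macro rather than a function so it can take the
 * file_stats struct by value along with its label; note that "stats"
 * is evaluated multiple times, and nothing is printed when no object
 * was counted.
 */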
 357static void print_batch_pool_stats(struct seq_file *m,
 358				   struct drm_i915_private *dev_priv)
 359{
 360	struct drm_i915_gem_object *obj;
 361	struct file_stats stats;
 362	struct intel_engine_cs *engine;
 363	enum intel_engine_id id;
 364	int j;
 365
 366	memset(&stats, 0, sizeof(stats));
 367
 368	for_each_engine(engine, dev_priv, id) {
 369		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
 370			list_for_each_entry(obj,
 371					    &engine->batch_pool.cache_list[j],
 372					    batch_pool_link)
 373				per_file_stats(0, obj, &stats);
 374		}
 375	}
 376
 377	print_file_stats(m, "[k]batch pool", stats);
 378}
 379
 380static int per_file_ctx_stats(int id, void *ptr, void *data)
 381{
 382	struct i915_gem_context *ctx = ptr;
 383	int n;
 384
 385	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
 386		if (ctx->engine[n].state)
 387			per_file_stats(0, ctx->engine[n].state->obj, data);
 388		if (ctx->engine[n].ring)
 389			per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
 390	}
 391
 392	return 0;
 393}
 394
 395static void print_context_stats(struct seq_file *m,
 396				struct drm_i915_private *dev_priv)
 397{
 398	struct drm_device *dev = &dev_priv->drm;
 399	struct file_stats stats;
 400	struct drm_file *file;
 401
 402	memset(&stats, 0, sizeof(stats));
 403
 404	mutex_lock(&dev->struct_mutex);
 405	if (dev_priv->kernel_context)
 406		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
 407
 408	list_for_each_entry(file, &dev->filelist, lhead) {
 409		struct drm_i915_file_private *fpriv = file->driver_priv;
 410		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
 411	}
 412	mutex_unlock(&dev->struct_mutex);
 413
 414	print_file_stats(m, "[k]contexts", stats);
 415}
 416
 417static int i915_gem_object_info(struct seq_file *m, void *data)
 418{
 419	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 420	struct drm_device *dev = &dev_priv->drm;
 421	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 422	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
 423	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
 424	struct drm_i915_gem_object *obj;
 425	unsigned int page_sizes = 0;
 426	struct drm_file *file;
 427	char buf[80];
 428	int ret;
 429
 430	ret = mutex_lock_interruptible(&dev->struct_mutex);
 431	if (ret)
 432		return ret;
 433
 434	seq_printf(m, "%u objects, %llu bytes\n",
 435		   dev_priv->mm.object_count,
 436		   dev_priv->mm.object_memory);
 437
 438	size = count = 0;
 439	mapped_size = mapped_count = 0;
 440	purgeable_size = purgeable_count = 0;
 441	huge_size = huge_count = 0;
 442
 443	spin_lock(&dev_priv->mm.obj_lock);
 444	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
 445		size += obj->base.size;
 446		++count;
 447
 448		if (obj->mm.madv == I915_MADV_DONTNEED) {
 449			purgeable_size += obj->base.size;
 450			++purgeable_count;
 451		}
 452
 453		if (obj->mm.mapping) {
 454			mapped_count++;
 455			mapped_size += obj->base.size;
 456		}
 457
 458		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
 459			huge_count++;
 460			huge_size += obj->base.size;
 461			page_sizes |= obj->mm.page_sizes.sg;
 462		}
 463	}
 464	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
 465
 466	size = count = dpy_size = dpy_count = 0;
 467	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
 468		size += obj->base.size;
 469		++count;
 470
 471		if (obj->pin_global) {
 472			dpy_size += obj->base.size;
 473			++dpy_count;
 
 474		}
 475
 476		if (obj->mm.madv == I915_MADV_DONTNEED) {
 477			purgeable_size += obj->base.size;
 478			++purgeable_count;
 479		}
 480
 481		if (obj->mm.mapping) {
 482			mapped_count++;
 483			mapped_size += obj->base.size;
 484		}
 485
 486		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
 487			huge_count++;
 488			huge_size += obj->base.size;
 489			page_sizes |= obj->mm.page_sizes.sg;
 490		}
 491	}
 492	spin_unlock(&dev_priv->mm.obj_lock);
 493
 494	seq_printf(m, "%u bound objects, %llu bytes\n",
 495		   count, size);
 496	seq_printf(m, "%u purgeable objects, %llu bytes\n",
 497		   purgeable_count, purgeable_size);
 498	seq_printf(m, "%u mapped objects, %llu bytes\n",
 499		   mapped_count, mapped_size);
 500	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
 501		   huge_count,
 502		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
 503		   huge_size);
 504	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
 505		   dpy_count, dpy_size);
 506
 507	seq_printf(m, "%llu [%pa] gtt total\n",
 508		   ggtt->base.total, &ggtt->mappable_end);
 509	seq_printf(m, "Supported page sizes: %s\n",
 510		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
 511					buf, sizeof(buf)));
 512
 513	seq_putc(m, '\n');
 514	print_batch_pool_stats(m, dev_priv);
 515	mutex_unlock(&dev->struct_mutex);
 516
 517	mutex_lock(&dev->filelist_mutex);
 518	print_context_stats(m, dev_priv);
 519	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 520		struct file_stats stats;
 521		struct drm_i915_file_private *file_priv = file->driver_priv;
 522		struct i915_request *request;
 523		struct task_struct *task;
 524
 525		mutex_lock(&dev->struct_mutex);
 526
 527		memset(&stats, 0, sizeof(stats));
 528		stats.file_priv = file->driver_priv;
 529		spin_lock(&file->table_lock);
 530		idr_for_each(&file->object_idr, per_file_stats, &stats);
 531		spin_unlock(&file->table_lock);
 532		/*
 533		 * Although we have a valid reference on file->pid, that does
 534		 * not guarantee that the task_struct who called get_pid() is
 535		 * still alive (e.g. get_pid(current) => fork() => exit()).
 536		 * Therefore, we need to protect this ->comm access using RCU.
 537		 */
 538		request = list_first_entry_or_null(&file_priv->mm.request_list,
 539						   struct i915_request,
 540						   client_link);
 541		rcu_read_lock();
 542		task = pid_task(request && request->ctx->pid ?
 543				request->ctx->pid : file->pid,
 544				PIDTYPE_PID);
 545		print_file_stats(m, task ? task->comm : "<unknown>", stats);
 546		rcu_read_unlock();
 
 547
 548		mutex_unlock(&dev->struct_mutex);
 549	}
 550	mutex_unlock(&dev->filelist_mutex);
 551
 552	return 0;
 553}
 554
 555static int i915_gem_gtt_info(struct seq_file *m, void *data)
 556{
 557	struct drm_info_node *node = m->private;
 558	struct drm_i915_private *dev_priv = node_to_i915(node);
 559	struct drm_device *dev = &dev_priv->drm;
 560	struct drm_i915_gem_object **objects;
 561	struct drm_i915_gem_object *obj;
 562	u64 total_obj_size, total_gtt_size;
 563	unsigned long nobject, n;
 564	int count, ret;
 565
 566	nobject = READ_ONCE(dev_priv->mm.object_count);
 567	objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
 568	if (!objects)
 569		return -ENOMEM;
 570
 571	ret = mutex_lock_interruptible(&dev->struct_mutex);
 572	if (ret)
  573		goto out;
 574
 575	count = 0;
 576	spin_lock(&dev_priv->mm.obj_lock);
 577	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
 578		objects[count++] = obj;
 579		if (count == nobject)
 580			break;
 581	}
 582	spin_unlock(&dev_priv->mm.obj_lock);
 583
 584	total_obj_size = total_gtt_size = 0;
  585	for (n = 0; n < count; n++) {
 586		obj = objects[n];
 587
 588		seq_puts(m, "   ");
 589		describe_obj(m, obj);
 590		seq_putc(m, '\n');
 591		total_obj_size += obj->base.size;
 592		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
 
 593	}
 594
 595	mutex_unlock(&dev->struct_mutex);
 596
 597	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
 598		   count, total_obj_size, total_gtt_size);
out:
  599	kvfree(objects);
 600
  601	return ret;
 602}
 603
 604static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 605{
 606	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 607	struct drm_device *dev = &dev_priv->drm;
 
 608	struct drm_i915_gem_object *obj;
 609	struct intel_engine_cs *engine;
 610	enum intel_engine_id id;
 611	int total = 0;
 612	int ret, j;
 613
 614	ret = mutex_lock_interruptible(&dev->struct_mutex);
 615	if (ret)
 616		return ret;
 617
 618	for_each_engine(engine, dev_priv, id) {
 619		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
 620			int count;
 621
 622			count = 0;
 623			list_for_each_entry(obj,
 624					    &engine->batch_pool.cache_list[j],
 625					    batch_pool_link)
 626				count++;
 627			seq_printf(m, "%s cache[%d]: %d objects\n",
 628				   engine->name, j, count);
 629
 630			list_for_each_entry(obj,
 631					    &engine->batch_pool.cache_list[j],
 632					    batch_pool_link) {
 633				seq_puts(m, "   ");
 634				describe_obj(m, obj);
 635				seq_putc(m, '\n');
 636			}
 637
 638			total += count;
 639		}
 640	}
 641
 642	seq_printf(m, "total: %d\n", total);
 643
 644	mutex_unlock(&dev->struct_mutex);
 645
 646	return 0;
 647}
 648
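/*
 * Register naming below follows the hardware convention used
 * throughout this file: IMR is the interrupt mask, IIR the identity
 * (pending) bits and IER the enable bits, with a separate triplet
 * per pipe and per functional unit.
 */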
 649static void gen8_display_interrupt_info(struct seq_file *m)
 650{
 651	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 652	int pipe;
 653
 654	for_each_pipe(dev_priv, pipe) {
 655		enum intel_display_power_domain power_domain;
 
 656
 657		power_domain = POWER_DOMAIN_PIPE(pipe);
 658		if (!intel_display_power_get_if_enabled(dev_priv,
 659							power_domain)) {
 660			seq_printf(m, "Pipe %c power disabled\n",
 661				   pipe_name(pipe));
 662			continue;
 663		}
 664		seq_printf(m, "Pipe %c IMR:\t%08x\n",
 665			   pipe_name(pipe),
 666			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
 667		seq_printf(m, "Pipe %c IIR:\t%08x\n",
 668			   pipe_name(pipe),
 669			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
 670		seq_printf(m, "Pipe %c IER:\t%08x\n",
 671			   pipe_name(pipe),
 672			   I915_READ(GEN8_DE_PIPE_IER(pipe)));
 673
 674		intel_display_power_put(dev_priv, power_domain);
 675	}
 676
 677	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
 678		   I915_READ(GEN8_DE_PORT_IMR));
 679	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
 680		   I915_READ(GEN8_DE_PORT_IIR));
 681	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
 682		   I915_READ(GEN8_DE_PORT_IER));
 683
 684	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
 685		   I915_READ(GEN8_DE_MISC_IMR));
 686	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
 687		   I915_READ(GEN8_DE_MISC_IIR));
 688	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
 689		   I915_READ(GEN8_DE_MISC_IER));
 690
 691	seq_printf(m, "PCU interrupt mask:\t%08x\n",
 692		   I915_READ(GEN8_PCU_IMR));
 693	seq_printf(m, "PCU interrupt identity:\t%08x\n",
 694		   I915_READ(GEN8_PCU_IIR));
 695	seq_printf(m, "PCU interrupt enable:\t%08x\n",
 696		   I915_READ(GEN8_PCU_IER));
 697}
 698
 
 699static int i915_interrupt_info(struct seq_file *m, void *data)
 700{
 701	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 702	struct intel_engine_cs *engine;
 703	enum intel_engine_id id;
 704	int i, pipe;
 
 705
 706	intel_runtime_pm_get(dev_priv);
 707
 708	if (IS_CHERRYVIEW(dev_priv)) {
 709		seq_printf(m, "Master Interrupt Control:\t%08x\n",
 710			   I915_READ(GEN8_MASTER_IRQ));
 711
 712		seq_printf(m, "Display IER:\t%08x\n",
 713			   I915_READ(VLV_IER));
 714		seq_printf(m, "Display IIR:\t%08x\n",
 715			   I915_READ(VLV_IIR));
 716		seq_printf(m, "Display IIR_RW:\t%08x\n",
 717			   I915_READ(VLV_IIR_RW));
 718		seq_printf(m, "Display IMR:\t%08x\n",
 719			   I915_READ(VLV_IMR));
 720		for_each_pipe(dev_priv, pipe) {
 721			enum intel_display_power_domain power_domain;
 722
 723			power_domain = POWER_DOMAIN_PIPE(pipe);
 724			if (!intel_display_power_get_if_enabled(dev_priv,
 725								power_domain)) {
 726				seq_printf(m, "Pipe %c power disabled\n",
 727					   pipe_name(pipe));
 728				continue;
 729			}
 730
 731			seq_printf(m, "Pipe %c stat:\t%08x\n",
 732				   pipe_name(pipe),
 733				   I915_READ(PIPESTAT(pipe)));
 734
 735			intel_display_power_put(dev_priv, power_domain);
 736		}
 737
 738		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 739		seq_printf(m, "Port hotplug:\t%08x\n",
 740			   I915_READ(PORT_HOTPLUG_EN));
 741		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
 742			   I915_READ(VLV_DPFLIPSTAT));
 743		seq_printf(m, "DPINVGTT:\t%08x\n",
 744			   I915_READ(DPINVGTT));
 745		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
 746
 747		for (i = 0; i < 4; i++) {
 748			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
 749				   i, I915_READ(GEN8_GT_IMR(i)));
 750			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
 751				   i, I915_READ(GEN8_GT_IIR(i)));
 752			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
 753				   i, I915_READ(GEN8_GT_IER(i)));
 754		}
 755
 756		seq_printf(m, "PCU interrupt mask:\t%08x\n",
 757			   I915_READ(GEN8_PCU_IMR));
 758		seq_printf(m, "PCU interrupt identity:\t%08x\n",
 759			   I915_READ(GEN8_PCU_IIR));
 760		seq_printf(m, "PCU interrupt enable:\t%08x\n",
 761			   I915_READ(GEN8_PCU_IER));
 762	} else if (INTEL_GEN(dev_priv) >= 11) {
 763		seq_printf(m, "Master Interrupt Control:  %08x\n",
 764			   I915_READ(GEN11_GFX_MSTR_IRQ));
 765
 766		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
 767			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
 768		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
 769			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
 770		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
 771			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
 772		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
 773			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
 774		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
 775			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
 776		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
 777			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
 778
 779		seq_printf(m, "Display Interrupt Control:\t%08x\n",
 780			   I915_READ(GEN11_DISPLAY_INT_CTL));
 781
 782		gen8_display_interrupt_info(m);
 783	} else if (INTEL_GEN(dev_priv) >= 8) {
 784		seq_printf(m, "Master Interrupt Control:\t%08x\n",
 785			   I915_READ(GEN8_MASTER_IRQ));
 786
 787		for (i = 0; i < 4; i++) {
 788			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
 789				   i, I915_READ(GEN8_GT_IMR(i)));
 790			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
 791				   i, I915_READ(GEN8_GT_IIR(i)));
 792			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
 793				   i, I915_READ(GEN8_GT_IER(i)));
 794		}
 795
 796		gen8_display_interrupt_info(m);
 797	} else if (IS_VALLEYVIEW(dev_priv)) {
 798		seq_printf(m, "Display IER:\t%08x\n",
 799			   I915_READ(VLV_IER));
 800		seq_printf(m, "Display IIR:\t%08x\n",
 801			   I915_READ(VLV_IIR));
 802		seq_printf(m, "Display IIR_RW:\t%08x\n",
 803			   I915_READ(VLV_IIR_RW));
 804		seq_printf(m, "Display IMR:\t%08x\n",
 805			   I915_READ(VLV_IMR));
 806		for_each_pipe(dev_priv, pipe) {
 807			enum intel_display_power_domain power_domain;
 808
 809			power_domain = POWER_DOMAIN_PIPE(pipe);
 810			if (!intel_display_power_get_if_enabled(dev_priv,
 811								power_domain)) {
 812				seq_printf(m, "Pipe %c power disabled\n",
 813					   pipe_name(pipe));
 814				continue;
 815			}
 816
 817			seq_printf(m, "Pipe %c stat:\t%08x\n",
 818				   pipe_name(pipe),
 819				   I915_READ(PIPESTAT(pipe)));
 820			intel_display_power_put(dev_priv, power_domain);
 821		}
 822
 823		seq_printf(m, "Master IER:\t%08x\n",
 824			   I915_READ(VLV_MASTER_IER));
 825
 826		seq_printf(m, "Render IER:\t%08x\n",
 827			   I915_READ(GTIER));
 828		seq_printf(m, "Render IIR:\t%08x\n",
 829			   I915_READ(GTIIR));
 830		seq_printf(m, "Render IMR:\t%08x\n",
 831			   I915_READ(GTIMR));
 832
 833		seq_printf(m, "PM IER:\t\t%08x\n",
 834			   I915_READ(GEN6_PMIER));
 835		seq_printf(m, "PM IIR:\t\t%08x\n",
 836			   I915_READ(GEN6_PMIIR));
 837		seq_printf(m, "PM IMR:\t\t%08x\n",
 838			   I915_READ(GEN6_PMIMR));
 839
 840		seq_printf(m, "Port hotplug:\t%08x\n",
 841			   I915_READ(PORT_HOTPLUG_EN));
 842		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
 843			   I915_READ(VLV_DPFLIPSTAT));
 844		seq_printf(m, "DPINVGTT:\t%08x\n",
 845			   I915_READ(DPINVGTT));
 846
 847	} else if (!HAS_PCH_SPLIT(dev_priv)) {
 848		seq_printf(m, "Interrupt enable:    %08x\n",
 849			   I915_READ(IER));
 850		seq_printf(m, "Interrupt identity:  %08x\n",
 851			   I915_READ(IIR));
 852		seq_printf(m, "Interrupt mask:      %08x\n",
 853			   I915_READ(IMR));
 854		for_each_pipe(dev_priv, pipe)
 855			seq_printf(m, "Pipe %c stat:         %08x\n",
 856				   pipe_name(pipe),
 857				   I915_READ(PIPESTAT(pipe)));
 858	} else {
 859		seq_printf(m, "North Display Interrupt enable:		%08x\n",
 860			   I915_READ(DEIER));
 861		seq_printf(m, "North Display Interrupt identity:	%08x\n",
 862			   I915_READ(DEIIR));
 863		seq_printf(m, "North Display Interrupt mask:		%08x\n",
 864			   I915_READ(DEIMR));
 865		seq_printf(m, "South Display Interrupt enable:		%08x\n",
 866			   I915_READ(SDEIER));
 867		seq_printf(m, "South Display Interrupt identity:	%08x\n",
 868			   I915_READ(SDEIIR));
 869		seq_printf(m, "South Display Interrupt mask:		%08x\n",
 870			   I915_READ(SDEIMR));
 871		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
 872			   I915_READ(GTIER));
 873		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
 874			   I915_READ(GTIIR));
 875		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
 876			   I915_READ(GTIMR));
 877	}
 878
 879	if (INTEL_GEN(dev_priv) >= 11) {
 880		seq_printf(m, "RCS Intr Mask:\t %08x\n",
 881			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
 882		seq_printf(m, "BCS Intr Mask:\t %08x\n",
 883			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
 884		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
 885			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
 886		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
 887			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
 888		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
 889			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
 890		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
 891			   I915_READ(GEN11_GUC_SG_INTR_MASK));
 892		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
 893			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
 894		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
 895			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
 896		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
 897			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
 898
 899	} else if (INTEL_GEN(dev_priv) >= 6) {
 900		for_each_engine(engine, dev_priv, id) {
 901			seq_printf(m,
 902				   "Graphics Interrupt mask (%s):	%08x\n",
 903				   engine->name, I915_READ_IMR(engine));
 904		}
 
 905	}
 906
 907	intel_runtime_pm_put(dev_priv);
 
 908
 909	return 0;
 910}
 911
 912static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 913{
 914	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 915	struct drm_device *dev = &dev_priv->drm;
 
 916	int i, ret;
 917
 918	ret = mutex_lock_interruptible(&dev->struct_mutex);
 919	if (ret)
 920		return ret;
 921
 922	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
 923	for (i = 0; i < dev_priv->num_fence_regs; i++) {
 924		struct i915_vma *vma = dev_priv->fence_regs[i].vma;
 925
 926		seq_printf(m, "Fence %d, pin count = %d, object = ",
 927			   i, dev_priv->fence_regs[i].pin_count);
 928		if (!vma)
 929			seq_puts(m, "unused");
 930		else
 931			describe_obj(m, vma->obj);
 932		seq_putc(m, '\n');
 933	}
 934
 935	mutex_unlock(&dev->struct_mutex);
 936	return 0;
 937}
 938
 939#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
 940static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
 941			      size_t count, loff_t *pos)
 942{
 943	struct i915_gpu_state *error = file->private_data;
 944	struct drm_i915_error_state_buf str;
 945	ssize_t ret;
 946	loff_t tmp;
 947
 948	if (!error)
 949		return 0;
 950
 951	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
 952	if (ret)
 953		return ret;
 954
 955	ret = i915_error_state_to_str(&str, error);
 956	if (ret)
 957		goto out;
 958
 959	tmp = 0;
 960	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
 961	if (ret < 0)
 962		goto out;
 963
 964	*pos = str.start + ret;
 965out:
 966	i915_error_state_buf_release(&str);
 967	return ret;
 968}
 969
 970static int gpu_state_release(struct inode *inode, struct file *file)
 971{
 972	i915_gpu_state_put(file->private_data);
 973	return 0;
 974}
 975
 976static int i915_gpu_info_open(struct inode *inode, struct file *file)
 977{
 978	struct drm_i915_private *i915 = inode->i_private;
 979	struct i915_gpu_state *gpu;
 980
 981	intel_runtime_pm_get(i915);
 982	gpu = i915_capture_gpu_state(i915);
 983	intel_runtime_pm_put(i915);
 984	if (!gpu)
 985		return -ENOMEM;
 986
 987	file->private_data = gpu;
 988	return 0;
 989}
 990
 991static const struct file_operations i915_gpu_info_fops = {
 992	.owner = THIS_MODULE,
 993	.open = i915_gpu_info_open,
 994	.read = gpu_state_read,
 995	.llseek = default_llseek,
 996	.release = gpu_state_release,
 997};
 998
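/*
 * i915_gpu_info captures a fresh snapshot of the GPU state every time
 * it is opened, while i915_error_state (below) exposes the first
 * recorded hang; writing anything to the latter discards that record,
 * e.g. "echo > i915_error_state" after saving a report. Both depend
 * on CONFIG_DRM_I915_CAPTURE_ERROR.
 */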
 999static ssize_t
1000i915_error_state_write(struct file *filp,
1001		       const char __user *ubuf,
1002		       size_t cnt,
1003		       loff_t *ppos)
1004{
1005	struct i915_gpu_state *error = filp->private_data;
1006
1007	if (!error)
1008		return 0;
 
1009
1010	DRM_DEBUG_DRIVER("Resetting error state\n");
1011	i915_reset_error_state(error->i915);
 
1012
1013	return cnt;
1014}
 
1015
1016static int i915_error_state_open(struct inode *inode, struct file *file)
1017{
1018	file->private_data = i915_first_error_state(inode->i_private);
1019	return 0;
1020}
1021
1022static const struct file_operations i915_error_state_fops = {
1023	.owner = THIS_MODULE,
1024	.open = i915_error_state_open,
1025	.read = gpu_state_read,
1026	.write = i915_error_state_write,
1027	.llseek = default_llseek,
1028	.release = gpu_state_release,
1029};
1030#endif
1031
1032static int
1033i915_next_seqno_set(void *data, u64 val)
1034{
1035	struct drm_i915_private *dev_priv = data;
1036	struct drm_device *dev = &dev_priv->drm;
1037	int ret;
1038
1039	ret = mutex_lock_interruptible(&dev->struct_mutex);
1040	if (ret)
1041		return ret;
1042
1043	intel_runtime_pm_get(dev_priv);
1044	ret = i915_gem_set_global_seqno(dev, val);
1045	intel_runtime_pm_put(dev_priv);
1046
1047	mutex_unlock(&dev->struct_mutex);
1048
1049	return ret;
1050}
1051
1052DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
1053			NULL, i915_next_seqno_set,
1054			"0x%llx\n");
1055
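/*
 * Write-only attribute (no getter is supplied above): writing a
 * value, e.g. "echo 0x1000 > i915_next_seqno", idles the GPU and
 * restarts the global seqno from there, which is mainly useful for
 * exercising seqno wraparound.
 */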
1056static int i915_frequency_info(struct seq_file *m, void *unused)
1057{
1058	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1059	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 
1060	int ret = 0;
1061
1062	intel_runtime_pm_get(dev_priv);
1063
1064	if (IS_GEN5(dev_priv)) {
1065		u16 rgvswctl = I915_READ16(MEMSWCTL);
1066		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1067
1068		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1069		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1070		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1071			   MEMSTAT_VID_SHIFT);
1072		seq_printf(m, "Current P-state: %d\n",
1073			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
1074	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1075		u32 rpmodectl, freq_sts;
1076
1077		mutex_lock(&dev_priv->pcu_lock);
1078
1079		rpmodectl = I915_READ(GEN6_RP_CONTROL);
1080		seq_printf(m, "Video Turbo Mode: %s\n",
1081			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1082		seq_printf(m, "HW control enabled: %s\n",
1083			   yesno(rpmodectl & GEN6_RP_ENABLE));
1084		seq_printf(m, "SW control enabled: %s\n",
1085			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1086				  GEN6_RP_MEDIA_SW_MODE));
1087
 
1088		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1089		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1090		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1091
1092		seq_printf(m, "actual GPU freq: %d MHz\n",
1093			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1094
1095		seq_printf(m, "current GPU freq: %d MHz\n",
1096			   intel_gpu_freq(dev_priv, rps->cur_freq));
1097
1098		seq_printf(m, "max GPU freq: %d MHz\n",
1099			   intel_gpu_freq(dev_priv, rps->max_freq));
1100
1101		seq_printf(m, "min GPU freq: %d MHz\n",
1102			   intel_gpu_freq(dev_priv, rps->min_freq));
1103
1104		seq_printf(m, "idle GPU freq: %d MHz\n",
1105			   intel_gpu_freq(dev_priv, rps->idle_freq));
1106
1107		seq_printf(m,
1108			   "efficient (RPe) frequency: %d MHz\n",
1109			   intel_gpu_freq(dev_priv, rps->efficient_freq));
1110		mutex_unlock(&dev_priv->pcu_lock);
1111	} else if (INTEL_GEN(dev_priv) >= 6) {
1112		u32 rp_state_limits;
1113		u32 gt_perf_status;
1114		u32 rp_state_cap;
1115		u32 rpmodectl, rpinclimit, rpdeclimit;
1116		u32 rpstat, cagf, reqf;
1117		u32 rpupei, rpcurup, rpprevup;
1118		u32 rpdownei, rpcurdown, rpprevdown;
1119		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
1120		int max_freq;
1121
1122		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
1123		if (IS_GEN9_LP(dev_priv)) {
1124			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1125			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1126		} else {
1127			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1128			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1129		}
1130
1131		/* RPSTAT1 is in the GT power well */
1132		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1133
1134		reqf = I915_READ(GEN6_RPNSWREQ);
1135		if (INTEL_GEN(dev_priv) >= 9)
1136			reqf >>= 23;
1137		else {
1138			reqf &= ~GEN6_TURBO_DISABLE;
1139			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1140				reqf >>= 24;
1141			else
1142				reqf >>= 25;
1143		}
1144		reqf = intel_gpu_freq(dev_priv, reqf);
1145
1146		rpmodectl = I915_READ(GEN6_RP_CONTROL);
1147		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1148		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1149
1150		rpstat = I915_READ(GEN6_RPSTAT1);
1151		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
1152		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
1153		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
1154		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
1155		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
1156		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
1157		cagf = intel_gpu_freq(dev_priv,
1158				      intel_get_cagf(dev_priv, rpstat));
1159
1160		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
1161
1162		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
1163			pm_ier = I915_READ(GEN6_PMIER);
1164			pm_imr = I915_READ(GEN6_PMIMR);
1165			pm_isr = I915_READ(GEN6_PMISR);
1166			pm_iir = I915_READ(GEN6_PMIIR);
1167			pm_mask = I915_READ(GEN6_PMINTRMSK);
1168		} else {
1169			pm_ier = I915_READ(GEN8_GT_IER(2));
1170			pm_imr = I915_READ(GEN8_GT_IMR(2));
1171			pm_isr = I915_READ(GEN8_GT_ISR(2));
1172			pm_iir = I915_READ(GEN8_GT_IIR(2));
1173			pm_mask = I915_READ(GEN6_PMINTRMSK);
1174		}
1175		seq_printf(m, "Video Turbo Mode: %s\n",
1176			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1177		seq_printf(m, "HW control enabled: %s\n",
1178			   yesno(rpmodectl & GEN6_RP_ENABLE));
1179		seq_printf(m, "SW control enabled: %s\n",
1180			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1181				  GEN6_RP_MEDIA_SW_MODE));
1182		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
1183			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
1184		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
1185			   rps->pm_intrmsk_mbz);
1186		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
1187		seq_printf(m, "Render p-state ratio: %d\n",
1188			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
1189		seq_printf(m, "Render p-state VID: %d\n",
1190			   gt_perf_status & 0xff);
1191		seq_printf(m, "Render p-state limit: %d\n",
1192			   rp_state_limits & 0xff);
1193		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1194		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1195		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1196		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
1197		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
1198		seq_printf(m, "CAGF: %dMHz\n", cagf);
1199		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
1200			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
1201		seq_printf(m, "RP CUR UP: %d (%dus)\n",
1202			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
1203		seq_printf(m, "RP PREV UP: %d (%dus)\n",
1204			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
1205		seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);
1206
1207		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
1208			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
1209		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
1210			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
1211		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
1212			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
1213		seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);
 
 
1214
1215		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
1216			    rp_state_cap >> 16) & 0xff;
1217		max_freq *= (IS_GEN9_BC(dev_priv) ||
1218			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
1219		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
1220			   intel_gpu_freq(dev_priv, max_freq));
1221
1222		max_freq = (rp_state_cap & 0xff00) >> 8;
1223		max_freq *= (IS_GEN9_BC(dev_priv) ||
1224			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
1225		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
1226			   intel_gpu_freq(dev_priv, max_freq));
1227
1228		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
1229			    rp_state_cap >> 0) & 0xff;
1230		max_freq *= (IS_GEN9_BC(dev_priv) ||
1231			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
1232		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
1233			   intel_gpu_freq(dev_priv, max_freq));
1234		seq_printf(m, "Max overclocked frequency: %dMHz\n",
1235			   intel_gpu_freq(dev_priv, rps->max_freq));
1236
1237		seq_printf(m, "Current freq: %d MHz\n",
1238			   intel_gpu_freq(dev_priv, rps->cur_freq));
1239		seq_printf(m, "Actual freq: %d MHz\n", cagf);
1240		seq_printf(m, "Idle freq: %d MHz\n",
1241			   intel_gpu_freq(dev_priv, rps->idle_freq));
1242		seq_printf(m, "Min freq: %d MHz\n",
1243			   intel_gpu_freq(dev_priv, rps->min_freq));
1244		seq_printf(m, "Boost freq: %d MHz\n",
1245			   intel_gpu_freq(dev_priv, rps->boost_freq));
1246		seq_printf(m, "Max freq: %d MHz\n",
1247			   intel_gpu_freq(dev_priv, rps->max_freq));
1248		seq_printf(m,
1249			   "efficient (RPe) frequency: %d MHz\n",
1250			   intel_gpu_freq(dev_priv, rps->efficient_freq));
1251	} else {
1252		seq_puts(m, "no P-state info available\n");
1253	}
1254
1255	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
1256	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1257	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1258
 
1259	intel_runtime_pm_put(dev_priv);
1260	return ret;
1261}
1262
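/*
 * INSTDONE breakdown: the aggregate word is available everywhere,
 * SC_INSTDONE from gen4, and the per-slice/subslice sampler and row
 * registers from gen7; the same decode is reused by hangcheck below
 * to report both the live read and the accumulated snapshot.
 */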
1263static void i915_instdone_info(struct drm_i915_private *dev_priv,
1264			       struct seq_file *m,
1265			       struct intel_instdone *instdone)
1266{
1267	int slice;
1268	int subslice;
1269
1270	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1271		   instdone->instdone);
1272
1273	if (INTEL_GEN(dev_priv) <= 3)
1274		return;
1275
1276	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1277		   instdone->slice_common);
1278
1279	if (INTEL_GEN(dev_priv) <= 6)
1280		return;
1281
1282	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1283		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1284			   slice, subslice, instdone->sampler[slice][subslice]);
1285
1286	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1287		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1288			   slice, subslice, instdone->row[slice][subslice]);
1289}
1290
1291static int i915_hangcheck_info(struct seq_file *m, void *unused)
1292{
1293	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1294	struct intel_engine_cs *engine;
1295	u64 acthd[I915_NUM_ENGINES];
1296	u32 seqno[I915_NUM_ENGINES];
1297	struct intel_instdone instdone;
1298	enum intel_engine_id id;
1299
1300	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
1301		seq_puts(m, "Wedged\n");
1302	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
1303		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
1304	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
1305		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
1306	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
1307		seq_puts(m, "Waiter holding struct mutex\n");
1308	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
1309		seq_puts(m, "struct_mutex blocked for reset\n");
1310
1311	if (!i915_modparams.enable_hangcheck) {
1312		seq_puts(m, "Hangcheck disabled\n");
1313		return 0;
1314	}
1315
1316	intel_runtime_pm_get(dev_priv);
1317
1318	for_each_engine(engine, dev_priv, id) {
1319		acthd[id] = intel_engine_get_active_head(engine);
1320		seqno[id] = intel_engine_get_seqno(engine);
1321	}
1322
1323	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
1324
1325	intel_runtime_pm_put(dev_priv);
1326
1327	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
1328		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
1329			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1330					    jiffies));
1331	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
1332		seq_puts(m, "Hangcheck active, work pending\n");
1333	else
1334		seq_puts(m, "Hangcheck inactive\n");
1335
1336	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
1337
1338	for_each_engine(engine, dev_priv, id) {
1339		struct intel_breadcrumbs *b = &engine->breadcrumbs;
1340		struct rb_node *rb;
1341
1342		seq_printf(m, "%s:\n", engine->name);
1343		seq_printf(m, "\tseqno = %x [current %x, last %x], inflight %d\n",
1344			   engine->hangcheck.seqno, seqno[id],
1345			   intel_engine_last_submit(engine),
1346			   engine->timeline->inflight_seqnos);
1347		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
1348			   yesno(intel_engine_has_waiter(engine)),
1349			   yesno(test_bit(engine->id,
1350					  &dev_priv->gpu_error.missed_irq_rings)),
1351			   yesno(engine->hangcheck.stalled));
1352
1353		spin_lock_irq(&b->rb_lock);
1354		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
1355			struct intel_wait *w = rb_entry(rb, typeof(*w), node);
1356
1357			seq_printf(m, "\t%s [%d] waiting for %x\n",
1358				   w->tsk->comm, w->tsk->pid, w->seqno);
1359		}
1360		spin_unlock_irq(&b->rb_lock);
1361
1362		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
1363			   (long long)engine->hangcheck.acthd,
1364			   (long long)acthd[id]);
1365		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
1366			   hangcheck_action_to_str(engine->hangcheck.action),
1367			   engine->hangcheck.action,
1368			   jiffies_to_msecs(jiffies -
1369					    engine->hangcheck.action_timestamp));
1370
1371		if (engine->id == RCS) {
1372			seq_puts(m, "\tinstdone read =\n");
1373
1374			i915_instdone_info(dev_priv, m, &instdone);
1375
1376			seq_puts(m, "\tinstdone accu =\n");
1377
1378			i915_instdone_info(dev_priv, m,
1379					   &engine->hangcheck.instdone);
1380		}
1381	}
1382
1383	return 0;
1384}
1385
1386static int i915_reset_info(struct seq_file *m, void *unused)
1387{
1388	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1389	struct i915_gpu_error *error = &dev_priv->gpu_error;
1390	struct intel_engine_cs *engine;
1391	enum intel_engine_id id;
1392
1393	seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1394
1395	for_each_engine(engine, dev_priv, id) {
1396		seq_printf(m, "%s = %u\n", engine->name,
1397			   i915_reset_engine_count(error, engine));
1398	}
1399
1400	return 0;
1401}
1402
1403static int ironlake_drpc_info(struct seq_file *m)
1404{
1405	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 
 
1406	u32 rgvmodectl, rstdbyctl;
1407	u16 crstandvid;
1408
1409	rgvmodectl = I915_READ(MEMMODECTL);
1410	rstdbyctl = I915_READ(RSTDBYCTL);
1411	crstandvid = I915_READ16(CRSTANDVID);
1412
1413	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1414	seq_printf(m, "Boost freq: %d\n",
1415		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1416		   MEMMODE_BOOST_FREQ_SHIFT);
1417	seq_printf(m, "HW control enabled: %s\n",
1418		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1419	seq_printf(m, "SW control enabled: %s\n",
1420		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1421	seq_printf(m, "Gated voltage change: %s\n",
1422		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1423	seq_printf(m, "Starting frequency: P%d\n",
1424		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1425	seq_printf(m, "Max P-state: P%d\n",
1426		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1427	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1428	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1429	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1430	seq_printf(m, "Render standby enabled: %s\n",
1431		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
1432	seq_puts(m, "Current RS state: ");
1433	switch (rstdbyctl & RSX_STATUS_MASK) {
1434	case RSX_STATUS_ON:
1435		seq_puts(m, "on\n");
1436		break;
1437	case RSX_STATUS_RC1:
1438		seq_puts(m, "RC1\n");
1439		break;
1440	case RSX_STATUS_RC1E:
1441		seq_puts(m, "RC1E\n");
1442		break;
1443	case RSX_STATUS_RS1:
1444		seq_puts(m, "RS1\n");
1445		break;
1446	case RSX_STATUS_RS2:
1447		seq_puts(m, "RS2 (RC6)\n");
1448		break;
1449	case RSX_STATUS_RS3:
1450		seq_puts(m, "RS3 (RC6+)\n");
1451		break;
1452	default:
1453		seq_puts(m, "unknown\n");
1454		break;
1455	}
1456
1457	return 0;
1458}
1459
1460static int i915_forcewake_domains(struct seq_file *m, void *data)
1461{
1462	struct drm_i915_private *i915 = node_to_i915(m->private);
 
 
1463	struct intel_uncore_forcewake_domain *fw_domain;
1464	unsigned int tmp;
1465
1466	seq_printf(m, "user.bypass_count = %u\n",
1467		   i915->uncore.user_forcewake.count);
1468
1469	for_each_fw_domain(fw_domain, i915, tmp)
 
1470		seq_printf(m, "%s.wake_count = %u\n",
1471			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
1472			   READ_ONCE(fw_domain->wake_count));
 
 
1473
1474	return 0;
1475}
1476
1477static void print_rc6_res(struct seq_file *m,
1478			  const char *title,
1479			  const i915_reg_t reg)
1480{
1481	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1482
1483	seq_printf(m, "%s %u (%llu us)\n",
1484		   title, I915_READ(reg),
1485		   intel_rc6_residency_us(dev_priv, reg));
1486}
1487
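/*
 * The residency counters tick while the GT sits in the named RC
 * state; intel_rc6_residency_us() scales the raw register value to
 * wall-clock microseconds for the second column.
 */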
1488static int vlv_drpc_info(struct seq_file *m)
1489{
1490	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1491	u32 rcctl1, pw_status;
1492
1493	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
 
1494	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1495
1496	seq_printf(m, "RC6 Enabled: %s\n",
1497		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1498					GEN6_RC_CTL_EI_MODE(1))));
1499	seq_printf(m, "Render Power Well: %s\n",
1500		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1501	seq_printf(m, "Media Power Well: %s\n",
1502		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1503
1504	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1505	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
 
 
1506
1507	return i915_forcewake_domains(m, NULL);
1508}
1509
1510static int gen6_drpc_info(struct seq_file *m)
1511{
1512	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1513	u32 gt_core_status, rcctl1, rc6vids = 0;
1514	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
1515
1516	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
1517	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1518
1519	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1520	if (INTEL_GEN(dev_priv) >= 9) {
1521		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1522		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1523	}
1524
1525	if (INTEL_GEN(dev_priv) <= 7) {
1526		mutex_lock(&dev_priv->pcu_lock);
1527		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
1528				       &rc6vids);
1529		mutex_unlock(&dev_priv->pcu_lock);
1530	}
1531
1532	seq_printf(m, "RC1e Enabled: %s\n",
1533		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1534	seq_printf(m, "RC6 Enabled: %s\n",
1535		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1536	if (INTEL_GEN(dev_priv) >= 9) {
1537		seq_printf(m, "Render Well Gating Enabled: %s\n",
1538			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1539		seq_printf(m, "Media Well Gating Enabled: %s\n",
1540			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1541	}
1542	seq_printf(m, "Deep RC6 Enabled: %s\n",
1543		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1544	seq_printf(m, "Deepest RC6 Enabled: %s\n",
1545		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1546	seq_puts(m, "Current RC state: ");
1547	switch (gt_core_status & GEN6_RCn_MASK) {
1548	case GEN6_RC0:
1549		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1550			seq_puts(m, "Core Power Down\n");
1551		else
1552			seq_puts(m, "on\n");
1553		break;
1554	case GEN6_RC3:
1555		seq_puts(m, "RC3\n");
1556		break;
1557	case GEN6_RC6:
1558		seq_puts(m, "RC6\n");
1559		break;
1560	case GEN6_RC7:
1561		seq_puts(m, "RC7\n");
1562		break;
1563	default:
1564		seq_puts(m, "Unknown\n");
1565		break;
1566	}
1567
1568	seq_printf(m, "Core Power Down: %s\n",
1569		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1570	if (INTEL_GEN(dev_priv) >= 9) {
1571		seq_printf(m, "Render Power Well: %s\n",
1572			(gen9_powergate_status &
1573			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1574		seq_printf(m, "Media Power Well: %s\n",
1575			(gen9_powergate_status &
1576			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1577	}
1578
1579	/* Not exactly sure what this is */
1580	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1581		      GEN6_GT_GFX_RC6_LOCKED);
1582	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1583	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1584	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
1585
1586	if (INTEL_GEN(dev_priv) <= 7) {
1587		seq_printf(m, "RC6   voltage: %dmV\n",
1588			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1589		seq_printf(m, "RC6+  voltage: %dmV\n",
1590			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1591		seq_printf(m, "RC6++ voltage: %dmV\n",
1592			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1593	}
1594
1595	return i915_forcewake_domains(m, NULL);
1596}
1597
1598static int i915_drpc_info(struct seq_file *m, void *unused)
1599{
1600	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1601	int err;
1602
1603	intel_runtime_pm_get(dev_priv);
1604
1605	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1606		err = vlv_drpc_info(m);
1607	else if (INTEL_GEN(dev_priv) >= 6)
1608		err = gen6_drpc_info(m);
1609	else
1610		err = ironlake_drpc_info(m);
1611
1612	intel_runtime_pm_put(dev_priv);
1613
1614	return err;
1615}
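/*
 * Example usage (illustrative path, assuming debugfs at /sys/kernel/debug
 * and DRM minor 0):
 *
 *   cat /sys/kernel/debug/dri/0/i915_drpc_info
 *
 * The helper chosen above decides the layout: VLV/CHV, gen6+ and Ironlake
 * each print a different set of RC-state and residency lines.
 */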
1616
1617static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1618{
1619	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1620
1621	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1622		   dev_priv->fb_tracking.busy_bits);
1623
1624	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1625		   dev_priv->fb_tracking.flip_bits);
1626
1627	return 0;
1628}
1629
1630static int i915_fbc_status(struct seq_file *m, void *unused)
1631{
1632	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1633	struct intel_fbc *fbc = &dev_priv->fbc;
1634
1635	if (!HAS_FBC(dev_priv))
1636		return -ENODEV;
1637
1638	intel_runtime_pm_get(dev_priv);
1639	mutex_lock(&fbc->lock);
1640
1641	if (intel_fbc_is_active(dev_priv))
1642		seq_puts(m, "FBC enabled\n");
1643	else
1644		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1645
1646	if (fbc->work.scheduled)
1647		seq_printf(m, "FBC worker scheduled on vblank %llu, now %llu\n",
1648			   fbc->work.scheduled_vblank,
1649			   drm_crtc_vblank_count(&fbc->crtc->base));
1650
1651	if (intel_fbc_is_active(dev_priv)) {
1652		u32 mask;
1653
1654		if (INTEL_GEN(dev_priv) >= 8)
1655			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1656		else if (INTEL_GEN(dev_priv) >= 7)
1657			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1658		else if (INTEL_GEN(dev_priv) >= 5)
1659			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1660		else if (IS_G4X(dev_priv))
1661			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1662		else
1663			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1664							FBC_STAT_COMPRESSED);
1665
1666		seq_printf(m, "Compressing: %s\n", yesno(mask));
1667	}
1668
1669	mutex_unlock(&fbc->lock);
1670	intel_runtime_pm_put(dev_priv);
1671
1672	return 0;
1673}
1674
1675static int i915_fbc_false_color_get(void *data, u64 *val)
1676{
1677	struct drm_i915_private *dev_priv = data;
1678
1679	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1680		return -ENODEV;
1681
1682	*val = dev_priv->fbc.false_color;
1683
1684	return 0;
1685}
1686
1687static int i915_fbc_false_color_set(void *data, u64 val)
1688{
1689	struct drm_i915_private *dev_priv = data;
1690	u32 reg;
1691
1692	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1693		return -ENODEV;
1694
1695	mutex_lock(&dev_priv->fbc.lock);
1696
1697	reg = I915_READ(ILK_DPFC_CONTROL);
1698	dev_priv->fbc.false_color = val;
1699
1700	I915_WRITE(ILK_DPFC_CONTROL, val ?
1701		   (reg | FBC_CTL_FALSE_COLOR) :
1702		   (reg & ~FBC_CTL_FALSE_COLOR));
1703
1704	mutex_unlock(&dev_priv->fbc.lock);
1705	return 0;
1706}
1707
1708DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1709			i915_fbc_false_color_get, i915_fbc_false_color_set,
1710			"%llu\n");
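/*
 * Illustrative usage, assuming the attribute is registered under the name
 * of its fops ("i915_fbc_false_color") and the usual debugfs path:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_fbc_false_color
 *   cat /sys/kernel/debug/dri/0/i915_fbc_false_color
 *
 * The value is formatted as %llu; any non-zero write enables false color.
 */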
1711
1712static int i915_ips_status(struct seq_file *m, void *unused)
1713{
1714	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1715
1716	if (!HAS_IPS(dev_priv))
1717		return -ENODEV;
1718
1719	intel_runtime_pm_get(dev_priv);
1720
1721	seq_printf(m, "Enabled by kernel parameter: %s\n",
1722		   yesno(i915_modparams.enable_ips));
1723
1724	if (INTEL_GEN(dev_priv) >= 8) {
1725		seq_puts(m, "Currently: unknown\n");
1726	} else {
1727		if (I915_READ(IPS_CTL) & IPS_ENABLE)
1728			seq_puts(m, "Currently: enabled\n");
1729		else
1730			seq_puts(m, "Currently: disabled\n");
1731	}
1732
1733	intel_runtime_pm_put(dev_priv);
1734
1735	return 0;
1736}
1737
1738static int i915_sr_status(struct seq_file *m, void *unused)
1739{
1740	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1741	bool sr_enabled = false;
1742
1743	intel_runtime_pm_get(dev_priv);
1744	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1745
1746	if (INTEL_GEN(dev_priv) >= 9)
1747		/* no global SR status; inspect per-plane WM */;
1748	else if (HAS_PCH_SPLIT(dev_priv))
1749		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1750	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
1751		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
1752		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1753	else if (IS_I915GM(dev_priv))
1754		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1755	else if (IS_PINEVIEW(dev_priv))
1756		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1757	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1758		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1759
1760	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
1761	intel_runtime_pm_put(dev_priv);
1762
1763	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
1764
1765	return 0;
1766}
1767
1768static int i915_emon_status(struct seq_file *m, void *unused)
1769{
1770	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1771	struct drm_device *dev = &dev_priv->drm;
1772	unsigned long temp, chipset, gfx;
1773	int ret;
1774
1775	if (!IS_GEN5(dev_priv))
1776		return -ENODEV;
1777
1778	ret = mutex_lock_interruptible(&dev->struct_mutex);
1779	if (ret)
1780		return ret;
1781
1782	temp = i915_mch_val(dev_priv);
1783	chipset = i915_chipset_val(dev_priv);
1784	gfx = i915_gfx_val(dev_priv);
1785	mutex_unlock(&dev->struct_mutex);
1786
1787	seq_printf(m, "GMCH temp: %ld\n", temp);
1788	seq_printf(m, "Chipset power: %ld\n", chipset);
1789	seq_printf(m, "GFX power: %ld\n", gfx);
1790	seq_printf(m, "Total power: %ld\n", chipset + gfx);
1791
1792	return 0;
1793}
1794
1795static int i915_ring_freq_table(struct seq_file *m, void *unused)
1796{
1797	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1798	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1799	int ret = 0;
1800	int gpu_freq, ia_freq;
1801	unsigned int max_gpu_freq, min_gpu_freq;
1802
1803	if (!HAS_LLC(dev_priv))
1804		return -ENODEV;
1805
1806	intel_runtime_pm_get(dev_priv);
1807
1808	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
1809	if (ret)
1810		goto out;
1811
1812	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
1813		/* Convert GT frequency to 50 MHz units */
1814		min_gpu_freq = rps->min_freq_softlimit / GEN9_FREQ_SCALER;
1815		max_gpu_freq = rps->max_freq_softlimit / GEN9_FREQ_SCALER;
1816	} else {
1817		min_gpu_freq = rps->min_freq_softlimit;
1818		max_gpu_freq = rps->max_freq_softlimit;
1819	}
1820
1821	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1822
1823	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1824		ia_freq = gpu_freq;
1825		sandybridge_pcode_read(dev_priv,
1826				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
1827				       &ia_freq);
1828		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1829			   intel_gpu_freq(dev_priv, (gpu_freq *
1830						     (IS_GEN9_BC(dev_priv) ||
1831						      IS_CANNONLAKE(dev_priv) ?
1832						      GEN9_FREQ_SCALER : 1))),
1833			   ((ia_freq >> 0) & 0xff) * 100,
1834			   ((ia_freq >> 8) & 0xff) * 100);
1835	}
1836
1837	mutex_unlock(&dev_priv->pcu_lock);
1838
1839out:
1840	intel_runtime_pm_put(dev_priv);
1841	return ret;
1842}
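/*
 * The value returned by GEN6_PCODE_READ_MIN_FREQ_TABLE packs two fields:
 * bits 0-7 hold the effective CPU (IA) frequency and bits 8-15 the
 * effective ring frequency, both in 100 MHz units (hence the * 100
 * above). Worked example with a made-up value: ia_freq = 0x0c12 decodes
 * to 0x12 * 100 = 1800 MHz CPU and 0x0c * 100 = 1200 MHz ring.
 */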
1843
1844static int i915_opregion(struct seq_file *m, void *unused)
1845{
1846	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1847	struct drm_device *dev = &dev_priv->drm;
1848	struct intel_opregion *opregion = &dev_priv->opregion;
1849	int ret;
1850
1851	ret = mutex_lock_interruptible(&dev->struct_mutex);
1852	if (ret)
1853		goto out;
1854
1855	if (opregion->header)
1856		seq_write(m, opregion->header, OPREGION_SIZE);
1857
1858	mutex_unlock(&dev->struct_mutex);
1859
1860out:
1861	return 0;
1862}
1863
1864static int i915_vbt(struct seq_file *m, void *unused)
1865{
1866	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1867
1868	if (opregion->vbt)
1869		seq_write(m, opregion->vbt, opregion->vbt_size);
1870
1871	return 0;
1872}
1873
1874static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1875{
1876	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1877	struct drm_device *dev = &dev_priv->drm;
1878	struct intel_framebuffer *fbdev_fb = NULL;
1879	struct drm_framebuffer *drm_fb;
1880	int ret;
1881
1882	ret = mutex_lock_interruptible(&dev->struct_mutex);
1883	if (ret)
1884		return ret;
1885
1886#ifdef CONFIG_DRM_FBDEV_EMULATION
1887	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
1888		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
1889
1890		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1891			   fbdev_fb->base.width,
1892			   fbdev_fb->base.height,
1893			   fbdev_fb->base.format->depth,
1894			   fbdev_fb->base.format->cpp[0] * 8,
1895			   fbdev_fb->base.modifier,
1896			   drm_framebuffer_read_refcount(&fbdev_fb->base));
1897		describe_obj(m, fbdev_fb->obj);
1898		seq_putc(m, '\n');
1899	}
1900#endif
1901
1902	mutex_lock(&dev->mode_config.fb_lock);
1903	drm_for_each_fb(drm_fb, dev) {
1904		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1905		if (fb == fbdev_fb)
1906			continue;
1907
1908		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1909			   fb->base.width,
1910			   fb->base.height,
1911			   fb->base.format->depth,
1912			   fb->base.format->cpp[0] * 8,
1913			   fb->base.modifier,
1914			   drm_framebuffer_read_refcount(&fb->base));
1915		describe_obj(m, fb->obj);
1916		seq_putc(m, '\n');
1917	}
1918	mutex_unlock(&dev->mode_config.fb_lock);
1919	mutex_unlock(&dev->struct_mutex);
1920
1921	return 0;
1922}
1923
1924static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1925{
1926	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)",
1927		   ring->space, ring->head, ring->tail);
1928}
1929
1930static int i915_context_status(struct seq_file *m, void *unused)
1931{
1932	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1933	struct drm_device *dev = &dev_priv->drm;
1934	struct intel_engine_cs *engine;
1935	struct i915_gem_context *ctx;
1936	enum intel_engine_id id;
1937	int ret;
1938
1939	ret = mutex_lock_interruptible(&dev->struct_mutex);
1940	if (ret)
1941		return ret;
1942
1943	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
1944		seq_printf(m, "HW context %u ", ctx->hw_id);
1945		if (ctx->pid) {
1946			struct task_struct *task;
1947
1948			task = get_pid_task(ctx->pid, PIDTYPE_PID);
1949			if (task) {
1950				seq_printf(m, "(%s [%d]) ",
1951					   task->comm, task->pid);
1952				put_task_struct(task);
1953			}
1954		} else if (IS_ERR(ctx->file_priv)) {
1955			seq_puts(m, "(deleted) ");
1956		} else {
1957			seq_puts(m, "(kernel) ");
1958		}
1959
1960		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1961		seq_putc(m, '\n');
1962
1963		for_each_engine(engine, dev_priv, id) {
1964			struct intel_context *ce = &ctx->engine[engine->id];
1965
1966			seq_printf(m, "%s: ", engine->name);
1967			if (ce->state)
1968				describe_obj(m, ce->state->obj);
1969			if (ce->ring)
1970				describe_ctx_ring(m, ce->ring);
1971			seq_putc(m, '\n');
1972		}
1973
1974		seq_putc(m, '\n');
1975	}
1976
1977	mutex_unlock(&dev->struct_mutex);
1978
1979	return 0;
1980}
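/*
 * Sketch of the output format produced above (values invented, object
 * details elided):
 *
 *   HW context 2 (Xorg [801]) R
 *   rcs0:  (ringbuffer, space: 16320, head: 64, tail: 128)
 *
 * The trailing 'R'/'r' reflects ctx->remap_slice; one line per engine
 * follows, describing the context image and its ringbuffer.
 */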
1981
1982static const char *swizzle_string(unsigned swizzle)
1983{
1984	switch (swizzle) {
1985	case I915_BIT_6_SWIZZLE_NONE:
1986		return "none";
1987	case I915_BIT_6_SWIZZLE_9:
1988		return "bit9";
1989	case I915_BIT_6_SWIZZLE_9_10:
1990		return "bit9/bit10";
1991	case I915_BIT_6_SWIZZLE_9_11:
1992		return "bit9/bit11";
1993	case I915_BIT_6_SWIZZLE_9_10_11:
1994		return "bit9/bit10/bit11";
1995	case I915_BIT_6_SWIZZLE_9_17:
1996		return "bit9/bit17";
1997	case I915_BIT_6_SWIZZLE_9_10_17:
1998		return "bit9/bit10/bit17";
1999	case I915_BIT_6_SWIZZLE_UNKNOWN:
2000		return "unknown";
2001	}
2002
2003	return "bug";
2004}
2005
2006static int i915_swizzle_info(struct seq_file *m, void *data)
2007{
2008	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2009
2010	intel_runtime_pm_get(dev_priv);
2011
2012	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
2013		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
2014	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
2015		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
2016
2017	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
2018		seq_printf(m, "DCC = 0x%08x\n",
2019			   I915_READ(DCC));
2020		seq_printf(m, "DCC2 = 0x%08x\n",
2021			   I915_READ(DCC2));
2022		seq_printf(m, "C0DRB3 = 0x%04x\n",
2023			   I915_READ16(C0DRB3));
2024		seq_printf(m, "C1DRB3 = 0x%04x\n",
2025			   I915_READ16(C1DRB3));
2026	} else if (INTEL_GEN(dev_priv) >= 6) {
2027		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
2028			   I915_READ(MAD_DIMM_C0));
2029		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
2030			   I915_READ(MAD_DIMM_C1));
2031		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
2032			   I915_READ(MAD_DIMM_C2));
2033		seq_printf(m, "TILECTL = 0x%08x\n",
2034			   I915_READ(TILECTL));
2035		if (INTEL_GEN(dev_priv) >= 8)
2036			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
2037				   I915_READ(GAMTARBMODE));
2038		else
2039			seq_printf(m, "ARB_MODE = 0x%08x\n",
2040				   I915_READ(ARB_MODE));
2041		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
2042			   I915_READ(DISP_ARB_CTL));
2043	}
2044
2045	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2046		seq_puts(m, "L-shaped memory detected\n");
2047
2048	intel_runtime_pm_put(dev_priv);
2049
2050	return 0;
2051}
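/*
 * Bit-6 swizzling background: to balance DRAM channels the hardware may
 * XOR higher address bits into bit 6 of a tiled address. For the
 * "bit9/bit10" mode reported above, a CPU-side copy would apply (sketch):
 *
 *   addr ^= (((addr >> 9) ^ (addr >> 10)) & 1) << 6;
 *
 * i.e. bit 6 is flipped whenever bits 9 and 10 of the address differ.
 */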
2052
2053static int per_file_ctx(int id, void *ptr, void *data)
2054{
2055	struct i915_gem_context *ctx = ptr;
2056	struct seq_file *m = data;
2057	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2058
2059	if (!ppgtt) {
2060		seq_printf(m, "  no ppgtt for context %d\n",
2061			   ctx->user_handle);
2062		return 0;
2063	}
2064
2065	if (i915_gem_context_is_default(ctx))
2066		seq_puts(m, "  default context:\n");
2067	else
2068		seq_printf(m, "  context %d:\n", ctx->user_handle);
2069	ppgtt->debug_dump(ppgtt, m);
2070
2071	return 0;
2072}
2073
2074static void gen8_ppgtt_info(struct seq_file *m,
2075			    struct drm_i915_private *dev_priv)
2076{
2077	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2078	struct intel_engine_cs *engine;
2079	enum intel_engine_id id;
2080	int i;
2081
2082	if (!ppgtt)
2083		return;
2084
2085	for_each_engine(engine, dev_priv, id) {
2086		seq_printf(m, "%s\n", engine->name);
2087		for (i = 0; i < 4; i++) {
2088			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
2089			pdp <<= 32;
2090			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
2091			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2092		}
2093	}
2094}
2095
2096static void gen6_ppgtt_info(struct seq_file *m,
2097			    struct drm_i915_private *dev_priv)
2098{
2099	struct intel_engine_cs *engine;
2100	enum intel_engine_id id;
2101
2102	if (IS_GEN6(dev_priv))
2103		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2104
2105	for_each_engine(engine, dev_priv, id) {
2106		seq_printf(m, "%s\n", engine->name);
2107		if (IS_GEN7(dev_priv))
2108			seq_printf(m, "GFX_MODE: 0x%08x\n",
2109				   I915_READ(RING_MODE_GEN7(engine)));
2110		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
2111			   I915_READ(RING_PP_DIR_BASE(engine)));
2112		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
2113			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
2114		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
2115			   I915_READ(RING_PP_DIR_DCLV(engine)));
2116	}
2117	if (dev_priv->mm.aliasing_ppgtt) {
2118		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2119
2120		seq_puts(m, "aliasing PPGTT:\n");
2121		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
2122
2123		ppgtt->debug_dump(ppgtt, m);
2124	}
2125
2126	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
2127}
2128
2129static int i915_ppgtt_info(struct seq_file *m, void *data)
2130{
2131	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2132	struct drm_device *dev = &dev_priv->drm;
2133	struct drm_file *file;
2134	int ret;
2135
2136	mutex_lock(&dev->filelist_mutex);
2137	ret = mutex_lock_interruptible(&dev->struct_mutex);
2138	if (ret)
2139		goto out_unlock;
2140
2141	intel_runtime_pm_get(dev_priv);
2142
2143	if (INTEL_GEN(dev_priv) >= 8)
2144		gen8_ppgtt_info(m, dev_priv);
2145	else if (INTEL_GEN(dev_priv) >= 6)
2146		gen6_ppgtt_info(m, dev_priv);
2147
2148	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2149		struct drm_i915_file_private *file_priv = file->driver_priv;
2150		struct task_struct *task;
2151
2152		task = get_pid_task(file->pid, PIDTYPE_PID);
2153		if (!task) {
2154			ret = -ESRCH;
2155			goto out_rpm;
2156		}
2157		seq_printf(m, "\nproc: %s\n", task->comm);
2158		put_task_struct(task);
2159		idr_for_each(&file_priv->context_idr, per_file_ctx,
2160			     (void *)(unsigned long)m);
2161	}
2162
2163out_rpm:
2164	intel_runtime_pm_put(dev_priv);
2165	mutex_unlock(&dev->struct_mutex);
2166out_unlock:
2167	mutex_unlock(&dev->filelist_mutex);
2168	return ret;
2169}
2170
2171static int count_irq_waiters(struct drm_i915_private *i915)
2172{
2173	struct intel_engine_cs *engine;
2174	enum intel_engine_id id;
2175	int count = 0;
2176
2177	for_each_engine(engine, i915, id)
2178		count += intel_engine_has_waiter(engine);
2179
2180	return count;
2181}
2182
2183static const char *rps_power_to_str(unsigned int power)
2184{
2185	static const char * const strings[] = {
2186		[LOW_POWER] = "low power",
2187		[BETWEEN] = "mixed",
2188		[HIGH_POWER] = "high power",
2189	};
2190
2191	if (power >= ARRAY_SIZE(strings) || !strings[power])
2192		return "unknown";
2193
2194	return strings[power];
2195}
2196
2197static int i915_rps_boost_info(struct seq_file *m, void *data)
2198{
2199	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2200	struct drm_device *dev = &dev_priv->drm;
2201	struct intel_rps *rps = &dev_priv->gt_pm.rps;
2202	struct drm_file *file;
2203
2204	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
2205	seq_printf(m, "GPU busy? %s [%d requests]\n",
2206		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
2207	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
2208	seq_printf(m, "Boosts outstanding? %d\n",
2209		   atomic_read(&rps->num_waiters));
2210	seq_printf(m, "Frequency requested %d\n",
2211		   intel_gpu_freq(dev_priv, rps->cur_freq));
2212	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
2213		   intel_gpu_freq(dev_priv, rps->min_freq),
2214		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
2215		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
2216		   intel_gpu_freq(dev_priv, rps->max_freq));
2217	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
2218		   intel_gpu_freq(dev_priv, rps->idle_freq),
2219		   intel_gpu_freq(dev_priv, rps->efficient_freq),
2220		   intel_gpu_freq(dev_priv, rps->boost_freq));
2221
2222	mutex_lock(&dev->filelist_mutex);
2223	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2224		struct drm_i915_file_private *file_priv = file->driver_priv;
2225		struct task_struct *task;
2226
2227		rcu_read_lock();
2228		task = pid_task(file->pid, PIDTYPE_PID);
2229		seq_printf(m, "%s [%d]: %d boosts\n",
2230			   task ? task->comm : "<unknown>",
2231			   task ? task->pid : -1,
2232			   atomic_read(&file_priv->rps_client.boosts));
2233		rcu_read_unlock();
2234	}
2235	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
2236		   atomic_read(&rps->boosts));
2237	mutex_unlock(&dev->filelist_mutex);
2238
2239	if (INTEL_GEN(dev_priv) >= 6 &&
2240	    rps->enabled &&
2241	    dev_priv->gt.active_requests) {
2242		u32 rpup, rpupei;
2243		u32 rpdown, rpdownei;
2244
2245		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2246		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
2247		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
2248		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
2249		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
2250		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2251
2252		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
2253			   rps_power_to_str(rps->power));
2254		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
2255			   rpup && rpupei ? 100 * rpup / rpupei : 0,
2256			   rps->up_threshold);
2257		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
2258			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
2259			   rps->down_threshold);
2260	} else {
2261		seq_puts(m, "\nRPS Autotuning inactive\n");
2262	}
2263
2264	return 0;
2265}
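/*
 * The autotuning percentages above divide "busy" cycles by the length of
 * the evaluation interval. Worked example with invented numbers:
 * rpup = 7500 and rpupei = 10000 gives 100 * 7500 / 10000 = 75%, which
 * is then compared against rps->up_threshold to decide on an up-clock.
 */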
2266
2267static int i915_llc(struct seq_file *m, void *data)
2268{
2269	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2270	const bool edram = INTEL_GEN(dev_priv) > 8;
2271
2272	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2273	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2274		   intel_uncore_edram_size(dev_priv)/1024/1024);
2275
2276	return 0;
2277}
2278
2279static int i915_huc_load_status_info(struct seq_file *m, void *data)
2280{
2281	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2282	struct drm_printer p;
2283
2284	if (!HAS_HUC(dev_priv))
2285		return -ENODEV;
2286
2287	p = drm_seq_file_printer(m);
2288	intel_uc_fw_dump(&dev_priv->huc.fw, &p);
2289
2290	intel_runtime_pm_get(dev_priv);
2291	seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
2292	intel_runtime_pm_put(dev_priv);
2293
2294	return 0;
2295}
2296
2297static int i915_guc_load_status_info(struct seq_file *m, void *data)
2298{
2299	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2300	struct drm_printer p;
2301	u32 tmp, i;
2302
2303	if (!HAS_GUC(dev_priv))
2304		return -ENODEV;
2305
2306	p = drm_seq_file_printer(m);
2307	intel_uc_fw_dump(&dev_priv->guc.fw, &p);
2308
2309	intel_runtime_pm_get(dev_priv);
2310
2311	tmp = I915_READ(GUC_STATUS);
2312
2313	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2314	seq_printf(m, "\tBootrom status = 0x%x\n",
2315		(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2316	seq_printf(m, "\tuKernel status = 0x%x\n",
2317		(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2318	seq_printf(m, "\tMIA Core status = 0x%x\n",
2319		(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2320	seq_puts(m, "\nScratch registers:\n");
2321	for (i = 0; i < 16; i++)
2322		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
2323
2324	intel_runtime_pm_put(dev_priv);
2325
2326	return 0;
2327}
2328
2329static void i915_guc_log_info(struct seq_file *m,
2330			      struct drm_i915_private *dev_priv)
2331{
2332	struct intel_guc *guc = &dev_priv->guc;
2333
2334	seq_puts(m, "\nGuC logging stats:\n");
2335
2336	seq_printf(m, "\tISR:   flush count %10u, overflow count %10u\n",
2337		   guc->log.flush_count[GUC_ISR_LOG_BUFFER],
2338		   guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]);
2339
2340	seq_printf(m, "\tDPC:   flush count %10u, overflow count %10u\n",
2341		   guc->log.flush_count[GUC_DPC_LOG_BUFFER],
2342		   guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]);
2343
2344	seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n",
2345		   guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER],
2346		   guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]);
2347
2348	seq_printf(m, "\tTotal flush interrupt count: %u\n",
2349		   guc->log.flush_interrupt_count);
2350
2351	seq_printf(m, "\tCapture miss count: %u\n",
2352		   guc->log.capture_miss_count);
2353}
2354
2355static void i915_guc_client_info(struct seq_file *m,
2356				 struct drm_i915_private *dev_priv,
2357				 struct intel_guc_client *client)
2358{
2359	struct intel_engine_cs *engine;
2360	enum intel_engine_id id;
2361	uint64_t tot = 0;
2362
2363	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2364		client->priority, client->stage_id, client->proc_desc_offset);
2365	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2366		client->doorbell_id, client->doorbell_offset);
2367
2368	for_each_engine(engine, dev_priv, id) {
2369		u64 submissions = client->submissions[id];
2370		tot += submissions;
2371		seq_printf(m, "\tSubmissions: %llu %s\n",
2372				submissions, engine->name);
2373	}
2374	seq_printf(m, "\tTotal: %llu\n", tot);
2375}
2376
2377static int i915_guc_info(struct seq_file *m, void *data)
2378{
2379	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2380	const struct intel_guc *guc = &dev_priv->guc;
2381
2382	if (!USES_GUC_SUBMISSION(dev_priv))
2383		return -ENODEV;
2384
2385	GEM_BUG_ON(!guc->execbuf_client);
2386
2387	seq_printf(m, "Doorbell map:\n");
2388	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2389	seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);
2390
2391	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2392	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2393	if (guc->preempt_client) {
2394		seq_printf(m, "\nGuC preempt client @ %p:\n",
2395			   guc->preempt_client);
2396		i915_guc_client_info(m, dev_priv, guc->preempt_client);
2397	}
2398
2399	i915_guc_log_info(m, dev_priv);
2400
2401	/* Add more as required ... */
2402
2403	return 0;
2404}
2405
2406static int i915_guc_stage_pool(struct seq_file *m, void *data)
2407{
2408	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2409	const struct intel_guc *guc = &dev_priv->guc;
2410	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
2411	struct intel_guc_client *client = guc->execbuf_client;
2412	unsigned int tmp;
2413	int index;
2414
2415	if (!USES_GUC_SUBMISSION(dev_priv))
2416		return -ENODEV;
2417
2418	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2419		struct intel_engine_cs *engine;
2420
2421		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2422			continue;
2423
2424		seq_printf(m, "GuC stage descriptor %u:\n", index);
2425		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2426		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2427		seq_printf(m, "\tPriority: %d\n", desc->priority);
2428		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2429		seq_printf(m, "\tEngines used: 0x%x\n",
2430			   desc->engines_used);
2431		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2432			   desc->db_trigger_phy,
2433			   desc->db_trigger_cpu,
2434			   desc->db_trigger_uk);
2435		seq_printf(m, "\tProcess descriptor: 0x%x\n",
2436			   desc->process_desc);
2437		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
2438			   desc->wq_addr, desc->wq_size);
2439		seq_putc(m, '\n');
2440
2441		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2442			u32 guc_engine_id = engine->guc_id;
2443			struct guc_execlist_context *lrc =
2444						&desc->lrc[guc_engine_id];
2445
2446			seq_printf(m, "\t%s LRC:\n", engine->name);
2447			seq_printf(m, "\t\tContext desc: 0x%x\n",
2448				   lrc->context_desc);
2449			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2450			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2451			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2452			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2453			seq_putc(m, '\n');
2454		}
2455	}
2456
2457	return 0;
2458}
2459
2460static int i915_guc_log_dump(struct seq_file *m, void *data)
2461{
2462	struct drm_info_node *node = m->private;
2463	struct drm_i915_private *dev_priv = node_to_i915(node);
2464	bool dump_load_err = !!node->info_ent->data;
2465	struct drm_i915_gem_object *obj = NULL;
2466	u32 *log;
2467	int i = 0;
2468
2469	if (!HAS_GUC(dev_priv))
2470		return -ENODEV;
2471
2472	if (dump_load_err)
2473		obj = dev_priv->guc.load_err_log;
2474	else if (dev_priv->guc.log.vma)
2475		obj = dev_priv->guc.log.vma->obj;
2476
2477	if (!obj)
2478		return 0;
2479
2480	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2481	if (IS_ERR(log)) {
2482		DRM_DEBUG("Failed to pin object\n");
2483		seq_puts(m, "(log data inaccessible)\n");
2484		return PTR_ERR(log);
2485	}
2486
2487	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2488		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2489			   *(log + i), *(log + i + 1),
2490			   *(log + i + 2), *(log + i + 3));
2491
2492	seq_putc(m, '\n');
2493
2494	i915_gem_object_unpin_map(obj);
2495
2496	return 0;
2497}
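/*
 * Example usage (illustrative path as above):
 *
 *   cat /sys/kernel/debug/dri/0/i915_guc_log_dump
 *
 * The buffer is dumped as rows of four 32-bit words in hex; the node
 * registered with info_ent->data set dumps the load-error log instead.
 */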
2498
2499static int i915_guc_log_control_get(void *data, u64 *val)
2500{
2501	struct drm_i915_private *dev_priv = data;
2502
2503	if (!HAS_GUC(dev_priv))
2504		return -ENODEV;
2505
2506	if (!dev_priv->guc.log.vma)
2507		return -EINVAL;
2508
2509	*val = i915_modparams.guc_log_level;
2510
2511	return 0;
2512}
2513
2514static int i915_guc_log_control_set(void *data, u64 val)
2515{
2516	struct drm_i915_private *dev_priv = data;
2517
2518	if (!HAS_GUC(dev_priv))
2519		return -ENODEV;
2520
2521	return intel_guc_log_control(&dev_priv->guc, val);
2522}
2523
2524DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
2525			i915_guc_log_control_get, i915_guc_log_control_set,
2526			"%lld\n");
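/*
 * Illustrative usage, assuming the attribute keeps its fops' name:
 *
 *   echo 3 > /sys/kernel/debug/dri/0/i915_guc_log_control
 *
 * The %lld value is handed to intel_guc_log_control() as the requested
 * GuC log level.
 */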
2527
2528static const char *psr2_live_status(u32 val)
2529{
2530	static const char * const live_status[] = {
2531		"IDLE",
2532		"CAPTURE",
2533		"CAPTURE_FS",
2534		"SLEEP",
2535		"BUFON_FW",
2536		"ML_UP",
2537		"SU_STANDBY",
2538		"FAST_SLEEP",
2539		"DEEP_SLEEP",
2540		"BUF_ON",
2541		"TG_ON"
2542	};
2543
2544	val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
2545	if (val < ARRAY_SIZE(live_status))
2546		return live_status[val];
2547
2548	return "unknown";
2549}
2550
2551static int i915_edp_psr_status(struct seq_file *m, void *data)
2552{
2553	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2554	u32 psrperf = 0;
2555	u32 stat[3];
2556	enum pipe pipe;
2557	bool enabled = false;
2558	bool sink_support;
2559
2560	if (!HAS_PSR(dev_priv))
2561		return -ENODEV;
2562
2563	sink_support = dev_priv->psr.sink_support;
2564	seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
2565	if (!sink_support)
2566		return 0;
2567
2568	intel_runtime_pm_get(dev_priv);
2569
2570	mutex_lock(&dev_priv->psr.lock);
2571	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
2572	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
2573	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2574		   dev_priv->psr.busy_frontbuffer_bits);
2575	seq_printf(m, "Re-enable work scheduled: %s\n",
2576		   yesno(work_busy(&dev_priv->psr.work.work)));
2577
2578	if (HAS_DDI(dev_priv)) {
2579		if (dev_priv->psr.psr2_support)
2580			enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
2581		else
2582			enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
2583	} else {
2584		for_each_pipe(dev_priv, pipe) {
2585			enum transcoder cpu_transcoder =
2586				intel_pipe_to_cpu_transcoder(dev_priv, pipe);
2587			enum intel_display_power_domain power_domain;
2588
2589			power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
2590			if (!intel_display_power_get_if_enabled(dev_priv,
2591								power_domain))
2592				continue;
2593
2594			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
2595				VLV_EDP_PSR_CURR_STATE_MASK;
2596			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2597			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2598				enabled = true;
2599
2600			intel_display_power_put(dev_priv, power_domain);
2601		}
2602	}
2603
2604	seq_printf(m, "Main link in standby mode: %s\n",
2605		   yesno(dev_priv->psr.link_standby));
2606
2607	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
2608
2609	if (!HAS_DDI(dev_priv))
2610		for_each_pipe(dev_priv, pipe) {
2611			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2612			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2613				seq_printf(m, " pipe %c", pipe_name(pipe));
2614		}
2615	seq_puts(m, "\n");
2616
2617	/*
2618	 * VLV/CHV PSR has no performance counter.
2619	 * SKL+ perf counter is reset to 0 every time a DC state is entered.
2620	 */
2621	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2622		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
2623			EDP_PSR_PERF_CNT_MASK;
2624
2625		seq_printf(m, "Performance_Counter: %u\n", psrperf);
2626	}
2627	if (dev_priv->psr.psr2_support) {
2628		u32 psr2 = I915_READ(EDP_PSR2_STATUS);
2629
2630		seq_printf(m, "EDP_PSR2_STATUS: %x [%s]\n",
2631			   psr2, psr2_live_status(psr2));
2632	}
2633	mutex_unlock(&dev_priv->psr.lock);
2634
2635	intel_runtime_pm_put(dev_priv);
2636	return 0;
2637}
2638
2639static int i915_sink_crc(struct seq_file *m, void *data)
2640{
2641	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2642	struct drm_device *dev = &dev_priv->drm;
2643	struct intel_connector *connector;
2644	struct drm_connector_list_iter conn_iter;
2645	struct intel_dp *intel_dp = NULL;
2646	struct drm_modeset_acquire_ctx ctx;
2647	int ret;
2648	u8 crc[6];
2649
2650	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2651
2652	drm_connector_list_iter_begin(dev, &conn_iter);
2653
2654	for_each_intel_connector_iter(connector, &conn_iter) {
2655		struct drm_crtc *crtc;
2656		struct drm_connector_state *state;
2657		struct intel_crtc_state *crtc_state;
2658
2659		if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
2660			continue;
2661
2662retry:
2663		ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
2664		if (ret)
2665			goto err;
2666
2667		state = connector->base.state;
2668		if (!state->best_encoder)
2669			continue;
2670
2671		crtc = state->crtc;
2672		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2673		if (ret)
2674			goto err;
2675
2676		crtc_state = to_intel_crtc_state(crtc->state);
2677		if (!crtc_state->base.active)
2678			continue;
2679
2680		/*
2681		 * Wait for all CRTC updates to complete, so that any pending
2682		 * modesets and plane updates have finished.
2683		 */
2684		if (crtc_state->base.commit) {
2685			ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done);
2686
2687			if (ret)
2688				goto err;
2689		}
2690
2691		intel_dp = enc_to_intel_dp(state->best_encoder);
2692
2693		ret = intel_dp_sink_crc(intel_dp, crtc_state, crc);
2694		if (ret)
2695			goto err;
2696
2697		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
2698			   crc[0], crc[1], crc[2],
2699			   crc[3], crc[4], crc[5]);
2700		goto out;
2701
2702err:
2703		if (ret == -EDEADLK) {
2704			ret = drm_modeset_backoff(&ctx);
2705			if (!ret)
2706				goto retry;
2707		}
2708		goto out;
2709	}
2710	ret = -ENODEV;
2711out:
2712	drm_connector_list_iter_end(&conn_iter);
2713	drm_modeset_drop_locks(&ctx);
2714	drm_modeset_acquire_fini(&ctx);
2715
2716	return ret;
2717}
2718
2719static int i915_energy_uJ(struct seq_file *m, void *data)
2720{
2721	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2722	unsigned long long power;
2723	u32 units;
2724
2725	if (INTEL_GEN(dev_priv) < 6)
2726		return -ENODEV;
2727
2728	intel_runtime_pm_get(dev_priv);
2729
2730	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
2731		intel_runtime_pm_put(dev_priv);
2732		return -ENODEV;
2733	}
2734
2735	units = (power & 0x1f00) >> 8;
2736	power = I915_READ(MCH_SECP_NRG_STTS);
2737	power = (1000000 * power) >> units; /* convert to uJ */
2738
2739	intel_runtime_pm_put(dev_priv);
2740
2741	seq_printf(m, "%llu", power);
2742
2743	return 0;
2744}
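/*
 * Worked example of the unit conversion above (invented readings):
 * MSR_RAPL_POWER_UNIT bits 12:8 give the energy-status exponent, so
 * units = 14 means one counter increment = 1/2^14 J. A counter reading
 * of 32768 then yields (1000000 * 32768) >> 14 = 2000000 uJ, i.e. 2 J.
 */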
2745
2746static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2747{
2748	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2749	struct pci_dev *pdev = dev_priv->drm.pdev;
2750
2751	if (!HAS_RUNTIME_PM(dev_priv))
2752		seq_puts(m, "Runtime power management not supported\n");
2753
2754	seq_printf(m, "GPU idle: %s (epoch %u)\n",
2755		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
2756	seq_printf(m, "IRQs disabled: %s\n",
2757		   yesno(!intel_irqs_enabled(dev_priv)));
2758#ifdef CONFIG_PM
2759	seq_printf(m, "Usage count: %d\n",
2760		   atomic_read(&dev_priv->drm.dev->power.usage_count));
2761#else
2762	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2763#endif
2764	seq_printf(m, "PCI device power state: %s [%d]\n",
2765		   pci_power_name(pdev->current_state),
2766		   pdev->current_state);
2767
2768	return 0;
2769}
2770
2771static int i915_power_domain_info(struct seq_file *m, void *unused)
2772{
2773	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2774	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2775	int i;
2776
2777	mutex_lock(&power_domains->lock);
2778
2779	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2780	for (i = 0; i < power_domains->power_well_count; i++) {
2781		struct i915_power_well *power_well;
2782		enum intel_display_power_domain power_domain;
2783
2784		power_well = &power_domains->power_wells[i];
2785		seq_printf(m, "%-25s %d\n", power_well->name,
2786			   power_well->count);
2787
2788		for_each_power_domain(power_domain, power_well->domains)
2789			seq_printf(m, "  %-23s %d\n",
2790				 intel_display_power_domain_str(power_domain),
2791				 power_domains->domain_use_count[power_domain]);
2792	}
2793
2794	mutex_unlock(&power_domains->lock);
2795
2796	return 0;
2797}
2798
2799static int i915_dmc_info(struct seq_file *m, void *unused)
2800{
2801	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2802	struct intel_csr *csr;
2803
2804	if (!HAS_CSR(dev_priv))
2805		return -ENODEV;
2806
2807	csr = &dev_priv->csr;
2808
2809	intel_runtime_pm_get(dev_priv);
2810
2811	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2812	seq_printf(m, "path: %s\n", csr->fw_path);
2813
2814	if (!csr->dmc_payload)
2815		goto out;
2816
2817	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2818		   CSR_VERSION_MINOR(csr->version));
2819
2820	if (IS_KABYLAKE(dev_priv) ||
2821	    (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
2822		seq_printf(m, "DC3 -> DC5 count: %d\n",
2823			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
2824		seq_printf(m, "DC5 -> DC6 count: %d\n",
2825			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
2826	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
2827		seq_printf(m, "DC3 -> DC5 count: %d\n",
2828			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
2829	}
2830
2831out:
2832	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2833	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2834	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2835
2836	intel_runtime_pm_put(dev_priv);
2837
2838	return 0;
2839}
2840
2841static void intel_seq_print_mode(struct seq_file *m, int tabs,
2842				 struct drm_display_mode *mode)
2843{
2844	int i;
2845
2846	for (i = 0; i < tabs; i++)
2847		seq_putc(m, '\t');
2848
2849	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2850		   mode->base.id, mode->name,
2851		   mode->vrefresh, mode->clock,
2852		   mode->hdisplay, mode->hsync_start,
2853		   mode->hsync_end, mode->htotal,
2854		   mode->vdisplay, mode->vsync_start,
2855		   mode->vsync_end, mode->vtotal,
2856		   mode->type, mode->flags);
2857}
2858
2859static void intel_encoder_info(struct seq_file *m,
2860			       struct intel_crtc *intel_crtc,
2861			       struct intel_encoder *intel_encoder)
2862{
2863	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2864	struct drm_device *dev = &dev_priv->drm;
2865	struct drm_crtc *crtc = &intel_crtc->base;
2866	struct intel_connector *intel_connector;
2867	struct drm_encoder *encoder;
2868
2869	encoder = &intel_encoder->base;
2870	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2871		   encoder->base.id, encoder->name);
2872	for_each_connector_on_encoder(dev, encoder, intel_connector) {
2873		struct drm_connector *connector = &intel_connector->base;
2874		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2875			   connector->base.id,
2876			   connector->name,
2877			   drm_get_connector_status_name(connector->status));
2878		if (connector->status == connector_status_connected) {
2879			struct drm_display_mode *mode = &crtc->mode;
2880			seq_printf(m, ", mode:\n");
2881			intel_seq_print_mode(m, 2, mode);
2882		} else {
2883			seq_putc(m, '\n');
2884		}
2885	}
2886}
2887
2888static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2889{
2890	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2891	struct drm_device *dev = &dev_priv->drm;
2892	struct drm_crtc *crtc = &intel_crtc->base;
2893	struct intel_encoder *intel_encoder;
2894	struct drm_plane_state *plane_state = crtc->primary->state;
2895	struct drm_framebuffer *fb = plane_state->fb;
2896
2897	if (fb)
2898		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2899			   fb->base.id, plane_state->src_x >> 16,
2900			   plane_state->src_y >> 16, fb->width, fb->height);
2901	else
2902		seq_puts(m, "\tprimary plane disabled\n");
2903	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2904		intel_encoder_info(m, intel_crtc, intel_encoder);
2905}
2906
2907static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2908{
2909	struct drm_display_mode *mode = panel->fixed_mode;
2910
2911	seq_printf(m, "\tfixed mode:\n");
2912	intel_seq_print_mode(m, 2, mode);
2913}
2914
2915static void intel_dp_info(struct seq_file *m,
2916			  struct intel_connector *intel_connector)
2917{
2918	struct intel_encoder *intel_encoder = intel_connector->encoder;
2919	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2920
2921	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2922	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2923	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2924		intel_panel_info(m, &intel_connector->panel);
2925
2926	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2927				&intel_dp->aux);
2928}
2929
2930static void intel_dp_mst_info(struct seq_file *m,
2931			  struct intel_connector *intel_connector)
2932{
2933	struct intel_encoder *intel_encoder = intel_connector->encoder;
2934	struct intel_dp_mst_encoder *intel_mst =
2935		enc_to_mst(&intel_encoder->base);
2936	struct intel_digital_port *intel_dig_port = intel_mst->primary;
2937	struct intel_dp *intel_dp = &intel_dig_port->dp;
2938	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2939					intel_connector->port);
2940
2941	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2942}
2943
2944static void intel_hdmi_info(struct seq_file *m,
2945			    struct intel_connector *intel_connector)
2946{
2947	struct intel_encoder *intel_encoder = intel_connector->encoder;
2948	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2949
2950	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2951}
2952
2953static void intel_lvds_info(struct seq_file *m,
2954			    struct intel_connector *intel_connector)
2955{
2956	intel_panel_info(m, &intel_connector->panel);
2957}
2958
2959static void intel_connector_info(struct seq_file *m,
2960				 struct drm_connector *connector)
2961{
2962	struct intel_connector *intel_connector = to_intel_connector(connector);
2963	struct intel_encoder *intel_encoder = intel_connector->encoder;
2964	struct drm_display_mode *mode;
2965
2966	seq_printf(m, "connector %d: type %s, status: %s\n",
2967		   connector->base.id, connector->name,
2968		   drm_get_connector_status_name(connector->status));
2969	if (connector->status == connector_status_connected) {
2970		seq_printf(m, "\tname: %s\n", connector->display_info.name);
2971		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2972			   connector->display_info.width_mm,
2973			   connector->display_info.height_mm);
2974		seq_printf(m, "\tsubpixel order: %s\n",
2975			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2976		seq_printf(m, "\tCEA rev: %d\n",
2977			   connector->display_info.cea_rev);
2978	}
2979
2980	if (!intel_encoder)
2981		return;
2982
2983	switch (connector->connector_type) {
2984	case DRM_MODE_CONNECTOR_DisplayPort:
2985	case DRM_MODE_CONNECTOR_eDP:
2986		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2987			intel_dp_mst_info(m, intel_connector);
2988		else
2989			intel_dp_info(m, intel_connector);
2990		break;
2991	case DRM_MODE_CONNECTOR_LVDS:
2992		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2993			intel_lvds_info(m, intel_connector);
2994		break;
2995	case DRM_MODE_CONNECTOR_HDMIA:
2996		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
2997		    intel_encoder->type == INTEL_OUTPUT_DDI)
2998			intel_hdmi_info(m, intel_connector);
2999		break;
3000	default:
3001		break;
3002	}
3003
3004	seq_printf(m, "\tmodes:\n");
3005	list_for_each_entry(mode, &connector->modes, head)
3006		intel_seq_print_mode(m, 2, mode);
3007}
3008
3009static const char *plane_type(enum drm_plane_type type)
3010{
3011	switch (type) {
3012	case DRM_PLANE_TYPE_OVERLAY:
3013		return "OVL";
3014	case DRM_PLANE_TYPE_PRIMARY:
3015		return "PRI";
3016	case DRM_PLANE_TYPE_CURSOR:
3017		return "CUR";
3018	/*
3019	 * Deliberately omitting default: to generate compiler warnings
3020	 * when a new drm_plane_type gets added.
3021	 */
3022	}
3023
3024	return "unknown";
3025}
3026
3027static const char *plane_rotation(unsigned int rotation)
3028{
3029	static char buf[48];
3030	/*
3031	 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
3032	 * but print them all so that misused values are easy to spot
3033	 */
3034	snprintf(buf, sizeof(buf),
3035		 "%s%s%s%s%s%s(0x%08x)",
3036		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
3037		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
3038		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
3039		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
3040		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
3041		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
3042		 rotation);
3043
3044	return buf;
3045}
3046
3047static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3048{
3049	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3050	struct drm_device *dev = &dev_priv->drm;
3051	struct intel_plane *intel_plane;
3052
3053	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3054		struct drm_plane_state *state;
3055		struct drm_plane *plane = &intel_plane->base;
3056		struct drm_format_name_buf format_name;
3057
3058		if (!plane->state) {
3059			seq_puts(m, "plane->state is NULL!\n");
3060			continue;
3061		}
3062
3063		state = plane->state;
3064
3065		if (state->fb) {
3066			drm_get_format_name(state->fb->format->format,
3067					    &format_name);
3068		} else {
3069			sprintf(format_name.str, "N/A");
3070		}
3071
3072		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3073			   plane->base.id,
3074			   plane_type(intel_plane->base.type),
3075			   state->crtc_x, state->crtc_y,
3076			   state->crtc_w, state->crtc_h,
3077			   (state->src_x >> 16),
3078			   ((state->src_x & 0xffff) * 15625) >> 10,
3079			   (state->src_y >> 16),
3080			   ((state->src_y & 0xffff) * 15625) >> 10,
3081			   (state->src_w >> 16),
3082			   ((state->src_w & 0xffff) * 15625) >> 10,
3083			   (state->src_h >> 16),
3084			   ((state->src_h & 0xffff) * 15625) >> 10,
3085			   format_name.str,
3086			   plane_rotation(state->rotation));
3087	}
3088}
3089
3090static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3091{
3092	struct intel_crtc_state *pipe_config;
3093	int num_scalers = intel_crtc->num_scalers;
3094	int i;
3095
3096	pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3097
3098	/* Not all platforms have a scaler */
3099	if (num_scalers) {
3100		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3101			   num_scalers,
3102			   pipe_config->scaler_state.scaler_users,
3103			   pipe_config->scaler_state.scaler_id);
3104
3105		for (i = 0; i < num_scalers; i++) {
3106			struct intel_scaler *sc =
3107					&pipe_config->scaler_state.scalers[i];
3108
3109			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3110				   i, yesno(sc->in_use), sc->mode);
3111		}
3112		seq_puts(m, "\n");
3113	} else {
3114		seq_puts(m, "\tNo scalers available on this platform\n");
3115	}
3116}
3117
3118static int i915_display_info(struct seq_file *m, void *unused)
3119{
3120	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3121	struct drm_device *dev = &dev_priv->drm;
3122	struct intel_crtc *crtc;
3123	struct drm_connector *connector;
3124	struct drm_connector_list_iter conn_iter;
3125
3126	intel_runtime_pm_get(dev_priv);
3127	seq_printf(m, "CRTC info\n");
3128	seq_printf(m, "---------\n");
3129	for_each_intel_crtc(dev, crtc) {
3130		struct intel_crtc_state *pipe_config;
3131
3132		drm_modeset_lock(&crtc->base.mutex, NULL);
3133		pipe_config = to_intel_crtc_state(crtc->base.state);
3134
3135		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3136			   crtc->base.base.id, pipe_name(crtc->pipe),
3137			   yesno(pipe_config->base.active),
3138			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3139			   yesno(pipe_config->dither), pipe_config->pipe_bpp);
3140
3141		if (pipe_config->base.active) {
3142			struct intel_plane *cursor =
3143				to_intel_plane(crtc->base.cursor);
3144
3145			intel_crtc_info(m, crtc);
3146
3147			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3148				   yesno(cursor->base.state->visible),
3149				   cursor->base.state->crtc_x,
3150				   cursor->base.state->crtc_y,
3151				   cursor->base.state->crtc_w,
3152				   cursor->base.state->crtc_h,
3153				   cursor->cursor.base);
3154			intel_scaler_info(m, crtc);
3155			intel_plane_info(m, crtc);
3156		}
3157
3158		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3159			   yesno(!crtc->cpu_fifo_underrun_disabled),
3160			   yesno(!crtc->pch_fifo_underrun_disabled));
3161		drm_modeset_unlock(&crtc->base.mutex);
3162	}
3163
3164	seq_printf(m, "\n");
3165	seq_printf(m, "Connector info\n");
3166	seq_printf(m, "--------------\n");
3167	mutex_lock(&dev->mode_config.mutex);
3168	drm_connector_list_iter_begin(dev, &conn_iter);
3169	drm_for_each_connector_iter(connector, &conn_iter)
3170		intel_connector_info(m, connector);
3171	drm_connector_list_iter_end(&conn_iter);
3172	mutex_unlock(&dev->mode_config.mutex);
3173
3174	intel_runtime_pm_put(dev_priv);
3175
3176	return 0;
3177}
3178
3179static int i915_engine_info(struct seq_file *m, void *unused)
3180{
3181	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3182	struct intel_engine_cs *engine;
3183	enum intel_engine_id id;
3184	struct drm_printer p;
3185
3186	intel_runtime_pm_get(dev_priv);
3187
3188	seq_printf(m, "GT awake? %s (epoch %u)\n",
3189		   yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
3190	seq_printf(m, "Global active requests: %d\n",
3191		   dev_priv->gt.active_requests);
3192	seq_printf(m, "CS timestamp frequency: %u kHz\n",
3193		   dev_priv->info.cs_timestamp_frequency_khz);
3194
3195	p = drm_seq_file_printer(m);
3196	for_each_engine(engine, dev_priv, id)
3197		intel_engine_dump(engine, &p, "%s\n", engine->name);
3198
3199	intel_runtime_pm_put(dev_priv);
3200
3201	return 0;
3202}
3203
3204static int i915_rcs_topology(struct seq_file *m, void *unused)
3205{
3206	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3207	struct drm_printer p = drm_seq_file_printer(m);
3208
3209	intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
3210
3211	return 0;
3212}
3213
3214static int i915_shrinker_info(struct seq_file *m, void *unused)
3215{
3216	struct drm_i915_private *i915 = node_to_i915(m->private);
3217
3218	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3219	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3220
3221	return 0;
3222}
3223
3224static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3225{
3226	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3227	struct drm_device *dev = &dev_priv->drm;
3228	int i;
3229
3230	drm_modeset_lock_all(dev);
3231	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3232		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3233
3234		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
3235		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
3236			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
3237		seq_printf(m, " tracked hardware state:\n");
3238		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
3239		seq_printf(m, " dpll_md: 0x%08x\n",
3240			   pll->state.hw_state.dpll_md);
3241		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
3242		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
3243		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
3244	}
3245	drm_modeset_unlock_all(dev);
3246
3247	return 0;
3248}
3249
3250static int i915_wa_registers(struct seq_file *m, void *unused)
3251{
3252	int i;
3253	int ret;
3254	struct intel_engine_cs *engine;
3255	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3256	struct drm_device *dev = &dev_priv->drm;
3257	struct i915_workarounds *workarounds = &dev_priv->workarounds;
3258	enum intel_engine_id id;
3259
3260	ret = mutex_lock_interruptible(&dev->struct_mutex);
3261	if (ret)
3262		return ret;
3263
3264	intel_runtime_pm_get(dev_priv);
3265
3266	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
3267	for_each_engine(engine, dev_priv, id)
3268		seq_printf(m, "HW whitelist count for %s: %d\n",
3269			   engine->name, workarounds->hw_whitelist_count[id]);
3270	for (i = 0; i < workarounds->count; ++i) {
3271		i915_reg_t addr;
3272		u32 mask, value, read;
3273		bool ok;
3274
3275		addr = workarounds->reg[i].addr;
3276		mask = workarounds->reg[i].mask;
3277		value = workarounds->reg[i].value;
3278		read = I915_READ(addr);
3279		ok = (value & mask) == (read & mask);
3280		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
3281			   i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
3282	}
3283
3284	intel_runtime_pm_put(dev_priv);
3285	mutex_unlock(&dev->struct_mutex);
3286
3287	return 0;
3288}
3289
3290static int i915_ipc_status_show(struct seq_file *m, void *data)
3291{
3292	struct drm_i915_private *dev_priv = m->private;
3293
3294	seq_printf(m, "Isochronous Priority Control: %s\n",
3295			yesno(dev_priv->ipc_enabled));
3296	return 0;
3297}
3298
3299static int i915_ipc_status_open(struct inode *inode, struct file *file)
3300{
3301	struct drm_i915_private *dev_priv = inode->i_private;
3302
3303	if (!HAS_IPC(dev_priv))
3304		return -ENODEV;
3305
3306	return single_open(file, i915_ipc_status_show, dev_priv);
3307}
3308
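/*
 * Example: toggling IPC from userspace. The value is parsed with
 * kstrtobool_from_user(), so "1"/"0" and "y"/"n" forms are accepted (the
 * path assumes the default debugfs mount point):
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_ipc_status
 */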
3309static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3310				     size_t len, loff_t *offp)
3311{
3312	struct seq_file *m = file->private_data;
3313	struct drm_i915_private *dev_priv = m->private;
3314	int ret;
3315	bool enable;
3316
3317	ret = kstrtobool_from_user(ubuf, len, &enable);
3318	if (ret < 0)
3319		return ret;
3320
3321	intel_runtime_pm_get(dev_priv);
3322	if (!dev_priv->ipc_enabled && enable)
3323		DRM_INFO("Enabling IPC: watermarks will be correct only after the next commit\n");
3324	dev_priv->wm.distrust_bios_wm = true;
3325	dev_priv->ipc_enabled = enable;
3326	intel_enable_ipc(dev_priv);
3327	intel_runtime_pm_put(dev_priv);
3328
3329	return len;
3330}
3331
3332static const struct file_operations i915_ipc_status_fops = {
3333	.owner = THIS_MODULE,
3334	.open = i915_ipc_status_open,
3335	.read = seq_read,
3336	.llseek = seq_lseek,
3337	.release = single_release,
3338	.write = i915_ipc_status_write
3339};
3340
3341static int i915_ddb_info(struct seq_file *m, void *unused)
3342{
3343	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3344	struct drm_device *dev = &dev_priv->drm;
3345	struct skl_ddb_allocation *ddb;
3346	struct skl_ddb_entry *entry;
3347	enum pipe pipe;
3348	int plane;
3349
3350	if (INTEL_GEN(dev_priv) < 9)
3351		return -ENODEV;
3352
3353	drm_modeset_lock_all(dev);
3354
3355	ddb = &dev_priv->wm.skl_hw.ddb;
3356
3357	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3358
3359	for_each_pipe(dev_priv, pipe) {
3360		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3361
3362		for_each_universal_plane(dev_priv, pipe, plane) {
3363			entry = &ddb->plane[pipe][plane];
3364			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
3365				   entry->start, entry->end,
3366				   skl_ddb_entry_size(entry));
3367		}
3368
3369		entry = &ddb->plane[pipe][PLANE_CURSOR];
3370		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
3371			   entry->end, skl_ddb_entry_size(entry));
3372	}
3373
3374	drm_modeset_unlock_all(dev);
3375
3376	return 0;
3377}
3378
3379static void drrs_status_per_crtc(struct seq_file *m,
3380				 struct drm_device *dev,
3381				 struct intel_crtc *intel_crtc)
3382{
3383	struct drm_i915_private *dev_priv = to_i915(dev);
3384	struct i915_drrs *drrs = &dev_priv->drrs;
3385	int vrefresh = 0;
3386	struct drm_connector *connector;
3387	struct drm_connector_list_iter conn_iter;
3388
3389	drm_connector_list_iter_begin(dev, &conn_iter);
3390	drm_for_each_connector_iter(connector, &conn_iter) {
3391		if (connector->state->crtc != &intel_crtc->base)
3392			continue;
3393
3394		seq_printf(m, "%s:\n", connector->name);
3395	}
3396	drm_connector_list_iter_end(&conn_iter);
3397
3398	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3399		seq_puts(m, "\tVBT: DRRS_type: Static");
3400	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3401		seq_puts(m, "\tVBT: DRRS_type: Seamless");
3402	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3403		seq_puts(m, "\tVBT: DRRS_type: None");
3404	else
3405		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3406
3407	seq_puts(m, "\n\n");
3408
3409	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
3410		struct intel_panel *panel;
3411
3412		mutex_lock(&drrs->mutex);
3413		/* DRRS Supported */
3414		seq_puts(m, "\tDRRS Supported: Yes\n");
3415
3416		/* disable_drrs() will make drrs->dp NULL */
3417		if (!drrs->dp) {
3418			seq_puts(m, "Idleness DRRS: Disabled\n");
3419			if (dev_priv->psr.enabled)
3420				seq_puts(m,
3421				"\tAs PSR is enabled, DRRS is not enabled\n");
3422			mutex_unlock(&drrs->mutex);
3423			return;
3424		}
3425
3426		panel = &drrs->dp->attached_connector->panel;
3427		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3428					drrs->busy_frontbuffer_bits);
3429
3430		seq_puts(m, "\n\t\t");
3431		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3432			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3433			vrefresh = panel->fixed_mode->vrefresh;
3434		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3435			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3436			vrefresh = panel->downclock_mode->vrefresh;
3437		} else {
3438			seq_printf(m, "DRRS_State: Unknown(%d)\n",
3439						drrs->refresh_rate_type);
3440			mutex_unlock(&drrs->mutex);
3441			return;
3442		}
3443		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3444
3445		seq_puts(m, "\n\t\t");
3446		mutex_unlock(&drrs->mutex);
3447	} else {
3448		/* DRRS not supported. Print the VBT parameter */
3449		seq_puts(m, "\tDRRS Supported: No");
3450	}
3451	seq_puts(m, "\n");
3452}
3453
3454static int i915_drrs_status(struct seq_file *m, void *unused)
3455{
3456	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3457	struct drm_device *dev = &dev_priv->drm;
3458	struct intel_crtc *intel_crtc;
3459	int active_crtc_cnt = 0;
3460
3461	drm_modeset_lock_all(dev);
3462	for_each_intel_crtc(dev, intel_crtc) {
3463		if (intel_crtc->base.state->active) {
3464			active_crtc_cnt++;
3465			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3466
3467			drrs_status_per_crtc(m, dev, intel_crtc);
3468		}
3469	}
3470	drm_modeset_unlock_all(dev);
3471
3472	if (!active_crtc_cnt)
3473		seq_puts(m, "No active crtc found\n");
3474
3475	return 0;
3476}
3477
3478static int i915_dp_mst_info(struct seq_file *m, void *unused)
3479{
3480	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3481	struct drm_device *dev = &dev_priv->drm;
3482	struct intel_encoder *intel_encoder;
3483	struct intel_digital_port *intel_dig_port;
3484	struct drm_connector *connector;
3485	struct drm_connector_list_iter conn_iter;
3486
3487	drm_connector_list_iter_begin(dev, &conn_iter);
3488	drm_for_each_connector_iter(connector, &conn_iter) {
3489		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3490			continue;
3491
3492		intel_encoder = intel_attached_encoder(connector);
3493		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3494			continue;
3495
3496		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3497		if (!intel_dig_port->dp.can_mst)
3498			continue;
3499
3500		seq_printf(m, "MST Source Port %c\n",
3501			   port_name(intel_dig_port->base.port));
3502		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3503	}
3504	drm_connector_list_iter_end(&conn_iter);
3505
3506	return 0;
3507}
3508
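/*
 * DP compliance test hooks: a test application arms the compliance code by
 * writing "1" to i915_dp_test_active (any other parsed value disarms it)
 * and then reads the requested test type and data back, e.g.:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_dp_test_active
 *   cat /sys/kernel/debug/dri/0/i915_dp_test_type
 */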
3509static ssize_t i915_displayport_test_active_write(struct file *file,
3510						  const char __user *ubuf,
3511						  size_t len, loff_t *offp)
3512{
3513	char *input_buffer;
3514	int status = 0;
3515	struct drm_device *dev;
3516	struct drm_connector *connector;
3517	struct drm_connector_list_iter conn_iter;
3518	struct intel_dp *intel_dp;
3519	int val = 0;
3520
3521	dev = ((struct seq_file *)file->private_data)->private;
3522
3523	if (len == 0)
3524		return 0;
3525
3526	input_buffer = memdup_user_nul(ubuf, len);
3527	if (IS_ERR(input_buffer))
3528		return PTR_ERR(input_buffer);
3529
3530	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3531
3532	drm_connector_list_iter_begin(dev, &conn_iter);
3533	drm_for_each_connector_iter(connector, &conn_iter) {
3534		struct intel_encoder *encoder;
3535
3536		if (connector->connector_type !=
3537		    DRM_MODE_CONNECTOR_DisplayPort)
3538			continue;
3539
3540		encoder = to_intel_encoder(connector->encoder);
3541		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3542			continue;
3543
3544		if (encoder && connector->status == connector_status_connected) {
3545			intel_dp = enc_to_intel_dp(&encoder->base);
3546			status = kstrtoint(input_buffer, 10, &val);
3547			if (status < 0)
3548				break;
3549			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3550			/* To prevent erroneous activation of the compliance
3551			 * testing code, only accept an actual value of 1 here
3552			 */
3553			if (val == 1)
3554				intel_dp->compliance.test_active = 1;
3555			else
3556				intel_dp->compliance.test_active = 0;
3557		}
3558	}
3559	drm_connector_list_iter_end(&conn_iter);
3560	kfree(input_buffer);
3561	if (status < 0)
3562		return status;
3563
3564	*offp += len;
3565	return len;
3566}
3567
3568static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3569{
3570	struct drm_device *dev = m->private;
3571	struct drm_connector *connector;
3572	struct drm_connector_list_iter conn_iter;
3573	struct intel_dp *intel_dp;
3574
3575	drm_connector_list_iter_begin(dev, &conn_iter);
3576	drm_for_each_connector_iter(connector, &conn_iter) {
3577		struct intel_encoder *encoder;
3578
3579		if (connector->connector_type !=
3580		    DRM_MODE_CONNECTOR_DisplayPort)
3581			continue;
3582
3583		encoder = to_intel_encoder(connector->encoder);
3584		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3585			continue;
3586
3587		if (encoder && connector->status == connector_status_connected) {
3588			intel_dp = enc_to_intel_dp(&encoder->base);
3589			if (intel_dp->compliance.test_active)
3590				seq_puts(m, "1");
3591			else
3592				seq_puts(m, "0");
3593		} else
3594			seq_puts(m, "0");
3595	}
3596	drm_connector_list_iter_end(&conn_iter);
3597
3598	return 0;
3599}
3600
3601static int i915_displayport_test_active_open(struct inode *inode,
3602					     struct file *file)
3603{
3604	struct drm_i915_private *dev_priv = inode->i_private;
3605
3606	return single_open(file, i915_displayport_test_active_show,
3607			   &dev_priv->drm);
3608}
3609
3610static const struct file_operations i915_displayport_test_active_fops = {
3611	.owner = THIS_MODULE,
3612	.open = i915_displayport_test_active_open,
3613	.read = seq_read,
3614	.llseek = seq_lseek,
3615	.release = single_release,
3616	.write = i915_displayport_test_active_write
3617};
3618
3619static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3620{
3621	struct drm_device *dev = m->private;
3622	struct drm_connector *connector;
3623	struct drm_connector_list_iter conn_iter;
3624	struct intel_dp *intel_dp;
3625
3626	drm_connector_list_iter_begin(dev, &conn_iter);
3627	drm_for_each_connector_iter(connector, &conn_iter) {
3628		struct intel_encoder *encoder;
3629
3630		if (connector->connector_type !=
3631		    DRM_MODE_CONNECTOR_DisplayPort)
3632			continue;
3633
3634		encoder = to_intel_encoder(connector->encoder);
3635		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3636			continue;
3637
3638		if (encoder && connector->status == connector_status_connected) {
3639			intel_dp = enc_to_intel_dp(&encoder->base);
3640			if (intel_dp->compliance.test_type ==
3641			    DP_TEST_LINK_EDID_READ)
3642				seq_printf(m, "%lx",
3643					   intel_dp->compliance.test_data.edid);
3644			else if (intel_dp->compliance.test_type ==
3645				 DP_TEST_LINK_VIDEO_PATTERN) {
3646				seq_printf(m, "hdisplay: %d\n",
3647					   intel_dp->compliance.test_data.hdisplay);
3648				seq_printf(m, "vdisplay: %d\n",
3649					   intel_dp->compliance.test_data.vdisplay);
3650				seq_printf(m, "bpc: %u\n",
3651					   intel_dp->compliance.test_data.bpc);
3652			}
3653		} else
3654			seq_puts(m, "0");
3655	}
3656	drm_connector_list_iter_end(&conn_iter);
3657
3658	return 0;
3659}
3660static int i915_displayport_test_data_open(struct inode *inode,
3661					   struct file *file)
3662{
3663	struct drm_i915_private *dev_priv = inode->i_private;
3664
3665	return single_open(file, i915_displayport_test_data_show,
3666			   &dev_priv->drm);
3667}
3668
3669static const struct file_operations i915_displayport_test_data_fops = {
3670	.owner = THIS_MODULE,
3671	.open = i915_displayport_test_data_open,
3672	.read = seq_read,
3673	.llseek = seq_lseek,
3674	.release = single_release
3675};
3676
3677static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3678{
3679	struct drm_device *dev = m->private;
3680	struct drm_connector *connector;
3681	struct drm_connector_list_iter conn_iter;
3682	struct intel_dp *intel_dp;
3683
3684	drm_connector_list_iter_begin(dev, &conn_iter);
3685	drm_for_each_connector_iter(connector, &conn_iter) {
3686		struct intel_encoder *encoder;
3687
3688		if (connector->connector_type !=
3689		    DRM_MODE_CONNECTOR_DisplayPort)
3690			continue;
3691
3692		encoder = to_intel_encoder(connector->encoder);
3693		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3694			continue;
3695
3696		if (encoder && connector->status == connector_status_connected) {
3697			intel_dp = enc_to_intel_dp(&encoder->base);
3698			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3699		} else
3700			seq_puts(m, "0");
3701	}
3702	drm_connector_list_iter_end(&conn_iter);
3703
3704	return 0;
3705}
3706
3707static int i915_displayport_test_type_open(struct inode *inode,
3708				       struct file *file)
3709{
3710	struct drm_i915_private *dev_priv = inode->i_private;
3711
3712	return single_open(file, i915_displayport_test_type_show,
3713			   &dev_priv->drm);
3714}
3715
3716static const struct file_operations i915_displayport_test_type_fops = {
3717	.owner = THIS_MODULE,
3718	.open = i915_displayport_test_type_open,
3719	.read = seq_read,
3720	.llseek = seq_lseek,
3721	.release = single_release
3722};
3723
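/*
 * Watermark latencies are stored in hardware units but printed in
 * microseconds with one decimal place: each value is first scaled to
 * tenths of a microsecond, by 10 on platforms whose latencies are already
 * in 1us units (gen9+/vlv/chv/g4x) and by 5 for WM1+ on platforms that use
 * 0.5us units.
 */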
3724static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
3725{
3726	struct drm_i915_private *dev_priv = m->private;
3727	struct drm_device *dev = &dev_priv->drm;
3728	int level;
3729	int num_levels;
3730
3731	if (IS_CHERRYVIEW(dev_priv))
3732		num_levels = 3;
3733	else if (IS_VALLEYVIEW(dev_priv))
3734		num_levels = 1;
3735	else if (IS_G4X(dev_priv))
3736		num_levels = 3;
3737	else
3738		num_levels = ilk_wm_max_level(dev_priv) + 1;
3739
3740	drm_modeset_lock_all(dev);
3741
3742	for (level = 0; level < num_levels; level++) {
3743		unsigned int latency = wm[level];
3744
3745		/*
3746		 * - WM1+ latency values are in 0.5us units
3747		 * - latencies are in 1us units on gen9/vlv/chv/g4x
3748		 */
3749		if (INTEL_GEN(dev_priv) >= 9 ||
3750		    IS_VALLEYVIEW(dev_priv) ||
3751		    IS_CHERRYVIEW(dev_priv) ||
3752		    IS_G4X(dev_priv))
3753			latency *= 10;
3754		else if (level > 0)
3755			latency *= 5;
3756
3757		seq_printf(m, "WM%d %u (%u.%u usec)\n",
3758			   level, wm[level], latency / 10, latency % 10);
3759	}
3760
3761	drm_modeset_unlock_all(dev);
3762}
3763
3764static int pri_wm_latency_show(struct seq_file *m, void *data)
3765{
3766	struct drm_i915_private *dev_priv = m->private;
3767	const uint16_t *latencies;
3768
3769	if (INTEL_GEN(dev_priv) >= 9)
3770		latencies = dev_priv->wm.skl_latency;
3771	else
3772		latencies = dev_priv->wm.pri_latency;
3773
3774	wm_latency_show(m, latencies);
3775
3776	return 0;
3777}
3778
3779static int spr_wm_latency_show(struct seq_file *m, void *data)
3780{
3781	struct drm_i915_private *dev_priv = m->private;
3782	const uint16_t *latencies;
3783
3784	if (INTEL_GEN(dev_priv) >= 9)
3785		latencies = dev_priv->wm.skl_latency;
3786	else
3787		latencies = dev_priv->wm.spr_latency;
3788
3789	wm_latency_show(m, latencies);
3790
3791	return 0;
3792}
3793
3794static int cur_wm_latency_show(struct seq_file *m, void *data)
3795{
3796	struct drm_i915_private *dev_priv = m->private;
3797	const uint16_t *latencies;
3798
3799	if (INTEL_GEN(dev_priv) >= 9)
3800		latencies = dev_priv->wm.skl_latency;
3801	else
3802		latencies = dev_priv->wm.cur_latency;
3803
3804	wm_latency_show(m, latencies);
3805
3806	return 0;
3807}
3808
3809static int pri_wm_latency_open(struct inode *inode, struct file *file)
3810{
3811	struct drm_i915_private *dev_priv = inode->i_private;
3812
3813	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3814		return -ENODEV;
3815
3816	return single_open(file, pri_wm_latency_show, dev_priv);
3817}
3818
3819static int spr_wm_latency_open(struct inode *inode, struct file *file)
3820{
3821	struct drm_i915_private *dev_priv = inode->i_private;
3822
3823	if (HAS_GMCH_DISPLAY(dev_priv))
3824		return -ENODEV;
3825
3826	return single_open(file, spr_wm_latency_show, dev_priv);
3827}
3828
3829static int cur_wm_latency_open(struct inode *inode, struct file *file)
3830{
3831	struct drm_i915_private *dev_priv = inode->i_private;
3832
3833	if (HAS_GMCH_DISPLAY(dev_priv))
3834		return -ENODEV;
3835
3836	return single_open(file, cur_wm_latency_show, dev_priv);
3837}
3838
3839static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3840				size_t len, loff_t *offp, uint16_t wm[8])
3841{
3842	struct seq_file *m = file->private_data;
3843	struct drm_i915_private *dev_priv = m->private;
3844	struct drm_device *dev = &dev_priv->drm;
3845	uint16_t new[8] = { 0 };
3846	int num_levels;
3847	int level;
3848	int ret;
3849	char tmp[32];
3850
3851	if (IS_CHERRYVIEW(dev_priv))
3852		num_levels = 3;
3853	else if (IS_VALLEYVIEW(dev_priv))
3854		num_levels = 1;
3855	else if (IS_G4X(dev_priv))
3856		num_levels = 3;
3857	else
3858		num_levels = ilk_wm_max_level(dev_priv) + 1;
3859
3860	if (len >= sizeof(tmp))
3861		return -EINVAL;
3862
3863	if (copy_from_user(tmp, ubuf, len))
3864		return -EFAULT;
3865
3866	tmp[len] = '\0';
3867
3868	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3869		     &new[0], &new[1], &new[2], &new[3],
3870		     &new[4], &new[5], &new[6], &new[7]);
3871	if (ret != num_levels)
3872		return -EINVAL;
3873
3874	drm_modeset_lock_all(dev);
3875
3876	for (level = 0; level < num_levels; level++)
3877		wm[level] = new[level];
3878
3879	drm_modeset_unlock_all(dev);
3880
3881	return len;
3882}
3883
3884
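/*
 * Example: overriding the primary watermark latencies (illustrative values;
 * exactly num_levels values must be supplied for the platform at hand):
 *
 *   echo "2 4 10 10 10" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 */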
3885static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3886				    size_t len, loff_t *offp)
3887{
3888	struct seq_file *m = file->private_data;
3889	struct drm_i915_private *dev_priv = m->private;
3890	uint16_t *latencies;
3891
3892	if (INTEL_GEN(dev_priv) >= 9)
3893		latencies = dev_priv->wm.skl_latency;
3894	else
3895		latencies = dev_priv->wm.pri_latency;
3896
3897	return wm_latency_write(file, ubuf, len, offp, latencies);
3898}
3899
3900static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3901				    size_t len, loff_t *offp)
3902{
3903	struct seq_file *m = file->private_data;
3904	struct drm_i915_private *dev_priv = m->private;
3905	uint16_t *latencies;
3906
3907	if (INTEL_GEN(dev_priv) >= 9)
3908		latencies = dev_priv->wm.skl_latency;
3909	else
3910		latencies = dev_priv->wm.spr_latency;
3911
3912	return wm_latency_write(file, ubuf, len, offp, latencies);
3913}
3914
3915static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3916				    size_t len, loff_t *offp)
3917{
3918	struct seq_file *m = file->private_data;
3919	struct drm_i915_private *dev_priv = m->private;
3920	uint16_t *latencies;
3921
3922	if (INTEL_GEN(dev_priv) >= 9)
3923		latencies = dev_priv->wm.skl_latency;
3924	else
3925		latencies = dev_priv->wm.cur_latency;
3926
3927	return wm_latency_write(file, ubuf, len, offp, latencies);
3928}
3929
3930static const struct file_operations i915_pri_wm_latency_fops = {
3931	.owner = THIS_MODULE,
3932	.open = pri_wm_latency_open,
3933	.read = seq_read,
3934	.llseek = seq_lseek,
3935	.release = single_release,
3936	.write = pri_wm_latency_write
3937};
3938
3939static const struct file_operations i915_spr_wm_latency_fops = {
3940	.owner = THIS_MODULE,
3941	.open = spr_wm_latency_open,
3942	.read = seq_read,
3943	.llseek = seq_lseek,
3944	.release = single_release,
3945	.write = spr_wm_latency_write
3946};
3947
3948static const struct file_operations i915_cur_wm_latency_fops = {
3949	.owner = THIS_MODULE,
3950	.open = cur_wm_latency_open,
3951	.read = seq_read,
3952	.llseek = seq_lseek,
3953	.release = single_release,
3954	.write = cur_wm_latency_write
3955};
3956
3957static int
3958i915_wedged_get(void *data, u64 *val)
3959{
3960	struct drm_i915_private *dev_priv = data;
3961
3962	*val = i915_terminally_wedged(&dev_priv->gpu_error);
3963
3964	return 0;
3965}
3966
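/*
 * Example: injecting a GPU hang from userspace. The value written is an
 * engine mask (all ones covers every engine), and the write does not
 * return until the reset handoff has completed:
 *
 *   echo 0xffffffff > /sys/kernel/debug/dri/0/i915_wedged
 */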
3967static int
3968i915_wedged_set(void *data, u64 val)
3969{
3970	struct drm_i915_private *i915 = data;
3971	struct intel_engine_cs *engine;
3972	unsigned int tmp;
3973
3974	/*
3975	 * There is no safeguard against this debugfs entry colliding
3976	 * with hangcheck calling the same i915_handle_error() in
3977	 * parallel, causing an explosion. For now we assume that the
3978	 * test harness is responsible enough not to inject GPU hangs
3979	 * while it is writing to 'i915_wedged'.
3980	 */
3981
3982	if (i915_reset_backoff(&i915->gpu_error))
3983		return -EAGAIN;
3984
3985	for_each_engine_masked(engine, i915, val, tmp) {
3986		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
3987		engine->hangcheck.stalled = true;
3988	}
3989
3990	i915_handle_error(i915, val, "Manually set wedged engine mask = %llx",
3991			  val);
3992
3993	wait_on_bit(&i915->gpu_error.flags,
3994		    I915_RESET_HANDOFF,
3995		    TASK_UNINTERRUPTIBLE);
3996
3997	return 0;
3998}
3999
4000DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4001			i915_wedged_get, i915_wedged_set,
4002			"%llu\n");
4003
4004static int
4005fault_irq_set(struct drm_i915_private *i915,
4006	      unsigned long *irq,
4007	      unsigned long val)
4008{
4009	int err;
4010
4011	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
4012	if (err)
4013		return err;
4014
4015	err = i915_gem_wait_for_idle(i915,
4016				     I915_WAIT_LOCKED |
4017				     I915_WAIT_INTERRUPTIBLE);
4018	if (err)
4019		goto err_unlock;
4020
4021	*irq = val;
4022	mutex_unlock(&i915->drm.struct_mutex);
4023
4024	/* Flush idle worker to disarm irq */
4025	drain_delayed_work(&i915->gt.idle_work);
4026
4027	return 0;
4028
4029err_unlock:
4030	mutex_unlock(&i915->drm.struct_mutex);
4031	return err;
4032}
4033
4034static int
4035i915_ring_missed_irq_get(void *data, u64 *val)
4036{
4037	struct drm_i915_private *dev_priv = data;
4038
4039	*val = dev_priv->gpu_error.missed_irq_rings;
4040	return 0;
4041}
4042
4043static int
4044i915_ring_missed_irq_set(void *data, u64 val)
4045{
4046	struct drm_i915_private *i915 = data;
4047
4048	return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
4049}
4050
4051DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4052			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4053			"0x%08llx\n");
4054
4055static int
4056i915_ring_test_irq_get(void *data, u64 *val)
4057{
4058	struct drm_i915_private *dev_priv = data;
4059
4060	*val = dev_priv->gpu_error.test_irq_rings;
4061
4062	return 0;
4063}
4064
4065static int
4066i915_ring_test_irq_set(void *data, u64 val)
4067{
4068	struct drm_i915_private *i915 = data;
4069
4070	val &= INTEL_INFO(i915)->ring_mask;
4071	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
4072
4073	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
4074}
4075
4076DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4077			i915_ring_test_irq_get, i915_ring_test_irq_set,
4078			"0x%08llx\n");
4079
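/*
 * i915_gem_drop_caches takes a bitmask of the DROP_* flags defined below;
 * for example, dropping bound and unbound objects and retiring requests
 * (DROP_UNBOUND | DROP_BOUND | DROP_RETIRE):
 *
 *   echo 0x7 > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 */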
4080#define DROP_UNBOUND	BIT(0)
4081#define DROP_BOUND	BIT(1)
4082#define DROP_RETIRE	BIT(2)
4083#define DROP_ACTIVE	BIT(3)
4084#define DROP_FREED	BIT(4)
4085#define DROP_SHRINK_ALL	BIT(5)
4086#define DROP_IDLE	BIT(6)
4087#define DROP_ALL (DROP_UNBOUND	| \
4088		  DROP_BOUND	| \
4089		  DROP_RETIRE	| \
4090		  DROP_ACTIVE	| \
4091		  DROP_FREED	| \
4092		  DROP_SHRINK_ALL |\
4093		  DROP_IDLE)
4094static int
4095i915_drop_caches_get(void *data, u64 *val)
4096{
4097	*val = DROP_ALL;
4098
4099	return 0;
4100}
4101
4102static int
4103i915_drop_caches_set(void *data, u64 val)
4104{
4105	struct drm_i915_private *dev_priv = data;
4106	struct drm_device *dev = &dev_priv->drm;
4107	int ret = 0;
4108
4109	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
4110		  val, val & DROP_ALL);
4111
4112	/* No need to check and wait for GPU resets; libdrm auto-restarts
4113	 * ioctls on -EAGAIN by itself. */
4114	if (val & (DROP_ACTIVE | DROP_RETIRE)) {
4115		ret = mutex_lock_interruptible(&dev->struct_mutex);
4116		if (ret)
4117			return ret;
4118
4119		if (val & DROP_ACTIVE)
4120			ret = i915_gem_wait_for_idle(dev_priv,
4121						     I915_WAIT_INTERRUPTIBLE |
4122						     I915_WAIT_LOCKED);
4123
4124		if (val & DROP_RETIRE)
4125			i915_retire_requests(dev_priv);
4126
4127		mutex_unlock(&dev->struct_mutex);
4128	}
4129
4130	fs_reclaim_acquire(GFP_KERNEL);
4131	if (val & DROP_BOUND)
4132		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);
4133
4134	if (val & DROP_UNBOUND)
4135		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
4136
4137	if (val & DROP_SHRINK_ALL)
4138		i915_gem_shrink_all(dev_priv);
4139	fs_reclaim_release(GFP_KERNEL);
4140
4141	if (val & DROP_IDLE)
4142		drain_delayed_work(&dev_priv->gt.idle_work);
4143
4144	if (val & DROP_FREED)
4145		i915_gem_drain_freed_objects(dev_priv);
4146
4147	return ret;
4148}
4149
4150DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4151			i915_drop_caches_get, i915_drop_caches_set,
4152			"0x%08llx\n");
4153
4154static int
4155i915_max_freq_get(void *data, u64 *val)
4156{
4157	struct drm_i915_private *dev_priv = data;
4158
4159	if (INTEL_GEN(dev_priv) < 6)
4160		return -ENODEV;
4161
4162	*val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.max_freq_softlimit);
4163	return 0;
4164}
4165
4166static int
4167i915_max_freq_set(void *data, u64 val)
4168{
4169	struct drm_i915_private *dev_priv = data;
4170	struct intel_rps *rps = &dev_priv->gt_pm.rps;
4171	u32 hw_max, hw_min;
4172	int ret;
4173
4174	if (INTEL_GEN(dev_priv) < 6)
4175		return -ENODEV;
4176
4177	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
4178
4179	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
4180	if (ret)
4181		return ret;
4182
4183	/*
4184	 * Turbo will still be enabled, but won't go above the set value.
4185	 */
4186	val = intel_freq_opcode(dev_priv, val);
4187
4188	hw_max = rps->max_freq;
4189	hw_min = rps->min_freq;
4190
4191	if (val < hw_min || val > hw_max || val < rps->min_freq_softlimit) {
4192		mutex_unlock(&dev_priv->pcu_lock);
4193		return -EINVAL;
4194	}
4195
4196	rps->max_freq_softlimit = val;
4197
4198	if (intel_set_rps(dev_priv, val))
4199		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
4200
4201	mutex_unlock(&dev_priv->pcu_lock);
4202
4203	return 0;
4204}
4205
4206DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
4207			i915_max_freq_get, i915_max_freq_set,
4208			"%llu\n");
4209
4210static int
4211i915_min_freq_get(void *data, u64 *val)
4212{
4213	struct drm_i915_private *dev_priv = data;
4214
4215	if (INTEL_GEN(dev_priv) < 6)
4216		return -ENODEV;
4217
4218	*val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.min_freq_softlimit);
4219	return 0;
4220}
4221
4222static int
4223i915_min_freq_set(void *data, u64 val)
4224{
4225	struct drm_i915_private *dev_priv = data;
4226	struct intel_rps *rps = &dev_priv->gt_pm.rps;
4227	u32 hw_max, hw_min;
4228	int ret;
4229
4230	if (INTEL_GEN(dev_priv) < 6)
4231		return -ENODEV;
4232
4233	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
4234
4235	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
4236	if (ret)
4237		return ret;
4238
4239	/*
4240	 * Turbo will still be enabled, but won't go below the set value.
4241	 */
4242	val = intel_freq_opcode(dev_priv, val);
4243
4244	hw_max = rps->max_freq;
4245	hw_min = rps->min_freq;
4246
4247	if (val < hw_min ||
4248	    val > hw_max || val > rps->max_freq_softlimit) {
4249		mutex_unlock(&dev_priv->pcu_lock);
4250		return -EINVAL;
4251	}
4252
4253	rps->min_freq_softlimit = val;
4254
4255	if (intel_set_rps(dev_priv, val))
4256		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
4257
4258	mutex_unlock(&dev_priv->pcu_lock);
4259
4260	return 0;
4261}
4262
4263DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
4264			i915_min_freq_get, i915_min_freq_set,
4265			"%llu\n");
4266
4267static int
4268i915_cache_sharing_get(void *data, u64 *val)
4269{
4270	struct drm_i915_private *dev_priv = data;
4271	u32 snpcr;
4272
4273	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
4274		return -ENODEV;
4275
4276	intel_runtime_pm_get(dev_priv);
4277
4278	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4279
4280	intel_runtime_pm_put(dev_priv);
4281
4282	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
4283
4284	return 0;
4285}
4286
4287static int
4288i915_cache_sharing_set(void *data, u64 val)
4289{
4290	struct drm_i915_private *dev_priv = data;
4291	u32 snpcr;
4292
4293	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
4294		return -ENODEV;
4295
4296	if (val > 3)
4297		return -EINVAL;
4298
4299	intel_runtime_pm_get(dev_priv);
4300	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
4301
4302	/* Update the cache sharing policy here as well */
4303	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4304	snpcr &= ~GEN6_MBC_SNPCR_MASK;
4305	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
4306	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4307
4308	intel_runtime_pm_put(dev_priv);
4309	return 0;
4310}
4311
4312DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4313			i915_cache_sharing_get, i915_cache_sharing_set,
4314			"%llu\n");
4315
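/*
 * On CHV the power-gate signature registers report four EU pairs per
 * subslice, so each *_PG_ENABLE bit that is clear contributes two enabled
 * EUs to the totals accumulated below.
 */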
4316static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
4317					  struct sseu_dev_info *sseu)
4318{
4319	int ss_max = 2;
4320	int ss;
4321	u32 sig1[ss_max], sig2[ss_max];
4322
4323	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4324	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4325	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4326	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4327
4328	for (ss = 0; ss < ss_max; ss++) {
4329		unsigned int eu_cnt;
4330
4331		if (sig1[ss] & CHV_SS_PG_ENABLE)
4332			/* skip disabled subslice */
4333			continue;
4334
4335		sseu->slice_mask = BIT(0);
4336		sseu->subslice_mask[0] |= BIT(ss);
4337		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4338			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4339			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4340			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
4341		sseu->eu_total += eu_cnt;
4342		sseu->eu_per_subslice = max_t(unsigned int,
4343					      sseu->eu_per_subslice, eu_cnt);
4344	}
4345}
4346
4347static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
4348				     struct sseu_dev_info *sseu)
4349{
4350	const struct intel_device_info *info = INTEL_INFO(dev_priv);
4351	int s, ss;
4352	u32 s_reg[info->sseu.max_slices];
4353	u32 eu_reg[2 * info->sseu.max_subslices], eu_mask[2];
4354
4355	for (s = 0; s < info->sseu.max_slices; s++) {
4356		/*
4357		 * FIXME: Valid SS Mask respects the spec and reads
4358		 * only valid bits for those registers, excluding reserved
4359		 * bits, although this seems wrong because it would leave
4360		 * many subslices without an ACK.
4361		 */
4362		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
4363			GEN10_PGCTL_VALID_SS_MASK(s);
4364		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
4365		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
4366	}
4367
4368	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4369		     GEN9_PGCTL_SSA_EU19_ACK |
4370		     GEN9_PGCTL_SSA_EU210_ACK |
4371		     GEN9_PGCTL_SSA_EU311_ACK;
4372	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4373		     GEN9_PGCTL_SSB_EU19_ACK |
4374		     GEN9_PGCTL_SSB_EU210_ACK |
4375		     GEN9_PGCTL_SSB_EU311_ACK;
4376
4377	for (s = 0; s < info->sseu.max_slices; s++) {
4378		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4379			/* skip disabled slice */
4380			continue;
4381
4382		sseu->slice_mask |= BIT(s);
4383		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
4384
4385		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
4386			unsigned int eu_cnt;
4387
4388			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4389				/* skip disabled subslice */
4390				continue;
4391
4392			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4393					       eu_mask[ss % 2]);
4394			sseu->eu_total += eu_cnt;
4395			sseu->eu_per_subslice = max_t(unsigned int,
4396						      sseu->eu_per_subslice,
4397						      eu_cnt);
4398		}
4399	}
4400}
4401
4402static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
4403				    struct sseu_dev_info *sseu)
4404{
4405	const struct intel_device_info *info = INTEL_INFO(dev_priv);
4406	int s, ss;
4407	u32 s_reg[info->sseu.max_slices];
4408	u32 eu_reg[2 * info->sseu.max_subslices], eu_mask[2];
4409
4410	for (s = 0; s < info->sseu.max_slices; s++) {
4411		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4412		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4413		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4414	}
4415
4416	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4417		     GEN9_PGCTL_SSA_EU19_ACK |
4418		     GEN9_PGCTL_SSA_EU210_ACK |
4419		     GEN9_PGCTL_SSA_EU311_ACK;
4420	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4421		     GEN9_PGCTL_SSB_EU19_ACK |
4422		     GEN9_PGCTL_SSB_EU210_ACK |
4423		     GEN9_PGCTL_SSB_EU311_ACK;
4424
4425	for (s = 0; s < info->sseu.max_slices; s++) {
4426		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4427			/* skip disabled slice */
4428			continue;
4429
4430		sseu->slice_mask |= BIT(s);
4431
4432		if (IS_GEN9_BC(dev_priv))
4433			sseu->subslice_mask[s] =
4434				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
4435
4436		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
4437			unsigned int eu_cnt;
4438
4439			if (IS_GEN9_LP(dev_priv)) {
4440				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4441					/* skip disabled subslice */
4442					continue;
4443
4444				sseu->subslice_mask[s] |= BIT(ss);
4445			}
4446
4447			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4448					       eu_mask[ss%2]);
4449			sseu->eu_total += eu_cnt;
4450			sseu->eu_per_subslice = max_t(unsigned int,
4451						      sseu->eu_per_subslice,
4452						      eu_cnt);
4453		}
4454	}
4455}
4456
4457static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
4458					 struct sseu_dev_info *sseu)
4459{
4460	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
4461	int s;
4462
4463	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
4464
4465	if (sseu->slice_mask) {
4466		sseu->eu_per_subslice =
4467				INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
4468		for (s = 0; s < fls(sseu->slice_mask); s++) {
4469			sseu->subslice_mask[s] =
4470				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
4471		}
4472		sseu->eu_total = sseu->eu_per_subslice *
4473				 sseu_subslice_total(sseu);
4474
4475		/* subtract fused off EU(s) from enabled slice(s) */
4476		for (s = 0; s < fls(sseu->slice_mask); s++) {
4477			u8 subslice_7eu =
4478				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
4479
4480			sseu->eu_total -= hweight8(subslice_7eu);
4481		}
4482	}
4483}
4484
4485static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4486				 const struct sseu_dev_info *sseu)
4487{
4488	struct drm_i915_private *dev_priv = node_to_i915(m->private);
4489	const char *type = is_available_info ? "Available" : "Enabled";
4490	int s;
4491
4492	seq_printf(m, "  %s Slice Mask: %04x\n", type,
4493		   sseu->slice_mask);
4494	seq_printf(m, "  %s Slice Total: %u\n", type,
4495		   hweight8(sseu->slice_mask));
4496	seq_printf(m, "  %s Subslice Total: %u\n", type,
4497		   sseu_subslice_total(sseu));
4498	for (s = 0; s < fls(sseu->slice_mask); s++) {
4499		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
4500			   s, hweight8(sseu->subslice_mask[s]));
4501	}
4502	seq_printf(m, "  %s EU Total: %u\n", type,
4503		   sseu->eu_total);
4504	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
4505		   sseu->eu_per_subslice);
4506
4507	if (!is_available_info)
4508		return;
4509
4510	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4511	if (HAS_POOLED_EU(dev_priv))
4512		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
4513
4514	seq_printf(m, "  Has Slice Power Gating: %s\n",
4515		   yesno(sseu->has_slice_pg));
4516	seq_printf(m, "  Has Subslice Power Gating: %s\n",
4517		   yesno(sseu->has_subslice_pg));
4518	seq_printf(m, "  Has EU Power Gating: %s\n",
4519		   yesno(sseu->has_eu_pg));
4520}
4521
4522static int i915_sseu_status(struct seq_file *m, void *unused)
4523{
4524	struct drm_i915_private *dev_priv = node_to_i915(m->private);
4525	struct sseu_dev_info sseu;
4526
4527	if (INTEL_GEN(dev_priv) < 8)
4528		return -ENODEV;
4529
4530	seq_puts(m, "SSEU Device Info\n");
4531	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
4532
4533	seq_puts(m, "SSEU Device Status\n");
4534	memset(&sseu, 0, sizeof(sseu));
4535	sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
4536	sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
4537	sseu.max_eus_per_subslice =
4538		INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
4539
4540	intel_runtime_pm_get(dev_priv);
4541
4542	if (IS_CHERRYVIEW(dev_priv)) {
4543		cherryview_sseu_device_status(dev_priv, &sseu);
4544	} else if (IS_BROADWELL(dev_priv)) {
4545		broadwell_sseu_device_status(dev_priv, &sseu);
4546	} else if (IS_GEN9(dev_priv)) {
4547		gen9_sseu_device_status(dev_priv, &sseu);
4548	} else if (INTEL_GEN(dev_priv) >= 10) {
4549		gen10_sseu_device_status(dev_priv, &sseu);
4550	}
4551
4552	intel_runtime_pm_put(dev_priv);
4553
4554	i915_print_sseu_info(m, false, &sseu);
4555
4556	return 0;
4557}
4558
4559static int i915_forcewake_open(struct inode *inode, struct file *file)
4560{
4561	struct drm_i915_private *i915 = inode->i_private;
4562
4563	if (INTEL_GEN(i915) < 6)
4564		return 0;
4565
4566	intel_runtime_pm_get(i915);
4567	intel_uncore_forcewake_user_get(i915);
4568
4569	return 0;
4570}
4571
4572static int i915_forcewake_release(struct inode *inode, struct file *file)
4573{
4574	struct drm_i915_private *i915 = inode->i_private;
4575
4576	if (INTEL_GEN(i915) < 6)
4577		return 0;
4578
4579	intel_uncore_forcewake_user_put(i915);
4580	intel_runtime_pm_put(i915);
4581
4582	return 0;
4583}
4584
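/*
 * Keeping i915_forcewake_user open holds the device awake with the user
 * forcewake reference grabbed, which is useful while inspecting registers
 * from userspace; an illustrative shell sketch (fd 3 is arbitrary):
 *
 *   exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user
 *   # ... read registers ...
 *   exec 3<&-
 */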
4585static const struct file_operations i915_forcewake_fops = {
4586	.owner = THIS_MODULE,
4587	.open = i915_forcewake_open,
4588	.release = i915_forcewake_release,
4589};
4590
4591static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4592{
4593	struct drm_i915_private *dev_priv = m->private;
4594	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4595
4596	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4597	seq_printf(m, "Detected: %s\n",
4598		   yesno(delayed_work_pending(&hotplug->reenable_work)));
4599
4600	return 0;
4601}
4602
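/*
 * The control file accepts a decimal threshold or the literal "reset"
 * (which restores HPD_STORM_DEFAULT_THRESHOLD); writing 0 disables storm
 * detection entirely:
 *
 *   echo 5 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *   echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 */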
4603static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4604					const char __user *ubuf, size_t len,
4605					loff_t *offp)
4606{
4607	struct seq_file *m = file->private_data;
4608	struct drm_i915_private *dev_priv = m->private;
4609	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4610	unsigned int new_threshold;
4611	int i;
4612	char *newline;
4613	char tmp[16];
4614
4615	if (len >= sizeof(tmp))
4616		return -EINVAL;
4617
4618	if (copy_from_user(tmp, ubuf, len))
4619		return -EFAULT;
4620
4621	tmp[len] = '\0';
4622
4623	/* Strip newline, if any */
4624	newline = strchr(tmp, '\n');
4625	if (newline)
4626		*newline = '\0';
4627
4628	if (strcmp(tmp, "reset") == 0)
4629		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4630	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4631		return -EINVAL;
4632
4633	if (new_threshold > 0)
4634		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4635			      new_threshold);
4636	else
4637		DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4638
4639	spin_lock_irq(&dev_priv->irq_lock);
4640	hotplug->hpd_storm_threshold = new_threshold;
4641	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
4642	for_each_hpd_pin(i)
4643		hotplug->stats[i].count = 0;
4644	spin_unlock_irq(&dev_priv->irq_lock);
4645
4646	/* Re-enable hpd immediately if we were in an irq storm */
4647	flush_delayed_work(&dev_priv->hotplug.reenable_work);
4648
4649	return len;
4650}
4651
4652static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4653{
4654	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4655}
4656
4657static const struct file_operations i915_hpd_storm_ctl_fops = {
4658	.owner = THIS_MODULE,
4659	.open = i915_hpd_storm_ctl_open,
4660	.read = seq_read,
4661	.llseek = seq_lseek,
4662	.release = single_release,
4663	.write = i915_hpd_storm_ctl_write
4664};
4665
4666static int i915_drrs_ctl_set(void *data, u64 val)
4667{
4668	struct drm_i915_private *dev_priv = data;
4669	struct drm_device *dev = &dev_priv->drm;
4670	struct intel_crtc *intel_crtc;
4671	struct intel_encoder *encoder;
4672	struct intel_dp *intel_dp;
4673
4674	if (INTEL_GEN(dev_priv) < 7)
4675		return -ENODEV;
4676
4677	drm_modeset_lock_all(dev);
4678	for_each_intel_crtc(dev, intel_crtc) {
4679		if (!intel_crtc->base.state->active ||
4680					!intel_crtc->config->has_drrs)
4681			continue;
4682
4683		for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
4684			if (encoder->type != INTEL_OUTPUT_EDP)
4685				continue;
4686
4687			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4688						val ? "en" : "dis", val);
4689
4690			intel_dp = enc_to_intel_dp(&encoder->base);
4691			if (val)
4692				intel_edp_drrs_enable(intel_dp,
4693							intel_crtc->config);
4694			else
4695				intel_edp_drrs_disable(intel_dp,
4696							intel_crtc->config);
4697		}
4698	}
4699	drm_modeset_unlock_all(dev);
4700
4701	return 0;
4702}
4703
4704DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4705
4706static const struct drm_info_list i915_debugfs_list[] = {
4707	{"i915_capabilities", i915_capabilities, 0},
4708	{"i915_gem_objects", i915_gem_object_info, 0},
4709	{"i915_gem_gtt", i915_gem_gtt_info, 0},
4710	{"i915_gem_stolen", i915_gem_stolen_list_info },
4711	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4712	{"i915_gem_interrupt", i915_interrupt_info, 0},
4713	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
4714	{"i915_guc_info", i915_guc_info, 0},
4715	{"i915_guc_load_status", i915_guc_load_status_info, 0},
4716	{"i915_guc_log_dump", i915_guc_log_dump, 0},
4717	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
4718	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
4719	{"i915_huc_load_status", i915_huc_load_status_info, 0},
4720	{"i915_frequency_info", i915_frequency_info, 0},
4721	{"i915_hangcheck_info", i915_hangcheck_info, 0},
4722	{"i915_reset_info", i915_reset_info, 0},
4723	{"i915_drpc_info", i915_drpc_info, 0},
4724	{"i915_emon_status", i915_emon_status, 0},
4725	{"i915_ring_freq_table", i915_ring_freq_table, 0},
4726	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
4727	{"i915_fbc_status", i915_fbc_status, 0},
4728	{"i915_ips_status", i915_ips_status, 0},
4729	{"i915_sr_status", i915_sr_status, 0},
4730	{"i915_opregion", i915_opregion, 0},
4731	{"i915_vbt", i915_vbt, 0},
4732	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4733	{"i915_context_status", i915_context_status, 0},
4734	{"i915_forcewake_domains", i915_forcewake_domains, 0},
4735	{"i915_swizzle_info", i915_swizzle_info, 0},
4736	{"i915_ppgtt_info", i915_ppgtt_info, 0},
4737	{"i915_llc", i915_llc, 0},
4738	{"i915_edp_psr_status", i915_edp_psr_status, 0},
4739	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
4740	{"i915_energy_uJ", i915_energy_uJ, 0},
4741	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
4742	{"i915_power_domain_info", i915_power_domain_info, 0},
4743	{"i915_dmc_info", i915_dmc_info, 0},
4744	{"i915_display_info", i915_display_info, 0},
4745	{"i915_engine_info", i915_engine_info, 0},
4746	{"i915_rcs_topology", i915_rcs_topology, 0},
4747	{"i915_shrinker_info", i915_shrinker_info, 0},
4748	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4749	{"i915_dp_mst_info", i915_dp_mst_info, 0},
4750	{"i915_wa_registers", i915_wa_registers, 0},
4751	{"i915_ddb_info", i915_ddb_info, 0},
4752	{"i915_sseu_status", i915_sseu_status, 0},
4753	{"i915_drrs_status", i915_drrs_status, 0},
4754	{"i915_rps_boost_info", i915_rps_boost_info, 0},
4755};
4756#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4757
4758static const struct i915_debugfs_files {
4759	const char *name;
4760	const struct file_operations *fops;
4761} i915_debugfs_files[] = {
4762	{"i915_wedged", &i915_wedged_fops},
4763	{"i915_max_freq", &i915_max_freq_fops},
4764	{"i915_min_freq", &i915_min_freq_fops},
4765	{"i915_cache_sharing", &i915_cache_sharing_fops},
4766	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
4767	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
4768	{"i915_gem_drop_caches", &i915_drop_caches_fops},
4769#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
4770	{"i915_error_state", &i915_error_state_fops},
4771	{"i915_gpu_info", &i915_gpu_info_fops},
4772#endif
4773	{"i915_next_seqno", &i915_next_seqno_fops},
4774	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
4775	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4776	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4777	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4778	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
4779	{"i915_dp_test_data", &i915_displayport_test_data_fops},
4780	{"i915_dp_test_type", &i915_displayport_test_type_fops},
4781	{"i915_dp_test_active", &i915_displayport_test_active_fops},
4782	{"i915_guc_log_control", &i915_guc_log_control_fops},
4783	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
4784	{"i915_ipc_status", &i915_ipc_status_fops},
4785	{"i915_drrs_ctl", &i915_drrs_ctl_fops}
4786};
4787
4788int i915_debugfs_register(struct drm_i915_private *dev_priv)
4789{
4790	struct drm_minor *minor = dev_priv->drm.primary;
4791	struct dentry *ent;
4792	int ret, i;
4793
4794	ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4795				  minor->debugfs_root, to_i915(minor->dev),
4796				  &i915_forcewake_fops);
4797	if (!ent)
4798		return -ENOMEM;
4799
4800	ret = intel_pipe_crc_create(minor);
4801	if (ret)
4802		return ret;
4803
4804	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4805		ent = debugfs_create_file(i915_debugfs_files[i].name,
4806					  S_IRUGO | S_IWUSR,
4807					  minor->debugfs_root,
4808					  to_i915(minor->dev),
4809					  i915_debugfs_files[i].fops);
4810		if (!ent)
4811			return -ENOMEM;
4812	}
4813
4814	return drm_debugfs_create_files(i915_debugfs_list,
4815					I915_DEBUGFS_ENTRIES,
4816					minor->debugfs_root, minor);
4817}
4818
4819struct dpcd_block {
4820	/* DPCD dump start address. */
4821	unsigned int offset;
4822	/* DPCD dump end address, inclusive. If unset, .size will be used. */
4823	unsigned int end;
4824	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
4825	size_t size;
4826	/* Only valid for eDP. */
4827	bool edp;
4828};
4829
4830static const struct dpcd_block i915_dpcd_debug[] = {
4831	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
4832	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
4833	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
4834	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
4835	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
4836	{ .offset = DP_SET_POWER },
4837	{ .offset = DP_EDP_DPCD_REV },
4838	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
4839	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
4840	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
4841};
4842
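/*
 * The dump length of each block follows the dpcd_block conventions above:
 * an inclusive .end takes precedence over .size, and an entry that sets
 * neither dumps a single byte, i.e.
 *
 *   size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
 */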
4843static int i915_dpcd_show(struct seq_file *m, void *data)
4844{
4845	struct drm_connector *connector = m->private;
4846	struct intel_dp *intel_dp =
4847		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4848	uint8_t buf[16];
4849	ssize_t err;
4850	int i;
4851
4852	if (connector->status != connector_status_connected)
4853		return -ENODEV;
4854
4855	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4856		const struct dpcd_block *b = &i915_dpcd_debug[i];
4857		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4858
4859		if (b->edp &&
4860		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4861			continue;
4862
4863		/* low tech for now */
4864		if (WARN_ON(size > sizeof(buf)))
4865			continue;
4866
4867		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4868		if (err <= 0) {
4869			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
4870				  size, b->offset, err);
4871			continue;
4872		}
4873
4874		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
4875	}
4876
4877	return 0;
4878}
4879
4880static int i915_dpcd_open(struct inode *inode, struct file *file)
4881{
4882	return single_open(file, i915_dpcd_show, inode->i_private);
4883}
4884
4885static const struct file_operations i915_dpcd_fops = {
4886	.owner = THIS_MODULE,
4887	.open = i915_dpcd_open,
4888	.read = seq_read,
4889	.llseek = seq_lseek,
4890	.release = single_release,
4891};
4892
4893static int i915_panel_show(struct seq_file *m, void *data)
4894{
4895	struct drm_connector *connector = m->private;
4896	struct intel_dp *intel_dp =
4897		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4898
4899	if (connector->status != connector_status_connected)
4900		return -ENODEV;
4901
4902	seq_printf(m, "Panel power up delay: %d\n",
4903		   intel_dp->panel_power_up_delay);
4904	seq_printf(m, "Panel power down delay: %d\n",
4905		   intel_dp->panel_power_down_delay);
4906	seq_printf(m, "Backlight on delay: %d\n",
4907		   intel_dp->backlight_on_delay);
4908	seq_printf(m, "Backlight off delay: %d\n",
4909		   intel_dp->backlight_off_delay);
4910
4911	return 0;
4912}
4913
4914static int i915_panel_open(struct inode *inode, struct file *file)
4915{
4916	return single_open(file, i915_panel_show, inode->i_private);
4917}
4918
4919static const struct file_operations i915_panel_fops = {
4920	.owner = THIS_MODULE,
4921	.open = i915_panel_open,
4922	.read = seq_read,
4923	.llseek = seq_lseek,
4924	.release = single_release,
4925};
4926
4927/**
4928 * i915_debugfs_connector_add - add i915 specific connector debugfs files
4929 * @connector: pointer to a registered drm_connector
4930 *
4931 * Cleanup will be done by drm_connector_unregister() through a call to
4932 * drm_debugfs_connector_remove().
4933 *
4934 * Returns 0 on success, negative error codes on error.
4935 */
4936int i915_debugfs_connector_add(struct drm_connector *connector)
4937{
4938	struct dentry *root = connector->debugfs_entry;
4939
4940	/* The connector must have been registered beforehand. */
4941	if (!root)
4942		return -ENODEV;
4943
4944	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4945	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4946		debugfs_create_file("i915_dpcd", S_IRUGO, root,
4947				    connector, &i915_dpcd_fops);
4948
4949	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4950		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4951				    connector, &i915_panel_fops);
4952
4953	return 0;
4954}