/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}

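/*
 * Dump the static device capabilities: hardware generation, PCH type,
 * and every boolean feature flag declared via DEV_INFO_FOR_EACH_FLAG.
 */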
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}

static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->pin_display)
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
}

static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
{
	u64 size = 0;
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (vma->is_ggtt && drm_mm_node_allocated(&vma->node))
			size += vma->node.size;
	}

	return size;
}

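/*
 * Print a one-line summary of a GEM object: pointer, status flags
 * (active/pinned/tiling/global), size, read/write domains, the last
 * seqno seen on each ring, and any bindings, fences or special
 * placement (stolen, mappable, frontbuffer) the object holds.
 */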
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *ring;
	struct i915_vma *vma;
	int pin_count = 0;
	int i;

	seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
		   &obj->base,
		   obj->active ? "*" : " ",
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain);
	for_each_ring(ring, dev_priv, i)
		seq_printf(m, "%x ",
				i915_gem_request_get_seqno(obj->last_read_req[i]));
	seq_printf(m, "] %x %x%s%s%s",
		   i915_gem_request_get_seqno(obj->last_write_req),
		   i915_gem_request_get_seqno(obj->last_fenced_req),
		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (vma->pin_count > 0)
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
			   vma->is_ggtt ? "g" : "pp",
			   vma->node.start, vma->node.size);
		if (vma->is_ggtt)
			seq_printf(m, ", type: %u", vma->ggtt_view.type);
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (obj->pin_display || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_display)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->last_write_req != NULL)
		seq_printf(m, " (%s)",
			   i915_gem_request_get_ring(obj->last_write_req)->name);
	if (obj->frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}

static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
{
	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
	seq_putc(m, ' ');
}

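/*
 * Walk either the active or inactive VMA list of the global GTT
 * (selected via the info_ent data cookie) and describe each object,
 * followed by aggregate totals.
 */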
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct i915_vma *vma;
	u64 total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* FIXME: the user of this interface might want more than just GGTT */
	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &vm->active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &vm->inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(vma, head, vm_link) {
		seq_printf(m, "   ");
		describe_obj(m, vma->obj);
		seq_printf(m, "\n");
		total_obj_size += vma->obj->base.size;
		total_gtt_size += vma->node.size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

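/*
 * list_sort() comparator: order objects by the start offset of their
 * allocation in stolen memory (negative/zero/positive return, as for
 * any list_sort comparison callback).
 */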
static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);

	if (a->stolen->start < b->stolen->start)
		return -1;
	if (a->stolen->start > b->stolen->start)
		return 1;
	return 0;
}

static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_total_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)

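/*
 * Per-client memory accounting. per_file_stats() is an idr_for_each()
 * callback invoked once per GEM handle; it buckets each object's size
 * into total/global/shared/active/inactive/unbound counters, taking
 * care to attribute PPGTT bindings only to the owning client.
 */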
struct file_stats {
	struct drm_i915_file_private *file_priv;
	unsigned long count;
	u64 total, unbound;
	u64 global, shared;
	u64 active, inactive;
};

static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	stats->count++;
	stats->total += obj->base.size;

	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	if (USES_FULL_PPGTT(obj->base.dev)) {
		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			struct i915_hw_ppgtt *ppgtt;

			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (vma->is_ggtt) {
				stats->global += obj->base.size;
				continue;
			}

			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
			if (ppgtt->file_priv != stats->file_priv)
				continue;

			if (obj->active) /* XXX per-vma statistic */
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;

			return 0;
		}
	} else {
		if (i915_gem_obj_ggtt_bound(obj)) {
			stats->global += obj->base.size;
			if (obj->active)
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;
			return 0;
		}
	}

	if (!list_empty(&obj->global_list))
		stats->unbound += obj->base.size;

	return 0;
}

#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)

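/*
 * Account the objects cached in each ring's batch-buffer pool by
 * reusing per_file_stats() on each pooled object (with no owning
 * client), and print the summary under the "[k]batch pool" label.
 */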
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;
	struct intel_engine_cs *ring;
	int i, j;

	memset(&stats, 0, sizeof(stats));

	for_each_ring(ring, dev_priv, i) {
		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &ring->batch_pool.cache_list[j],
					    batch_pool_link)
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}

#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_total_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)

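/*
 * The main memory overview: global object/byte counts, bound and
 * unbound totals, active/inactive splits, purgeable and mappable
 * summaries, the GTT size, and finally a per-client breakdown keyed
 * by process name.
 */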
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	u64 size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct drm_file *file;
	struct i915_vma *vma;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->active_list, vm_link);
	seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->inactive_list, vm_link);
	seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_display) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %llu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %llu bytes\n",
		   count, size);

	seq_printf(m, "%llu [%llu] gtt total\n",
		   dev_priv->gtt.base.total,
		   (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct task_struct *task;

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
			continue;

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}

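/*
 * Report the page-flip state of every CRTC: whether a flip is queued
 * or pending, which ring and seqno it waits on, the vblank counts
 * involved, and the current/new scanout addresses.
 */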
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_intel_crtc(dev, crtc) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irq(&dev->event_lock);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			u32 addr;

			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->flip_queued_req) {
				struct intel_engine_cs *ring =
					i915_gem_request_get_ring(work->flip_queued_req);

				seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
					   ring->name,
					   i915_gem_request_get_seqno(work->flip_queued_req),
					   dev_priv->next_seqno,
					   ring->get_seqno(ring, true),
					   i915_gem_request_completed(work->flip_queued_req, true));
			} else
				seq_printf(m, "Flip not associated with any ring\n");
			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
				   work->flip_queued_vblank,
				   work->flip_ready_vblank,
				   drm_crtc_vblank_count(&crtc->base));
			if (work->enable_stall_check)
				seq_puts(m, "Stall check enabled, ");
			else
				seq_puts(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (INTEL_INFO(dev)->gen >= 4)
				addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
			else
				addr = I915_READ(DSPADDR(crtc->plane));
			seq_printf(m, "Current scanout address 0x%08x\n", addr);

			if (work->pending_flip_obj) {
				seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
				seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
			}
		}
		spin_unlock_irq(&dev->event_lock);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *ring;
	int total = 0;
	int ret, i, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_ring(ring, dev_priv, i) {
		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
			int count;

			count = 0;
			list_for_each_entry(obj,
					    &ring->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   ring->name, j, count);

			list_for_each_entry(obj,
					    &ring->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

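/*
 * List the outstanding GEM requests on each ring: seqno, age in
 * jiffies, and the name/pid of the submitting task (looked up under
 * RCU, since the task may have exited).
 */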
static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct drm_i915_gem_request *req;
	int ret, any, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	any = 0;
	for_each_ring(ring, dev_priv, i) {
		int count;

		count = 0;
		list_for_each_entry(req, &ring->request_list, list)
			count++;
		if (count == 0)
			continue;

		seq_printf(m, "%s requests: %d\n", ring->name, count);
		list_for_each_entry(req, &ring->request_list, list) {
			struct task_struct *task;

			rcu_read_lock();
			task = NULL;
			if (req->pid)
				task = pid_task(req->pid, PIDTYPE_PID);
			seq_printf(m, "    %x @ %d: %s [%d]\n",
				   req->seqno,
				   (int) (jiffies - req->emitted_jiffies),
				   task ? task->comm : "<unknown>",
				   task ? task->pid : -1);
			rcu_read_unlock();
		}

		any++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (any == 0)
		seq_puts(m, "No requests\n");

	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %x\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

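/*
 * Dump the interrupt registers (IER/IIR/IMR and friends) with the
 * layout appropriate to the platform: Cherryview, gen8+, Valleyview,
 * pre-PCH-split, and PCH-split hardware each use a different
 * register set.
 */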
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_INFO(dev)->gen >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			if (!intel_display_power_get_if_enabled(dev_priv,
								power_domain)) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));

			intel_display_power_put(dev_priv, power_domain);
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}
	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   ring->name, I915_READ_IMR(ring));
		}
		i915_ring_seqno_info(m, ring);
	}
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_puts(m, "unused");
		else
			describe_obj(m, obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}

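/*
 * The i915_error_state file: reading streams out the last captured GPU
 * error state via i915_error_state_to_str(); writing anything to the
 * file clears the saved state.
 */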
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	i915_error_state_get(dev, error_priv);

	file->private_data = error_priv;

	return 0;
}

static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}

static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};

static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");

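/*
 * Report GPU frequency/RPS state. Ironlake exposes P-state fields in
 * MEMSWCTL/MEMSTAT; Valleyview/Cherryview read the current frequency
 * from the Punit; gen6+ decode the full RPS picture (RPSTAT, up/down
 * thresholds, RP0/RP1/RPn limits) from the RP registers.
 */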
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		u32 freq_sts;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_BROXTON(dev)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			goto out;

		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (IS_GEN9(dev))
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev) || IS_BROADWELL(dev))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
		if (IS_GEN9(dev))
			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf = intel_gpu_freq(dev_priv, cagf);

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "Up threshold: %d%%\n",
			   dev_priv->rps.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "Down threshold: %d%%\n",
			   dev_priv->rps.down_threshold);

		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
			     GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
			     GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
			     GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk_freq);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

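/*
 * Snapshot the data the hangcheck timer works from: per-ring seqno and
 * ACTHD (both the live values and the ones hangcheck last saw), the
 * hangcheck score/action, and the INSTDONE registers for the render
 * ring.
 */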
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u64 acthd[I915_NUM_RINGS];
	u32 seqno[I915_NUM_RINGS];
	u32 instdone[I915_NUM_INSTDONE_REG];
	int i, j;

	if (!i915.enable_hangcheck) {
		seq_printf(m, "Hangcheck disabled\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	for_each_ring(ring, dev_priv, i) {
		seqno[i] = ring->get_seqno(ring, false);
		acthd[i] = intel_ring_get_active_head(ring);
	}

	i915_get_extra_instdone(dev, instdone);

	intel_runtime_pm_put(dev_priv);

	if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
		seq_printf(m, "Hangcheck active, fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	} else
		seq_printf(m, "Hangcheck inactive\n");

	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "%s:\n", ring->name);
		seq_printf(m, "\tseqno = %x [current %x]\n",
			   ring->hangcheck.seqno, seqno[i]);
		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)ring->hangcheck.acthd,
			   (long long)acthd[i]);
		seq_printf(m, "\tmax ACTHD = 0x%08llx\n",
			   (long long)ring->hangcheck.max_acthd);
		seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
		seq_printf(m, "\taction = %d\n", ring->hangcheck.action);

		if (ring->id == RCS) {
			seq_puts(m, "\tinstdone read =");

			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
				seq_printf(m, " 0x%08x", instdone[j]);

			seq_puts(m, "\n\tinstdone accu =");

			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
				seq_printf(m, " 0x%08x",
					   ring->hangcheck.instdone[j]);

			seq_puts(m, "\n");
		}
	}

	return 0;
}

static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_uncore_forcewake_domain *fw_domain;
	int i;

	spin_lock_irq(&dev_priv->uncore.lock);
	for_each_fw_domain(fw_domain, dev_priv, i) {
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(i),
			   fw_domain->wake_count);
	}
	spin_unlock_irq(&dev_priv->uncore.lock);

	return 0;
}

static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, rcctl1, pw_status;

	intel_runtime_pm_get(dev_priv);

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "Turbo enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	seq_printf(m, "Render RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_RENDER_RC6));
	seq_printf(m, "Media RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_MEDIA_RC6));

	return i915_forcewake_domains(m, NULL);
}

static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference\n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6   voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+  voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return 0;
}

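/*
 * Dispatch to the platform-appropriate render power-state (DRPC)
 * report: Valleyview/Cherryview, gen6+, or Ironlake.
 */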
1620static int i915_drpc_info(struct seq_file *m, void *unused)
1621{
1622	struct drm_info_node *node = m->private;
1623	struct drm_device *dev = node->minor->dev;
 
 
 
 
 
 
 
 
 
 
1624
1625	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
1626		return vlv_drpc_info(m);
1627	else if (INTEL_INFO(dev)->gen >= 6)
1628		return gen6_drpc_info(m);
1629	else
1630		return ironlake_drpc_info(m);
1631}
1632
1633static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1634{
1635	struct drm_info_node *node = m->private;
1636	struct drm_device *dev = node->minor->dev;
1637	struct drm_i915_private *dev_priv = dev->dev_private;
1638
1639	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1640		   dev_priv->fb_tracking.busy_bits);
1641
1642	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1643		   dev_priv->fb_tracking.flip_bits);
1644
1645	return 0;
1646}
1647
1648static int i915_fbc_status(struct seq_file *m, void *unused)
1649{
1650	struct drm_info_node *node = m->private;
1651	struct drm_device *dev = node->minor->dev;
1652	struct drm_i915_private *dev_priv = dev->dev_private;
1653
1654	if (!HAS_FBC(dev)) {
1655		seq_puts(m, "FBC unsupported on this chipset\n");
1656		return 0;
1657	}
1658
1659	intel_runtime_pm_get(dev_priv);
1660	mutex_lock(&dev_priv->fbc.lock);
1661
1662	if (intel_fbc_is_active(dev_priv))
1663		seq_puts(m, "FBC enabled\n");
1664	else
1665		seq_printf(m, "FBC disabled: %s\n",
1666			   dev_priv->fbc.no_fbc_reason);
1667
1668	if (INTEL_INFO(dev_priv)->gen >= 7)
1669		seq_printf(m, "Compressing: %s\n",
1670			   yesno(I915_READ(FBC_STATUS2) &
1671				 FBC_COMPRESSION_MASK));
1672
1673	mutex_unlock(&dev_priv->fbc.lock);
1674	intel_runtime_pm_put(dev_priv);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1675
1676	return 0;
1677}
1678
1679static int i915_fbc_fc_get(void *data, u64 *val)
1680{
1681	struct drm_device *dev = data;
1682	struct drm_i915_private *dev_priv = dev->dev_private;
1683
1684	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
1685		return -ENODEV;
1686
1687	*val = dev_priv->fbc.false_color;
1688
1689	return 0;
1690}
1691
1692static int i915_fbc_fc_set(void *data, u64 val)
1693{
1694	struct drm_device *dev = data;
1695	struct drm_i915_private *dev_priv = dev->dev_private;
1696	u32 reg;
1697
1698	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
1699		return -ENODEV;
1700
1701	mutex_lock(&dev_priv->fbc.lock);
1702
1703	reg = I915_READ(ILK_DPFC_CONTROL);
1704	dev_priv->fbc.false_color = val;
1705
1706	I915_WRITE(ILK_DPFC_CONTROL, val ?
1707		   (reg | FBC_CTL_FALSE_COLOR) :
1708		   (reg & ~FBC_CTL_FALSE_COLOR));
1709
1710	mutex_unlock(&dev_priv->fbc.lock);
1711	return 0;
1712}
1713
1714DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
1715			i915_fbc_fc_get, i915_fbc_fc_set,
1716			"%llu\n");
1717
1718static int i915_ips_status(struct seq_file *m, void *unused)
1719{
1720	struct drm_info_node *node = m->private;
1721	struct drm_device *dev = node->minor->dev;
1722	struct drm_i915_private *dev_priv = dev->dev_private;
1723
1724	if (!HAS_IPS(dev)) {
1725		seq_puts(m, "not supported\n");
1726		return 0;
1727	}
1728
1729	intel_runtime_pm_get(dev_priv);
1730
1731	seq_printf(m, "Enabled by kernel parameter: %s\n",
1732		   yesno(i915.enable_ips));
1733
1734	if (INTEL_INFO(dev)->gen >= 8) {
1735		seq_puts(m, "Currently: unknown\n");
1736	} else {
1737		if (I915_READ(IPS_CTL) & IPS_ENABLE)
1738			seq_puts(m, "Currently: enabled\n");
1739		else
1740			seq_puts(m, "Currently: disabled\n");
1741	}
1742
1743	intel_runtime_pm_put(dev_priv);
1744
1745	return 0;
1746}
1747
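/*
 * Report the legacy display self-refresh state (distinct from PSR) by
 * sampling the platform-specific self-refresh enable bit.
 */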
1748static int i915_sr_status(struct seq_file *m, void *unused)
1749{
1750	struct drm_info_node *node = m->private;
1751	struct drm_device *dev = node->minor->dev;
1752	struct drm_i915_private *dev_priv = dev->dev_private;
1753	bool sr_enabled = false;
1754
1755	intel_runtime_pm_get(dev_priv);
1756
1757	if (HAS_PCH_SPLIT(dev))
1758		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1759	else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
1760		 IS_I945G(dev) || IS_I945GM(dev))
1761		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1762	else if (IS_I915GM(dev))
1763		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1764	else if (IS_PINEVIEW(dev))
1765		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1766	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
1767		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1768
1769	intel_runtime_pm_put(dev_priv);
1770
1771	seq_printf(m, "self-refresh: %s\n",
1772		   sr_enabled ? "enabled" : "disabled");
1773
1774	return 0;
1775}
1776
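/*
 * ILK only: dump the GMCH temperature and the chipset/graphics power
 * readings derived from the hardware energy monitor.
 */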
1777static int i915_emon_status(struct seq_file *m, void *unused)
1778{
1779	struct drm_info_node *node = m->private;
1780	struct drm_device *dev = node->minor->dev;
1781	struct drm_i915_private *dev_priv = dev->dev_private;
1782	unsigned long temp, chipset, gfx;
1783	int ret;
1784
1785	if (!IS_GEN5(dev))
1786		return -ENODEV;
1787
1788	ret = mutex_lock_interruptible(&dev->struct_mutex);
1789	if (ret)
1790		return ret;
1791
1792	temp = i915_mch_val(dev_priv);
1793	chipset = i915_chipset_val(dev_priv);
1794	gfx = i915_gfx_val(dev_priv);
1795	mutex_unlock(&dev->struct_mutex);
1796
1797	seq_printf(m, "GMCH temp: %ld\n", temp);
1798	seq_printf(m, "Chipset power: %ld\n", chipset);
1799	seq_printf(m, "GFX power: %ld\n", gfx);
1800	seq_printf(m, "Total power: %ld\n", chipset + gfx);
1801
1802	return 0;
1803}
1804
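/*
 * Print the effective CPU and ring frequency that pcode pairs with
 * each GPU frequency step, read via the GEN6_PCODE_READ_MIN_FREQ_TABLE
 * mailbox command.
 */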
1805static int i915_ring_freq_table(struct seq_file *m, void *unused)
1806{
1807	struct drm_info_node *node = m->private;
1808	struct drm_device *dev = node->minor->dev;
1809	struct drm_i915_private *dev_priv = dev->dev_private;
1810	int ret = 0;
1811	int gpu_freq, ia_freq;
1812	unsigned int max_gpu_freq, min_gpu_freq;
1813
1814	if (!HAS_CORE_RING_FREQ(dev)) {
1815		seq_puts(m, "unsupported on this chipset\n");
1816		return 0;
1817	}
1818
1819	intel_runtime_pm_get(dev_priv);
1820
1821	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1822
1823	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1824	if (ret)
1825		goto out;
1826
1827	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1828		/* Convert GT frequency to 50 MHz units */
1829		min_gpu_freq =
1830			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
1831		max_gpu_freq =
1832			dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
1833	} else {
1834		min_gpu_freq = dev_priv->rps.min_freq_softlimit;
1835		max_gpu_freq = dev_priv->rps.max_freq_softlimit;
1836	}
1837
1838	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1839
1840	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1841		ia_freq = gpu_freq;
1842		sandybridge_pcode_read(dev_priv,
1843				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
1844				       &ia_freq);
1845		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1846			   intel_gpu_freq(dev_priv, (gpu_freq *
1847				(IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
1848				 GEN9_FREQ_SCALER : 1))),
1849			   ((ia_freq >> 0) & 0xff) * 100,
1850			   ((ia_freq >> 8) & 0xff) * 100);
1851	}
1852
1853	mutex_unlock(&dev_priv->rps.hw_lock);
1854
1855out:
1856	intel_runtime_pm_put(dev_priv);
1857	return ret;
1858}
1859
1860static int i915_opregion(struct seq_file *m, void *unused)
1861{
1862	struct drm_info_node *node = m->private;
1863	struct drm_device *dev = node->minor->dev;
1864	struct drm_i915_private *dev_priv = dev->dev_private;
1865	struct intel_opregion *opregion = &dev_priv->opregion;
1866	int ret;
1867
1868	ret = mutex_lock_interruptible(&dev->struct_mutex);
1869	if (ret)
1870		goto out;
1871
1872	if (opregion->header)
1873		seq_write(m, opregion->header, OPREGION_SIZE);
1874
1875	mutex_unlock(&dev->struct_mutex);
1876
1877out:
1878	return 0;
1879}
1880
1881static int i915_vbt(struct seq_file *m, void *unused)
1882{
1883	struct drm_info_node *node = m->private;
1884	struct drm_device *dev = node->minor->dev;
1885	struct drm_i915_private *dev_priv = dev->dev_private;
1886	struct intel_opregion *opregion = &dev_priv->opregion;
1887
1888	if (opregion->vbt)
1889		seq_write(m, opregion->vbt, opregion->vbt_size);
1890
1891	return 0;
1892}
1893
1894static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1895{
1896	struct drm_info_node *node = m->private;
1897	struct drm_device *dev = node->minor->dev;
1898	struct intel_framebuffer *fbdev_fb = NULL;
1899	struct drm_framebuffer *drm_fb;
1900
1901#ifdef CONFIG_DRM_FBDEV_EMULATION
1902	if (to_i915(dev)->fbdev) {
1903		fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
1904
1905		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1906			   fbdev_fb->base.width,
1907			   fbdev_fb->base.height,
1908			   fbdev_fb->base.depth,
1909			   fbdev_fb->base.bits_per_pixel,
1910			   fbdev_fb->base.modifier[0],
1911			   atomic_read(&fbdev_fb->base.refcount.refcount));
1912		describe_obj(m, fbdev_fb->obj);
1913		seq_putc(m, '\n');
1914	}
1915#endif
1916
1917	mutex_lock(&dev->mode_config.fb_lock);
1918	drm_for_each_fb(drm_fb, dev) {
1919		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1920		if (fb == fbdev_fb)
1921			continue;
1922
1923		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1924			   fb->base.width,
1925			   fb->base.height,
1926			   fb->base.depth,
1927			   fb->base.bits_per_pixel,
1928			   fb->base.modifier[0],
1929			   atomic_read(&fb->base.refcount.refcount));
1930		describe_obj(m, fb->obj);
1931		seq_putc(m, '\n');
1932	}
1933	mutex_unlock(&dev->mode_config.fb_lock);
1934
1935	return 0;
1936}
1937
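/* One-line summary of a logical ring context's ringbuffer state. */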
1938static void describe_ctx_ringbuf(struct seq_file *m,
1939				 struct intel_ringbuffer *ringbuf)
1940{
1941	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
1942		   ringbuf->space, ringbuf->head, ringbuf->tail,
1943		   ringbuf->last_retired_head);
1944}
1945
1946static int i915_context_status(struct seq_file *m, void *unused)
1947{
1948	struct drm_info_node *node = m->private;
1949	struct drm_device *dev = node->minor->dev;
1950	struct drm_i915_private *dev_priv = dev->dev_private;
1951	struct intel_engine_cs *ring;
1952	struct intel_context *ctx;
1953	int ret, i;
1954
1955	ret = mutex_lock_interruptible(&dev->struct_mutex);
1956	if (ret)
1957		return ret;
1958
1959	list_for_each_entry(ctx, &dev_priv->context_list, link) {
1960		if (!i915.enable_execlists &&
1961		    ctx->legacy_hw_ctx.rcs_state == NULL)
1962			continue;
1963
1964		seq_puts(m, "HW context ");
1965		describe_ctx(m, ctx);
1966		if (ctx == dev_priv->kernel_context)
1967			seq_puts(m, "(kernel context) ");
1968
1969		if (i915.enable_execlists) {
1970			seq_putc(m, '\n');
1971			for_each_ring(ring, dev_priv, i) {
1972				struct drm_i915_gem_object *ctx_obj =
1973					ctx->engine[i].state;
1974				struct intel_ringbuffer *ringbuf =
1975					ctx->engine[i].ringbuf;
1976
1977				seq_printf(m, "%s: ", ring->name);
1978				if (ctx_obj)
1979					describe_obj(m, ctx_obj);
1980				if (ringbuf)
1981					describe_ctx_ringbuf(m, ringbuf);
1982				seq_putc(m, '\n');
1983			}
1984		} else {
1985			describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
1986		}
1987
1988		seq_putc(m, '\n');
1989	}
1990
1991	mutex_unlock(&dev->struct_mutex);
1992
1993	return 0;
1994}
1995
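/*
 * Dump the register-state page of an execlists context image: one
 * GGTT-relative address plus four dwords per line, covering the first
 * 0x600 bytes of state.
 */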
1996static void i915_dump_lrc_obj(struct seq_file *m,
1997			      struct intel_context *ctx,
1998			      struct intel_engine_cs *ring)
1999{
2000	struct page *page;
2001	uint32_t *reg_state;
2002	int j;
2003	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
2004	unsigned long ggtt_offset = 0;
2005
2006	if (ctx_obj == NULL) {
2007		seq_printf(m, "Context on %s with no gem object\n",
2008			   ring->name);
2009		return;
2010	}
2011
2012	seq_printf(m, "CONTEXT: %s %u\n", ring->name,
2013		   intel_execlists_ctx_id(ctx, ring));
2014
2015	if (!i915_gem_obj_ggtt_bound(ctx_obj))
2016		seq_puts(m, "\tNot bound in GGTT\n");
2017	else
2018		ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
2019
2020	if (i915_gem_object_get_pages(ctx_obj)) {
2021		seq_puts(m, "\tFailed to get pages for context object\n");
2022		return;
2023	}
2024
2025	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
2026	if (!WARN_ON(page == NULL)) {
2027		reg_state = kmap_atomic(page);
2028
2029		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
2030			seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
2031				   ggtt_offset + 4096 + (j * 4),
2032				   reg_state[j], reg_state[j + 1],
2033				   reg_state[j + 2], reg_state[j + 3]);
2034		}
2035		kunmap_atomic(reg_state);
2036	}
2037
2038	seq_putc(m, '\n');
2039}
2040
2041static int i915_dump_lrc(struct seq_file *m, void *unused)
2042{
2043	struct drm_info_node *node = (struct drm_info_node *) m->private;
2044	struct drm_device *dev = node->minor->dev;
2045	struct drm_i915_private *dev_priv = dev->dev_private;
2046	struct intel_engine_cs *ring;
2047	struct intel_context *ctx;
2048	int ret, i;
2049
2050	if (!i915.enable_execlists) {
2051		seq_puts(m, "Logical Ring Contexts are disabled\n");
2052		return 0;
2053	}
2054
2055	ret = mutex_lock_interruptible(&dev->struct_mutex);
2056	if (ret)
2057		return ret;
2058
2059	list_for_each_entry(ctx, &dev_priv->context_list, link)
2060		if (ctx != dev_priv->kernel_context)
2061			for_each_ring(ring, dev_priv, i)
2062				i915_dump_lrc_obj(m, ctx, ring);
2063
2064	mutex_unlock(&dev->struct_mutex);
2065
2066	return 0;
2067}
2068
2069static int i915_execlists(struct seq_file *m, void *data)
2070{
2071	struct drm_info_node *node = (struct drm_info_node *)m->private;
2072	struct drm_device *dev = node->minor->dev;
2073	struct drm_i915_private *dev_priv = dev->dev_private;
2074	struct intel_engine_cs *ring;
2075	u32 status_pointer;
2076	u8 read_pointer;
2077	u8 write_pointer;
2078	u32 status;
2079	u32 ctx_id;
2080	struct list_head *cursor;
2081	int ring_id, i;
2082	int ret;
2083
2084	if (!i915.enable_execlists) {
2085		seq_puts(m, "Logical Ring Contexts are disabled\n");
2086		return 0;
2087	}
2088
2089	ret = mutex_lock_interruptible(&dev->struct_mutex);
2090	if (ret)
2091		return ret;
2092
2093	intel_runtime_pm_get(dev_priv);
2094
2095	for_each_ring(ring, dev_priv, ring_id) {
2096		struct drm_i915_gem_request *head_req = NULL;
2097		int count = 0;
2098		unsigned long flags;
2099
2100		seq_printf(m, "%s\n", ring->name);
2101
2102		status = I915_READ(RING_EXECLIST_STATUS_LO(ring));
2103		ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(ring));
2104		seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
2105			   status, ctx_id);
2106
2107		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
2108		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
2109
2110		read_pointer = ring->next_context_status_buffer;
2111		write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
2112		if (read_pointer > write_pointer)
2113			write_pointer += GEN8_CSB_ENTRIES;
2114		seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
2115			   read_pointer, write_pointer);
2116
2117		for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
2118			status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
2119			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));
2120
2121			seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
2122				   i, status, ctx_id);
2123		}
2124
2125		spin_lock_irqsave(&ring->execlist_lock, flags);
2126		list_for_each(cursor, &ring->execlist_queue)
2127			count++;
2128		head_req = list_first_entry_or_null(&ring->execlist_queue,
2129				struct drm_i915_gem_request, execlist_link);
2130		spin_unlock_irqrestore(&ring->execlist_lock, flags);
2131
2132		seq_printf(m, "\t%d requests in queue\n", count);
2133		if (head_req) {
2134			seq_printf(m, "\tHead request id: %u\n",
2135				   intel_execlists_ctx_id(head_req->ctx, ring));
2136			seq_printf(m, "\tHead request tail: %u\n",
2137				   head_req->tail);
2138		}
2139
2140		seq_putc(m, '\n');
2141	}
2142
2143	intel_runtime_pm_put(dev_priv);
2144	mutex_unlock(&dev->struct_mutex);
2145
2146	return 0;
2147}
2148
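/* Decode a bit-6 swizzle mode into a human-readable string. */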
2149static const char *swizzle_string(unsigned swizzle)
2150{
2151	switch (swizzle) {
2152	case I915_BIT_6_SWIZZLE_NONE:
2153		return "none";
2154	case I915_BIT_6_SWIZZLE_9:
2155		return "bit9";
2156	case I915_BIT_6_SWIZZLE_9_10:
2157		return "bit9/bit10";
2158	case I915_BIT_6_SWIZZLE_9_11:
2159		return "bit9/bit11";
2160	case I915_BIT_6_SWIZZLE_9_10_11:
2161		return "bit9/bit10/bit11";
2162	case I915_BIT_6_SWIZZLE_9_17:
2163		return "bit9/bit17";
2164	case I915_BIT_6_SWIZZLE_9_10_17:
2165		return "bit9/bit10/bit17";
2166	case I915_BIT_6_SWIZZLE_UNKNOWN:
2167		return "unknown";
2168	}
2169
2170	return "bug";
2171}
2172
2173static int i915_swizzle_info(struct seq_file *m, void *data)
2174{
2175	struct drm_info_node *node = m->private;
2176	struct drm_device *dev = node->minor->dev;
2177	struct drm_i915_private *dev_priv = dev->dev_private;
2178	int ret;
2179
2180	ret = mutex_lock_interruptible(&dev->struct_mutex);
2181	if (ret)
2182		return ret;
2183	intel_runtime_pm_get(dev_priv);
2184
2185	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
2186		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
2187	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
2188		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
2189
2190	if (IS_GEN3(dev) || IS_GEN4(dev)) {
2191		seq_printf(m, "DCC = 0x%08x\n",
2192			   I915_READ(DCC));
2193		seq_printf(m, "DCC2 = 0x%08x\n",
2194			   I915_READ(DCC2));
2195		seq_printf(m, "C0DRB3 = 0x%04x\n",
2196			   I915_READ16(C0DRB3));
2197		seq_printf(m, "C1DRB3 = 0x%04x\n",
2198			   I915_READ16(C1DRB3));
2199	} else if (INTEL_INFO(dev)->gen >= 6) {
2200		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
2201			   I915_READ(MAD_DIMM_C0));
2202		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
2203			   I915_READ(MAD_DIMM_C1));
2204		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
2205			   I915_READ(MAD_DIMM_C2));
2206		seq_printf(m, "TILECTL = 0x%08x\n",
2207			   I915_READ(TILECTL));
2208		if (INTEL_INFO(dev)->gen >= 8)
2209			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
2210				   I915_READ(GAMTARBMODE));
2211		else
2212			seq_printf(m, "ARB_MODE = 0x%08x\n",
2213				   I915_READ(ARB_MODE));
2214		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
2215			   I915_READ(DISP_ARB_CTL));
2216	}
2217
2218	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2219		seq_puts(m, "L-shaped memory detected\n");
2220
2221	intel_runtime_pm_put(dev_priv);
2222	mutex_unlock(&dev->struct_mutex);
2223
2224	return 0;
2225}
2226
2227static int per_file_ctx(int id, void *ptr, void *data)
2228{
2229	struct intel_context *ctx = ptr;
2230	struct seq_file *m = data;
2231	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2232
2233	if (!ppgtt) {
2234		seq_printf(m, "  no ppgtt for context %d\n",
2235			   ctx->user_handle);
2236		return 0;
2237	}
2238
2239	if (i915_gem_context_is_default(ctx))
2240		seq_puts(m, "  default context:\n");
2241	else
2242		seq_printf(m, "  context %d:\n", ctx->user_handle);
2243	ppgtt->debug_dump(ppgtt, m);
2244
2245	return 0;
2246}
2247
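/*
 * On gen8+ the PPGTT page-directory pointers are held in per-ring PDP
 * registers; print all four 64-bit entries for every ring.
 */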
2248static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2249{
2250	struct drm_i915_private *dev_priv = dev->dev_private;
2251	struct intel_engine_cs *ring;
2252	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2253	int unused, i;
2254
2255	if (!ppgtt)
2256		return;
2257
2258	for_each_ring(ring, dev_priv, unused) {
2259		seq_printf(m, "%s\n", ring->name);
2260		for (i = 0; i < 4; i++) {
2261			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(ring, i));
2262			pdp <<= 32;
2263			pdp |= I915_READ(GEN8_RING_PDP_LDW(ring, i));
2264			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2265		}
2266	}
2267}
2268
2269static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2270{
2271	struct drm_i915_private *dev_priv = dev->dev_private;
2272	struct intel_engine_cs *ring;
2273	int i;
2274
2275	if (INTEL_INFO(dev)->gen == 6)
2276		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2277
2278	for_each_ring(ring, dev_priv, i) {
2279		seq_printf(m, "%s\n", ring->name);
2280		if (INTEL_INFO(dev)->gen == 7)
2281			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
2282		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
2283		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
2284		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
2285	}
2286	if (dev_priv->mm.aliasing_ppgtt) {
2287		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2288
2289		seq_puts(m, "aliasing PPGTT:\n");
2290		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
2291
2292		ppgtt->debug_dump(ppgtt, m);
2293	}
2294
2295	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
2296}
2297
2298static int i915_ppgtt_info(struct seq_file *m, void *data)
2299{
2300	struct drm_info_node *node = m->private;
2301	struct drm_device *dev = node->minor->dev;
2302	struct drm_i915_private *dev_priv = dev->dev_private;
2303	struct drm_file *file;
2304
2305	int ret = mutex_lock_interruptible(&dev->struct_mutex);
2306	if (ret)
2307		return ret;
2308	intel_runtime_pm_get(dev_priv);
2309
2310	if (INTEL_INFO(dev)->gen >= 8)
2311		gen8_ppgtt_info(m, dev);
2312	else if (INTEL_INFO(dev)->gen >= 6)
2313		gen6_ppgtt_info(m, dev);
2314
2315	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2316		struct drm_i915_file_private *file_priv = file->driver_priv;
2317		struct task_struct *task;
2318
2319		task = get_pid_task(file->pid, PIDTYPE_PID);
2320		if (!task) {
2321			ret = -ESRCH;
2322			goto out_put;
2323		}
2324		seq_printf(m, "\nproc: %s\n", task->comm);
2325		put_task_struct(task);
2326		idr_for_each(&file_priv->context_idr, per_file_ctx,
2327			     (void *)(unsigned long)m);
2328	}
2329
2330out_put:
2331	intel_runtime_pm_put(dev_priv);
2332	mutex_unlock(&dev->struct_mutex);
2333
2334	return ret;
2335}
2336
2337static int count_irq_waiters(struct drm_i915_private *i915)
2338{
2339	struct intel_engine_cs *ring;
2340	int count = 0;
2341	int i;
2342
2343	for_each_ring(ring, i915, i)
2344		count += ring->irq_refcount;
2345
2346	return count;
2347}
2348
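/*
 * Summarize RPS state: current and min/max frequencies (hard and soft
 * limits), plus per-client, semaphore, mmio-flip and kernel boost
 * counts, all sampled under the client lock.
 */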
2349static int i915_rps_boost_info(struct seq_file *m, void *data)
2350{
2351	struct drm_info_node *node = m->private;
2352	struct drm_device *dev = node->minor->dev;
2353	struct drm_i915_private *dev_priv = dev->dev_private;
2354	struct drm_file *file;
2355
2356	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
2357	seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy);
2358	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
2359	seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
2360		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
2361		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
2362		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
2363		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
2364		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
2365	spin_lock(&dev_priv->rps.client_lock);
2366	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2367		struct drm_i915_file_private *file_priv = file->driver_priv;
2368		struct task_struct *task;
2369
2370		rcu_read_lock();
2371		task = pid_task(file->pid, PIDTYPE_PID);
2372		seq_printf(m, "%s [%d]: %d boosts%s\n",
2373			   task ? task->comm : "<unknown>",
2374			   task ? task->pid : -1,
2375			   file_priv->rps.boosts,
2376			   list_empty(&file_priv->rps.link) ? "" : ", active");
2377		rcu_read_unlock();
2378	}
2379	seq_printf(m, "Semaphore boosts: %d%s\n",
2380		   dev_priv->rps.semaphores.boosts,
2381		   list_empty(&dev_priv->rps.semaphores.link) ? "" : ", active");
2382	seq_printf(m, "MMIO flip boosts: %d%s\n",
2383		   dev_priv->rps.mmioflips.boosts,
2384		   list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active");
2385	seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
2386	spin_unlock(&dev_priv->rps.client_lock);
2387
2388	return 0;
2389}
2390
2391static int i915_llc(struct seq_file *m, void *data)
2392{
2393	struct drm_info_node *node = m->private;
2394	struct drm_device *dev = node->minor->dev;
2395	struct drm_i915_private *dev_priv = dev->dev_private;
2396
2397	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
2398	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
2399	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
2400
2401	return 0;
2402}
2403
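/*
 * Dump GuC firmware state: blob path, fetch/load status, wanted vs.
 * found versions, section layout, and the GUC_STATUS/scratch registers.
 */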
2404static int i915_guc_load_status_info(struct seq_file *m, void *data)
2405{
2406	struct drm_info_node *node = m->private;
2407	struct drm_i915_private *dev_priv = node->minor->dev->dev_private;
2408	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
2409	u32 tmp, i;
2410
2411	if (!HAS_GUC_UCODE(dev_priv->dev))
2412		return 0;
2413
2414	seq_puts(m, "GuC firmware status:\n");
2415	seq_printf(m, "\tpath: %s\n",
2416		guc_fw->guc_fw_path);
2417	seq_printf(m, "\tfetch: %s\n",
2418		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
2419	seq_printf(m, "\tload: %s\n",
2420		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
2421	seq_printf(m, "\tversion wanted: %d.%d\n",
2422		guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
2423	seq_printf(m, "\tversion found: %d.%d\n",
2424		guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
2425	seq_printf(m, "\theader: offset is %d; size = %d\n",
2426		guc_fw->header_offset, guc_fw->header_size);
2427	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
2428		guc_fw->ucode_offset, guc_fw->ucode_size);
2429	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
2430		guc_fw->rsa_offset, guc_fw->rsa_size);
2431
2432	tmp = I915_READ(GUC_STATUS);
2433
2434	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2435	seq_printf(m, "\tBootrom status = 0x%x\n",
2436		(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2437	seq_printf(m, "\tuKernel status = 0x%x\n",
2438		(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2439	seq_printf(m, "\tMIA Core status = 0x%x\n",
2440		(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2441	seq_puts(m, "\nScratch registers:\n");
2442	for (i = 0; i < 16; i++)
2443		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
2444
2445	return 0;
2446}
2447
2448static void i915_guc_client_info(struct seq_file *m,
2449				 struct drm_i915_private *dev_priv,
2450				 struct i915_guc_client *client)
2451{
2452	struct intel_engine_cs *ring;
2453	uint64_t tot = 0;
2454	uint32_t i;
2455
2456	seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
2457		client->priority, client->ctx_index, client->proc_desc_offset);
2458	seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
2459		client->doorbell_id, client->doorbell_offset, client->cookie);
2460	seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
2461		client->wq_size, client->wq_offset, client->wq_tail);
2462
2463	seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
2464	seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
2465	seq_printf(m, "\tLast submission result: %d\n", client->retcode);
2466
2467	for_each_ring(ring, dev_priv, i) {
2468		seq_printf(m, "\tSubmissions: %llu %s\n",
2469				client->submissions[ring->guc_id],
2470				ring->name);
2471		tot += client->submissions[ring->guc_id];
2472	}
2473	seq_printf(m, "\tTotal: %llu\n", tot);
2474}
2475
2476static int i915_guc_info(struct seq_file *m, void *data)
2477{
2478	struct drm_info_node *node = m->private;
2479	struct drm_device *dev = node->minor->dev;
2480	struct drm_i915_private *dev_priv = dev->dev_private;
2481	struct intel_guc guc;
2482	struct i915_guc_client client = {};
2483	struct intel_engine_cs *ring;
2484	enum intel_ring_id i;
2485	u64 total = 0;
2486
2487	if (!HAS_GUC_SCHED(dev_priv->dev))
2488		return 0;
2489
2490	if (mutex_lock_interruptible(&dev->struct_mutex))
2491		return 0;
2492
2493	/* Take a local copy of the GuC data, so we can dump it at leisure */
2494	guc = dev_priv->guc;
2495	if (guc.execbuf_client)
2496		client = *guc.execbuf_client;
2497
2498	mutex_unlock(&dev->struct_mutex);
2499
2500	seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
2501	seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
2502	seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
2503	seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status);
2504	seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
2505
2506	seq_puts(m, "\nGuC submissions:\n");
2507	for_each_ring(ring, dev_priv, i) {
2508		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
2509			ring->name, guc.submissions[ring->guc_id],
2510			guc.last_seqno[ring->guc_id]);
2511		total += guc.submissions[ring->guc_id];
2512	}
2513	seq_printf(m, "\t%s: %llu\n", "Total", total);
2514
2515	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
2516	i915_guc_client_info(m, dev_priv, &client);
2517
2518	/* Add more as required ... */
2519
2520	return 0;
2521}
2522
2523static int i915_guc_log_dump(struct seq_file *m, void *data)
2524{
2525	struct drm_info_node *node = m->private;
2526	struct drm_device *dev = node->minor->dev;
2527	struct drm_i915_private *dev_priv = dev->dev_private;
2528	struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
2529	u32 *log;
2530	int i = 0, pg;
2531
2532	if (!log_obj)
2533		return 0;
2534
2535	for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) {
2536		log = kmap_atomic(i915_gem_object_get_page(log_obj, pg));
2537
2538		for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
2539			seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2540				   *(log + i), *(log + i + 1),
2541				   *(log + i + 2), *(log + i + 3));
2542
2543		kunmap_atomic(log);
2544	}
2545
2546	seq_putc(m, '\n');
2547
2548	return 0;
2549}
2550
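/*
 * Dump eDP panel self-refresh state: the software bookkeeping held
 * under the PSR lock, the hardware enable/active bits, and on HSW/BDW
 * the PSR performance counter.
 */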
2551static int i915_edp_psr_status(struct seq_file *m, void *data)
2552{
2553	struct drm_info_node *node = m->private;
2554	struct drm_device *dev = node->minor->dev;
2555	struct drm_i915_private *dev_priv = dev->dev_private;
2556	u32 psrperf = 0;
2557	u32 stat[3];
2558	enum pipe pipe;
2559	bool enabled = false;
2560
2561	if (!HAS_PSR(dev)) {
2562		seq_puts(m, "PSR not supported\n");
2563		return 0;
2564	}
2565
2566	intel_runtime_pm_get(dev_priv);
2567
2568	mutex_lock(&dev_priv->psr.lock);
2569	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
2570	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
2571	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
2572	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
2573	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2574		   dev_priv->psr.busy_frontbuffer_bits);
2575	seq_printf(m, "Re-enable work scheduled: %s\n",
2576		   yesno(work_busy(&dev_priv->psr.work.work)));
2577
2578	if (HAS_DDI(dev)) {
2579		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
2580	} else {
2581		for_each_pipe(dev_priv, pipe) {
2582			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
2583				VLV_EDP_PSR_CURR_STATE_MASK;
2584			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2585			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2586				enabled = true;
2587		}
2588	}
2589
2590	seq_printf(m, "Main link in standby mode: %s\n",
2591		   yesno(dev_priv->psr.link_standby));
2592
2593	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
2594
2595	if (!HAS_DDI(dev))
2596		for_each_pipe(dev_priv, pipe) {
2597			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2598			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2599				seq_printf(m, " pipe %c", pipe_name(pipe));
2600		}
2601	seq_puts(m, "\n");
2602
2603	/*
2604	 * VLV/CHV PSR has no performance counter.
2605	 * On SKL+ the perf counter is reset to 0 every time a DC state is entered.
2606	 */
2607	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2608		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
2609			EDP_PSR_PERF_CNT_MASK;
2610
2611		seq_printf(m, "Performance_Counter: %u\n", psrperf);
2612	}
2613	mutex_unlock(&dev_priv->psr.lock);
2614
2615	intel_runtime_pm_put(dev_priv);
2616	return 0;
2617}
2618
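/*
 * Ask the eDP sink for the CRC it computed over the current frame
 * (fetched over DPCD) for the first active eDP connector found.
 */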
2619static int i915_sink_crc(struct seq_file *m, void *data)
2620{
2621	struct drm_info_node *node = m->private;
2622	struct drm_device *dev = node->minor->dev;
2623	struct intel_encoder *encoder;
2624	struct intel_connector *connector;
2625	struct intel_dp *intel_dp = NULL;
2626	int ret;
2627	u8 crc[6];
2628
2629	drm_modeset_lock_all(dev);
2630	for_each_intel_connector(dev, connector) {
2631
2632		if (connector->base.dpms != DRM_MODE_DPMS_ON)
2633			continue;
2634
2635		if (!connector->base.encoder)
2636			continue;
2637
2638		encoder = to_intel_encoder(connector->base.encoder);
2639		if (encoder->type != INTEL_OUTPUT_EDP)
2640			continue;
2641
2642		intel_dp = enc_to_intel_dp(&encoder->base);
2643
2644		ret = intel_dp_sink_crc(intel_dp, crc);
2645		if (ret)
2646			goto out;
2647
2648		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
2649			   crc[0], crc[1], crc[2],
2650			   crc[3], crc[4], crc[5]);
2651		goto out;
2652	}
2653	ret = -ENODEV;
2654out:
2655	drm_modeset_unlock_all(dev);
2656	return ret;
2657}
2658
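/*
 * Report the energy consumed by the graphics power plane in
 * microjoules: scale the MCH_SECP_NRG_STTS count by the energy unit
 * advertised in MSR_RAPL_POWER_UNIT.
 */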
2659static int i915_energy_uJ(struct seq_file *m, void *data)
2660{
2661	struct drm_info_node *node = m->private;
2662	struct drm_device *dev = node->minor->dev;
2663	struct drm_i915_private *dev_priv = dev->dev_private;
2664	u64 power;
2665	u32 units;
2666
2667	if (INTEL_INFO(dev)->gen < 6)
2668		return -ENODEV;
2669
2670	intel_runtime_pm_get(dev_priv);
2671
2672	rdmsrl(MSR_RAPL_POWER_UNIT, power);
2673	power = (power & 0x1f00) >> 8;
2674	units = 1000000 / (1 << power); /* convert to uJ */
2675	power = I915_READ(MCH_SECP_NRG_STTS);
2676	power *= units;
2677
2678	intel_runtime_pm_put(dev_priv);
2679
2680	seq_printf(m, "%llu", (unsigned long long)power);
2681
2682	return 0;
2683}
2684
2685static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2686{
2687	struct drm_info_node *node = m->private;
2688	struct drm_device *dev = node->minor->dev;
2689	struct drm_i915_private *dev_priv = dev->dev_private;
2690
2691	if (!HAS_RUNTIME_PM(dev)) {
2692		seq_puts(m, "not supported\n");
2693		return 0;
2694	}
2695
2696	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
2697	seq_printf(m, "IRQs disabled: %s\n",
2698		   yesno(!intel_irqs_enabled(dev_priv)));
2699#ifdef CONFIG_PM
2700	seq_printf(m, "Usage count: %d\n",
2701		   atomic_read(&dev->dev->power.usage_count));
2702#else
2703	seq_puts(m, "Device Power Management (CONFIG_PM) disabled\n");
2704#endif
2705
2706	return 0;
2707}
2708
2709static int i915_power_domain_info(struct seq_file *m, void *unused)
2710{
2711	struct drm_info_node *node = m->private;
2712	struct drm_device *dev = node->minor->dev;
2713	struct drm_i915_private *dev_priv = dev->dev_private;
2714	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2715	int i;
2716
2717	mutex_lock(&power_domains->lock);
2718
2719	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2720	for (i = 0; i < power_domains->power_well_count; i++) {
2721		struct i915_power_well *power_well;
2722		enum intel_display_power_domain power_domain;
2723
2724		power_well = &power_domains->power_wells[i];
2725		seq_printf(m, "%-25s %d\n", power_well->name,
2726			   power_well->count);
2727
2728		for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
2729		     power_domain++) {
2730			if (!(BIT(power_domain) & power_well->domains))
2731				continue;
2732
2733			seq_printf(m, "  %-23s %d\n",
2734				 intel_display_power_domain_str(power_domain),
2735				 power_domains->domain_use_count[power_domain]);
2736		}
2737	}
2738
2739	mutex_unlock(&power_domains->lock);
2740
2741	return 0;
2742}
2743
2744static int i915_dmc_info(struct seq_file *m, void *unused)
2745{
2746	struct drm_info_node *node = m->private;
2747	struct drm_device *dev = node->minor->dev;
2748	struct drm_i915_private *dev_priv = dev->dev_private;
2749	struct intel_csr *csr;
2750
2751	if (!HAS_CSR(dev)) {
2752		seq_puts(m, "not supported\n");
2753		return 0;
2754	}
2755
2756	csr = &dev_priv->csr;
2757
2758	intel_runtime_pm_get(dev_priv);
2759
2760	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2761	seq_printf(m, "path: %s\n", csr->fw_path);
2762
2763	if (!csr->dmc_payload)
2764		goto out;
2765
2766	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2767		   CSR_VERSION_MINOR(csr->version));
2768
2769	if (IS_SKYLAKE(dev) && csr->version >= CSR_VERSION(1, 6)) {
2770		seq_printf(m, "DC3 -> DC5 count: %d\n",
2771			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
2772		seq_printf(m, "DC5 -> DC6 count: %d\n",
2773			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
2774	} else if (IS_BROXTON(dev) && csr->version >= CSR_VERSION(1, 4)) {
2775		seq_printf(m, "DC3 -> DC5 count: %d\n",
2776			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
2777	}
2778
2779out:
2780	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2781	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2782	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2783
2784	intel_runtime_pm_put(dev_priv);
2785
2786	return 0;
2787}
2788
2789static void intel_seq_print_mode(struct seq_file *m, int tabs,
2790				 struct drm_display_mode *mode)
2791{
2792	int i;
2793
2794	for (i = 0; i < tabs; i++)
2795		seq_putc(m, '\t');
2796
2797	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2798		   mode->base.id, mode->name,
2799		   mode->vrefresh, mode->clock,
2800		   mode->hdisplay, mode->hsync_start,
2801		   mode->hsync_end, mode->htotal,
2802		   mode->vdisplay, mode->vsync_start,
2803		   mode->vsync_end, mode->vtotal,
2804		   mode->type, mode->flags);
2805}
2806
2807static void intel_encoder_info(struct seq_file *m,
2808			       struct intel_crtc *intel_crtc,
2809			       struct intel_encoder *intel_encoder)
2810{
2811	struct drm_info_node *node = m->private;
2812	struct drm_device *dev = node->minor->dev;
2813	struct drm_crtc *crtc = &intel_crtc->base;
2814	struct intel_connector *intel_connector;
2815	struct drm_encoder *encoder;
2816
2817	encoder = &intel_encoder->base;
2818	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2819		   encoder->base.id, encoder->name);
2820	for_each_connector_on_encoder(dev, encoder, intel_connector) {
2821		struct drm_connector *connector = &intel_connector->base;
2822		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2823			   connector->base.id,
2824			   connector->name,
2825			   drm_get_connector_status_name(connector->status));
2826		if (connector->status == connector_status_connected) {
2827			struct drm_display_mode *mode = &crtc->mode;
2828			seq_puts(m, ", mode:\n");
2829			intel_seq_print_mode(m, 2, mode);
2830		} else {
2831			seq_putc(m, '\n');
2832		}
2833	}
2834}
2835
2836static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2837{
2838	struct drm_info_node *node = m->private;
2839	struct drm_device *dev = node->minor->dev;
2840	struct drm_crtc *crtc = &intel_crtc->base;
2841	struct intel_encoder *intel_encoder;
2842	struct drm_plane_state *plane_state = crtc->primary->state;
2843	struct drm_framebuffer *fb = plane_state->fb;
2844
2845	if (fb)
2846		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2847			   fb->base.id, plane_state->src_x >> 16,
2848			   plane_state->src_y >> 16, fb->width, fb->height);
2849	else
2850		seq_puts(m, "\tprimary plane disabled\n");
2851	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2852		intel_encoder_info(m, intel_crtc, intel_encoder);
2853}
2854
2855static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2856{
2857	struct drm_display_mode *mode = panel->fixed_mode;
2858
2859	seq_puts(m, "\tfixed mode:\n");
2860	intel_seq_print_mode(m, 2, mode);
2861}
2862
2863static void intel_dp_info(struct seq_file *m,
2864			  struct intel_connector *intel_connector)
2865{
2866	struct intel_encoder *intel_encoder = intel_connector->encoder;
2867	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2868
2869	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2870	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2871	if (intel_encoder->type == INTEL_OUTPUT_EDP)
2872		intel_panel_info(m, &intel_connector->panel);
2873}
2874
2875static void intel_hdmi_info(struct seq_file *m,
2876			    struct intel_connector *intel_connector)
2877{
2878	struct intel_encoder *intel_encoder = intel_connector->encoder;
2879	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2880
2881	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2882}
2883
2884static void intel_lvds_info(struct seq_file *m,
2885			    struct intel_connector *intel_connector)
2886{
2887	intel_panel_info(m, &intel_connector->panel);
2888}
2889
2890static void intel_connector_info(struct seq_file *m,
2891				 struct drm_connector *connector)
2892{
2893	struct intel_connector *intel_connector = to_intel_connector(connector);
2894	struct intel_encoder *intel_encoder = intel_connector->encoder;
2895	struct drm_display_mode *mode;
2896
2897	seq_printf(m, "connector %d: type %s, status: %s\n",
2898		   connector->base.id, connector->name,
2899		   drm_get_connector_status_name(connector->status));
2900	if (connector->status == connector_status_connected) {
2901		seq_printf(m, "\tname: %s\n", connector->display_info.name);
2902		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2903			   connector->display_info.width_mm,
2904			   connector->display_info.height_mm);
2905		seq_printf(m, "\tsubpixel order: %s\n",
2906			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2907		seq_printf(m, "\tCEA rev: %d\n",
2908			   connector->display_info.cea_rev);
2909	}
2910	if (intel_encoder) {
2911		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
2912		    intel_encoder->type == INTEL_OUTPUT_EDP)
2913			intel_dp_info(m, intel_connector);
2914		else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
2915			intel_hdmi_info(m, intel_connector);
2916		else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2917			intel_lvds_info(m, intel_connector);
2918	}
2919
2920	seq_puts(m, "\tmodes:\n");
2921	list_for_each_entry(mode, &connector->modes, head)
2922		intel_seq_print_mode(m, 2, mode);
2923}
2924
2925static bool cursor_active(struct drm_device *dev, int pipe)
2926{
2927	struct drm_i915_private *dev_priv = dev->dev_private;
2928	u32 state;
2929
2930	if (IS_845G(dev) || IS_I865G(dev))
2931		state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
2932	else
2933		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
2934
2935	return state;
2936}
2937
2938static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
2939{
2940	struct drm_i915_private *dev_priv = dev->dev_private;
2941	u32 pos;
2942
2943	pos = I915_READ(CURPOS(pipe));
2944
2945	*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
2946	if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
2947		*x = -*x;
2948
2949	*y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
2950	if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
2951		*y = -*y;
2952
2953	return cursor_active(dev, pipe);
2954}
2955
2956static const char *plane_type(enum drm_plane_type type)
2957{
2958	switch (type) {
2959	case DRM_PLANE_TYPE_OVERLAY:
2960		return "OVL";
2961	case DRM_PLANE_TYPE_PRIMARY:
2962		return "PRI";
2963	case DRM_PLANE_TYPE_CURSOR:
2964		return "CUR";
2965	/*
2966	 * Deliberately omitting default: to generate compiler warnings
2967	 * when a new drm_plane_type gets added.
2968	 */
2969	}
2970
2971	return "unknown";
2972}
2973
2974static const char *plane_rotation(unsigned int rotation)
2975{
2976	static char buf[48];
2977	/*
2978	 * According to the docs only one DRM_ROTATE_ value is allowed,
2979	 * but print them all so that any misuse of the bits is visible.
2980	 */
2981	snprintf(buf, sizeof(buf),
2982		 "%s%s%s%s%s%s(0x%08x)",
2983		 (rotation & BIT(DRM_ROTATE_0)) ? "0 " : "",
2984		 (rotation & BIT(DRM_ROTATE_90)) ? "90 " : "",
2985		 (rotation & BIT(DRM_ROTATE_180)) ? "180 " : "",
2986		 (rotation & BIT(DRM_ROTATE_270)) ? "270 " : "",
2987		 (rotation & BIT(DRM_REFLECT_X)) ? "FLIPX " : "",
2988		 (rotation & BIT(DRM_REFLECT_Y)) ? "FLIPY " : "",
2989		 rotation);
2990
2991	return buf;
2992}
2993
2994static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2995{
2996	struct drm_info_node *node = m->private;
2997	struct drm_device *dev = node->minor->dev;
2998	struct intel_plane *intel_plane;
2999
3000	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3001		struct drm_plane_state *state;
3002		struct drm_plane *plane = &intel_plane->base;
3003
3004		if (!plane->state) {
3005			seq_puts(m, "plane->state is NULL!\n");
3006			continue;
3007		}
3008
3009		state = plane->state;
3010
3011		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3012			   plane->base.id,
3013			   plane_type(intel_plane->base.type),
3014			   state->crtc_x, state->crtc_y,
3015			   state->crtc_w, state->crtc_h,
3016			   (state->src_x >> 16),
3017			   ((state->src_x & 0xffff) * 15625) >> 10,
3018			   (state->src_y >> 16),
3019			   ((state->src_y & 0xffff) * 15625) >> 10,
3020			   (state->src_w >> 16),
3021			   ((state->src_w & 0xffff) * 15625) >> 10,
3022			   (state->src_h >> 16),
3023			   ((state->src_h & 0xffff) * 15625) >> 10,
3024			   state->fb ? drm_get_format_name(state->fb->pixel_format) : "N/A",
3025			   plane_rotation(state->rotation));
3026	}
3027}
3028
3029static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3030{
3031	struct intel_crtc_state *pipe_config;
3032	int num_scalers = intel_crtc->num_scalers;
3033	int i;
3034
3035	pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3036
3037	/* Not all platforms have a scaler */
3038	if (num_scalers) {
3039		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3040			   num_scalers,
3041			   pipe_config->scaler_state.scaler_users,
3042			   pipe_config->scaler_state.scaler_id);
3043
3044		for (i = 0; i < SKL_NUM_SCALERS; i++) {
3045			struct intel_scaler *sc =
3046					&pipe_config->scaler_state.scalers[i];
3047
3048			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3049				   i, yesno(sc->in_use), sc->mode);
3050		}
3051		seq_puts(m, "\n");
3052	} else {
3053		seq_puts(m, "\tNo scalers available on this platform\n");
3054	}
3055}
3056
3057static int i915_display_info(struct seq_file *m, void *unused)
3058{
3059	struct drm_info_node *node = m->private;
3060	struct drm_device *dev = node->minor->dev;
3061	struct drm_i915_private *dev_priv = dev->dev_private;
3062	struct intel_crtc *crtc;
3063	struct drm_connector *connector;
3064
3065	intel_runtime_pm_get(dev_priv);
3066	drm_modeset_lock_all(dev);
3067	seq_puts(m, "CRTC info\n");
3068	seq_puts(m, "---------\n");
3069	for_each_intel_crtc(dev, crtc) {
3070		bool active;
3071		struct intel_crtc_state *pipe_config;
3072		int x, y;
3073
3074		pipe_config = to_intel_crtc_state(crtc->base.state);
3075
3076		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3077			   crtc->base.base.id, pipe_name(crtc->pipe),
3078			   yesno(pipe_config->base.active),
3079			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3080			   yesno(pipe_config->dither), pipe_config->pipe_bpp);
3081
3082		if (pipe_config->base.active) {
3083			intel_crtc_info(m, crtc);
3084
3085			active = cursor_position(dev, crtc->pipe, &x, &y);
3086			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
3087				   yesno(crtc->cursor_base),
3088				   x, y, crtc->base.cursor->state->crtc_w,
3089				   crtc->base.cursor->state->crtc_h,
3090				   crtc->cursor_addr, yesno(active));
3091			intel_scaler_info(m, crtc);
3092			intel_plane_info(m, crtc);
3093		}
3094
3095		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
3096			   yesno(!crtc->cpu_fifo_underrun_disabled),
3097			   yesno(!crtc->pch_fifo_underrun_disabled));
3098	}
3099
3100	seq_putc(m, '\n');
3101	seq_puts(m, "Connector info\n");
3102	seq_puts(m, "--------------\n");
3103	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3104		intel_connector_info(m, connector);
3105	}
3106	drm_modeset_unlock_all(dev);
3107	intel_runtime_pm_put(dev_priv);
3108
3109	return 0;
3110}
3111
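/*
 * Dump inter-ring semaphore state: the last signalled/waited seqnos
 * (read from the BDW semaphore page where available, otherwise from
 * the mbox registers) and the sync seqno matrix.
 */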
3112static int i915_semaphore_status(struct seq_file *m, void *unused)
3113{
3114	struct drm_info_node *node = (struct drm_info_node *) m->private;
3115	struct drm_device *dev = node->minor->dev;
3116	struct drm_i915_private *dev_priv = dev->dev_private;
3117	struct intel_engine_cs *ring;
3118	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
3119	int i, j, ret;
3120
3121	if (!i915_semaphore_is_enabled(dev)) {
3122		seq_puts(m, "Semaphores are disabled\n");
3123		return 0;
3124	}
3125
3126	ret = mutex_lock_interruptible(&dev->struct_mutex);
3127	if (ret)
3128		return ret;
3129	intel_runtime_pm_get(dev_priv);
3130
3131	if (IS_BROADWELL(dev)) {
3132		struct page *page;
3133		uint64_t *seqno;
3134
3135		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
3136
3137		seqno = (uint64_t *)kmap_atomic(page);
3138		for_each_ring(ring, dev_priv, i) {
3139			uint64_t offset;
3140
3141			seq_printf(m, "%s\n", ring->name);
3142
3143			seq_puts(m, "  Last signal:");
3144			for (j = 0; j < num_rings; j++) {
3145				offset = i * I915_NUM_RINGS + j;
3146				seq_printf(m, "0x%08llx (0x%02llx) ",
3147					   seqno[offset], offset * 8);
3148			}
3149			seq_putc(m, '\n');
3150
3151			seq_puts(m, "  Last wait:  ");
3152			for (j = 0; j < num_rings; j++) {
3153				offset = i + (j * I915_NUM_RINGS);
3154				seq_printf(m, "0x%08llx (0x%02llx) ",
3155					   seqno[offset], offset * 8);
3156			}
3157			seq_putc(m, '\n');
3158
3159		}
3160		kunmap_atomic(seqno);
3161	} else {
3162		seq_puts(m, "  Last signal:");
3163		for_each_ring(ring, dev_priv, i)
3164			for (j = 0; j < num_rings; j++)
3165				seq_printf(m, "0x%08x\n",
3166					   I915_READ(ring->semaphore.mbox.signal[j]));
3167		seq_putc(m, '\n');
3168	}
3169
3170	seq_puts(m, "\nSync seqno:\n");
3171	for_each_ring(ring, dev_priv, i) {
3172		for (j = 0; j < num_rings; j++) {
3173			seq_printf(m, "  0x%08x ", ring->semaphore.sync_seqno[j]);
3174		}
3175		seq_putc(m, '\n');
3176	}
3177	seq_putc(m, '\n');
3178
3179	intel_runtime_pm_put(dev_priv);
3180	mutex_unlock(&dev->struct_mutex);
3181	return 0;
3182}
3183
3184static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3185{
3186	struct drm_info_node *node = (struct drm_info_node *) m->private;
3187	struct drm_device *dev = node->minor->dev;
3188	struct drm_i915_private *dev_priv = dev->dev_private;
3189	int i;
3190
3191	drm_modeset_lock_all(dev);
3192	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3193		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3194
3195		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
3196		seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n",
3197			   pll->config.crtc_mask, pll->active, yesno(pll->on));
3198		seq_puts(m, " tracked hardware state:\n");
3199		seq_printf(m, " dpll:    0x%08x\n", pll->config.hw_state.dpll);
3200		seq_printf(m, " dpll_md: 0x%08x\n",
3201			   pll->config.hw_state.dpll_md);
3202		seq_printf(m, " fp0:     0x%08x\n", pll->config.hw_state.fp0);
3203		seq_printf(m, " fp1:     0x%08x\n", pll->config.hw_state.fp1);
3204		seq_printf(m, " wrpll:   0x%08x\n", pll->config.hw_state.wrpll);
3205	}
3206	drm_modeset_unlock_all(dev);
3207
3208	return 0;
3209}
3210
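/*
 * Verify that every workaround register still holds its programmed
 * value by comparing the masked readback against what was written.
 */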
3211static int i915_wa_registers(struct seq_file *m, void *unused)
3212{
3213	int i;
3214	int ret;
3215	struct intel_engine_cs *ring;
3216	struct drm_info_node *node = (struct drm_info_node *) m->private;
3217	struct drm_device *dev = node->minor->dev;
3218	struct drm_i915_private *dev_priv = dev->dev_private;
3219	struct i915_workarounds *workarounds = &dev_priv->workarounds;
3220
3221	ret = mutex_lock_interruptible(&dev->struct_mutex);
3222	if (ret)
3223		return ret;
3224
3225	intel_runtime_pm_get(dev_priv);
3226
3227	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
3228	for_each_ring(ring, dev_priv, i)
3229		seq_printf(m, "HW whitelist count for %s: %d\n",
3230			   ring->name, workarounds->hw_whitelist_count[i]);
3231	for (i = 0; i < workarounds->count; ++i) {
3232		i915_reg_t addr;
3233		u32 mask, value, read;
3234		bool ok;
3235
3236		addr = workarounds->reg[i].addr;
3237		mask = workarounds->reg[i].mask;
3238		value = workarounds->reg[i].value;
3239		read = I915_READ(addr);
3240		ok = (value & mask) == (read & mask);
3241		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
3242			   i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
3243	}
3244
3245	intel_runtime_pm_put(dev_priv);
3246	mutex_unlock(&dev->struct_mutex);
3247
3248	return 0;
3249}
3250
3251static int i915_ddb_info(struct seq_file *m, void *unused)
3252{
3253	struct drm_info_node *node = m->private;
3254	struct drm_device *dev = node->minor->dev;
3255	struct drm_i915_private *dev_priv = dev->dev_private;
3256	struct skl_ddb_allocation *ddb;
3257	struct skl_ddb_entry *entry;
3258	enum pipe pipe;
3259	int plane;
3260
3261	if (INTEL_INFO(dev)->gen < 9)
3262		return 0;
3263
3264	drm_modeset_lock_all(dev);
3265
3266	ddb = &dev_priv->wm.skl_hw.ddb;
3267
3268	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3269
3270	for_each_pipe(dev_priv, pipe) {
3271		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3272
3273		for_each_plane(dev_priv, pipe, plane) {
3274			entry = &ddb->plane[pipe][plane];
3275			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
3276				   entry->start, entry->end,
3277				   skl_ddb_entry_size(entry));
3278		}
3279
3280		entry = &ddb->plane[pipe][PLANE_CURSOR];
3281		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
3282			   entry->end, skl_ddb_entry_size(entry));
3283	}
3284
3285	drm_modeset_unlock_all(dev);
3286
3287	return 0;
3288}
3289
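/*
 * Per-CRTC DRRS report: the encoder type, the DRRS support level from
 * VBT, and, when seamless DRRS is active, the current refresh-rate
 * state and busy frontbuffer bits.
 */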
3290static void drrs_status_per_crtc(struct seq_file *m,
3291		struct drm_device *dev, struct intel_crtc *intel_crtc)
3292{
3293	struct intel_encoder *intel_encoder;
3294	struct drm_i915_private *dev_priv = dev->dev_private;
3295	struct i915_drrs *drrs = &dev_priv->drrs;
3296	int vrefresh = 0;
3297
3298	for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) {
3299		/* Encoder connected on this CRTC */
3300		switch (intel_encoder->type) {
3301		case INTEL_OUTPUT_EDP:
3302			seq_puts(m, "eDP:\n");
3303			break;
3304		case INTEL_OUTPUT_DSI:
3305			seq_puts(m, "DSI:\n");
3306			break;
3307		case INTEL_OUTPUT_HDMI:
3308			seq_puts(m, "HDMI:\n");
3309			break;
3310		case INTEL_OUTPUT_DISPLAYPORT:
3311			seq_puts(m, "DP:\n");
3312			break;
3313		default:
3314			seq_printf(m, "Other encoder (id=%d).\n",
3315						intel_encoder->type);
3316			return;
3317		}
3318	}
3319
3320	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3321		seq_puts(m, "\tVBT: DRRS_type: Static");
3322	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3323		seq_puts(m, "\tVBT: DRRS_type: Seamless");
3324	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3325		seq_puts(m, "\tVBT: DRRS_type: None");
3326	else
3327		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3328
3329	seq_puts(m, "\n\n");
3330
3331	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
3332		struct intel_panel *panel;
3333
3334		mutex_lock(&drrs->mutex);
3335		/* DRRS Supported */
3336		seq_puts(m, "\tDRRS Supported: Yes\n");
3337
3338		/* disable_drrs() will make drrs->dp NULL */
3339		if (!drrs->dp) {
3340			seq_puts(m, "Idleness DRRS: Disabled");
3341			mutex_unlock(&drrs->mutex);
3342			return;
3343		}
3344
3345		panel = &drrs->dp->attached_connector->panel;
3346		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3347					drrs->busy_frontbuffer_bits);
3348
3349		seq_puts(m, "\n\t\t");
3350		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3351			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3352			vrefresh = panel->fixed_mode->vrefresh;
3353		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3354			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3355			vrefresh = panel->downclock_mode->vrefresh;
3356		} else {
3357			seq_printf(m, "DRRS_State: Unknown(%d)\n",
3358						drrs->refresh_rate_type);
3359			mutex_unlock(&drrs->mutex);
3360			return;
3361		}
3362		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3363
3364		seq_puts(m, "\n\t\t");
3365		mutex_unlock(&drrs->mutex);
3366	} else {
3367		/* DRRS not supported. Print the VBT parameter */
3368		seq_puts(m, "\tDRRS Supported: No");
3369	}
3370	seq_puts(m, "\n");
3371}
3372
3373static int i915_drrs_status(struct seq_file *m, void *unused)
3374{
3375	struct drm_info_node *node = m->private;
3376	struct drm_device *dev = node->minor->dev;
3377	struct intel_crtc *intel_crtc;
3378	int active_crtc_cnt = 0;
3379
3380	for_each_intel_crtc(dev, intel_crtc) {
3381		drm_modeset_lock(&intel_crtc->base.mutex, NULL);
3382
3383		if (intel_crtc->base.state->active) {
3384			active_crtc_cnt++;
3385			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3386
3387			drrs_status_per_crtc(m, dev, intel_crtc);
3388		}
3389
3390		drm_modeset_unlock(&intel_crtc->base.mutex);
3391	}
3392
3393	if (!active_crtc_cnt)
3394		seq_puts(m, "No active crtc found\n");
3395
3396	return 0;
3397}
3398
3399struct pipe_crc_info {
3400	const char *name;
3401	struct drm_device *dev;
3402	enum pipe pipe;
3403};
3404
3405static int i915_dp_mst_info(struct seq_file *m, void *unused)
3406{
3407	struct drm_info_node *node = (struct drm_info_node *) m->private;
3408	struct drm_device *dev = node->minor->dev;
3409	struct drm_encoder *encoder;
3410	struct intel_encoder *intel_encoder;
3411	struct intel_digital_port *intel_dig_port;
3412	drm_modeset_lock_all(dev);
3413	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3414		intel_encoder = to_intel_encoder(encoder);
3415		if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
3416			continue;
3417		intel_dig_port = enc_to_dig_port(encoder);
3418		if (!intel_dig_port->dp.can_mst)
3419			continue;
3420
3421		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3422	}
3423	drm_modeset_unlock_all(dev);
3424	return 0;
3425}
3426
3427static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
3428{
3429	struct pipe_crc_info *info = inode->i_private;
3430	struct drm_i915_private *dev_priv = info->dev->dev_private;
3431	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3432
3433	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
3434		return -ENODEV;
3435
3436	spin_lock_irq(&pipe_crc->lock);
3437
3438	if (pipe_crc->opened) {
3439		spin_unlock_irq(&pipe_crc->lock);
3440		return -EBUSY; /* already open */
3441	}
3442
3443	pipe_crc->opened = true;
3444	filep->private_data = inode->i_private;
3445
3446	spin_unlock_irq(&pipe_crc->lock);
3447
3448	return 0;
3449}
3450
3451static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
3452{
3453	struct pipe_crc_info *info = inode->i_private;
3454	struct drm_i915_private *dev_priv = info->dev->dev_private;
3455	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3456
3457	spin_lock_irq(&pipe_crc->lock);
3458	pipe_crc->opened = false;
3459	spin_unlock_irq(&pipe_crc->lock);
3460
3461	return 0;
3462}
3463
3464/* (6 fields, 8 chars each, space separated (5) + '\n') */
3465#define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
3466/* account for the trailing '\0' */
3467#define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)
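/*
 * Each entry is emitted via "%8u %8x %8x %8x %8x %8x\n" (see
 * i915_pipe_crc_read() below), so a single line looks like this,
 * with purely illustrative values:
 *
 *     4147  12d4f09  12d4f09  12d4f09  12d4f09  12d4f09
 */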
3468
3469static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
3470{
3471	assert_spin_locked(&pipe_crc->lock);
3472	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
3473			INTEL_PIPE_CRC_ENTRIES_NR);
3474}
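/*
 * CIRC_CNT(head, tail, size) from <linux/circ_buf.h> expands to
 * ((head) - (tail)) & ((size) - 1), which is only correct for a
 * power-of-two size: e.g. with 128 entries, head == 2 and tail == 126
 * it yields (2 - 126) & 127 == 4 pending entries.
 */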
3475
3476static ssize_t
3477i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
3478		   loff_t *pos)
3479{
3480	struct pipe_crc_info *info = filep->private_data;
3481	struct drm_device *dev = info->dev;
3482	struct drm_i915_private *dev_priv = dev->dev_private;
3483	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3484	char buf[PIPE_CRC_BUFFER_LEN];
3485	int n_entries;
3486	ssize_t bytes_read;
3487
3488	/*
3489	 * Don't allow user space to provide buffers not big enough to hold
3490	 * a line of data.
3491	 */
3492	if (count < PIPE_CRC_LINE_LEN)
3493		return -EINVAL;
3494
3495	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
3496		return 0;
3497
3498	/* nothing to read */
3499	spin_lock_irq(&pipe_crc->lock);
3500	while (pipe_crc_data_count(pipe_crc) == 0) {
3501		int ret;
3502
3503		if (filep->f_flags & O_NONBLOCK) {
3504			spin_unlock_irq(&pipe_crc->lock);
3505			return -EAGAIN;
3506		}
3507
3508		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
3509				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
3510		if (ret) {
3511			spin_unlock_irq(&pipe_crc->lock);
3512			return ret;
3513		}
3514	}
3515
3516	/* We now have one or more entries to read */
3517	n_entries = count / PIPE_CRC_LINE_LEN;
3518
3519	bytes_read = 0;
3520	while (n_entries > 0) {
3521		struct intel_pipe_crc_entry *entry =
3522			&pipe_crc->entries[pipe_crc->tail];
3523		int ret;
3524
3525		if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
3526			     INTEL_PIPE_CRC_ENTRIES_NR) < 1)
3527			break;
3528
3529		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
3530		pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
3531
3532		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
3533				       "%8u %8x %8x %8x %8x %8x\n",
3534				       entry->frame, entry->crc[0],
3535				       entry->crc[1], entry->crc[2],
3536				       entry->crc[3], entry->crc[4]);
3537
3538		spin_unlock_irq(&pipe_crc->lock);
3539
3540		ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN);
3541		if (ret == PIPE_CRC_LINE_LEN)
3542			return -EFAULT;
3543
3544		user_buf += PIPE_CRC_LINE_LEN;
3545		n_entries--;
3546
3547		spin_lock_irq(&pipe_crc->lock);
3548	}
3549
3550	spin_unlock_irq(&pipe_crc->lock);
3551
3552	return bytes_read;
3553}
3554
3555static const struct file_operations i915_pipe_crc_fops = {
3556	.owner = THIS_MODULE,
3557	.open = i915_pipe_crc_open,
3558	.read = i915_pipe_crc_read,
3559	.release = i915_pipe_crc_release,
3560};
3561
3562static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
3563	{
3564		.name = "i915_pipe_A_crc",
3565		.pipe = PIPE_A,
3566	},
3567	{
3568		.name = "i915_pipe_B_crc",
3569		.pipe = PIPE_B,
3570	},
3571	{
3572		.name = "i915_pipe_C_crc",
3573		.pipe = PIPE_C,
3574	},
3575};
3576
3577static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
3578				enum pipe pipe)
3579{
3580	struct drm_device *dev = minor->dev;
3581	struct dentry *ent;
3582	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
3583
3584	info->dev = dev;
3585	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
3586				  &i915_pipe_crc_fops);
3587	if (!ent)
3588		return -ENOMEM;
3589
3590	return drm_add_fake_info_node(minor, ent, info);
3591}
3592
3593static const char * const pipe_crc_sources[] = {
3594	"none",
3595	"plane1",
3596	"plane2",
3597	"pf",
3598	"pipe",
3599	"TV",
3600	"DP-B",
3601	"DP-C",
3602	"DP-D",
3603	"auto",
3604};
3605
3606static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
3607{
3608	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
3609	return pipe_crc_sources[source];
3610}
3611
3612static int display_crc_ctl_show(struct seq_file *m, void *data)
3613{
3614	struct drm_device *dev = m->private;
3615	struct drm_i915_private *dev_priv = dev->dev_private;
3616	int i;
3617
3618	for (i = 0; i < I915_MAX_PIPES; i++)
3619		seq_printf(m, "%c %s\n", pipe_name(i),
3620			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));
3621
3622	return 0;
3623}
3624
3625static int display_crc_ctl_open(struct inode *inode, struct file *file)
3626{
3627	struct drm_device *dev = inode->i_private;
3628
3629	return single_open(file, display_crc_ctl_show, dev);
3630}
3631
3632static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
3633				 uint32_t *val)
3634{
3635	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3636		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
3637
3638	switch (*source) {
3639	case INTEL_PIPE_CRC_SOURCE_PIPE:
3640		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
3641		break;
3642	case INTEL_PIPE_CRC_SOURCE_NONE:
3643		*val = 0;
3644		break;
3645	default:
3646		return -EINVAL;
3647	}
3648
3649	return 0;
3650}
3651
3652static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
3653				     enum intel_pipe_crc_source *source)
3654{
3655	struct intel_encoder *encoder;
3656	struct intel_crtc *crtc;
3657	struct intel_digital_port *dig_port;
3658	int ret = 0;
3659
3660	*source = INTEL_PIPE_CRC_SOURCE_PIPE;
3661
3662	drm_modeset_lock_all(dev);
3663	for_each_intel_encoder(dev, encoder) {
3664		if (!encoder->base.crtc)
3665			continue;
3666
3667		crtc = to_intel_crtc(encoder->base.crtc);
3668
3669		if (crtc->pipe != pipe)
3670			continue;
3671
3672		switch (encoder->type) {
3673		case INTEL_OUTPUT_TVOUT:
3674			*source = INTEL_PIPE_CRC_SOURCE_TV;
3675			break;
3676		case INTEL_OUTPUT_DISPLAYPORT:
3677		case INTEL_OUTPUT_EDP:
3678			dig_port = enc_to_dig_port(&encoder->base);
3679			switch (dig_port->port) {
3680			case PORT_B:
3681				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
3682				break;
3683			case PORT_C:
3684				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
3685				break;
3686			case PORT_D:
3687				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
3688				break;
3689			default:
3690				WARN(1, "nonexistent DP port %c\n",
3691				     port_name(dig_port->port));
3692				break;
3693			}
3694			break;
3695		default:
3696			break;
3697		}
3698	}
3699	drm_modeset_unlock_all(dev);
3700
3701	return ret;
3702}
3703
3704static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
3705				enum pipe pipe,
3706				enum intel_pipe_crc_source *source,
3707				uint32_t *val)
3708{
3709	struct drm_i915_private *dev_priv = dev->dev_private;
3710	bool need_stable_symbols = false;
3711
3712	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
3713		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
3714		if (ret)
3715			return ret;
3716	}
3717
3718	switch (*source) {
3719	case INTEL_PIPE_CRC_SOURCE_PIPE:
3720		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
3721		break;
3722	case INTEL_PIPE_CRC_SOURCE_DP_B:
3723		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
3724		need_stable_symbols = true;
3725		break;
3726	case INTEL_PIPE_CRC_SOURCE_DP_C:
3727		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
3728		need_stable_symbols = true;
3729		break;
3730	case INTEL_PIPE_CRC_SOURCE_DP_D:
3731		if (!IS_CHERRYVIEW(dev))
3732			return -EINVAL;
3733		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
3734		need_stable_symbols = true;
3735		break;
3736	case INTEL_PIPE_CRC_SOURCE_NONE:
3737		*val = 0;
3738		break;
3739	default:
3740		return -EINVAL;
3741	}
3742
3743	/*
3744	 * When the pipe CRC tap point is after the transcoders we need
3745	 * to tweak symbol-level features to produce a deterministic series of
3746	 * symbols for a given frame. We need to reset those features only once
3747	 * a frame (instead of every nth symbol):
3748	 *   - DC-balance: used to ensure a better clock recovery from the data
3749	 *     link (SDVO)
3750	 *   - DisplayPort scrambling: used for EMI reduction
3751	 */
3752	if (need_stable_symbols) {
3753		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3754
3755		tmp |= DC_BALANCE_RESET_VLV;
3756		switch (pipe) {
3757		case PIPE_A:
3758			tmp |= PIPE_A_SCRAMBLE_RESET;
3759			break;
3760		case PIPE_B:
3761			tmp |= PIPE_B_SCRAMBLE_RESET;
3762			break;
3763		case PIPE_C:
3764			tmp |= PIPE_C_SCRAMBLE_RESET;
3765			break;
3766		default:
3767			return -EINVAL;
3768		}
3769		I915_WRITE(PORT_DFT2_G4X, tmp);
3770	}
3771
3772	return 0;
3773}
3774
3775static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
3776				 enum pipe pipe,
3777				 enum intel_pipe_crc_source *source,
3778				 uint32_t *val)
3779{
3780	struct drm_i915_private *dev_priv = dev->dev_private;
3781	bool need_stable_symbols = false;
3782
3783	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
3784		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
3785		if (ret)
3786			return ret;
3787	}
3788
3789	switch (*source) {
3790	case INTEL_PIPE_CRC_SOURCE_PIPE:
3791		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
3792		break;
3793	case INTEL_PIPE_CRC_SOURCE_TV:
3794		if (!SUPPORTS_TV(dev))
3795			return -EINVAL;
3796		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
3797		break;
3798	case INTEL_PIPE_CRC_SOURCE_DP_B:
3799		if (!IS_G4X(dev))
3800			return -EINVAL;
3801		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
3802		need_stable_symbols = true;
3803		break;
3804	case INTEL_PIPE_CRC_SOURCE_DP_C:
3805		if (!IS_G4X(dev))
3806			return -EINVAL;
3807		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
3808		need_stable_symbols = true;
3809		break;
3810	case INTEL_PIPE_CRC_SOURCE_DP_D:
3811		if (!IS_G4X(dev))
3812			return -EINVAL;
3813		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
3814		need_stable_symbols = true;
3815		break;
3816	case INTEL_PIPE_CRC_SOURCE_NONE:
3817		*val = 0;
3818		break;
3819	default:
3820		return -EINVAL;
3821	}
3822
3823	/*
3824	 * When the pipe CRC tap point is after the transcoders we need
3825	 * to tweak symbol-level features to produce a deterministic series of
3826	 * symbols for a given frame. We need to reset those features only once
3827	 * a frame (instead of every nth symbol):
3828	 *   - DC-balance: used to ensure a better clock recovery from the data
3829	 *     link (SDVO)
3830	 *   - DisplayPort scrambling: used for EMI reduction
3831	 */
3832	if (need_stable_symbols) {
3833		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3834
3835		WARN_ON(!IS_G4X(dev));
3836
3837		I915_WRITE(PORT_DFT_I9XX,
3838			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
3839
3840		if (pipe == PIPE_A)
3841			tmp |= PIPE_A_SCRAMBLE_RESET;
3842		else
3843			tmp |= PIPE_B_SCRAMBLE_RESET;
3844
3845		I915_WRITE(PORT_DFT2_G4X, tmp);
3846	}
3847
3848	return 0;
3849}
3850
3851static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
3852					 enum pipe pipe)
3853{
3854	struct drm_i915_private *dev_priv = dev->dev_private;
3855	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3856
3857	switch (pipe) {
3858	case PIPE_A:
3859		tmp &= ~PIPE_A_SCRAMBLE_RESET;
3860		break;
3861	case PIPE_B:
3862		tmp &= ~PIPE_B_SCRAMBLE_RESET;
3863		break;
3864	case PIPE_C:
3865		tmp &= ~PIPE_C_SCRAMBLE_RESET;
3866		break;
3867	default:
3868		return;
3869	}
3870	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
3871		tmp &= ~DC_BALANCE_RESET_VLV;
3872	I915_WRITE(PORT_DFT2_G4X, tmp);
3873
3874}
3875
3876static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
3877					 enum pipe pipe)
3878{
3879	struct drm_i915_private *dev_priv = dev->dev_private;
3880	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3881
3882	if (pipe == PIPE_A)
3883		tmp &= ~PIPE_A_SCRAMBLE_RESET;
3884	else
3885		tmp &= ~PIPE_B_SCRAMBLE_RESET;
3886	I915_WRITE(PORT_DFT2_G4X, tmp);
3887
3888	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
3889		I915_WRITE(PORT_DFT_I9XX,
3890			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
3891	}
3892}
3893
3894static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
3895				uint32_t *val)
3896{
3897	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3898		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
3899
3900	switch (*source) {
3901	case INTEL_PIPE_CRC_SOURCE_PLANE1:
3902		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
3903		break;
3904	case INTEL_PIPE_CRC_SOURCE_PLANE2:
3905		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
3906		break;
3907	case INTEL_PIPE_CRC_SOURCE_PIPE:
3908		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
3909		break;
3910	case INTEL_PIPE_CRC_SOURCE_NONE:
3911		*val = 0;
3912		break;
3913	default:
3914		return -EINVAL;
3915	}
3916
3917	return 0;
3918}
3919
3920static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
3921{
3922	struct drm_i915_private *dev_priv = dev->dev_private;
3923	struct intel_crtc *crtc =
3924		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
3925	struct intel_crtc_state *pipe_config;
3926	struct drm_atomic_state *state;
3927	int ret = 0;
3928
3929	drm_modeset_lock_all(dev);
3930	state = drm_atomic_state_alloc(dev);
3931	if (!state) {
3932		ret = -ENOMEM;
3933		goto out;
3934	}
3935
3936	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
3937	pipe_config = intel_atomic_get_crtc_state(state, crtc);
3938	if (IS_ERR(pipe_config)) {
3939		ret = PTR_ERR(pipe_config);
3940		goto out;
3941	}
3942
3943	pipe_config->pch_pfit.force_thru = enable;
3944	if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
3945	    pipe_config->pch_pfit.enabled != enable)
3946		pipe_config->base.connectors_changed = true;
3947
3948	ret = drm_atomic_commit(state);
3949out:
3950	drm_modeset_unlock_all(dev);
3951	WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
3952	if (ret)
3953		drm_atomic_state_free(state);
3954}
3955
3956static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
3957				enum pipe pipe,
3958				enum intel_pipe_crc_source *source,
3959				uint32_t *val)
3960{
3961	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3962		*source = INTEL_PIPE_CRC_SOURCE_PF;
3963
3964	switch (*source) {
3965	case INTEL_PIPE_CRC_SOURCE_PLANE1:
3966		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
3967		break;
3968	case INTEL_PIPE_CRC_SOURCE_PLANE2:
3969		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
3970		break;
3971	case INTEL_PIPE_CRC_SOURCE_PF:
3972		if (IS_HASWELL(dev) && pipe == PIPE_A)
3973			hsw_trans_edp_pipe_A_crc_wa(dev, true);
3974
3975		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
3976		break;
3977	case INTEL_PIPE_CRC_SOURCE_NONE:
3978		*val = 0;
3979		break;
3980	default:
3981		return -EINVAL;
3982	}
3983
3984	return 0;
3985}
3986
3987static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
3988			       enum intel_pipe_crc_source source)
3989{
3990	struct drm_i915_private *dev_priv = dev->dev_private;
3991	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
3992	struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
3993									pipe));
3994	enum intel_display_power_domain power_domain;
3995	u32 val = 0; /* shut up gcc */
3996	int ret;
3997
3998	if (pipe_crc->source == source)
3999		return 0;
4000
4001	/* forbid changing the source without going back to 'none' */
4002	if (pipe_crc->source && source)
4003		return -EINVAL;
4004
4005	power_domain = POWER_DOMAIN_PIPE(pipe);
4006	if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
4007		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
4008		return -EIO;
4009	}
4010
4011	if (IS_GEN2(dev))
4012		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
4013	else if (INTEL_INFO(dev)->gen < 5)
4014		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
4015	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
4016		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
4017	else if (IS_GEN5(dev) || IS_GEN6(dev))
4018		ret = ilk_pipe_crc_ctl_reg(&source, &val);
4019	else
4020		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
4021
4022	if (ret != 0)
4023		goto out;
4024
4025	/* none -> real source transition */
4026	if (source) {
4027		struct intel_pipe_crc_entry *entries;
4028
4029		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
4030				 pipe_name(pipe), pipe_crc_source_name(source));
4031
4032		entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
4033				  sizeof(pipe_crc->entries[0]),
4034				  GFP_KERNEL);
4035		if (!entries) {
4036			ret = -ENOMEM;
4037			goto out;
4038		}
4039
4040		/*
4041		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
4042		 * enabled and disabled dynamically based on package C states,
4043		 * user space can't make reliable use of the CRCs, so let's just
4044		 * completely disable it.
4045		 */
4046		hsw_disable_ips(crtc);
4047
4048		spin_lock_irq(&pipe_crc->lock);
4049		kfree(pipe_crc->entries);
4050		pipe_crc->entries = entries;
4051		pipe_crc->head = 0;
4052		pipe_crc->tail = 0;
4053		spin_unlock_irq(&pipe_crc->lock);
4054	}
4055
4056	pipe_crc->source = source;
4057
4058	I915_WRITE(PIPE_CRC_CTL(pipe), val);
4059	POSTING_READ(PIPE_CRC_CTL(pipe));
4060
4061	/* real source -> none transition */
4062	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
4063		struct intel_pipe_crc_entry *entries;
4064		struct intel_crtc *crtc =
4065			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
4066
4067		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
4068				 pipe_name(pipe));
4069
4070		drm_modeset_lock(&crtc->base.mutex, NULL);
4071		if (crtc->base.state->active)
4072			intel_wait_for_vblank(dev, pipe);
4073		drm_modeset_unlock(&crtc->base.mutex);
4074
4075		spin_lock_irq(&pipe_crc->lock);
4076		entries = pipe_crc->entries;
4077		pipe_crc->entries = NULL;
4078		pipe_crc->head = 0;
4079		pipe_crc->tail = 0;
4080		spin_unlock_irq(&pipe_crc->lock);
4081
4082		kfree(entries);
4083
4084		if (IS_G4X(dev))
4085			g4x_undo_pipe_scramble_reset(dev, pipe);
4086		else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
4087			vlv_undo_pipe_scramble_reset(dev, pipe);
4088		else if (IS_HASWELL(dev) && pipe == PIPE_A)
4089			hsw_trans_edp_pipe_A_crc_wa(dev, false);
4090
4091		hsw_enable_ips(crtc);
4092	}
4093
4094	ret = 0;
4095
4096out:
4097	intel_display_power_put(dev_priv, power_domain);
4098
4099	return ret;
4100}
4101
4102/*
4103 * Parse pipe CRC command strings:
4104 *   command: wsp* object wsp+ name wsp+ source wsp*
4105 *   object: 'pipe'
4106 *   name: (A | B | C)
4107 *   source: (none | plane1 | plane2 | pf)
4108 *   wsp: (#0x20 | #0x9 | #0xA)+
4109 *
4110 * e.g.:
4111 *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
4112 *  "pipe A none"    ->  Stop CRC
4113 */
4114static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
4115{
4116	int n_words = 0;
4117
4118	while (*buf) {
4119		char *end;
4120
4121		/* skip leading white space */
4122		buf = skip_spaces(buf);
4123		if (!*buf)
4124			break;	/* end of buffer */
4125
4126		/* find end of word */
4127		for (end = buf; *end && !isspace(*end); end++)
4128			;
4129
4130		if (n_words == max_words) {
4131			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
4132					 max_words);
4133			return -EINVAL;	/* ran out of words[] before bytes */
4134		}
4135
4136		if (*end)
4137			*end++ = '\0';
4138		words[n_words++] = buf;
4139		buf = end;
4140	}
4141
4142	return n_words;
4143}
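/*
 * For example, tokenizing "  pipe A none\n" in place yields
 * n_words == 3 with words[] == { "pipe", "A", "none" }; a fourth word
 * would trip the max_words check above and return -EINVAL.
 */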
4144
4145enum intel_pipe_crc_object {
4146	PIPE_CRC_OBJECT_PIPE,
4147};
4148
4149static const char * const pipe_crc_objects[] = {
4150	"pipe",
4151};
4152
4153static int
4154display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
4155{
4156	int i;
4157
4158	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
4159		if (!strcmp(buf, pipe_crc_objects[i])) {
4160			*o = i;
4161			return 0;
4162		}
4163
4164	return -EINVAL;
4165}
4166
4167static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
4168{
4169	const char name = buf[0];
4170
4171	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
4172		return -EINVAL;
4173
4174	*pipe = name - 'A';
4175
4176	return 0;
4177}
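/* e.g. "B" parses to PIPE_B, since 'B' - 'A' == 1 */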
4178
4179static int
4180display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
4181{
4182	int i;
4183
4184	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
4185		if (!strcmp(buf, pipe_crc_sources[i])) {
4186			*s = i;
4187			return 0;
4188		}
4189
4190	return -EINVAL;
4191}
4192
4193static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
4194{
4195#define N_WORDS 3
4196	int n_words;
4197	char *words[N_WORDS];
4198	enum pipe pipe;
4199	enum intel_pipe_crc_object object;
4200	enum intel_pipe_crc_source source;
4201
4202	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
4203	if (n_words != N_WORDS) {
4204		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
4205				 N_WORDS);
4206		return -EINVAL;
4207	}
4208
4209	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
4210		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
4211		return -EINVAL;
4212	}
4213
4214	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
4215		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
4216		return -EINVAL;
4217	}
4218
4219	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
4220		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
4221		return -EINVAL;
4222	}
4223
4224	return pipe_crc_set_source(dev, pipe, source);
4225}
4226
4227static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
4228				     size_t len, loff_t *offp)
4229{
4230	struct seq_file *m = file->private_data;
4231	struct drm_device *dev = m->private;
4232	char *tmpbuf;
4233	int ret;
4234
4235	if (len == 0)
4236		return 0;
4237
4238	if (len > PAGE_SIZE - 1) {
4239		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
4240				 PAGE_SIZE);
4241		return -E2BIG;
4242	}
4243
4244	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
4245	if (!tmpbuf)
4246		return -ENOMEM;
4247
4248	if (copy_from_user(tmpbuf, ubuf, len)) {
4249		ret = -EFAULT;
4250		goto out;
4251	}
4252	tmpbuf[len] = '\0';
4253
4254	ret = display_crc_ctl_parse(dev, tmpbuf, len);
4255
4256out:
4257	kfree(tmpbuf);
4258	if (ret < 0)
4259		return ret;
4260
4261	*offp += len;
4262	return len;
4263}
4264
4265static const struct file_operations i915_display_crc_ctl_fops = {
4266	.owner = THIS_MODULE,
4267	.open = display_crc_ctl_open,
4268	.read = seq_read,
4269	.llseek = seq_lseek,
4270	.release = single_release,
4271	.write = display_crc_ctl_write
4272};
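/*
 * Illustrative userspace usage (the dri minor number may differ):
 *
 *   # echo "pipe A pipe" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 *   # cat /sys/kernel/debug/dri/0/i915_pipe_A_crc
 *   # echo "pipe A none" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 */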
4273
4274static ssize_t i915_displayport_test_active_write(struct file *file,
4275					    const char __user *ubuf,
4276					    size_t len, loff_t *offp)
4277{
4278	char *input_buffer;
4279	int status = 0;
4280	struct drm_device *dev;
4281	struct drm_connector *connector;
4282	struct list_head *connector_list;
4283	struct intel_dp *intel_dp;
4284	int val = 0;
4285
4286	dev = ((struct seq_file *)file->private_data)->private;
4287
4288	connector_list = &dev->mode_config.connector_list;
4289
4290	if (len == 0)
4291		return 0;
4292
4293	input_buffer = kmalloc(len + 1, GFP_KERNEL);
4294	if (!input_buffer)
4295		return -ENOMEM;
4296
4297	if (copy_from_user(input_buffer, ubuf, len)) {
4298		status = -EFAULT;
4299		goto out;
4300	}
4301
4302	input_buffer[len] = '\0';
4303	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
4304
4305	list_for_each_entry(connector, connector_list, head) {
4306
4307		if (connector->connector_type !=
4308		    DRM_MODE_CONNECTOR_DisplayPort)
4309			continue;
4310
4311		if (connector->status == connector_status_connected &&
4312		    connector->encoder != NULL) {
4313			intel_dp = enc_to_intel_dp(connector->encoder);
4314			status = kstrtoint(input_buffer, 10, &val);
4315			if (status < 0)
4316				goto out;
4317			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
4318			/* To prevent erroneous activation of the compliance
4319			 * testing code, only accept an actual value of 1 here
4320			 */
4321			if (val == 1)
4322				intel_dp->compliance_test_active = 1;
4323			else
4324				intel_dp->compliance_test_active = 0;
4325		}
4326	}
4327out:
4328	kfree(input_buffer);
4329	if (status < 0)
4330		return status;
4331
4332	*offp += len;
4333	return len;
4334}
4335
4336static int i915_displayport_test_active_show(struct seq_file *m, void *data)
4337{
4338	struct drm_device *dev = m->private;
4339	struct drm_connector *connector;
4340	struct list_head *connector_list = &dev->mode_config.connector_list;
4341	struct intel_dp *intel_dp;
4342
4343	list_for_each_entry(connector, connector_list, head) {
4344
4345		if (connector->connector_type !=
4346		    DRM_MODE_CONNECTOR_DisplayPort)
4347			continue;
4348
4349		if (connector->status == connector_status_connected &&
4350		    connector->encoder != NULL) {
4351			intel_dp = enc_to_intel_dp(connector->encoder);
4352			if (intel_dp->compliance_test_active)
4353				seq_puts(m, "1");
4354			else
4355				seq_puts(m, "0");
4356		} else
4357			seq_puts(m, "0");
4358	}
4359
4360	return 0;
4361}
4362
4363static int i915_displayport_test_active_open(struct inode *inode,
4364				       struct file *file)
4365{
4366	struct drm_device *dev = inode->i_private;
4367
4368	return single_open(file, i915_displayport_test_active_show, dev);
4369}
4370
4371static const struct file_operations i915_displayport_test_active_fops = {
4372	.owner = THIS_MODULE,
4373	.open = i915_displayport_test_active_open,
4374	.read = seq_read,
4375	.llseek = seq_lseek,
4376	.release = single_release,
4377	.write = i915_displayport_test_active_write
4378};
4379
4380static int i915_displayport_test_data_show(struct seq_file *m, void *data)
4381{
4382	struct drm_device *dev = m->private;
4383	struct drm_connector *connector;
4384	struct list_head *connector_list = &dev->mode_config.connector_list;
4385	struct intel_dp *intel_dp;
4386
4387	list_for_each_entry(connector, connector_list, head) {
4388
4389		if (connector->connector_type !=
4390		    DRM_MODE_CONNECTOR_DisplayPort)
4391			continue;
4392
4393		if (connector->status == connector_status_connected &&
4394		    connector->encoder != NULL) {
4395			intel_dp = enc_to_intel_dp(connector->encoder);
4396			seq_printf(m, "%lx", intel_dp->compliance_test_data);
4397		} else
4398			seq_puts(m, "0");
4399	}
4400
4401	return 0;
4402}
4403static int i915_displayport_test_data_open(struct inode *inode,
4404				       struct file *file)
4405{
4406	struct drm_device *dev = inode->i_private;
4407
4408	return single_open(file, i915_displayport_test_data_show, dev);
4409}
4410
4411static const struct file_operations i915_displayport_test_data_fops = {
4412	.owner = THIS_MODULE,
4413	.open = i915_displayport_test_data_open,
4414	.read = seq_read,
4415	.llseek = seq_lseek,
4416	.release = single_release
4417};
4418
4419static int i915_displayport_test_type_show(struct seq_file *m, void *data)
4420{
4421	struct drm_device *dev = m->private;
4422	struct drm_connector *connector;
4423	struct list_head *connector_list = &dev->mode_config.connector_list;
4424	struct intel_dp *intel_dp;
4425
4426	list_for_each_entry(connector, connector_list, head) {
4427
4428		if (connector->connector_type !=
4429		    DRM_MODE_CONNECTOR_DisplayPort)
4430			continue;
4431
4432		if (connector->status == connector_status_connected &&
4433		    connector->encoder != NULL) {
4434			intel_dp = enc_to_intel_dp(connector->encoder);
4435			seq_printf(m, "%02lx", intel_dp->compliance_test_type);
4436		} else
4437			seq_puts(m, "0");
4438	}
4439
4440	return 0;
4441}
4442
4443static int i915_displayport_test_type_open(struct inode *inode,
4444				       struct file *file)
4445{
4446	struct drm_device *dev = inode->i_private;
4447
4448	return single_open(file, i915_displayport_test_type_show, dev);
4449}
4450
4451static const struct file_operations i915_displayport_test_type_fops = {
4452	.owner = THIS_MODULE,
4453	.open = i915_displayport_test_type_open,
4454	.read = seq_read,
4455	.llseek = seq_lseek,
4456	.release = single_release
4457};
4458
4459static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
4460{
4461	struct drm_device *dev = m->private;
4462	int level;
4463	int num_levels;
4464
4465	if (IS_CHERRYVIEW(dev))
4466		num_levels = 3;
4467	else if (IS_VALLEYVIEW(dev))
4468		num_levels = 1;
4469	else
4470		num_levels = ilk_wm_max_level(dev) + 1;
4471
4472	drm_modeset_lock_all(dev);
4473
4474	for (level = 0; level < num_levels; level++) {
4475		unsigned int latency = wm[level];
4476
4477		/*
4478		 * - WM1+ latency values in 0.5us units
4479		 * - latencies are in us on gen9/vlv/chv
4480		 */
4481		if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev) ||
4482		    IS_CHERRYVIEW(dev))
4483			latency *= 10;
4484		else if (level > 0)
4485			latency *= 5;
4486
4487		seq_printf(m, "WM%d %u (%u.%u usec)\n",
4488			   level, wm[level], latency / 10, latency % 10);
4489	}
4490
4491	drm_modeset_unlock_all(dev);
4492}
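/*
 * For example, a raw WM2 value of 13 prints as "WM2 13 (6.5 usec)" on
 * ILK-class hardware (0.5us units for WM1+), but as
 * "WM2 13 (13.0 usec)" on gen9/vlv/chv where values are already in us.
 */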
4493
4494static int pri_wm_latency_show(struct seq_file *m, void *data)
4495{
4496	struct drm_device *dev = m->private;
4497	struct drm_i915_private *dev_priv = dev->dev_private;
4498	const uint16_t *latencies;
4499
4500	if (INTEL_INFO(dev)->gen >= 9)
4501		latencies = dev_priv->wm.skl_latency;
4502	else
4503		latencies = to_i915(dev)->wm.pri_latency;
4504
4505	wm_latency_show(m, latencies);
4506
4507	return 0;
4508}
4509
4510static int spr_wm_latency_show(struct seq_file *m, void *data)
4511{
4512	struct drm_device *dev = m->private;
4513	struct drm_i915_private *dev_priv = dev->dev_private;
4514	const uint16_t *latencies;
4515
4516	if (INTEL_INFO(dev)->gen >= 9)
4517		latencies = dev_priv->wm.skl_latency;
4518	else
4519		latencies = to_i915(dev)->wm.spr_latency;
4520
4521	wm_latency_show(m, latencies);
4522
4523	return 0;
4524}
4525
4526static int cur_wm_latency_show(struct seq_file *m, void *data)
4527{
4528	struct drm_device *dev = m->private;
4529	struct drm_i915_private *dev_priv = dev->dev_private;
4530	const uint16_t *latencies;
4531
4532	if (INTEL_INFO(dev)->gen >= 9)
4533		latencies = dev_priv->wm.skl_latency;
4534	else
4535		latencies = to_i915(dev)->wm.cur_latency;
4536
4537	wm_latency_show(m, latencies);
4538
4539	return 0;
4540}
4541
4542static int pri_wm_latency_open(struct inode *inode, struct file *file)
4543{
4544	struct drm_device *dev = inode->i_private;
4545
4546	if (INTEL_INFO(dev)->gen < 5)
4547		return -ENODEV;
4548
4549	return single_open(file, pri_wm_latency_show, dev);
4550}
4551
4552static int spr_wm_latency_open(struct inode *inode, struct file *file)
4553{
4554	struct drm_device *dev = inode->i_private;
4555
4556	if (HAS_GMCH_DISPLAY(dev))
4557		return -ENODEV;
4558
4559	return single_open(file, spr_wm_latency_show, dev);
4560}
4561
4562static int cur_wm_latency_open(struct inode *inode, struct file *file)
4563{
4564	struct drm_device *dev = inode->i_private;
4565
4566	if (HAS_GMCH_DISPLAY(dev))
4567		return -ENODEV;
4568
4569	return single_open(file, cur_wm_latency_show, dev);
4570}
4571
4572static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
4573				size_t len, loff_t *offp, uint16_t wm[8])
4574{
4575	struct seq_file *m = file->private_data;
4576	struct drm_device *dev = m->private;
4577	uint16_t new[8] = { 0 };
4578	int num_levels;
4579	int level;
4580	int ret;
4581	char tmp[32];
4582
4583	if (IS_CHERRYVIEW(dev))
4584		num_levels = 3;
4585	else if (IS_VALLEYVIEW(dev))
4586		num_levels = 1;
4587	else
4588		num_levels = ilk_wm_max_level(dev) + 1;
4589
4590	if (len >= sizeof(tmp))
4591		return -EINVAL;
4592
4593	if (copy_from_user(tmp, ubuf, len))
4594		return -EFAULT;
4595
4596	tmp[len] = '\0';
4597
4598	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
4599		     &new[0], &new[1], &new[2], &new[3],
4600		     &new[4], &new[5], &new[6], &new[7]);
4601	if (ret != num_levels)
4602		return -EINVAL;
4603
4604	drm_modeset_lock_all(dev);
4605
4606	for (level = 0; level < num_levels; level++)
4607		wm[level] = new[level];
4608
4609	drm_modeset_unlock_all(dev);
4610
4611	return len;
4612}
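/*
 * Illustrative write, assuming a platform with five watermark levels:
 *
 *   # echo "12 4 5 6 7" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 *
 * sscanf() must match exactly num_levels values or the write fails
 * with -EINVAL.
 */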
4613
4614
4615static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
4616				    size_t len, loff_t *offp)
4617{
4618	struct seq_file *m = file->private_data;
4619	struct drm_device *dev = m->private;
4620	struct drm_i915_private *dev_priv = dev->dev_private;
4621	uint16_t *latencies;
4622
4623	if (INTEL_INFO(dev)->gen >= 9)
4624		latencies = dev_priv->wm.skl_latency;
4625	else
4626		latencies = to_i915(dev)->wm.pri_latency;
4627
4628	return wm_latency_write(file, ubuf, len, offp, latencies);
4629}
4630
4631static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
4632				    size_t len, loff_t *offp)
4633{
4634	struct seq_file *m = file->private_data;
4635	struct drm_device *dev = m->private;
4636	struct drm_i915_private *dev_priv = dev->dev_private;
4637	uint16_t *latencies;
4638
4639	if (INTEL_INFO(dev)->gen >= 9)
4640		latencies = dev_priv->wm.skl_latency;
4641	else
4642		latencies = to_i915(dev)->wm.spr_latency;
4643
4644	return wm_latency_write(file, ubuf, len, offp, latencies);
4645}
4646
4647static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
4648				    size_t len, loff_t *offp)
4649{
4650	struct seq_file *m = file->private_data;
4651	struct drm_device *dev = m->private;
4652	struct drm_i915_private *dev_priv = dev->dev_private;
4653	uint16_t *latencies;
4654
4655	if (INTEL_INFO(dev)->gen >= 9)
4656		latencies = dev_priv->wm.skl_latency;
4657	else
4658		latencies = to_i915(dev)->wm.cur_latency;
4659
4660	return wm_latency_write(file, ubuf, len, offp, latencies);
4661}
4662
4663static const struct file_operations i915_pri_wm_latency_fops = {
4664	.owner = THIS_MODULE,
4665	.open = pri_wm_latency_open,
4666	.read = seq_read,
4667	.llseek = seq_lseek,
4668	.release = single_release,
4669	.write = pri_wm_latency_write
4670};
4671
4672static const struct file_operations i915_spr_wm_latency_fops = {
4673	.owner = THIS_MODULE,
4674	.open = spr_wm_latency_open,
4675	.read = seq_read,
4676	.llseek = seq_lseek,
4677	.release = single_release,
4678	.write = spr_wm_latency_write
4679};
4680
4681static const struct file_operations i915_cur_wm_latency_fops = {
4682	.owner = THIS_MODULE,
4683	.open = cur_wm_latency_open,
4684	.read = seq_read,
4685	.llseek = seq_lseek,
4686	.release = single_release,
4687	.write = cur_wm_latency_write
4688};
4689
4690static int
4691i915_wedged_get(void *data, u64 *val)
4692{
4693	struct drm_device *dev = data;
4694	struct drm_i915_private *dev_priv = dev->dev_private;
4695
4696	*val = atomic_read(&dev_priv->gpu_error.reset_counter);
4697
4698	return 0;
4699}
4700
4701static int
4702i915_wedged_set(void *data, u64 val)
4703{
4704	struct drm_device *dev = data;
4705	struct drm_i915_private *dev_priv = dev->dev_private;
4706
4707	/*
4708	 * There is no safeguard against this debugfs entry colliding
4709	 * with the hangcheck calling same i915_handle_error() in
4710	 * parallel, causing an explosion. For now we assume that the
4711	 * test harness is responsible enough not to inject gpu hangs
4712	 * while it is writing to 'i915_wedged'
4713	 */
4714
4715	if (i915_reset_in_progress(&dev_priv->gpu_error))
4716		return -EAGAIN;
4717
4718	intel_runtime_pm_get(dev_priv);
4719
4720	i915_handle_error(dev, val,
4721			  "Manually setting wedged to %llu", val);
4722
4723	intel_runtime_pm_put(dev_priv);
4724
4725	return 0;
4726}
4727
4728DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4729			i915_wedged_get, i915_wedged_set,
4730			"%llu\n");
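/*
 * Illustrative hang injection; any value written here is forwarded to
 * i915_handle_error():
 *
 *   # echo 1 > /sys/kernel/debug/dri/0/i915_wedged
 */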
4731
4732static int
4733i915_ring_stop_get(void *data, u64 *val)
4734{
4735	struct drm_device *dev = data;
4736	struct drm_i915_private *dev_priv = dev->dev_private;
4737
4738	*val = dev_priv->gpu_error.stop_rings;
4739
4740	return 0;
4741}
4742
4743static int
4744i915_ring_stop_set(void *data, u64 val)
4745{
4746	struct drm_device *dev = data;
4747	struct drm_i915_private *dev_priv = dev->dev_private;
4748	int ret;
4749
4750	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
4751
4752	ret = mutex_lock_interruptible(&dev->struct_mutex);
4753	if (ret)
4754		return ret;
4755
4756	dev_priv->gpu_error.stop_rings = val;
4757	mutex_unlock(&dev->struct_mutex);
4758
4759	return 0;
4760}
4761
4762DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
4763			i915_ring_stop_get, i915_ring_stop_set,
4764			"0x%08llx\n");
4765
4766static int
4767i915_ring_missed_irq_get(void *data, u64 *val)
4768{
4769	struct drm_device *dev = data;
4770	struct drm_i915_private *dev_priv = dev->dev_private;
4771
4772	*val = dev_priv->gpu_error.missed_irq_rings;
4773	return 0;
4774}
4775
4776static int
4777i915_ring_missed_irq_set(void *data, u64 val)
4778{
4779	struct drm_device *dev = data;
4780	struct drm_i915_private *dev_priv = dev->dev_private;
4781	int ret;
4782
4783	/* Lock against concurrent debugfs callers */
4784	ret = mutex_lock_interruptible(&dev->struct_mutex);
4785	if (ret)
4786		return ret;
4787	dev_priv->gpu_error.missed_irq_rings = val;
4788	mutex_unlock(&dev->struct_mutex);
4789
4790	return 0;
4791}
4792
4793DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4794			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4795			"0x%08llx\n");
4796
4797static int
4798i915_ring_test_irq_get(void *data, u64 *val)
4799{
4800	struct drm_device *dev = data;
4801	struct drm_i915_private *dev_priv = dev->dev_private;
4802
4803	*val = dev_priv->gpu_error.test_irq_rings;
4804
4805	return 0;
4806}
4807
4808static int
4809i915_ring_test_irq_set(void *data, u64 val)
4810{
4811	struct drm_device *dev = data;
4812	struct drm_i915_private *dev_priv = dev->dev_private;
4813	int ret;
4814
4815	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
4816
4817	/* Lock against concurrent debugfs callers */
4818	ret = mutex_lock_interruptible(&dev->struct_mutex);
4819	if (ret)
4820		return ret;
4821
4822	dev_priv->gpu_error.test_irq_rings = val;
4823	mutex_unlock(&dev->struct_mutex);
4824
4825	return 0;
4826}
4827
4828DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4829			i915_ring_test_irq_get, i915_ring_test_irq_set,
4830			"0x%08llx\n");
4831
4832#define DROP_UNBOUND 0x1
4833#define DROP_BOUND 0x2
4834#define DROP_RETIRE 0x4
4835#define DROP_ACTIVE 0x8
4836#define DROP_ALL (DROP_UNBOUND | \
4837		  DROP_BOUND | \
4838		  DROP_RETIRE | \
4839		  DROP_ACTIVE)
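/* e.g. "echo 0xf > i915_gem_drop_caches" requests DROP_ALL */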
4840static int
4841i915_drop_caches_get(void *data, u64 *val)
4842{
4843	*val = DROP_ALL;
4844
4845	return 0;
4846}
4847
4848static int
4849i915_drop_caches_set(void *data, u64 val)
4850{
4851	struct drm_device *dev = data;
4852	struct drm_i915_private *dev_priv = dev->dev_private;
4853	int ret;
4854
4855	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
4856
4857	/* No need to check and wait for gpu resets; libdrm only auto-restarts
4858	 * ioctls that fail with -EAGAIN. */
4859	ret = mutex_lock_interruptible(&dev->struct_mutex);
4860	if (ret)
4861		return ret;
4862
4863	if (val & DROP_ACTIVE) {
4864		ret = i915_gpu_idle(dev);
4865		if (ret)
4866			goto unlock;
4867	}
4868
4869	if (val & (DROP_RETIRE | DROP_ACTIVE))
4870		i915_gem_retire_requests(dev);
4871
4872	if (val & DROP_BOUND)
4873		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
4874
4875	if (val & DROP_UNBOUND)
4876		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
4877
4878unlock:
4879	mutex_unlock(&dev->struct_mutex);
4880
4881	return ret;
4882}
4883
4884DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4885			i915_drop_caches_get, i915_drop_caches_set,
4886			"0x%08llx\n");
4887
4888static int
4889i915_max_freq_get(void *data, u64 *val)
4890{
4891	struct drm_device *dev = data;
4892	struct drm_i915_private *dev_priv = dev->dev_private;
4893	int ret;
4894
4895	if (INTEL_INFO(dev)->gen < 6)
4896		return -ENODEV;
4897
4898	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4899
4900	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4901	if (ret)
4902		return ret;
4903
4904	*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
4905	mutex_unlock(&dev_priv->rps.hw_lock);
4906
4907	return 0;
4908}
4909
4910static int
4911i915_max_freq_set(void *data, u64 val)
4912{
4913	struct drm_device *dev = data;
4914	struct drm_i915_private *dev_priv = dev->dev_private;
4915	u32 hw_max, hw_min;
4916	int ret;
4917
4918	if (INTEL_INFO(dev)->gen < 6)
4919		return -ENODEV;
4920
4921	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4922
4923	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
4924
4925	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4926	if (ret)
4927		return ret;
4928
4929	/*
4930	 * Turbo will still be enabled, but won't go above the set value.
4931	 */
4932	val = intel_freq_opcode(dev_priv, val);
4933
4934	hw_max = dev_priv->rps.max_freq;
4935	hw_min = dev_priv->rps.min_freq;
4936
4937	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
4938		mutex_unlock(&dev_priv->rps.hw_lock);
4939		return -EINVAL;
4940	}
4941
4942	dev_priv->rps.max_freq_softlimit = val;
4943
4944	intel_set_rps(dev, val);
4945
4946	mutex_unlock(&dev_priv->rps.hw_lock);
4947
4948	return 0;
4949}
4950
4951DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
4952			i915_max_freq_get, i915_max_freq_set,
4953			"%llu\n");
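/*
 * Illustrative use: "echo 800 > i915_max_freq" caps turbo at 800MHz;
 * the value is converted to hardware units by intel_freq_opcode() and
 * rejected with -EINVAL if it falls outside the limits checked above.
 */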
4954
4955static int
4956i915_min_freq_get(void *data, u64 *val)
4957{
4958	struct drm_device *dev = data;
4959	struct drm_i915_private *dev_priv = dev->dev_private;
4960	int ret;
4961
4962	if (INTEL_INFO(dev)->gen < 6)
4963		return -ENODEV;
4964
4965	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4966
4967	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4968	if (ret)
4969		return ret;
4970
4971	*val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
4972	mutex_unlock(&dev_priv->rps.hw_lock);
4973
4974	return 0;
4975}
4976
4977static int
4978i915_min_freq_set(void *data, u64 val)
4979{
4980	struct drm_device *dev = data;
4981	struct drm_i915_private *dev_priv = dev->dev_private;
4982	u32 hw_max, hw_min;
4983	int ret;
4984
4985	if (INTEL_INFO(dev)->gen < 6)
4986		return -ENODEV;
4987
4988	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4989
4990	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
4991
4992	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4993	if (ret)
4994		return ret;
4995
4996	/*
4997	 * Turbo will still be enabled, but won't go below the set value.
4998	 */
4999	val = intel_freq_opcode(dev_priv, val);
5000
5001	hw_max = dev_priv->rps.max_freq;
5002	hw_min = dev_priv->rps.min_freq;
5003
5004	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
5005		mutex_unlock(&dev_priv->rps.hw_lock);
5006		return -EINVAL;
5007	}
5008
5009	dev_priv->rps.min_freq_softlimit = val;
5010
5011	intel_set_rps(dev, val);
5012
5013	mutex_unlock(&dev_priv->rps.hw_lock);
5014
5015	return 0;
5016}
5017
5018DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
5019			i915_min_freq_get, i915_min_freq_set,
5020			"%llu\n");
5021
5022static int
5023i915_cache_sharing_get(void *data, u64 *val)
5024{
5025	struct drm_device *dev = data;
5026	struct drm_i915_private *dev_priv = dev->dev_private;
5027	u32 snpcr;
5028	int ret;
5029
5030	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
5031		return -ENODEV;
5032
5033	ret = mutex_lock_interruptible(&dev->struct_mutex);
5034	if (ret)
5035		return ret;
5036	intel_runtime_pm_get(dev_priv);
5037
5038	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
5039
5040	intel_runtime_pm_put(dev_priv);
5041	mutex_unlock(&dev_priv->dev->struct_mutex);
5042
5043	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
5044
5045	return 0;
5046}
5047
5048static int
5049i915_cache_sharing_set(void *data, u64 val)
5050{
5051	struct drm_device *dev = data;
5052	struct drm_i915_private *dev_priv = dev->dev_private;
5053	u32 snpcr;
5054
5055	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
5056		return -ENODEV;
5057
5058	if (val > 3)
5059		return -EINVAL;
5060
5061	intel_runtime_pm_get(dev_priv);
5062	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
5063
5064	/* Update the cache sharing policy here as well */
5065	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
5066	snpcr &= ~GEN6_MBC_SNPCR_MASK;
5067	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
5068	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
5069
5070	intel_runtime_pm_put(dev_priv);
5071	return 0;
5072}
5073
5074DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
5075			i915_cache_sharing_get, i915_cache_sharing_set,
5076			"%llu\n");
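/*
 * Illustrative use: "echo 2 > i915_cache_sharing" programs the
 * GEN6_MBC_SNPCR field to 2; only values 0-3 are accepted.
 */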
5077
5078struct sseu_dev_status {
5079	unsigned int slice_total;
5080	unsigned int subslice_total;
5081	unsigned int subslice_per_slice;
5082	unsigned int eu_total;
5083	unsigned int eu_per_subslice;
5084};
5085
5086static void cherryview_sseu_device_status(struct drm_device *dev,
5087					  struct sseu_dev_status *stat)
5088{
5089	struct drm_i915_private *dev_priv = dev->dev_private;
5090	int ss_max = 2;
5091	int ss;
5092	u32 sig1[ss_max], sig2[ss_max];
5093
5094	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
5095	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
5096	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
5097	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
5098
5099	for (ss = 0; ss < ss_max; ss++) {
5100		unsigned int eu_cnt;
5101
5102		if (sig1[ss] & CHV_SS_PG_ENABLE)
5103			/* skip disabled subslice */
5104			continue;
5105
5106		stat->slice_total = 1;
5107		stat->subslice_per_slice++;
5108		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
5109			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
5110			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
5111			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
5112		stat->eu_total += eu_cnt;
5113		stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt);
5114	}
5115	stat->subslice_total = stat->subslice_per_slice;
5116}
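/*
 * Each CHV subslice signature exposes four 2-EU power gates, so a
 * fully enabled subslice contributes 8 EUs to eu_total above.
 */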
5117
5118static void gen9_sseu_device_status(struct drm_device *dev,
5119				    struct sseu_dev_status *stat)
5120{
5121	struct drm_i915_private *dev_priv = dev->dev_private;
5122	int s_max = 3, ss_max = 4;
5123	int s, ss;
5124	u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
5125
5126	/* BXT has a single slice and at most 3 subslices. */
5127	if (IS_BROXTON(dev)) {
5128		s_max = 1;
5129		ss_max = 3;
5130	}
5131
5132	for (s = 0; s < s_max; s++) {
5133		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
5134		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
5135		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
5136	}
5137
5138	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
5139		     GEN9_PGCTL_SSA_EU19_ACK |
5140		     GEN9_PGCTL_SSA_EU210_ACK |
5141		     GEN9_PGCTL_SSA_EU311_ACK;
5142	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
5143		     GEN9_PGCTL_SSB_EU19_ACK |
5144		     GEN9_PGCTL_SSB_EU210_ACK |
5145		     GEN9_PGCTL_SSB_EU311_ACK;
5146
5147	for (s = 0; s < s_max; s++) {
5148		unsigned int ss_cnt = 0;
5149
5150		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
5151			/* skip disabled slice */
5152			continue;
5153
5154		stat->slice_total++;
5155
5156		if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
5157			ss_cnt = INTEL_INFO(dev)->subslice_per_slice;
5158
5159		for (ss = 0; ss < ss_max; ss++) {
5160			unsigned int eu_cnt;
5161
5162			if (IS_BROXTON(dev) &&
5163			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
5164				/* skip disabled subslice */
5165				continue;
5166
5167			if (IS_BROXTON(dev))
5168				ss_cnt++;
5169
5170			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
5171					       eu_mask[ss%2]);
5172			stat->eu_total += eu_cnt;
5173			stat->eu_per_subslice = max(stat->eu_per_subslice,
5174						    eu_cnt);
5175		}
5176
5177		stat->subslice_total += ss_cnt;
5178		stat->subslice_per_slice = max(stat->subslice_per_slice,
5179					       ss_cnt);
5180	}
5181}
5182
5183static void broadwell_sseu_device_status(struct drm_device *dev,
5184					 struct sseu_dev_status *stat)
5185{
5186	struct drm_i915_private *dev_priv = dev->dev_private;
5187	int s;
5188	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
5189
5190	stat->slice_total = hweight32(slice_info & GEN8_LSLICESTAT_MASK);
5191
5192	if (stat->slice_total) {
5193		stat->subslice_per_slice = INTEL_INFO(dev)->subslice_per_slice;
5194		stat->subslice_total = stat->slice_total *
5195				       stat->subslice_per_slice;
5196		stat->eu_per_subslice = INTEL_INFO(dev)->eu_per_subslice;
5197		stat->eu_total = stat->eu_per_subslice * stat->subslice_total;
5198
5199		/* subtract fused off EU(s) from enabled slice(s) */
5200		for (s = 0; s < stat->slice_total; s++) {
5201			u8 subslice_7eu = INTEL_INFO(dev)->subslice_7eu[s];
5202
5203			stat->eu_total -= hweight8(subslice_7eu);
5204		}
5205	}
5206}
5207
5208static int i915_sseu_status(struct seq_file *m, void *unused)
5209{
5210	struct drm_info_node *node = (struct drm_info_node *) m->private;
5211	struct drm_device *dev = node->minor->dev;
5212	struct sseu_dev_status stat;
5213
5214	if (INTEL_INFO(dev)->gen < 8)
5215		return -ENODEV;
5216
5217	seq_puts(m, "SSEU Device Info\n");
5218	seq_printf(m, "  Available Slice Total: %u\n",
5219		   INTEL_INFO(dev)->slice_total);
5220	seq_printf(m, "  Available Subslice Total: %u\n",
5221		   INTEL_INFO(dev)->subslice_total);
5222	seq_printf(m, "  Available Subslice Per Slice: %u\n",
5223		   INTEL_INFO(dev)->subslice_per_slice);
5224	seq_printf(m, "  Available EU Total: %u\n",
5225		   INTEL_INFO(dev)->eu_total);
5226	seq_printf(m, "  Available EU Per Subslice: %u\n",
5227		   INTEL_INFO(dev)->eu_per_subslice);
5228	seq_printf(m, "  Has Slice Power Gating: %s\n",
5229		   yesno(INTEL_INFO(dev)->has_slice_pg));
5230	seq_printf(m, "  Has Subslice Power Gating: %s\n",
5231		   yesno(INTEL_INFO(dev)->has_subslice_pg));
5232	seq_printf(m, "  Has EU Power Gating: %s\n",
5233		   yesno(INTEL_INFO(dev)->has_eu_pg));
5234
5235	seq_puts(m, "SSEU Device Status\n");
5236	memset(&stat, 0, sizeof(stat));
5237	if (IS_CHERRYVIEW(dev)) {
5238		cherryview_sseu_device_status(dev, &stat);
5239	} else if (IS_BROADWELL(dev)) {
5240		broadwell_sseu_device_status(dev, &stat);
5241	} else if (INTEL_INFO(dev)->gen >= 9) {
5242		gen9_sseu_device_status(dev, &stat);
5243	}
5244	seq_printf(m, "  Enabled Slice Total: %u\n",
5245		   stat.slice_total);
5246	seq_printf(m, "  Enabled Subslice Total: %u\n",
5247		   stat.subslice_total);
5248	seq_printf(m, "  Enabled Subslice Per Slice: %u\n",
5249		   stat.subslice_per_slice);
5250	seq_printf(m, "  Enabled EU Total: %u\n",
5251		   stat.eu_total);
5252	seq_printf(m, "  Enabled EU Per Subslice: %u\n",
5253		   stat.eu_per_subslice);
5254
5255	return 0;
5256}
5257
5258static int i915_forcewake_open(struct inode *inode, struct file *file)
5259{
5260	struct drm_device *dev = inode->i_private;
5261	struct drm_i915_private *dev_priv = dev->dev_private;
5262
5263	if (INTEL_INFO(dev)->gen < 6)
5264		return 0;
5265
5266	intel_runtime_pm_get(dev_priv);
5267	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5268
5269	return 0;
5270}
5271
5272static int i915_forcewake_release(struct inode *inode, struct file *file)
5273{
5274	struct drm_device *dev = inode->i_private;
5275	struct drm_i915_private *dev_priv = dev->dev_private;
5276
5277	if (INTEL_INFO(dev)->gen < 6)
5278		return 0;
5279
5280	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5281	intel_runtime_pm_put(dev_priv);
5282
5283	return 0;
5284}
5285
5286static const struct file_operations i915_forcewake_fops = {
5287	.owner = THIS_MODULE,
5288	.open = i915_forcewake_open,
5289	.release = i915_forcewake_release,
5290};
5291
5292static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
5293{
5294	struct drm_device *dev = minor->dev;
5295	struct dentry *ent;
5296
5297	ent = debugfs_create_file("i915_forcewake_user",
5298				  S_IRUSR,
5299				  root, dev,
5300				  &i915_forcewake_fops);
5301	if (!ent)
5302		return -ENOMEM;
5303
5304	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
5305}
5306
5307static int i915_debugfs_create(struct dentry *root,
5308			       struct drm_minor *minor,
5309			       const char *name,
5310			       const struct file_operations *fops)
5311{
5312	struct drm_device *dev = minor->dev;
5313	struct dentry *ent;
5314
5315	ent = debugfs_create_file(name,
5316				  S_IRUGO | S_IWUSR,
5317				  root, dev,
5318				  fops);
5319	if (!ent)
5320		return -ENOMEM;
5321
5322	return drm_add_fake_info_node(minor, ent, fops);
5323}
5324
5325static const struct drm_info_list i915_debugfs_list[] = {
5326	{"i915_capabilities", i915_capabilities, 0},
5327	{"i915_gem_objects", i915_gem_object_info, 0},
5328	{"i915_gem_gtt", i915_gem_gtt_info, 0},
5329	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
5330	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
5331	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
5332	{"i915_gem_stolen", i915_gem_stolen_list_info },
5333	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
5334	{"i915_gem_request", i915_gem_request_info, 0},
5335	{"i915_gem_seqno", i915_gem_seqno_info, 0},
5336	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
5337	{"i915_gem_interrupt", i915_interrupt_info, 0},
5338	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
5339	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
5340	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
5341	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
5342	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
5343	{"i915_guc_info", i915_guc_info, 0},
5344	{"i915_guc_load_status", i915_guc_load_status_info, 0},
5345	{"i915_guc_log_dump", i915_guc_log_dump, 0},
5346	{"i915_frequency_info", i915_frequency_info, 0},
5347	{"i915_hangcheck_info", i915_hangcheck_info, 0},
5348	{"i915_drpc_info", i915_drpc_info, 0},
5349	{"i915_emon_status", i915_emon_status, 0},
5350	{"i915_ring_freq_table", i915_ring_freq_table, 0},
5351	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
5352	{"i915_fbc_status", i915_fbc_status, 0},
5353	{"i915_ips_status", i915_ips_status, 0},
5354	{"i915_sr_status", i915_sr_status, 0},
5355	{"i915_opregion", i915_opregion, 0},
5356	{"i915_vbt", i915_vbt, 0},
5357	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
5358	{"i915_context_status", i915_context_status, 0},
5359	{"i915_dump_lrc", i915_dump_lrc, 0},
5360	{"i915_execlists", i915_execlists, 0},
5361	{"i915_forcewake_domains", i915_forcewake_domains, 0},
5362	{"i915_swizzle_info", i915_swizzle_info, 0},
5363	{"i915_ppgtt_info", i915_ppgtt_info, 0},
5364	{"i915_llc", i915_llc, 0},
5365	{"i915_edp_psr_status", i915_edp_psr_status, 0},
5366	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
5367	{"i915_energy_uJ", i915_energy_uJ, 0},
5368	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
5369	{"i915_power_domain_info", i915_power_domain_info, 0},
5370	{"i915_dmc_info", i915_dmc_info, 0},
5371	{"i915_display_info", i915_display_info, 0},
5372	{"i915_semaphore_status", i915_semaphore_status, 0},
5373	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
5374	{"i915_dp_mst_info", i915_dp_mst_info, 0},
5375	{"i915_wa_registers", i915_wa_registers, 0},
5376	{"i915_ddb_info", i915_ddb_info, 0},
5377	{"i915_sseu_status", i915_sseu_status, 0},
5378	{"i915_drrs_status", i915_drrs_status, 0},
5379	{"i915_rps_boost_info", i915_rps_boost_info, 0},
5380};
5381#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
5382
5383static const struct i915_debugfs_files {
5384	const char *name;
5385	const struct file_operations *fops;
5386} i915_debugfs_files[] = {
5387	{"i915_wedged", &i915_wedged_fops},
5388	{"i915_max_freq", &i915_max_freq_fops},
5389	{"i915_min_freq", &i915_min_freq_fops},
5390	{"i915_cache_sharing", &i915_cache_sharing_fops},
5391	{"i915_ring_stop", &i915_ring_stop_fops},
5392	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
5393	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
5394	{"i915_gem_drop_caches", &i915_drop_caches_fops},
5395	{"i915_error_state", &i915_error_state_fops},
5396	{"i915_next_seqno", &i915_next_seqno_fops},
5397	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
5398	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
5399	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
5400	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
5401	{"i915_fbc_false_color", &i915_fbc_fc_fops},
5402	{"i915_dp_test_data", &i915_displayport_test_data_fops},
5403	{"i915_dp_test_type", &i915_displayport_test_type_fops},
5404	{"i915_dp_test_active", &i915_displayport_test_active_fops}
5405};
5406
5407void intel_display_crc_init(struct drm_device *dev)
5408{
5409	struct drm_i915_private *dev_priv = dev->dev_private;
5410	enum pipe pipe;
5411
5412	for_each_pipe(dev_priv, pipe) {
5413		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
5414
5415		pipe_crc->opened = false;
5416		spin_lock_init(&pipe_crc->lock);
5417		init_waitqueue_head(&pipe_crc->wq);
5418	}
5419}
5420
5421int i915_debugfs_init(struct drm_minor *minor)
5422{
5423	int ret, i;
5424
5425	ret = i915_forcewake_create(minor->debugfs_root, minor);
5426	if (ret)
5427		return ret;
5428
5429	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
5430		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
5431		if (ret)
5432			return ret;
5433	}
5434
5435	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
5436		ret = i915_debugfs_create(minor->debugfs_root, minor,
5437					  i915_debugfs_files[i].name,
5438					  i915_debugfs_files[i].fops);
5439		if (ret)
5440			return ret;
5441	}
5442
5443	return drm_debugfs_create_files(i915_debugfs_list,
5444					I915_DEBUGFS_ENTRIES,
5445					minor->debugfs_root, minor);
5446}
5447
5448void i915_debugfs_cleanup(struct drm_minor *minor)
5449{
5450	int i;
5451
5452	drm_debugfs_remove_files(i915_debugfs_list,
5453				 I915_DEBUGFS_ENTRIES, minor);
5454
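	/*
	 * The casts below work because drm_debugfs_remove_files() matches
	 * nodes by their info_ent pointer: the forcewake and pipe CRC files
	 * were registered through drm_add_fake_info_node() with exactly
	 * these pointers stored as that key, so handing the same pointer
	 * back as a one-entry "list" is enough to locate and free them.
	 */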
5455	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
5456				 1, minor);
5457
5458	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
5459		struct drm_info_list *info_list =
5460			(struct drm_info_list *)&i915_pipe_crc_data[i];
5461
5462		drm_debugfs_remove_files(info_list, 1, minor);
5463	}
5464
5465	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
5466		struct drm_info_list *info_list =
5467			(struct drm_info_list *) i915_debugfs_files[i].fops;
5468
5469		drm_debugfs_remove_files(info_list, 1, minor);
5470	}
5471}
5472
5473struct dpcd_block {
5474	/* DPCD dump start address. */
5475	unsigned int offset;
5476	/* DPCD dump end address, inclusive. If unset, .size will be used. */
5477	unsigned int end;
5478	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
5479	size_t size;
5480	/* Only valid for eDP. */
5481	bool edp;
5482};
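/*
 * Worked example of the rules above: { .offset = DP_SET_POWER } dumps a
 * single byte, while { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS } dumps
 * the inclusive range DP_PSR_SUPPORT..DP_PSR_CAPS, i.e.
 * DP_PSR_CAPS - DP_PSR_SUPPORT + 1 bytes (see the size computation in
 * i915_dpcd_show() below).
 */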
5483
5484static const struct dpcd_block i915_dpcd_debug[] = {
5485	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
5486	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
5487	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
5488	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
5489	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
5490	{ .offset = DP_SET_POWER },
5491	{ .offset = DP_EDP_DPCD_REV },
5492	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
5493	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
5494	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
5495};
5496
5497static int i915_dpcd_show(struct seq_file *m, void *data)
5498{
5499	struct drm_connector *connector = m->private;
5500	struct intel_dp *intel_dp =
5501		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
5502	uint8_t buf[16];
5503	ssize_t err;
5504	int i;
5505
5506	if (connector->status != connector_status_connected)
5507		return -ENODEV;
5508
5509	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
5510		const struct dpcd_block *b = &i915_dpcd_debug[i];
5511		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
5512
5513		if (b->edp &&
5514		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
5515			continue;
5516
5517		/* low tech for now */
5518		if (WARN_ON(size > sizeof(buf)))
5519			continue;
5520
5521		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
5522		if (err <= 0) {
5523			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
5524				  size, b->offset, err);
5525			continue;
5526		}
5527
5528		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
5529	}
5530
5531	return 0;
5532}
5533
5534static int i915_dpcd_open(struct inode *inode, struct file *file)
5535{
5536	return single_open(file, i915_dpcd_show, inode->i_private);
5537}
5538
5539static const struct file_operations i915_dpcd_fops = {
5540	.owner = THIS_MODULE,
5541	.open = i915_dpcd_open,
5542	.read = seq_read,
5543	.llseek = seq_lseek,
5544	.release = single_release,
5545};
5546
5547/**
5548 * i915_debugfs_connector_add - add i915 specific connector debugfs files
5549 * @connector: pointer to a registered drm_connector
5550 *
5551 * Cleanup will be done by drm_connector_unregister() through a call to
5552 * drm_debugfs_connector_remove().
5553 *
5554 * Returns 0 on success, negative error codes on error.
5555 */
5556int i915_debugfs_connector_add(struct drm_connector *connector)
5557{
5558	struct dentry *root = connector->debugfs_entry;
5559
5560	/* The connector must have been registered beforehand. */
5561	if (!root)
5562		return -ENODEV;
5563
5564	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5565	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5566		debugfs_create_file("i915_dpcd", S_IRUGO, root, connector,
5567				    &i915_dpcd_fops);
5568
5569	return 0;
5570}
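/*
 * Minimal usage sketch (hypothetical caller; failure treated as non-fatal).
 * In the driver this is typically invoked from the connector registration
 * path, once connector->debugfs_entry has been populated:
 *
 *	if (i915_debugfs_connector_add(connector))
 *		DRM_DEBUG_KMS("no debugfs entries for %s\n", connector->name);
 */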
v5.4
  29#include <linux/sched/mm.h>
  30#include <linux/sort.h>
  31
  32#include <drm/drm_debugfs.h>
  33#include <drm/drm_fourcc.h>
  34
  35#include "display/intel_display_types.h"
  36#include "display/intel_dp.h"
  37#include "display/intel_fbc.h"
  38#include "display/intel_hdcp.h"
  39#include "display/intel_hdmi.h"
  40#include "display/intel_psr.h"
  41
  42#include "gem/i915_gem_context.h"
  43#include "gt/intel_gt_pm.h"
  44#include "gt/intel_reset.h"
  45#include "gt/uc/intel_guc_submission.h"
  46
  47#include "i915_debugfs.h"
  48#include "i915_irq.h"
  49#include "i915_trace.h"
  50#include "intel_csr.h"
  51#include "intel_pm.h"
  52#include "intel_sideband.h"
  53
  54static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
  55{
  56	return to_i915(node->minor->dev);
  57}
  58
  59static int i915_capabilities(struct seq_file *m, void *data)
  60{
  61	struct drm_i915_private *dev_priv = node_to_i915(m->private);
  62	const struct intel_device_info *info = INTEL_INFO(dev_priv);
  63	struct drm_printer p = drm_seq_file_printer(m);
  64
  65	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
  66	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
  67	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
  68
  69	intel_device_info_dump_flags(info, &p);
  70	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
  71	intel_driver_caps_print(&dev_priv->caps, &p);
  72
  73	kernel_param_lock(THIS_MODULE);
  74	i915_params_dump(&i915_modparams, &p);
  75	kernel_param_unlock(THIS_MODULE);
  76
  77	return 0;
  78}
  79
  80static char get_pin_flag(struct drm_i915_gem_object *obj)
  81{
  82	return obj->pin_global ? 'p' : ' ';
  83}
  84
  85static char get_tiling_flag(struct drm_i915_gem_object *obj)
  86{
  87	switch (i915_gem_object_get_tiling(obj)) {
  88	default:
  89	case I915_TILING_NONE: return ' ';
  90	case I915_TILING_X: return 'X';
  91	case I915_TILING_Y: return 'Y';
  92	}
  93}
  94
  95static char get_global_flag(struct drm_i915_gem_object *obj)
  96{
  97	return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
  98}
  99
 100static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
 101{
 102	return obj->mm.mapping ? 'M' : ' ';
 103}
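/*
 * Together the four helpers above form the flag legend printed by
 * describe_obj(): 'p' globally pinned, 'X'/'Y' tiling mode, 'g' an
 * outstanding GGTT userfault, 'M' a kernel mapping of the pages; a space
 * means the corresponding flag is clear.
 */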
 104
 105static const char *
 106stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
 107{
 108	size_t x = 0;
 109
 110	switch (page_sizes) {
 111	case 0:
 112		return "";
 113	case I915_GTT_PAGE_SIZE_4K:
 114		return "4K";
 115	case I915_GTT_PAGE_SIZE_64K:
 116		return "64K";
 117	case I915_GTT_PAGE_SIZE_2M:
 118		return "2M";
 119	default:
 120		if (!buf)
 121			return "M";
 122
 123		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
 124			x += snprintf(buf + x, len - x, "2M, ");
 125		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
 126			x += snprintf(buf + x, len - x, "64K, ");
 127		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
 128			x += snprintf(buf + x, len - x, "4K, ");
 129		buf[x-2] = '\0';
 130
 131		return buf;
 132	}
 133}
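/*
 * Example: a mixed mask of 2M | 4K with a caller-supplied buffer yields
 * "2M, 4K" (the buf[x-2] write above trims the trailing ", "); with
 * buf == NULL any mixed mask collapses to the summary string "M".
 */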
 134
 135static void
 136describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 137{
 138	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 139	struct intel_engine_cs *engine;
 140	struct i915_vma *vma;
 141	int pin_count = 0;
 142
 143	seq_printf(m, "%pK: %c%c%c%c %8zdKiB %02x %02x %s%s%s",
 144		   &obj->base,
 145		   get_pin_flag(obj),
 146		   get_tiling_flag(obj),
 147		   get_global_flag(obj),
 148		   get_pin_mapped_flag(obj),
 149		   obj->base.size / 1024,
 150		   obj->read_domains,
 151		   obj->write_domain,
 152		   i915_cache_level_str(dev_priv, obj->cache_level),
 153		   obj->mm.dirty ? " dirty" : "",
 154		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
 155	if (obj->base.name)
 156		seq_printf(m, " (name: %d)", obj->base.name);
 157
 158	spin_lock(&obj->vma.lock);
 159	list_for_each_entry(vma, &obj->vma.list, obj_link) {
 160		if (!drm_mm_node_allocated(&vma->node))
 161			continue;
 162
 163		spin_unlock(&obj->vma.lock);
 164
 165		if (i915_vma_is_pinned(vma))
 166			pin_count++;
 167
 168		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
 169			   i915_vma_is_ggtt(vma) ? "g" : "pp",
 170			   vma->node.start, vma->node.size,
 171			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
 172		if (i915_vma_is_ggtt(vma)) {
 173			switch (vma->ggtt_view.type) {
 174			case I915_GGTT_VIEW_NORMAL:
 175				seq_puts(m, ", normal");
 176				break;
 177
 178			case I915_GGTT_VIEW_PARTIAL:
 179				seq_printf(m, ", partial [%08llx+%x]",
 180					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
 181					   vma->ggtt_view.partial.size << PAGE_SHIFT);
 182				break;
 183
 184			case I915_GGTT_VIEW_ROTATED:
 185				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
 186					   vma->ggtt_view.rotated.plane[0].width,
 187					   vma->ggtt_view.rotated.plane[0].height,
 188					   vma->ggtt_view.rotated.plane[0].stride,
 189					   vma->ggtt_view.rotated.plane[0].offset,
 190					   vma->ggtt_view.rotated.plane[1].width,
 191					   vma->ggtt_view.rotated.plane[1].height,
 192					   vma->ggtt_view.rotated.plane[1].stride,
 193					   vma->ggtt_view.rotated.plane[1].offset);
 194				break;
 195
 196			case I915_GGTT_VIEW_REMAPPED:
 197				seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
 198					   vma->ggtt_view.remapped.plane[0].width,
 199					   vma->ggtt_view.remapped.plane[0].height,
 200					   vma->ggtt_view.remapped.plane[0].stride,
 201					   vma->ggtt_view.remapped.plane[0].offset,
 202					   vma->ggtt_view.remapped.plane[1].width,
 203					   vma->ggtt_view.remapped.plane[1].height,
 204					   vma->ggtt_view.remapped.plane[1].stride,
 205					   vma->ggtt_view.remapped.plane[1].offset);
 206				break;
 207
 208			default:
 209				MISSING_CASE(vma->ggtt_view.type);
 210				break;
 211			}
 212		}
 213		if (vma->fence)
 214			seq_printf(m, " , fence: %d", vma->fence->id);
 215		seq_puts(m, ")");
 216
 217		spin_lock(&obj->vma.lock);
 218	}
 219	spin_unlock(&obj->vma.lock);
 220
 221	seq_printf(m, " (pinned x %d)", pin_count);
 222	if (obj->stolen)
 223		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
 224	if (obj->pin_global)
 225		seq_printf(m, " (global)");
 226
 227	engine = i915_gem_object_last_write_engine(obj);
 228	if (engine)
 229		seq_printf(m, " (%s)", engine->name);
 230}
 231
 232struct file_stats {
 233	struct i915_address_space *vm;
 234	unsigned long count;
 235	u64 total, unbound;
 236	u64 active, inactive;
 237	u64 closed;
 238};
 239
 240static int per_file_stats(int id, void *ptr, void *data)
 241{
 242	struct drm_i915_gem_object *obj = ptr;
 243	struct file_stats *stats = data;
 244	struct i915_vma *vma;
 245
 246	stats->count++;
 247	stats->total += obj->base.size;
 248	if (!atomic_read(&obj->bind_count))
 249		stats->unbound += obj->base.size;
 250
 251	spin_lock(&obj->vma.lock);
 252	if (!stats->vm) {
 253		for_each_ggtt_vma(vma, obj) {
 254			if (!drm_mm_node_allocated(&vma->node))
 255				continue;
 256
 257			if (i915_vma_is_active(vma))
 258				stats->active += vma->node.size;
 259			else
 260				stats->inactive += vma->node.size;
 261
 262			if (i915_vma_is_closed(vma))
 263				stats->closed += vma->node.size;
 264		}
 265	} else {
 266		struct rb_node *p = obj->vma.tree.rb_node;
 267
 268		while (p) {
 269			long cmp;
 270
 271			vma = rb_entry(p, typeof(*vma), obj_node);
 272			cmp = i915_vma_compare(vma, stats->vm, NULL);
 273			if (cmp == 0) {
 274				if (drm_mm_node_allocated(&vma->node)) {
 275					if (i915_vma_is_active(vma))
 276						stats->active += vma->node.size;
 277					else
 278						stats->inactive += vma->node.size;
 279
 280					if (i915_vma_is_closed(vma))
 281						stats->closed += vma->node.size;
 282				}
 283				break;
 284			}
 285			if (cmp < 0)
 286				p = p->rb_right;
 287			else
 288				p = p->rb_left;
 289		}
 290	}
 291	spin_unlock(&obj->vma.lock);
 292
 293	return 0;
 294}
 295
 296#define print_file_stats(m, name, stats) do { \
 297	if (stats.count) \
 298		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
 299			   name, \
 300			   stats.count, \
 301			   stats.total, \
 302			   stats.active, \
 303			   stats.inactive, \
 304			   stats.unbound, \
 305			   stats.closed); \
 306} while (0)
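/*
 * The do { ... } while (0) wrapper makes the macro expand to a single
 * statement, so a bare "if (cond) print_file_stats(m, name, stats);"
 * parses as intended.
 */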
 307
 308static void print_context_stats(struct seq_file *m,
 309				struct drm_i915_private *i915)
 310{
 311	struct file_stats kstats = {};
 312	struct i915_gem_context *ctx;
 313
 314	list_for_each_entry(ctx, &i915->contexts.list, link) {
 315		struct i915_gem_engines_iter it;
 316		struct intel_context *ce;
 317
 318		for_each_gem_engine(ce,
 319				    i915_gem_context_lock_engines(ctx), it) {
 320			intel_context_lock_pinned(ce);
 321			if (intel_context_is_pinned(ce)) {
 322				if (ce->state)
 323					per_file_stats(0,
 324						       ce->state->obj, &kstats);
 325				per_file_stats(0, ce->ring->vma->obj, &kstats);
 326			}
 327			intel_context_unlock_pinned(ce);
 328		}
 329		i915_gem_context_unlock_engines(ctx);
 330
 331		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
 332			struct file_stats stats = { .vm = ctx->vm, };
 333			struct drm_file *file = ctx->file_priv->file;
 334			struct task_struct *task;
 335			char name[80];
 336
 337			spin_lock(&file->table_lock);
 338			idr_for_each(&file->object_idr, per_file_stats, &stats);
 339			spin_unlock(&file->table_lock);
 340
 341			rcu_read_lock();
 342			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
 343			snprintf(name, sizeof(name), "%s",
 344				 task ? task->comm : "<unknown>");
 345			rcu_read_unlock();
 346
 347			print_file_stats(m, name, stats);
 348		}
 349	}
 350
 351	print_file_stats(m, "[k]contexts", kstats);
 352}
 353
 354static int i915_gem_object_info(struct seq_file *m, void *data)
 355{
 356	struct drm_i915_private *i915 = node_to_i915(m->private);
 357	int ret;
 358
 359	seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
 360		   i915->mm.shrink_count,
 361		   atomic_read(&i915->mm.free_count),
 362		   i915->mm.shrink_memory);
 363
 364	seq_putc(m, '\n');
 365
 366	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
 367	if (ret)
 368		return ret;
 369
 370	print_context_stats(m, i915);
 371	mutex_unlock(&i915->drm.struct_mutex);
 372
 373	return 0;
 374}
 375
 376static void gen8_display_interrupt_info(struct seq_file *m)
 377{
 378	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 379	int pipe;
 380
 381	for_each_pipe(dev_priv, pipe) {
 382		enum intel_display_power_domain power_domain;
 383		intel_wakeref_t wakeref;
 384
 385		power_domain = POWER_DOMAIN_PIPE(pipe);
 386		wakeref = intel_display_power_get_if_enabled(dev_priv,
 387							     power_domain);
 388		if (!wakeref) {
 389			seq_printf(m, "Pipe %c power disabled\n",
 390				   pipe_name(pipe));
 391			continue;
 392		}
 393		seq_printf(m, "Pipe %c IMR:\t%08x\n",
 394			   pipe_name(pipe),
 395			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
 396		seq_printf(m, "Pipe %c IIR:\t%08x\n",
 397			   pipe_name(pipe),
 398			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
 399		seq_printf(m, "Pipe %c IER:\t%08x\n",
 400			   pipe_name(pipe),
 401			   I915_READ(GEN8_DE_PIPE_IER(pipe)));
 402
 403		intel_display_power_put(dev_priv, power_domain, wakeref);
 404	}
 405
 406	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
 407		   I915_READ(GEN8_DE_PORT_IMR));
 408	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
 409		   I915_READ(GEN8_DE_PORT_IIR));
 410	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
 411		   I915_READ(GEN8_DE_PORT_IER));
 412
 413	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
 414		   I915_READ(GEN8_DE_MISC_IMR));
 415	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
 416		   I915_READ(GEN8_DE_MISC_IIR));
 417	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
 418		   I915_READ(GEN8_DE_MISC_IER));
 419
 420	seq_printf(m, "PCU interrupt mask:\t%08x\n",
 421		   I915_READ(GEN8_PCU_IMR));
 422	seq_printf(m, "PCU interrupt identity:\t%08x\n",
 423		   I915_READ(GEN8_PCU_IIR));
 424	seq_printf(m, "PCU interrupt enable:\t%08x\n",
 425		   I915_READ(GEN8_PCU_IER));
 426}
 427
 428static int i915_interrupt_info(struct seq_file *m, void *data)
 429{
 430	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 431	struct intel_engine_cs *engine;
 432	intel_wakeref_t wakeref;
 433	int i, pipe;
 434
 435	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 436
 437	if (IS_CHERRYVIEW(dev_priv)) {
 438		intel_wakeref_t pref;
 439
 440		seq_printf(m, "Master Interrupt Control:\t%08x\n",
 441			   I915_READ(GEN8_MASTER_IRQ));
 442
 443		seq_printf(m, "Display IER:\t%08x\n",
 444			   I915_READ(VLV_IER));
 445		seq_printf(m, "Display IIR:\t%08x\n",
 446			   I915_READ(VLV_IIR));
 447		seq_printf(m, "Display IIR_RW:\t%08x\n",
 448			   I915_READ(VLV_IIR_RW));
 449		seq_printf(m, "Display IMR:\t%08x\n",
 450			   I915_READ(VLV_IMR));
 451		for_each_pipe(dev_priv, pipe) {
 452			enum intel_display_power_domain power_domain;
 453
 454			power_domain = POWER_DOMAIN_PIPE(pipe);
 455			pref = intel_display_power_get_if_enabled(dev_priv,
 456								  power_domain);
 457			if (!pref) {
 458				seq_printf(m, "Pipe %c power disabled\n",
 459					   pipe_name(pipe));
 460				continue;
 461			}
 462
 463			seq_printf(m, "Pipe %c stat:\t%08x\n",
 464				   pipe_name(pipe),
 465				   I915_READ(PIPESTAT(pipe)));
 466
 467			intel_display_power_put(dev_priv, power_domain, pref);
 468		}
 469
 470		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 471		seq_printf(m, "Port hotplug:\t%08x\n",
 472			   I915_READ(PORT_HOTPLUG_EN));
 473		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
 474			   I915_READ(VLV_DPFLIPSTAT));
 475		seq_printf(m, "DPINVGTT:\t%08x\n",
 476			   I915_READ(DPINVGTT));
 477		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
 478
 479		for (i = 0; i < 4; i++) {
 480			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
 481				   i, I915_READ(GEN8_GT_IMR(i)));
 482			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
 483				   i, I915_READ(GEN8_GT_IIR(i)));
 484			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
 485				   i, I915_READ(GEN8_GT_IER(i)));
 486		}
 487
 488		seq_printf(m, "PCU interrupt mask:\t%08x\n",
 489			   I915_READ(GEN8_PCU_IMR));
 490		seq_printf(m, "PCU interrupt identity:\t%08x\n",
 491			   I915_READ(GEN8_PCU_IIR));
 492		seq_printf(m, "PCU interrupt enable:\t%08x\n",
 493			   I915_READ(GEN8_PCU_IER));
 494	} else if (INTEL_GEN(dev_priv) >= 11) {
 495		seq_printf(m, "Master Interrupt Control:  %08x\n",
 496			   I915_READ(GEN11_GFX_MSTR_IRQ));
 497
 498		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
 499			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
 500		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
 501			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
 502		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
 503			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
 504		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
 505			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
 506		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
 507			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
 508		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
 509			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
 510
 511		seq_printf(m, "Display Interrupt Control:\t%08x\n",
 512			   I915_READ(GEN11_DISPLAY_INT_CTL));
 513
 514		gen8_display_interrupt_info(m);
 515	} else if (INTEL_GEN(dev_priv) >= 8) {
 516		seq_printf(m, "Master Interrupt Control:\t%08x\n",
 517			   I915_READ(GEN8_MASTER_IRQ));
 518
 519		for (i = 0; i < 4; i++) {
 520			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
 521				   i, I915_READ(GEN8_GT_IMR(i)));
 522			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
 523				   i, I915_READ(GEN8_GT_IIR(i)));
 524			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
 525				   i, I915_READ(GEN8_GT_IER(i)));
 526		}
 527
 528		gen8_display_interrupt_info(m);
 529	} else if (IS_VALLEYVIEW(dev_priv)) {
 530		seq_printf(m, "Display IER:\t%08x\n",
 531			   I915_READ(VLV_IER));
 532		seq_printf(m, "Display IIR:\t%08x\n",
 533			   I915_READ(VLV_IIR));
 534		seq_printf(m, "Display IIR_RW:\t%08x\n",
 535			   I915_READ(VLV_IIR_RW));
 536		seq_printf(m, "Display IMR:\t%08x\n",
 537			   I915_READ(VLV_IMR));
 538		for_each_pipe(dev_priv, pipe) {
 539			enum intel_display_power_domain power_domain;
 540			intel_wakeref_t pref;
 541
 542			power_domain = POWER_DOMAIN_PIPE(pipe);
 543			pref = intel_display_power_get_if_enabled(dev_priv,
 544								  power_domain);
 545			if (!pref) {
 546				seq_printf(m, "Pipe %c power disabled\n",
 547					   pipe_name(pipe));
 548				continue;
 549			}
 550
 551			seq_printf(m, "Pipe %c stat:\t%08x\n",
 552				   pipe_name(pipe),
 553				   I915_READ(PIPESTAT(pipe)));
 554			intel_display_power_put(dev_priv, power_domain, pref);
 555		}
 556
 557		seq_printf(m, "Master IER:\t%08x\n",
 558			   I915_READ(VLV_MASTER_IER));
 559
 560		seq_printf(m, "Render IER:\t%08x\n",
 561			   I915_READ(GTIER));
 562		seq_printf(m, "Render IIR:\t%08x\n",
 563			   I915_READ(GTIIR));
 564		seq_printf(m, "Render IMR:\t%08x\n",
 565			   I915_READ(GTIMR));
 566
 567		seq_printf(m, "PM IER:\t\t%08x\n",
 568			   I915_READ(GEN6_PMIER));
 569		seq_printf(m, "PM IIR:\t\t%08x\n",
 570			   I915_READ(GEN6_PMIIR));
 571		seq_printf(m, "PM IMR:\t\t%08x\n",
 572			   I915_READ(GEN6_PMIMR));
 573
 574		seq_printf(m, "Port hotplug:\t%08x\n",
 575			   I915_READ(PORT_HOTPLUG_EN));
 576		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
 577			   I915_READ(VLV_DPFLIPSTAT));
 578		seq_printf(m, "DPINVGTT:\t%08x\n",
 579			   I915_READ(DPINVGTT));
 580
 581	} else if (!HAS_PCH_SPLIT(dev_priv)) {
 582		seq_printf(m, "Interrupt enable:    %08x\n",
 583			   I915_READ(GEN2_IER));
 584		seq_printf(m, "Interrupt identity:  %08x\n",
 585			   I915_READ(GEN2_IIR));
 586		seq_printf(m, "Interrupt mask:      %08x\n",
 587			   I915_READ(GEN2_IMR));
 588		for_each_pipe(dev_priv, pipe)
 589			seq_printf(m, "Pipe %c stat:         %08x\n",
 590				   pipe_name(pipe),
 591				   I915_READ(PIPESTAT(pipe)));
 592	} else {
 593		seq_printf(m, "North Display Interrupt enable:		%08x\n",
 594			   I915_READ(DEIER));
 595		seq_printf(m, "North Display Interrupt identity:	%08x\n",
 596			   I915_READ(DEIIR));
 597		seq_printf(m, "North Display Interrupt mask:		%08x\n",
 598			   I915_READ(DEIMR));
 599		seq_printf(m, "South Display Interrupt enable:		%08x\n",
 600			   I915_READ(SDEIER));
 601		seq_printf(m, "South Display Interrupt identity:	%08x\n",
 602			   I915_READ(SDEIIR));
 603		seq_printf(m, "South Display Interrupt mask:		%08x\n",
 604			   I915_READ(SDEIMR));
 605		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
 606			   I915_READ(GTIER));
 607		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
 608			   I915_READ(GTIIR));
 609		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
 610			   I915_READ(GTIMR));
 611	}
 612
 613	if (INTEL_GEN(dev_priv) >= 11) {
 614		seq_printf(m, "RCS Intr Mask:\t %08x\n",
 615			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
 616		seq_printf(m, "BCS Intr Mask:\t %08x\n",
 617			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
 618		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
 619			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
 620		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
 621			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
 622		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
 623			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
 624		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
 625			   I915_READ(GEN11_GUC_SG_INTR_MASK));
 626		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
 627			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
 628		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
 629			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
 630		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
 631			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
 632
 633	} else if (INTEL_GEN(dev_priv) >= 6) {
 634		for_each_uabi_engine(engine, dev_priv) {
 635			seq_printf(m,
 636				   "Graphics Interrupt mask (%s):	%08x\n",
 637				   engine->name, ENGINE_READ(engine, RING_IMR));
 638		}
 639	}
 640
 641	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 642
 643	return 0;
 644}
 645
 646static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 647{
 648	struct drm_i915_private *i915 = node_to_i915(m->private);
 649	unsigned int i;
 650
 651	seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);
 652
 653	rcu_read_lock();
 654	for (i = 0; i < i915->ggtt.num_fences; i++) {
 655		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
 656		struct i915_vma *vma = reg->vma;
 657
 658		seq_printf(m, "Fence %d, pin count = %d, object = ",
 659			   i, atomic_read(&reg->pin_count));
 660		if (!vma)
 661			seq_puts(m, "unused");
 662		else
 663			describe_obj(m, vma->obj);
 664		seq_putc(m, '\n');
 665	}
 666	rcu_read_unlock();
 667
 668	return 0;
 669}
 670
 671#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
 672static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
 673			      size_t count, loff_t *pos)
 674{
 675	struct i915_gpu_state *error;
 676	ssize_t ret;
 677	void *buf;
 678
 679	error = file->private_data;
 680	if (!error)
 681		return 0;
 682
 683	/* Bounce buffer required because of kernfs __user API convenience. */
 684	buf = kmalloc(count, GFP_KERNEL);
 685	if (!buf)
 686		return -ENOMEM;
 687
 688	ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
 689	if (ret <= 0)
 690		goto out;
 691
 692	if (!copy_to_user(ubuf, buf, ret))
 693		*pos += ret;
 694	else
 695		ret = -EFAULT;
 696
 697out:
 698	kfree(buf);
 699	return ret;
 700}
 701
 702static int gpu_state_release(struct inode *inode, struct file *file)
 703{
 704	i915_gpu_state_put(file->private_data);
 705	return 0;
 706}
 707
 708static int i915_gpu_info_open(struct inode *inode, struct file *file)
 709{
 710	struct drm_i915_private *i915 = inode->i_private;
 711	struct i915_gpu_state *gpu;
 712	intel_wakeref_t wakeref;
 713
 714	gpu = NULL;
 715	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
 716		gpu = i915_capture_gpu_state(i915);
 717	if (IS_ERR(gpu))
 718		return PTR_ERR(gpu);
 719
 720	file->private_data = gpu;
 721	return 0;
 722}
 723
 724static const struct file_operations i915_gpu_info_fops = {
 725	.owner = THIS_MODULE,
 726	.open = i915_gpu_info_open,
 727	.read = gpu_state_read,
 728	.llseek = default_llseek,
 729	.release = gpu_state_release,
 730};
 731
 732static ssize_t
 733i915_error_state_write(struct file *filp,
 734		       const char __user *ubuf,
 735		       size_t cnt,
 736		       loff_t *ppos)
 737{
 738	struct i915_gpu_state *error = filp->private_data;
 739
 740	if (!error)
 741		return 0;
 742
 743	DRM_DEBUG_DRIVER("Resetting error state\n");
 744	i915_reset_error_state(error->i915);
 745
 746	return cnt;
 747}
 748
 749static int i915_error_state_open(struct inode *inode, struct file *file)
 750{
 751	struct i915_gpu_state *error;
 752
 753	error = i915_first_error_state(inode->i_private);
 754	if (IS_ERR(error))
 755		return PTR_ERR(error);
 756
 757	file->private_data  = error;
 758	return 0;
 759}
 760
 761static const struct file_operations i915_error_state_fops = {
 762	.owner = THIS_MODULE,
 763	.open = i915_error_state_open,
 764	.read = gpu_state_read,
 765	.write = i915_error_state_write,
 766	.llseek = default_llseek,
 767	.release = gpu_state_release,
 768};
 769#endif
 770
 771static int i915_frequency_info(struct seq_file *m, void *unused)
 772{
 773	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 774	struct intel_uncore *uncore = &dev_priv->uncore;
 775	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 776	intel_wakeref_t wakeref;
 777	int ret = 0;
 778
 779	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 780
 781	if (IS_GEN(dev_priv, 5)) {
 782		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
 783		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
 784
 785		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
 786		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
 787		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
 788			   MEMSTAT_VID_SHIFT);
 789		seq_printf(m, "Current P-state: %d\n",
 790			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
 791	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 792		u32 rpmodectl, freq_sts;
 793
 794		rpmodectl = I915_READ(GEN6_RP_CONTROL);
 795		seq_printf(m, "Video Turbo Mode: %s\n",
 796			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
 797		seq_printf(m, "HW control enabled: %s\n",
 798			   yesno(rpmodectl & GEN6_RP_ENABLE));
 799		seq_printf(m, "SW control enabled: %s\n",
 800			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
 801				  GEN6_RP_MEDIA_SW_MODE));
 802
 803		vlv_punit_get(dev_priv);
 804		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 805		vlv_punit_put(dev_priv);
 806
 807		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
 808		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
 809
 810		seq_printf(m, "actual GPU freq: %d MHz\n",
 811			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
 812
 813		seq_printf(m, "current GPU freq: %d MHz\n",
 814			   intel_gpu_freq(dev_priv, rps->cur_freq));
 815
 816		seq_printf(m, "max GPU freq: %d MHz\n",
 817			   intel_gpu_freq(dev_priv, rps->max_freq));
 818
 819		seq_printf(m, "min GPU freq: %d MHz\n",
 820			   intel_gpu_freq(dev_priv, rps->min_freq));
 821
 822		seq_printf(m, "idle GPU freq: %d MHz\n",
 823			   intel_gpu_freq(dev_priv, rps->idle_freq));
 824
 825		seq_printf(m,
 826			   "efficient (RPe) frequency: %d MHz\n",
 827			   intel_gpu_freq(dev_priv, rps->efficient_freq));
 828	} else if (INTEL_GEN(dev_priv) >= 6) {
 829		u32 rp_state_limits;
 830		u32 gt_perf_status;
 831		u32 rp_state_cap;
 832		u32 rpmodectl, rpinclimit, rpdeclimit;
 833		u32 rpstat, cagf, reqf;
 834		u32 rpupei, rpcurup, rpprevup;
 835		u32 rpdownei, rpcurdown, rpprevdown;
 836		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
 837		int max_freq;
 838
 839		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
 840		if (IS_GEN9_LP(dev_priv)) {
 841			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
 842			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
 843		} else {
 844			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 845			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 846		}
 847
 848		/* RPSTAT1 is in the GT power well */
 849		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
 850
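		/*
		 * GEN6_RPNSWREQ encodes the requested frequency ratio at a
		 * generation-dependent bit offset, hence the different
		 * shifts below before the ratio is converted to MHz.
		 */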
 851		reqf = I915_READ(GEN6_RPNSWREQ);
 852		if (INTEL_GEN(dev_priv) >= 9)
 853			reqf >>= 23;
 854		else {
 855			reqf &= ~GEN6_TURBO_DISABLE;
 856			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 857				reqf >>= 24;
 858			else
 859				reqf >>= 25;
 860		}
 861		reqf = intel_gpu_freq(dev_priv, reqf);
 862
 863		rpmodectl = I915_READ(GEN6_RP_CONTROL);
 864		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
 865		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
 866
 867		rpstat = I915_READ(GEN6_RPSTAT1);
 868		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
 869		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
 870		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
 871		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
 872		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
 873		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
 874		cagf = intel_gpu_freq(dev_priv,
 875				      intel_get_cagf(dev_priv, rpstat));
 876
 877		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 878
 879		if (INTEL_GEN(dev_priv) >= 11) {
 880			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
 881			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
 882			/*
 883			 * The equivalent to the PM ISR & IIR cannot be read
 884			 * without affecting the current state of the system
 885			 */
 886			pm_isr = 0;
 887			pm_iir = 0;
 888		} else if (INTEL_GEN(dev_priv) >= 8) {
 889			pm_ier = I915_READ(GEN8_GT_IER(2));
 890			pm_imr = I915_READ(GEN8_GT_IMR(2));
 891			pm_isr = I915_READ(GEN8_GT_ISR(2));
 892			pm_iir = I915_READ(GEN8_GT_IIR(2));
 893		} else {
 894			pm_ier = I915_READ(GEN6_PMIER);
 895			pm_imr = I915_READ(GEN6_PMIMR);
 896			pm_isr = I915_READ(GEN6_PMISR);
 897			pm_iir = I915_READ(GEN6_PMIIR);
 898		}
 899		pm_mask = I915_READ(GEN6_PMINTRMSK);
 900
 901		seq_printf(m, "Video Turbo Mode: %s\n",
 902			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
 903		seq_printf(m, "HW control enabled: %s\n",
 904			   yesno(rpmodectl & GEN6_RP_ENABLE));
 905		seq_printf(m, "SW control enabled: %s\n",
 906			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
 907				  GEN6_RP_MEDIA_SW_MODE));
 908
 909		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
 910			   pm_ier, pm_imr, pm_mask);
 911		if (INTEL_GEN(dev_priv) <= 10)
 912			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
 913				   pm_isr, pm_iir);
 914		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
 915			   rps->pm_intrmsk_mbz);
 916		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
 917		seq_printf(m, "Render p-state ratio: %d\n",
 918			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
 919		seq_printf(m, "Render p-state VID: %d\n",
 920			   gt_perf_status & 0xff);
 921		seq_printf(m, "Render p-state limit: %d\n",
 922			   rp_state_limits & 0xff);
 923		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
 924		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
 925		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
 926		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
 927		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
 928		seq_printf(m, "CAGF: %dMHz\n", cagf);
 929		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
 930			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
 931		seq_printf(m, "RP CUR UP: %d (%dus)\n",
 932			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
 933		seq_printf(m, "RP PREV UP: %d (%dus)\n",
 934			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
 935		seq_printf(m, "Up threshold: %d%%\n",
 936			   rps->power.up_threshold);
 937
 938		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
 939			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
 940		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
 941			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
 942		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
 943			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
 944		seq_printf(m, "Down threshold: %d%%\n",
 945			   rps->power.down_threshold);
 946
 947		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
 948			    rp_state_cap >> 16) & 0xff;
 949		max_freq *= (IS_GEN9_BC(dev_priv) ||
 950			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
 951		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
 952			   intel_gpu_freq(dev_priv, max_freq));
 953
 954		max_freq = (rp_state_cap & 0xff00) >> 8;
 955		max_freq *= (IS_GEN9_BC(dev_priv) ||
 956			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
 957		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
 958			   intel_gpu_freq(dev_priv, max_freq));
 959
 960		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
 961			    rp_state_cap >> 0) & 0xff;
 962		max_freq *= (IS_GEN9_BC(dev_priv) ||
 963			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
 964		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 965			   intel_gpu_freq(dev_priv, max_freq));
 966		seq_printf(m, "Max overclocked frequency: %dMHz\n",
 967			   intel_gpu_freq(dev_priv, rps->max_freq));
 968
 969		seq_printf(m, "Current freq: %d MHz\n",
 970			   intel_gpu_freq(dev_priv, rps->cur_freq));
 971		seq_printf(m, "Actual freq: %d MHz\n", cagf);
 972		seq_printf(m, "Idle freq: %d MHz\n",
 973			   intel_gpu_freq(dev_priv, rps->idle_freq));
 974		seq_printf(m, "Min freq: %d MHz\n",
 975			   intel_gpu_freq(dev_priv, rps->min_freq));
 976		seq_printf(m, "Boost freq: %d MHz\n",
 977			   intel_gpu_freq(dev_priv, rps->boost_freq));
 978		seq_printf(m, "Max freq: %d MHz\n",
 979			   intel_gpu_freq(dev_priv, rps->max_freq));
 980		seq_printf(m,
 981			   "efficient (RPe) frequency: %d MHz\n",
 982			   intel_gpu_freq(dev_priv, rps->efficient_freq));
 983	} else {
 984		seq_puts(m, "no P-state info available\n");
 985	}
 986
 987	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
 988	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
 989	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
 990
 991	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 992	return ret;
 993}
 994
 995static void i915_instdone_info(struct drm_i915_private *dev_priv,
 996			       struct seq_file *m,
 997			       struct intel_instdone *instdone)
 998{
 999	int slice;
1000	int subslice;
1001
1002	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1003		   instdone->instdone);
1004
1005	if (INTEL_GEN(dev_priv) <= 3)
1006		return;
1007
1008	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1009		   instdone->slice_common);
1010
1011	if (INTEL_GEN(dev_priv) <= 6)
1012		return;
1013
1014	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1015		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1016			   slice, subslice, instdone->sampler[slice][subslice]);
1017
1018	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1019		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1020			   slice, subslice, instdone->row[slice][subslice]);
1021}
1022
1023static int i915_hangcheck_info(struct seq_file *m, void *unused)
1024{
1025	struct drm_i915_private *i915 = node_to_i915(m->private);
1026	struct intel_gt *gt = &i915->gt;
1027	struct intel_engine_cs *engine;
1028	intel_wakeref_t wakeref;
1029	enum intel_engine_id id;
1030
1031	seq_printf(m, "Reset flags: %lx\n", gt->reset.flags);
1032	if (test_bit(I915_WEDGED, &gt->reset.flags))
1033		seq_puts(m, "\tWedged\n");
1034	if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
1035		seq_puts(m, "\tDevice (global) reset in progress\n");
1036
1037	if (!i915_modparams.enable_hangcheck) {
1038		seq_puts(m, "Hangcheck disabled\n");
1039		return 0;
1040	}
1041
1042	if (timer_pending(&gt->hangcheck.work.timer))
1043		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
1044			   jiffies_to_msecs(gt->hangcheck.work.timer.expires -
1045					    jiffies));
1046	else if (delayed_work_pending(&gt->hangcheck.work))
1047		seq_puts(m, "Hangcheck active, work pending\n");
1048	else
1049		seq_puts(m, "Hangcheck inactive\n");
1050
1051	seq_printf(m, "GT active? %s\n", yesno(gt->awake));
1052
1053	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
1054		for_each_engine(engine, i915, id) {
1055			struct intel_instdone instdone;
1056
1057			seq_printf(m, "%s: %d ms ago\n",
1058				   engine->name,
1059				   jiffies_to_msecs(jiffies -
1060						    engine->hangcheck.action_timestamp));
1061
1062			seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
1063				   (long long)engine->hangcheck.acthd,
1064				   intel_engine_get_active_head(engine));
1065
1066			intel_engine_get_instdone(engine, &instdone);
1067
1068			seq_puts(m, "\tinstdone read =\n");
1069			i915_instdone_info(i915, m, &instdone);
1070
1071			seq_puts(m, "\tinstdone accu =\n");
1072			i915_instdone_info(i915, m,
1073					   &engine->hangcheck.instdone);
1074		}
1075	}
1076
1077	return 0;
1078}
1079
1080static int ironlake_drpc_info(struct seq_file *m)
1081{
1082	struct drm_i915_private *i915 = node_to_i915(m->private);
1083	struct intel_uncore *uncore = &i915->uncore;
1084	u32 rgvmodectl, rstdbyctl;
1085	u16 crstandvid;
1086
1087	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
1088	rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
1089	crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
1090
1091	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1092	seq_printf(m, "Boost freq: %d\n",
1093		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1094		   MEMMODE_BOOST_FREQ_SHIFT);
1095	seq_printf(m, "HW control enabled: %s\n",
1096		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1097	seq_printf(m, "SW control enabled: %s\n",
1098		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1099	seq_printf(m, "Gated voltage change: %s\n",
1100		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1101	seq_printf(m, "Starting frequency: P%d\n",
1102		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1103	seq_printf(m, "Max P-state: P%d\n",
1104		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1105	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1106	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1107	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1108	seq_printf(m, "Render standby enabled: %s\n",
1109		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
1110	seq_puts(m, "Current RS state: ");
1111	switch (rstdbyctl & RSX_STATUS_MASK) {
1112	case RSX_STATUS_ON:
1113		seq_puts(m, "on\n");
1114		break;
1115	case RSX_STATUS_RC1:
1116		seq_puts(m, "RC1\n");
1117		break;
1118	case RSX_STATUS_RC1E:
1119		seq_puts(m, "RC1E\n");
1120		break;
1121	case RSX_STATUS_RS1:
1122		seq_puts(m, "RS1\n");
1123		break;
1124	case RSX_STATUS_RS2:
1125		seq_puts(m, "RS2 (RC6)\n");
1126		break;
1127	case RSX_STATUS_RS3:
1128		seq_puts(m, "RC3 (RC6+)\n");
1129		break;
1130	default:
1131		seq_puts(m, "unknown\n");
1132		break;
1133	}
1134
1135	return 0;
1136}
1137
1138static int i915_forcewake_domains(struct seq_file *m, void *data)
1139{
1140	struct drm_i915_private *i915 = node_to_i915(m->private);
1141	struct intel_uncore *uncore = &i915->uncore;
1142	struct intel_uncore_forcewake_domain *fw_domain;
1143	unsigned int tmp;
1144
1145	seq_printf(m, "user.bypass_count = %u\n",
1146		   uncore->user_forcewake_count);
1147
1148	for_each_fw_domain(fw_domain, uncore, tmp)
1149		seq_printf(m, "%s.wake_count = %u\n",
1150			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
1151			   READ_ONCE(fw_domain->wake_count));
1152
1153	return 0;
1154}
1155
1156static void print_rc6_res(struct seq_file *m,
1157			  const char *title,
1158			  const i915_reg_t reg)
1159{
1160	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1161
1162	seq_printf(m, "%s %u (%llu us)\n",
1163		   title, I915_READ(reg),
1164		   intel_rc6_residency_us(dev_priv, reg));
1165}
1166
1167static int vlv_drpc_info(struct seq_file *m)
1168{
1169	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1170	u32 rcctl1, pw_status;
1171
1172	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1173	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1174
1175	seq_printf(m, "RC6 Enabled: %s\n",
1176		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1177					GEN6_RC_CTL_EI_MODE(1))));
1178	seq_printf(m, "Render Power Well: %s\n",
1179		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1180	seq_printf(m, "Media Power Well: %s\n",
1181		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1182
1183	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1184	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
1185
1186	return i915_forcewake_domains(m, NULL);
1187}
1188
1189static int gen6_drpc_info(struct seq_file *m)
1190{
1191	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1192	u32 gt_core_status, rcctl1, rc6vids = 0;
1193	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
1194
1195	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
1196	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1197
1198	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1199	if (INTEL_GEN(dev_priv) >= 9) {
1200		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1201		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1202	}
1203
1204	if (INTEL_GEN(dev_priv) <= 7)
1205		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
1206				       &rc6vids, NULL);
1207
1208	seq_printf(m, "RC1e Enabled: %s\n",
1209		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1210	seq_printf(m, "RC6 Enabled: %s\n",
1211		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1212	if (INTEL_GEN(dev_priv) >= 9) {
1213		seq_printf(m, "Render Well Gating Enabled: %s\n",
1214			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1215		seq_printf(m, "Media Well Gating Enabled: %s\n",
1216			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1217	}
1218	seq_printf(m, "Deep RC6 Enabled: %s\n",
1219		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1220	seq_printf(m, "Deepest RC6 Enabled: %s\n",
1221		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1222	seq_puts(m, "Current RC state: ");
1223	switch (gt_core_status & GEN6_RCn_MASK) {
1224	case GEN6_RC0:
1225		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1226			seq_puts(m, "Core Power Down\n");
1227		else
1228			seq_puts(m, "on\n");
1229		break;
1230	case GEN6_RC3:
1231		seq_puts(m, "RC3\n");
1232		break;
1233	case GEN6_RC6:
1234		seq_puts(m, "RC6\n");
1235		break;
1236	case GEN6_RC7:
1237		seq_puts(m, "RC7\n");
1238		break;
1239	default:
1240		seq_puts(m, "Unknown\n");
1241		break;
1242	}
1243
1244	seq_printf(m, "Core Power Down: %s\n",
1245		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1246	if (INTEL_GEN(dev_priv) >= 9) {
1247		seq_printf(m, "Render Power Well: %s\n",
1248			(gen9_powergate_status &
1249			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1250		seq_printf(m, "Media Power Well: %s\n",
1251			(gen9_powergate_status &
1252			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1253	}
1254
1255	/* Not exactly sure what this is */
1256	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1257		      GEN6_GT_GFX_RC6_LOCKED);
1258	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1259	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1260	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
1261
1262	if (INTEL_GEN(dev_priv) <= 7) {
1263		seq_printf(m, "RC6   voltage: %dmV\n",
1264			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1265		seq_printf(m, "RC6+  voltage: %dmV\n",
1266			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1267		seq_printf(m, "RC6++ voltage: %dmV\n",
1268			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1269	}
1270
1271	return i915_forcewake_domains(m, NULL);
1272}
1273
1274static int i915_drpc_info(struct seq_file *m, void *unused)
1275{
1276	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1277	intel_wakeref_t wakeref;
1278	int err = -ENODEV;
1279
1280	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1281		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1282			err = vlv_drpc_info(m);
1283		else if (INTEL_GEN(dev_priv) >= 6)
1284			err = gen6_drpc_info(m);
1285		else
1286			err = ironlake_drpc_info(m);
1287	}
1288
1289	return err;
1290}
1291
1292static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1293{
1294	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1295
1296	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1297		   dev_priv->fb_tracking.busy_bits);
1298
1299	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1300		   dev_priv->fb_tracking.flip_bits);
1301
1302	return 0;
1303}
1304
1305static int i915_fbc_status(struct seq_file *m, void *unused)
1306{
1307	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1308	struct intel_fbc *fbc = &dev_priv->fbc;
1309	intel_wakeref_t wakeref;
1310
1311	if (!HAS_FBC(dev_priv))
1312		return -ENODEV;
1313
1314	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1315	mutex_lock(&fbc->lock);
1316
1317	if (intel_fbc_is_active(dev_priv))
1318		seq_puts(m, "FBC enabled\n");
1319	else
1320		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1321
1322	if (intel_fbc_is_active(dev_priv)) {
1323		u32 mask;
1324
1325		if (INTEL_GEN(dev_priv) >= 8)
1326			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1327		else if (INTEL_GEN(dev_priv) >= 7)
1328			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1329		else if (INTEL_GEN(dev_priv) >= 5)
1330			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1331		else if (IS_G4X(dev_priv))
1332			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1333		else
1334			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1335							FBC_STAT_COMPRESSED);
1336
1337		seq_printf(m, "Compressing: %s\n", yesno(mask));
1338	}
1339
1340	mutex_unlock(&fbc->lock);
1341	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1342
1343	return 0;
1344}
1345
1346static int i915_fbc_false_color_get(void *data, u64 *val)
1347{
1348	struct drm_i915_private *dev_priv = data;
1349
1350	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1351		return -ENODEV;
1352
1353	*val = dev_priv->fbc.false_color;
1354
1355	return 0;
1356}
1357
1358static int i915_fbc_false_color_set(void *data, u64 val)
1359{
1360	struct drm_i915_private *dev_priv = data;
1361	u32 reg;
1362
1363	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1364		return -ENODEV;
1365
1366	mutex_lock(&dev_priv->fbc.lock);
1367
1368	reg = I915_READ(ILK_DPFC_CONTROL);
1369	dev_priv->fbc.false_color = val;
1370
1371	I915_WRITE(ILK_DPFC_CONTROL, val ?
1372		   (reg | FBC_CTL_FALSE_COLOR) :
1373		   (reg & ~FBC_CTL_FALSE_COLOR));
1374
1375	mutex_unlock(&dev_priv->fbc.lock);
1376	return 0;
1377}
1378
1379DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1380			i915_fbc_false_color_get, i915_fbc_false_color_set,
1381			"%llu\n");
1382
1383static int i915_ips_status(struct seq_file *m, void *unused)
1384{
1385	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1386	intel_wakeref_t wakeref;
1387
1388	if (!HAS_IPS(dev_priv))
1389		return -ENODEV;
1390
1391	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1392
1393	seq_printf(m, "Enabled by kernel parameter: %s\n",
1394		   yesno(i915_modparams.enable_ips));
1395
1396	if (INTEL_GEN(dev_priv) >= 8) {
1397		seq_puts(m, "Currently: unknown\n");
1398	} else {
1399		if (I915_READ(IPS_CTL) & IPS_ENABLE)
1400			seq_puts(m, "Currently: enabled\n");
1401		else
1402			seq_puts(m, "Currently: disabled\n");
1403	}
1404
1405	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1406
1407	return 0;
1408}
1409
1410static int i915_sr_status(struct seq_file *m, void *unused)
1411{
1412	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1413	intel_wakeref_t wakeref;
1414	bool sr_enabled = false;
1415
1416	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1417
1418	if (INTEL_GEN(dev_priv) >= 9)
1419		/* no global SR status; inspect per-plane WM */;
1420	else if (HAS_PCH_SPLIT(dev_priv))
1421		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1422	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
1423		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
1424		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1425	else if (IS_I915GM(dev_priv))
1426		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1427	else if (IS_PINEVIEW(dev_priv))
1428		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1429	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1430		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1431
1432	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
1433
1434	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
1435
1436	return 0;
1437}
1438
1439static int i915_ring_freq_table(struct seq_file *m, void *unused)
1440{
1441	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1442	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1443	unsigned int max_gpu_freq, min_gpu_freq;
1444	intel_wakeref_t wakeref;
1445	int gpu_freq, ia_freq;
1446
1447	if (!HAS_LLC(dev_priv))
1448		return -ENODEV;
1449
1450	min_gpu_freq = rps->min_freq;
1451	max_gpu_freq = rps->max_freq;
1452	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
 1453		/* Convert GT frequency to 50 MHz units */
1454		min_gpu_freq /= GEN9_FREQ_SCALER;
1455		max_gpu_freq /= GEN9_FREQ_SCALER;
1456	}
1457
1458	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1459
1460	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1461	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1462		ia_freq = gpu_freq;
1463		sandybridge_pcode_read(dev_priv,
1464				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
1465				       &ia_freq, NULL);
1466		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1467			   intel_gpu_freq(dev_priv, (gpu_freq *
1468						     (IS_GEN9_BC(dev_priv) ||
1469						      INTEL_GEN(dev_priv) >= 10 ?
1470						      GEN9_FREQ_SCALER : 1))),
1471			   ((ia_freq >> 0) & 0xff) * 100,
1472			   ((ia_freq >> 8) & 0xff) * 100);
1473	}
1474	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1475
1476	return 0;
1477}
1478
1479static int i915_opregion(struct seq_file *m, void *unused)
1480{
1481	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1482	struct drm_device *dev = &dev_priv->drm;
1483	struct intel_opregion *opregion = &dev_priv->opregion;
1484	int ret;
1485
1486	ret = mutex_lock_interruptible(&dev->struct_mutex);
1487	if (ret)
1488		goto out;
1489
1490	if (opregion->header)
1491		seq_write(m, opregion->header, OPREGION_SIZE);
1492
1493	mutex_unlock(&dev->struct_mutex);
1494
1495out:
1496	return 0;
1497}
1498
1499static int i915_vbt(struct seq_file *m, void *unused)
1500{
1501	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1502
1503	if (opregion->vbt)
1504		seq_write(m, opregion->vbt, opregion->vbt_size);
1505
1506	return 0;
1507}
1508
1509static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1510{
1511	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1512	struct drm_device *dev = &dev_priv->drm;
1513	struct intel_framebuffer *fbdev_fb = NULL;
1514	struct drm_framebuffer *drm_fb;
1515	int ret;
1516
1517	ret = mutex_lock_interruptible(&dev->struct_mutex);
1518	if (ret)
1519		return ret;
1520
1521#ifdef CONFIG_DRM_FBDEV_EMULATION
1522	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
1523		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
1524
1525		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1526			   fbdev_fb->base.width,
1527			   fbdev_fb->base.height,
1528			   fbdev_fb->base.format->depth,
1529			   fbdev_fb->base.format->cpp[0] * 8,
1530			   fbdev_fb->base.modifier,
1531			   drm_framebuffer_read_refcount(&fbdev_fb->base));
1532		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
1533		seq_putc(m, '\n');
1534	}
1535#endif
1536
1537	mutex_lock(&dev->mode_config.fb_lock);
1538	drm_for_each_fb(drm_fb, dev) {
1539		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1540		if (fb == fbdev_fb)
1541			continue;
1542
1543		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1544			   fb->base.width,
1545			   fb->base.height,
1546			   fb->base.format->depth,
1547			   fb->base.format->cpp[0] * 8,
1548			   fb->base.modifier,
1549			   drm_framebuffer_read_refcount(&fb->base));
1550		describe_obj(m, intel_fb_obj(&fb->base));
1551		seq_putc(m, '\n');
1552	}
1553	mutex_unlock(&dev->mode_config.fb_lock);
1554	mutex_unlock(&dev->struct_mutex);
1555
1556	return 0;
1557}
1558
1559static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1560{
1561	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1562		   ring->space, ring->head, ring->tail, ring->emit);
1563}
1564
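/*
 * Walks every user context, printing its HW id, owning task and, for
 * each engine on which the context is currently pinned, the backing
 * state object and ringbuffer occupancy via describe_ctx_ring().
 */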
1565static int i915_context_status(struct seq_file *m, void *unused)
1566{
1567	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1568	struct drm_device *dev = &dev_priv->drm;
1569	struct i915_gem_context *ctx;
1570	int ret;
1571
1572	ret = mutex_lock_interruptible(&dev->struct_mutex);
1573	if (ret)
1574		return ret;
1575
1576	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
1577		struct i915_gem_engines_iter it;
1578		struct intel_context *ce;
1579
1580		seq_puts(m, "HW context ");
1581		if (!list_empty(&ctx->hw_id_link))
1582			seq_printf(m, "%x [pin %u]", ctx->hw_id,
1583				   atomic_read(&ctx->hw_id_pin_count));
1584		if (ctx->pid) {
1585			struct task_struct *task;
1586
1587			task = get_pid_task(ctx->pid, PIDTYPE_PID);
1588			if (task) {
1589				seq_printf(m, "(%s [%d]) ",
1590					   task->comm, task->pid);
1591				put_task_struct(task);
1592			}
1593		} else if (IS_ERR(ctx->file_priv)) {
1594			seq_puts(m, "(deleted) ");
1595		} else {
1596			seq_puts(m, "(kernel) ");
1597		}
1598
1599		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1600		seq_putc(m, '\n');
1601
1602		for_each_gem_engine(ce,
1603				    i915_gem_context_lock_engines(ctx), it) {
1604			intel_context_lock_pinned(ce);
1605			if (intel_context_is_pinned(ce)) {
1606				seq_printf(m, "%s: ", ce->engine->name);
1607				if (ce->state)
1608					describe_obj(m, ce->state->obj);
1609				describe_ctx_ring(m, ce->ring);
1610				seq_putc(m, '\n');
1611			}
1612			intel_context_unlock_pinned(ce);
1613		}
1614		i915_gem_context_unlock_engines(ctx);
1615
1616		seq_putc(m, '\n');
1617	}
1618
1619	mutex_unlock(&dev->struct_mutex);
1620
1621	return 0;
1622}
1623
1624static const char *swizzle_string(unsigned swizzle)
1625{
1626	switch (swizzle) {
1627	case I915_BIT_6_SWIZZLE_NONE:
1628		return "none";
1629	case I915_BIT_6_SWIZZLE_9:
1630		return "bit9";
1631	case I915_BIT_6_SWIZZLE_9_10:
1632		return "bit9/bit10";
1633	case I915_BIT_6_SWIZZLE_9_11:
1634		return "bit9/bit11";
1635	case I915_BIT_6_SWIZZLE_9_10_11:
1636		return "bit9/bit10/bit11";
1637	case I915_BIT_6_SWIZZLE_9_17:
1638		return "bit9/bit17";
1639	case I915_BIT_6_SWIZZLE_9_10_17:
1640		return "bit9/bit10/bit17";
1641	case I915_BIT_6_SWIZZLE_UNKNOWN:
1642		return "unknown";
1643	}
1644
1645	return "bug";
1646}
1647
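/*
 * Bit-6 swizzling XORs address bit 6 of tiled surfaces with higher
 * address bits (9/10/11/17, per the modes above), depending on how the
 * memory controller interleaves DRAM. The detected X/Y swizzle modes
 * are printed together with the raw DRAM configuration registers they
 * were derived from (DCC/C0DRB3 on gen3/4, MAD_DIMM/TILECTL/ARB_MODE
 * on gen6+).
 */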
1648static int i915_swizzle_info(struct seq_file *m, void *data)
1649{
1650	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1651	struct intel_uncore *uncore = &dev_priv->uncore;
1652	intel_wakeref_t wakeref;
1653
1654	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1655
1656	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1657		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
1658	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1659		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
1660
1661	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
1662		seq_printf(m, "DDC = 0x%08x\n",
1663			   intel_uncore_read(uncore, DCC));
1664		seq_printf(m, "DDC2 = 0x%08x\n",
1665			   intel_uncore_read(uncore, DCC2));
1666		seq_printf(m, "C0DRB3 = 0x%04x\n",
1667			   intel_uncore_read16(uncore, C0DRB3));
1668		seq_printf(m, "C1DRB3 = 0x%04x\n",
1669			   intel_uncore_read16(uncore, C1DRB3));
1670	} else if (INTEL_GEN(dev_priv) >= 6) {
1671		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1672			   intel_uncore_read(uncore, MAD_DIMM_C0));
1673		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1674			   intel_uncore_read(uncore, MAD_DIMM_C1));
1675		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1676			   intel_uncore_read(uncore, MAD_DIMM_C2));
1677		seq_printf(m, "TILECTL = 0x%08x\n",
1678			   intel_uncore_read(uncore, TILECTL));
1679		if (INTEL_GEN(dev_priv) >= 8)
1680			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1681				   intel_uncore_read(uncore, GAMTARBMODE));
1682		else
1683			seq_printf(m, "ARB_MODE = 0x%08x\n",
1684				   intel_uncore_read(uncore, ARB_MODE));
1685		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1686			   intel_uncore_read(uncore, DISP_ARB_CTL));
1687	}
1688
1689	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
1690		seq_puts(m, "L-shaped memory detected\n");
1691
1692	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1693
1694	return 0;
1695}
1696
1697static const char *rps_power_to_str(unsigned int power)
1698{
1699	static const char * const strings[] = {
1700		[LOW_POWER] = "low power",
1701		[BETWEEN] = "mixed",
1702		[HIGH_POWER] = "high power",
1703	};
1704
1705	if (power >= ARRAY_SIZE(strings) || !strings[power])
1706		return "unknown";
1707
1708	return strings[power];
1709}
1710
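/*
 * RPS (Render P-States) snapshot: requested vs. actual frequency, the
 * soft/hard limits, and the autotuning evaluation-interval (EI)
 * counters. The "Avg. up"/"Avg. down" figures below are roughly
 * 100 * busy / total EI cycles, reported next to the up/down
 * thresholds that drive frequency changes.
 */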
1711static int i915_rps_boost_info(struct seq_file *m, void *data)
1712{
1713	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1714	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1715	u32 act_freq = rps->cur_freq;
1716	intel_wakeref_t wakeref;
1717
1718	with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, wakeref) {
1719		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1720			vlv_punit_get(dev_priv);
1721			act_freq = vlv_punit_read(dev_priv,
1722						  PUNIT_REG_GPU_FREQ_STS);
1723			vlv_punit_put(dev_priv);
1724			act_freq = (act_freq >> 8) & 0xff;
1725		} else {
1726			act_freq = intel_get_cagf(dev_priv,
1727						  I915_READ(GEN6_RPSTAT1));
1728		}
1729	}
1730
1731	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
1732	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
1733	seq_printf(m, "Boosts outstanding? %d\n",
1734		   atomic_read(&rps->num_waiters));
1735	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
1736	seq_printf(m, "Frequency requested %d, actual %d\n",
1737		   intel_gpu_freq(dev_priv, rps->cur_freq),
1738		   intel_gpu_freq(dev_priv, act_freq));
1739	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
1740		   intel_gpu_freq(dev_priv, rps->min_freq),
1741		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
1742		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
1743		   intel_gpu_freq(dev_priv, rps->max_freq));
1744	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
1745		   intel_gpu_freq(dev_priv, rps->idle_freq),
1746		   intel_gpu_freq(dev_priv, rps->efficient_freq),
1747		   intel_gpu_freq(dev_priv, rps->boost_freq));
1748
1749	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
1750
1751	if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
1752		u32 rpup, rpupei;
1753		u32 rpdown, rpdownei;
1754
1755		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
1756		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
1757		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
1758		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
1759		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
1760		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1761
1762		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
1763			   rps_power_to_str(rps->power.mode));
1764		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
1765			   rpup && rpupei ? 100 * rpup / rpupei : 0,
1766			   rps->power.up_threshold);
1767		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
1768			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
1769			   rps->power.down_threshold);
1770	} else {
1771		seq_puts(m, "\nRPS Autotuning inactive\n");
1772	}
1773
1774	return 0;
1775}
1776
1777static int i915_llc(struct seq_file *m, void *data)
1778{
1779	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1780	const bool edram = INTEL_GEN(dev_priv) > 8;
1781
1782	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
1783	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
1784		   dev_priv->edram_size_mb);
1785
1786	return 0;
1787}
1788
1789static int i915_huc_load_status_info(struct seq_file *m, void *data)
1790{
1791	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1792	intel_wakeref_t wakeref;
1793	struct drm_printer p;
1794
1795	if (!HAS_GT_UC(dev_priv))
1796		return -ENODEV;
1797
1798	p = drm_seq_file_printer(m);
1799	intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);
1800
1801	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
1802		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
1803
1804	return 0;
1805}
1806
1807static int i915_guc_load_status_info(struct seq_file *m, void *data)
1808{
1809	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1810	intel_wakeref_t wakeref;
1811	struct drm_printer p;
1812
1813	if (!HAS_GT_UC(dev_priv))
1814		return -ENODEV;
1815
1816	p = drm_seq_file_printer(m);
1817	intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);
1818
1819	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1820		u32 tmp = I915_READ(GUC_STATUS);
1821		u32 i;
1822
1823		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
1824		seq_printf(m, "\tBootrom status = 0x%x\n",
1825			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
1826		seq_printf(m, "\tuKernel status = 0x%x\n",
1827			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
1828		seq_printf(m, "\tMIA Core status = 0x%x\n",
1829			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
1830		seq_puts(m, "\nScratch registers:\n");
1831		for (i = 0; i < 16; i++) {
1832			seq_printf(m, "\t%2d: \t0x%x\n",
1833				   i, I915_READ(SOFT_SCRATCH(i)));
1834		}
1835	}
1836
1837	return 0;
1838}
1839
1840static const char *
1841stringify_guc_log_type(enum guc_log_buffer_type type)
1842{
1843	switch (type) {
1844	case GUC_ISR_LOG_BUFFER:
1845		return "ISR";
1846	case GUC_DPC_LOG_BUFFER:
1847		return "DPC";
1848	case GUC_CRASH_DUMP_LOG_BUFFER:
1849		return "CRASH";
1850	default:
1851		MISSING_CASE(type);
1852	}
1853
1854	return "";
1855}
1856
1857static void i915_guc_log_info(struct seq_file *m,
1858			      struct drm_i915_private *dev_priv)
1859{
1860	struct intel_guc_log *log = &dev_priv->gt.uc.guc.log;
1861	enum guc_log_buffer_type type;
1862
1863	if (!intel_guc_log_relay_enabled(log)) {
1864		seq_puts(m, "GuC log relay disabled\n");
1865		return;
1866	}
1867
1868	seq_puts(m, "GuC logging stats:\n");
1869
1870	seq_printf(m, "\tRelay full count: %u\n",
1871		   log->relay.full_count);
1872
1873	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
1874		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
1875			   stringify_guc_log_type(type),
1876			   log->stats[type].flush,
1877			   log->stats[type].sampled_overflow);
1878	}
1879}
1880
1881static int i915_guc_info(struct seq_file *m, void *data)
1882{
1883	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1884	const struct intel_guc *guc = &dev_priv->gt.uc.guc;
1885	struct intel_guc_client *client = guc->execbuf_client;
1886
1887	if (!USES_GUC(dev_priv))
1888		return -ENODEV;
1889
1890	i915_guc_log_info(m, dev_priv);
1891
1892	if (!USES_GUC_SUBMISSION(dev_priv))
1893		return 0;
1894
1895	GEM_BUG_ON(!guc->execbuf_client);
1896
1897	seq_printf(m, "\nDoorbell map:\n");
1898	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
1899	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
1900
1901	seq_printf(m, "\nGuC execbuf client @ %p:\n", client);
1902	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
1903		   client->priority,
1904		   client->stage_id,
1905		   client->proc_desc_offset);
1906	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
1907		   client->doorbell_id, client->doorbell_offset);
1908	/* Add more as required ... */
1909
1910	return 0;
1911}
1912
1913static int i915_guc_stage_pool(struct seq_file *m, void *data)
1914{
1915	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1916	const struct intel_guc *guc = &dev_priv->gt.uc.guc;
1917	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
1918	int index;
1919
1920	if (!USES_GUC_SUBMISSION(dev_priv))
1921		return -ENODEV;
1922
1923	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
1924		struct intel_engine_cs *engine;
1925
1926		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
1927			continue;
1928
1929		seq_printf(m, "GuC stage descriptor %u:\n", index);
1930		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
1931		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
1932		seq_printf(m, "\tPriority: %d\n", desc->priority);
1933		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
1934		seq_printf(m, "\tEngines used: 0x%x\n",
1935			   desc->engines_used);
1936		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
1937			   desc->db_trigger_phy,
1938			   desc->db_trigger_cpu,
1939			   desc->db_trigger_uk);
1940		seq_printf(m, "\tProcess descriptor: 0x%x\n",
1941			   desc->process_desc);
1942		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
1943			   desc->wq_addr, desc->wq_size);
1944		seq_putc(m, '\n');
1945
1946		for_each_uabi_engine(engine, dev_priv) {
1947			u32 guc_engine_id = engine->guc_id;
1948			struct guc_execlist_context *lrc =
1949						&desc->lrc[guc_engine_id];
1950
1951			seq_printf(m, "\t%s LRC:\n", engine->name);
1952			seq_printf(m, "\t\tContext desc: 0x%x\n",
1953				   lrc->context_desc);
1954			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
1955			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
1956			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
1957			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
1958			seq_putc(m, '\n');
1959		}
1960	}
1961
1962	return 0;
1963}
1964
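/*
 * Raw hexdump of the GuC log buffer (or of the load-error log,
 * selected via the node's info_ent->data cookie), four 32-bit words
 * per line. The object is mapped write-combined (I915_MAP_WC) before
 * being dumped.
 */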
1965static int i915_guc_log_dump(struct seq_file *m, void *data)
1966{
1967	struct drm_info_node *node = m->private;
1968	struct drm_i915_private *dev_priv = node_to_i915(node);
1969	bool dump_load_err = !!node->info_ent->data;
1970	struct drm_i915_gem_object *obj = NULL;
1971	u32 *log;
1972	int i = 0;
1973
1974	if (!HAS_GT_UC(dev_priv))
1975		return -ENODEV;
1976
1977	if (dump_load_err)
1978		obj = dev_priv->gt.uc.load_err_log;
1979	else if (dev_priv->gt.uc.guc.log.vma)
1980		obj = dev_priv->gt.uc.guc.log.vma->obj;
1981
1982	if (!obj)
1983		return 0;
1984
1985	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
1986	if (IS_ERR(log)) {
1987		DRM_DEBUG("Failed to pin object\n");
1988		seq_puts(m, "(log data inaccessible)\n");
1989		return PTR_ERR(log);
1990	}
1991
1992	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
1993		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
1994			   *(log + i), *(log + i + 1),
1995			   *(log + i + 2), *(log + i + 3));
1996
1997	seq_putc(m, '\n');
1998
1999	i915_gem_object_unpin_map(obj);
2000
2001	return 0;
2002}
2003
2004static int i915_guc_log_level_get(void *data, u64 *val)
2005{
2006	struct drm_i915_private *dev_priv = data;
2007
2008	if (!USES_GUC(dev_priv))
2009		return -ENODEV;
2010
2011	*val = intel_guc_log_get_level(&dev_priv->gt.uc.guc.log);
2012
2013	return 0;
2014}
2015
2016static int i915_guc_log_level_set(void *data, u64 val)
2017{
2018	struct drm_i915_private *dev_priv = data;
2019
2020	if (!USES_GUC(dev_priv))
2021		return -ENODEV;
2022
2023	return intel_guc_log_set_level(&dev_priv->gt.uc.guc.log, val);
2024}
2025
2026DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2027			i915_guc_log_level_get, i915_guc_log_level_set,
2028			"%lld\n");
2029
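/*
 * Control node for relayfs-backed GuC log capture: opening it starts
 * the relay, any write flushes pending log data into the relay
 * buffers, and the final release tears the relay down. Note the fops
 * below deliberately have no .read -- the log data itself is consumed
 * through the relay channel's own files, not through this node.
 */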
2030static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2031{
2032	struct drm_i915_private *i915 = inode->i_private;
2033	struct intel_guc *guc = &i915->gt.uc.guc;
2034	struct intel_guc_log *log = &guc->log;
2035
2036	if (!intel_guc_is_running(guc))
2037		return -ENODEV;
2038
2039	file->private_data = log;
2040
2041	return intel_guc_log_relay_open(log);
2042}
2043
2044static ssize_t
2045i915_guc_log_relay_write(struct file *filp,
2046			 const char __user *ubuf,
2047			 size_t cnt,
2048			 loff_t *ppos)
2049{
2050	struct intel_guc_log *log = filp->private_data;
2051
2052	intel_guc_log_relay_flush(log);
2053	return cnt;
2054}
2055
2056static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2057{
2058	struct drm_i915_private *i915 = inode->i_private;
2059	struct intel_guc *guc = &i915->gt.uc.guc;
2060
2061	intel_guc_log_relay_close(&guc->log);
2062	return 0;
2063}
2064
2065static const struct file_operations i915_guc_log_relay_fops = {
2066	.owner = THIS_MODULE,
2067	.open = i915_guc_log_relay_open,
2068	.write = i915_guc_log_relay_write,
2069	.release = i915_guc_log_relay_release,
2070};
2071
2072static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2073{
2074	u8 val;
2075	static const char * const sink_status[] = {
2076		"inactive",
2077		"transition to active, capture and display",
2078		"active, display from RFB",
2079		"active, capture and display on sink device timings",
2080		"transition to inactive, capture and display, timing re-sync",
2081		"reserved",
2082		"reserved",
2083		"sink internal error",
2084	};
2085	struct drm_connector *connector = m->private;
2086	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2087	struct intel_dp *intel_dp =
2088		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2089	int ret;
2090
2091	if (!CAN_PSR(dev_priv)) {
2092		seq_puts(m, "PSR Unsupported\n");
2093		return -ENODEV;
2094	}
2095
2096	if (connector->status != connector_status_connected)
2097		return -ENODEV;
2098
2099	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2100
2101	if (ret == 1) {
2102		const char *str = "unknown";
2103
2104		val &= DP_PSR_SINK_STATE_MASK;
2105		if (val < ARRAY_SIZE(sink_status))
2106			str = sink_status[val];
2107		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2108	} else {
2109		return ret;
2110	}
2111
2112	return 0;
2113}
2114DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2115
2116static void
2117psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2118{
2119	u32 val, status_val;
2120	const char *status = "unknown";
2121
2122	if (dev_priv->psr.psr2_enabled) {
2123		static const char * const live_status[] = {
2124			"IDLE",
2125			"CAPTURE",
2126			"CAPTURE_FS",
2127			"SLEEP",
2128			"BUFON_FW",
2129			"ML_UP",
2130			"SU_STANDBY",
2131			"FAST_SLEEP",
2132			"DEEP_SLEEP",
2133			"BUF_ON",
2134			"TG_ON"
2135		};
2136		val = I915_READ(EDP_PSR2_STATUS);
2137		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
2138			      EDP_PSR2_STATUS_STATE_SHIFT;
2139		if (status_val < ARRAY_SIZE(live_status))
2140			status = live_status[status_val];
2141	} else {
2142		static const char * const live_status[] = {
2143			"IDLE",
2144			"SRDONACK",
2145			"SRDENT",
2146			"BUFOFF",
2147			"BUFON",
2148			"AUXACK",
2149			"SRDOFFACK",
2150			"SRDENT_ON",
2151		};
2152		val = I915_READ(EDP_PSR_STATUS);
2153		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
2154			      EDP_PSR_STATUS_STATE_SHIFT;
2155		if (status_val < ARRAY_SIZE(live_status))
2156			status = live_status[status_val];
2157	}
2158
2159	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2160}
2161
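/*
 * Top-level PSR (Panel Self Refresh) report: sink capability from the
 * DPCD, whether PSR1 or PSR2 is active, the live source status, the
 * busy frontbuffer bits and, for PSR2, the per-frame selective-update
 * block counts.
 */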
2162static int i915_edp_psr_status(struct seq_file *m, void *data)
2163{
2164	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2165	struct i915_psr *psr = &dev_priv->psr;
2166	intel_wakeref_t wakeref;
2167	const char *status;
2168	bool enabled;
2169	u32 val;
2170
2171	if (!HAS_PSR(dev_priv))
2172		return -ENODEV;
2173
2174	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
2175	if (psr->dp)
2176		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
2177	seq_puts(m, "\n");
2178
2179	if (!psr->sink_support)
2180		return 0;
2181
2182	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2183	mutex_lock(&psr->lock);
2184
2185	if (psr->enabled)
2186		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2187	else
2188		status = "disabled";
2189	seq_printf(m, "PSR mode: %s\n", status);
2190
2191	if (!psr->enabled)
2192		goto unlock;
2193
2194	if (psr->psr2_enabled) {
2195		val = I915_READ(EDP_PSR2_CTL);
2196		enabled = val & EDP_PSR2_ENABLE;
2197	} else {
2198		val = I915_READ(EDP_PSR_CTL);
2199		enabled = val & EDP_PSR_ENABLE;
2200	}
2201	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2202		   enableddisabled(enabled), val);
2203	psr_source_status(dev_priv, m);
2204	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2205		   psr->busy_frontbuffer_bits);
2206
2207	/*
2208	 * SKL+ Perf counter is reset to 0 every time DC state is entered
2209	 */
2210	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2211		val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
2212		seq_printf(m, "Performance counter: %u\n", val);
2213	}
2214
2215	if (psr->debug & I915_PSR_DEBUG_IRQ) {
2216		seq_printf(m, "Last attempted entry at: %lld\n",
2217			   psr->last_entry_attempt);
2218		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
2219	}
2220
2221	if (psr->psr2_enabled) {
2222		u32 su_frames_val[3];
2223		int frame;
2224
2225		/*
2226		 * Reading all 3 registers beforehand to minimize crossing a
2227		 * frame boundary between register reads
2228		 */
2229		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
2230			su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));
2231
2232		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2233
2234		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2235			u32 su_blocks;
2236
2237			su_blocks = su_frames_val[frame / 3] &
2238				    PSR2_SU_STATUS_MASK(frame);
2239			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2240			seq_printf(m, "%d\t%d\n", frame, su_blocks);
2241		}
2242	}
2243
2244unlock:
2245	mutex_unlock(&psr->lock);
2246	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2247
2248	return 0;
2249}
2250
2251static int
2252i915_edp_psr_debug_set(void *data, u64 val)
2253{
2254	struct drm_i915_private *dev_priv = data;
2255	intel_wakeref_t wakeref;
2256	int ret;
2257
2258	if (!CAN_PSR(dev_priv))
2259		return -ENODEV;
2260
2261	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
2262
2263	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2264
2265	ret = intel_psr_debug_set(dev_priv, val);
2266
2267	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2268
2269	return ret;
2270}
2271
2272static int
2273i915_edp_psr_debug_get(void *data, u64 *val)
2274{
2275	struct drm_i915_private *dev_priv = data;
2276
2277	if (!CAN_PSR(dev_priv))
2278		return -ENODEV;
2279
2280	*val = READ_ONCE(dev_priv->psr.debug);
2281	return 0;
2282}
2283
2284DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2285			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2286			"%llu\n");
2287
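/*
 * Reports GPU energy in microjoules. Bits 12:8 of MSR_RAPL_POWER_UNIT
 * give the energy-status unit (ESU) as 1/2^ESU joules, so the raw
 * counter is converted as:
 *
 *   energy_uJ = (raw * 10^6) >> ESU
 *
 * which is exactly the shift performed below.
 */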
2288static int i915_energy_uJ(struct seq_file *m, void *data)
2289{
2290	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2291	unsigned long long power;
2292	intel_wakeref_t wakeref;
2293	u32 units;
2294
2295	if (INTEL_GEN(dev_priv) < 6)
2296		return -ENODEV;
2297
2298	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
2299		return -ENODEV;
2300
2301	units = (power & 0x1f00) >> 8;
2302	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
2303		power = I915_READ(MCH_SECP_NRG_STTS);
2304
2305	power = (1000000 * power) >> units; /* convert to uJ */
2306	seq_printf(m, "%llu", power);
2307
2308	return 0;
2309}
2310
2311static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2312{
2313	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2314	struct pci_dev *pdev = dev_priv->drm.pdev;
2315
2316	if (!HAS_RUNTIME_PM(dev_priv))
2317		seq_puts(m, "Runtime power management not supported\n");
2318
2319	seq_printf(m, "Runtime power status: %s\n",
2320		   enableddisabled(!dev_priv->power_domains.wakeref));
2321
2322	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
2323	seq_printf(m, "IRQs disabled: %s\n",
2324		   yesno(!intel_irqs_enabled(dev_priv)));
2325#ifdef CONFIG_PM
2326	seq_printf(m, "Usage count: %d\n",
2327		   atomic_read(&dev_priv->drm.dev->power.usage_count));
2328#else
2329	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2330#endif
2331	seq_printf(m, "PCI device power state: %s [%d]\n",
2332		   pci_power_name(pdev->current_state),
2333		   pdev->current_state);
2334
2335	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2336		struct drm_printer p = drm_seq_file_printer(m);
2337
2338		print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
2339	}
2340
2341	return 0;
2342}
2343
2344static int i915_power_domain_info(struct seq_file *m, void *unused)
2345{
2346	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2347	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2348	int i;
2349
2350	mutex_lock(&power_domains->lock);
2351
2352	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2353	for (i = 0; i < power_domains->power_well_count; i++) {
2354		struct i915_power_well *power_well;
2355		enum intel_display_power_domain power_domain;
2356
2357		power_well = &power_domains->power_wells[i];
2358		seq_printf(m, "%-25s %d\n", power_well->desc->name,
2359			   power_well->count);
2360
2361		for_each_power_domain(power_domain, power_well->desc->domains)
2362			seq_printf(m, "  %-23s %d\n",
2363				 intel_display_power_domain_str(dev_priv,
2364								power_domain),
2365				 power_domains->domain_use_count[power_domain]);
2366	}
2367
2368	mutex_unlock(&power_domains->lock);
2369
2370	return 0;
2371}
2372
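/*
 * DMC (display microcontroller, tracked in i915 as "CSR") firmware
 * status: whether the payload loaded, its version, and the hardware
 * counters recording how often the display entered the deeper
 * DC5/DC6 power states.
 */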
2373static int i915_dmc_info(struct seq_file *m, void *unused)
2374{
2375	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2376	intel_wakeref_t wakeref;
2377	struct intel_csr *csr;
2378	i915_reg_t dc5_reg, dc6_reg = {};
2379
2380	if (!HAS_CSR(dev_priv))
2381		return -ENODEV;
2382
2383	csr = &dev_priv->csr;
2384
2385	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2386
2387	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2388	seq_printf(m, "path: %s\n", csr->fw_path);
2389
2390	if (!csr->dmc_payload)
2391		goto out;
2392
2393	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2394		   CSR_VERSION_MINOR(csr->version));
2395
2396	if (INTEL_GEN(dev_priv) >= 12) {
2397		dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
2398		dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
2399	} else {
2400		dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
2401						 SKL_CSR_DC3_DC5_COUNT;
2402		if (!IS_GEN9_LP(dev_priv))
2403			dc6_reg = SKL_CSR_DC5_DC6_COUNT;
2404	}
2405
2406	seq_printf(m, "DC3 -> DC5 count: %d\n", I915_READ(dc5_reg));
2407	if (dc6_reg.reg)
2408		seq_printf(m, "DC5 -> DC6 count: %d\n", I915_READ(dc6_reg));
2409
2410out:
2411	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2412	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2413	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2414
2415	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2416
2417	return 0;
2418}
2419
2420static void intel_seq_print_mode(struct seq_file *m, int tabs,
2421				 struct drm_display_mode *mode)
2422{
2423	int i;
2424
2425	for (i = 0; i < tabs; i++)
2426		seq_putc(m, '\t');
2427
2428	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2429}
2430
2431static void intel_encoder_info(struct seq_file *m,
2432			       struct intel_crtc *intel_crtc,
2433			       struct intel_encoder *intel_encoder)
2434{
2435	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2436	struct drm_device *dev = &dev_priv->drm;
2437	struct drm_crtc *crtc = &intel_crtc->base;
2438	struct intel_connector *intel_connector;
2439	struct drm_encoder *encoder;
2440
2441	encoder = &intel_encoder->base;
2442	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2443		   encoder->base.id, encoder->name);
2444	for_each_connector_on_encoder(dev, encoder, intel_connector) {
2445		struct drm_connector *connector = &intel_connector->base;
2446		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2447			   connector->base.id,
2448			   connector->name,
2449			   drm_get_connector_status_name(connector->status));
2450		if (connector->status == connector_status_connected) {
2451			struct drm_display_mode *mode = &crtc->mode;
2452			seq_printf(m, ", mode:\n");
2453			intel_seq_print_mode(m, 2, mode);
2454		} else {
2455			seq_putc(m, '\n');
2456		}
2457	}
2458}
2459
2460static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2461{
2462	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2463	struct drm_device *dev = &dev_priv->drm;
2464	struct drm_crtc *crtc = &intel_crtc->base;
2465	struct intel_encoder *intel_encoder;
2466	struct drm_plane_state *plane_state = crtc->primary->state;
2467	struct drm_framebuffer *fb = plane_state->fb;
2468
2469	if (fb)
2470		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2471			   fb->base.id, plane_state->src_x >> 16,
2472			   plane_state->src_y >> 16, fb->width, fb->height);
2473	else
2474		seq_puts(m, "\tprimary plane disabled\n");
2475	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2476		intel_encoder_info(m, intel_crtc, intel_encoder);
2477}
2478
2479static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2480{
2481	struct drm_display_mode *mode = panel->fixed_mode;
2482
2483	seq_printf(m, "\tfixed mode:\n");
2484	intel_seq_print_mode(m, 2, mode);
2485}
2486
2487static void intel_hdcp_info(struct seq_file *m,
2488			    struct intel_connector *intel_connector)
2489{
2490	bool hdcp_cap, hdcp2_cap;
2491
2492	hdcp_cap = intel_hdcp_capable(intel_connector);
2493	hdcp2_cap = intel_hdcp2_capable(intel_connector);
2494
2495	if (hdcp_cap)
2496		seq_puts(m, "HDCP1.4 ");
2497	if (hdcp2_cap)
2498		seq_puts(m, "HDCP2.2 ");
2499
2500	if (!hdcp_cap && !hdcp2_cap)
2501		seq_puts(m, "None");
2502
2503	seq_puts(m, "\n");
2504}
2505
2506static void intel_dp_info(struct seq_file *m,
2507			  struct intel_connector *intel_connector)
2508{
2509	struct intel_encoder *intel_encoder = intel_connector->encoder;
2510	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2511
2512	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2513	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2514	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2515		intel_panel_info(m, &intel_connector->panel);
2516
2517	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2518				&intel_dp->aux);
2519	if (intel_connector->hdcp.shim) {
2520		seq_puts(m, "\tHDCP version: ");
2521		intel_hdcp_info(m, intel_connector);
2522	}
2523}
2524
2525static void intel_dp_mst_info(struct seq_file *m,
2526			  struct intel_connector *intel_connector)
2527{
2528	struct intel_encoder *intel_encoder = intel_connector->encoder;
2529	struct intel_dp_mst_encoder *intel_mst =
2530		enc_to_mst(&intel_encoder->base);
2531	struct intel_digital_port *intel_dig_port = intel_mst->primary;
2532	struct intel_dp *intel_dp = &intel_dig_port->dp;
2533	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2534					intel_connector->port);
2535
2536	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2537}
2538
2539static void intel_hdmi_info(struct seq_file *m,
2540			    struct intel_connector *intel_connector)
2541{
2542	struct intel_encoder *intel_encoder = intel_connector->encoder;
2543	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2544
2545	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2546	if (intel_connector->hdcp.shim) {
2547		seq_puts(m, "\tHDCP version: ");
2548		intel_hdcp_info(m, intel_connector);
2549	}
2550}
2551
2552static void intel_lvds_info(struct seq_file *m,
2553			    struct intel_connector *intel_connector)
2554{
2555	intel_panel_info(m, &intel_connector->panel);
2556}
2557
2558static void intel_connector_info(struct seq_file *m,
2559				 struct drm_connector *connector)
2560{
2561	struct intel_connector *intel_connector = to_intel_connector(connector);
2562	struct intel_encoder *intel_encoder = intel_connector->encoder;
2563	struct drm_display_mode *mode;
2564
2565	seq_printf(m, "connector %d: type %s, status: %s\n",
2566		   connector->base.id, connector->name,
2567		   drm_get_connector_status_name(connector->status));
2568
2569	if (connector->status == connector_status_disconnected)
2570		return;
2571
2572	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2573		   connector->display_info.width_mm,
2574		   connector->display_info.height_mm);
2575	seq_printf(m, "\tsubpixel order: %s\n",
2576		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2577	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
2578
2579	if (!intel_encoder)
2580		return;
2581
2582	switch (connector->connector_type) {
2583	case DRM_MODE_CONNECTOR_DisplayPort:
2584	case DRM_MODE_CONNECTOR_eDP:
2585		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2586			intel_dp_mst_info(m, intel_connector);
2587		else
2588			intel_dp_info(m, intel_connector);
2589		break;
2590	case DRM_MODE_CONNECTOR_LVDS:
2591		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2592			intel_lvds_info(m, intel_connector);
2593		break;
2594	case DRM_MODE_CONNECTOR_HDMIA:
2595		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
2596		    intel_encoder->type == INTEL_OUTPUT_DDI)
2597			intel_hdmi_info(m, intel_connector);
2598		break;
2599	default:
2600		break;
2601	}
2602
2603	seq_printf(m, "\tmodes:\n");
2604	list_for_each_entry(mode, &connector->modes, head)
2605		intel_seq_print_mode(m, 2, mode);
2606}
2607
2608static const char *plane_type(enum drm_plane_type type)
2609{
2610	switch (type) {
2611	case DRM_PLANE_TYPE_OVERLAY:
2612		return "OVL";
2613	case DRM_PLANE_TYPE_PRIMARY:
2614		return "PRI";
2615	case DRM_PLANE_TYPE_CURSOR:
2616		return "CUR";
2617	/*
2618	 * Deliberately omitting default: to generate compiler warnings
2619	 * when a new drm_plane_type gets added.
2620	 */
2621	}
2622
2623	return "unknown";
2624}
2625
2626static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
2627{
2628	/*
2629	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
2630	 * will print them all to visualize if the values are misused
2631	 */
2632	snprintf(buf, bufsize,
2633		 "%s%s%s%s%s%s(0x%08x)",
2634		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
2635		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
2636		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
2637		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
2638		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
2639		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
2640		 rotation);
2641}
2642
2643static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2644{
2645	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2646	struct drm_device *dev = &dev_priv->drm;
2647	struct intel_plane *intel_plane;
2648
2649	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2650		struct drm_plane_state *state;
2651		struct drm_plane *plane = &intel_plane->base;
2652		struct drm_format_name_buf format_name;
2653		char rot_str[48];
2654
2655		if (!plane->state) {
2656			seq_puts(m, "plane->state is NULL!\n");
2657			continue;
2658		}
2659
2660		state = plane->state;
2661
2662		if (state->fb) {
2663			drm_get_format_name(state->fb->format->format,
2664					    &format_name);
2665		} else {
2666			sprintf(format_name.str, "N/A");
2667		}
2668
2669		plane_rotation(rot_str, sizeof(rot_str), state->rotation);
2670
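		/*
		 * Source coordinates are 16.16 fixed point: the integer part
		 * is (v >> 16) and the fractional part is scaled to
		 * millionths by (v & 0xffff) * 15625 >> 10, i.e. multiplied
		 * by 10^6 / 2^16.
		 */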
2671		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
2672			   plane->base.id,
2673			   plane_type(intel_plane->base.type),
2674			   state->crtc_x, state->crtc_y,
2675			   state->crtc_w, state->crtc_h,
2676			   (state->src_x >> 16),
2677			   ((state->src_x & 0xffff) * 15625) >> 10,
2678			   (state->src_y >> 16),
2679			   ((state->src_y & 0xffff) * 15625) >> 10,
2680			   (state->src_w >> 16),
2681			   ((state->src_w & 0xffff) * 15625) >> 10,
2682			   (state->src_h >> 16),
2683			   ((state->src_h & 0xffff) * 15625) >> 10,
2684			   format_name.str,
2685			   rot_str);
2686	}
2687}
2688
2689static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2690{
2691	struct intel_crtc_state *pipe_config;
2692	int num_scalers = intel_crtc->num_scalers;
2693	int i;
2694
2695	pipe_config = to_intel_crtc_state(intel_crtc->base.state);
2696
2697	/* Not all platforms have a scaler */
2698	if (num_scalers) {
2699		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
2700			   num_scalers,
2701			   pipe_config->scaler_state.scaler_users,
2702			   pipe_config->scaler_state.scaler_id);
2703
2704		for (i = 0; i < num_scalers; i++) {
2705			struct intel_scaler *sc =
2706					&pipe_config->scaler_state.scalers[i];
2707
2708			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
2709				   i, yesno(sc->in_use), sc->mode);
2710		}
2711		seq_puts(m, "\n");
2712	} else {
2713		seq_puts(m, "\tNo scalers available on this platform\n");
2714	}
2715}
2716
2717static int i915_display_info(struct seq_file *m, void *unused)
2718{
2719	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2720	struct drm_device *dev = &dev_priv->drm;
2721	struct intel_crtc *crtc;
2722	struct drm_connector *connector;
2723	struct drm_connector_list_iter conn_iter;
2724	intel_wakeref_t wakeref;
2725
2726	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2727
2728	seq_printf(m, "CRTC info\n");
2729	seq_printf(m, "---------\n");
2730	for_each_intel_crtc(dev, crtc) {
2731		struct intel_crtc_state *pipe_config;
2732
2733		drm_modeset_lock(&crtc->base.mutex, NULL);
2734		pipe_config = to_intel_crtc_state(crtc->base.state);
2735
2736		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
2737			   crtc->base.base.id, pipe_name(crtc->pipe),
2738			   yesno(pipe_config->base.active),
2739			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
2740			   yesno(pipe_config->dither), pipe_config->pipe_bpp);
2741
2742		if (pipe_config->base.active) {
2743			struct intel_plane *cursor =
2744				to_intel_plane(crtc->base.cursor);
2745
2746			intel_crtc_info(m, crtc);
2747
2748			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
2749				   yesno(cursor->base.state->visible),
2750				   cursor->base.state->crtc_x,
2751				   cursor->base.state->crtc_y,
2752				   cursor->base.state->crtc_w,
2753				   cursor->base.state->crtc_h,
2754				   cursor->cursor.base);
2755			intel_scaler_info(m, crtc);
2756			intel_plane_info(m, crtc);
2757		}
2758
2759		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
2760			   yesno(!crtc->cpu_fifo_underrun_disabled),
2761			   yesno(!crtc->pch_fifo_underrun_disabled));
2762		drm_modeset_unlock(&crtc->base.mutex);
2763	}
2764
2765	seq_printf(m, "\n");
2766	seq_printf(m, "Connector info\n");
2767	seq_printf(m, "--------------\n");
2768	mutex_lock(&dev->mode_config.mutex);
2769	drm_connector_list_iter_begin(dev, &conn_iter);
2770	drm_for_each_connector_iter(connector, &conn_iter)
2771		intel_connector_info(m, connector);
2772	drm_connector_list_iter_end(&conn_iter);
2773	mutex_unlock(&dev->mode_config.mutex);
2774
2775	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2776
2777	return 0;
2778}
2779
2780static int i915_engine_info(struct seq_file *m, void *unused)
2781{
2782	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2783	struct intel_engine_cs *engine;
2784	intel_wakeref_t wakeref;
2785	struct drm_printer p;
2786
2787	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2788
2789	seq_printf(m, "GT awake? %s [%d]\n",
2790		   yesno(dev_priv->gt.awake),
2791		   atomic_read(&dev_priv->gt.wakeref.count));
2792	seq_printf(m, "CS timestamp frequency: %u kHz\n",
2793		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
2794
2795	p = drm_seq_file_printer(m);
2796	for_each_uabi_engine(engine, dev_priv)
2797		intel_engine_dump(engine, &p, "%s\n", engine->name);
2798
2799	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2800
2801	return 0;
2802}
2803
2804static int i915_rcs_topology(struct seq_file *m, void *unused)
2805{
2806	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2807	struct drm_printer p = drm_seq_file_printer(m);
2808
2809	intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
2810
2811	return 0;
2812}
2813
2814static int i915_shrinker_info(struct seq_file *m, void *unused)
2815{
2816	struct drm_i915_private *i915 = node_to_i915(m->private);
2817
2818	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
2819	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
2820
2821	return 0;
2822}
2823
2824static int i915_shared_dplls_info(struct seq_file *m, void *unused)
2825{
2826	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2827	struct drm_device *dev = &dev_priv->drm;
2828	int i;
2829
2830	drm_modeset_lock_all(dev);
2831	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
2832		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
2833
2834		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
2835			   pll->info->id);
2836		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
2837			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
2838		seq_printf(m, " tracked hardware state:\n");
2839		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
2840		seq_printf(m, " dpll_md: 0x%08x\n",
2841			   pll->state.hw_state.dpll_md);
2842		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
2843		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
2844		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
2845		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
2846		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
2847		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
2848			   pll->state.hw_state.mg_refclkin_ctl);
2849		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
2850			   pll->state.hw_state.mg_clktop2_coreclkctl1);
2851		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
2852			   pll->state.hw_state.mg_clktop2_hsclkctl);
2853		seq_printf(m, " mg_pll_div0:  0x%08x\n",
2854			   pll->state.hw_state.mg_pll_div0);
2855		seq_printf(m, " mg_pll_div1:  0x%08x\n",
2856			   pll->state.hw_state.mg_pll_div1);
2857		seq_printf(m, " mg_pll_lf:    0x%08x\n",
2858			   pll->state.hw_state.mg_pll_lf);
2859		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
2860			   pll->state.hw_state.mg_pll_frac_lock);
2861		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
2862			   pll->state.hw_state.mg_pll_ssc);
2863		seq_printf(m, " mg_pll_bias:  0x%08x\n",
2864			   pll->state.hw_state.mg_pll_bias);
2865		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
2866			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
2867	}
2868	drm_modeset_unlock_all(dev);
2869
2870	return 0;
2871}
2872
2873static int i915_wa_registers(struct seq_file *m, void *unused)
2874{
2875	struct drm_i915_private *i915 = node_to_i915(m->private);
2876	struct intel_engine_cs *engine;
2877
2878	for_each_uabi_engine(engine, i915) {
2879		const struct i915_wa_list *wal = &engine->ctx_wa_list;
2880		const struct i915_wa *wa;
2881		unsigned int count;
2882
2883		count = wal->count;
2884		if (!count)
2885			continue;
2886
2887		seq_printf(m, "%s: Workarounds applied: %u\n",
2888			   engine->name, count);
2889
2890		for (wa = wal->list; count--; wa++)
2891			seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
2892				   i915_mmio_reg_offset(wa->reg),
2893				   wa->val, wa->mask);
2894
2895		seq_printf(m, "\n");
2896	}
2897
2898	return 0;
2899}
2900
2901static int i915_ipc_status_show(struct seq_file *m, void *data)
2902{
2903	struct drm_i915_private *dev_priv = m->private;
2904
2905	seq_printf(m, "Isochronous Priority Control: %s\n",
2906			yesno(dev_priv->ipc_enabled));
2907	return 0;
2908}
2909
2910static int i915_ipc_status_open(struct inode *inode, struct file *file)
2911{
2912	struct drm_i915_private *dev_priv = inode->i_private;
2913
2914	if (!HAS_IPC(dev_priv))
2915		return -ENODEV;
2916
2917	return single_open(file, i915_ipc_status_show, dev_priv);
2918}
2919
2920static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
2921				     size_t len, loff_t *offp)
2922{
2923	struct seq_file *m = file->private_data;
2924	struct drm_i915_private *dev_priv = m->private;
2925	intel_wakeref_t wakeref;
2926	bool enable;
2927	int ret;
2928
2929	ret = kstrtobool_from_user(ubuf, len, &enable);
2930	if (ret < 0)
2931		return ret;
2932
2933	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
2934		if (!dev_priv->ipc_enabled && enable)
2935			DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
2936		dev_priv->wm.distrust_bios_wm = true;
2937		dev_priv->ipc_enabled = enable;
2938		intel_enable_ipc(dev_priv);
2939	}
2940
2941	return len;
2942}
2943
2944static const struct file_operations i915_ipc_status_fops = {
2945	.owner = THIS_MODULE,
2946	.open = i915_ipc_status_open,
2947	.read = seq_read,
2948	.llseek = seq_lseek,
2949	.release = single_release,
2950	.write = i915_ipc_status_write
2951};
2952
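/*
 * Dumps the Data Buffer (DDB) allocation on gen9+: for each pipe, the
 * start/end block and size carved out of the shared display buffer for
 * every plane and for the cursor, as tracked in the skl watermark
 * state.
 */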
2953static int i915_ddb_info(struct seq_file *m, void *unused)
2954{
2955	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2956	struct drm_device *dev = &dev_priv->drm;
2957	struct skl_ddb_entry *entry;
2958	struct intel_crtc *crtc;
2959
2960	if (INTEL_GEN(dev_priv) < 9)
2961		return -ENODEV;
2962
2963	drm_modeset_lock_all(dev);
2964
2965	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
2966
2967	for_each_intel_crtc(&dev_priv->drm, crtc) {
2968		struct intel_crtc_state *crtc_state =
2969			to_intel_crtc_state(crtc->base.state);
2970		enum pipe pipe = crtc->pipe;
2971		enum plane_id plane_id;
2972
2973		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
2974
2975		for_each_plane_id_on_crtc(crtc, plane_id) {
2976			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
2977			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
2978				   entry->start, entry->end,
2979				   skl_ddb_entry_size(entry));
2980		}
2981
2982		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
2983		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
2984			   entry->end, skl_ddb_entry_size(entry));
2985	}
2986
2987	drm_modeset_unlock_all(dev);
2988
2989	return 0;
2990}
2991
2992static void drrs_status_per_crtc(struct seq_file *m,
2993				 struct drm_device *dev,
2994				 struct intel_crtc *intel_crtc)
2995{
2996	struct drm_i915_private *dev_priv = to_i915(dev);
2997	struct i915_drrs *drrs = &dev_priv->drrs;
2998	int vrefresh = 0;
2999	struct drm_connector *connector;
3000	struct drm_connector_list_iter conn_iter;
3001
3002	drm_connector_list_iter_begin(dev, &conn_iter);
3003	drm_for_each_connector_iter(connector, &conn_iter) {
3004		if (connector->state->crtc != &intel_crtc->base)
3005			continue;
3006
3007		seq_printf(m, "%s:\n", connector->name);
3008	}
3009	drm_connector_list_iter_end(&conn_iter);
3010
3011	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3012		seq_puts(m, "\tVBT: DRRS_type: Static");
3013	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3014		seq_puts(m, "\tVBT: DRRS_type: Seamless");
3015	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3016		seq_puts(m, "\tVBT: DRRS_type: None");
3017	else
3018		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3019
3020	seq_puts(m, "\n\n");
3021
3022	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
3023		struct intel_panel *panel;
3024
3025		mutex_lock(&drrs->mutex);
3026		/* DRRS Supported */
3027		seq_puts(m, "\tDRRS Supported: Yes\n");
3028
3029		/* disable_drrs() will make drrs->dp NULL */
3030		if (!drrs->dp) {
3031			seq_puts(m, "Idleness DRRS: Disabled\n");
3032			if (dev_priv->psr.enabled)
3033				seq_puts(m,
3034				"\tAs PSR is enabled, DRRS is not enabled\n");
3035			mutex_unlock(&drrs->mutex);
3036			return;
3037		}
3038
3039		panel = &drrs->dp->attached_connector->panel;
3040		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3041					drrs->busy_frontbuffer_bits);
3042
3043		seq_puts(m, "\n\t\t");
3044		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3045			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3046			vrefresh = panel->fixed_mode->vrefresh;
3047		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3048			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3049			vrefresh = panel->downclock_mode->vrefresh;
3050		} else {
3051			seq_printf(m, "DRRS_State: Unknown(%d)\n",
3052						drrs->refresh_rate_type);
3053			mutex_unlock(&drrs->mutex);
3054			return;
3055		}
3056		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3057
3058		seq_puts(m, "\n\t\t");
3059		mutex_unlock(&drrs->mutex);
3060	} else {
3061		/* DRRS not supported. Print the VBT parameter */
3062		seq_puts(m, "\tDRRS Supported: No");
3063	}
3064	seq_puts(m, "\n");
3065}
3066
3067static int i915_drrs_status(struct seq_file *m, void *unused)
3068{
3069	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3070	struct drm_device *dev = &dev_priv->drm;
3071	struct intel_crtc *intel_crtc;
3072	int active_crtc_cnt = 0;
3073
3074	drm_modeset_lock_all(dev);
3075	for_each_intel_crtc(dev, intel_crtc) {
3076		if (intel_crtc->base.state->active) {
3077			active_crtc_cnt++;
3078			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3079
3080			drrs_status_per_crtc(m, dev, intel_crtc);
3081		}
3082	}
3083	drm_modeset_unlock_all(dev);
3084
3085	if (!active_crtc_cnt)
3086		seq_puts(m, "No active crtc found\n");
3087
3088	return 0;
3089}
3090
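/*
 * For every MST-capable DisplayPort source port, dump the currently
 * discovered branch/port topology via the DP MST helper.
 */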
3091static int i915_dp_mst_info(struct seq_file *m, void *unused)
3092{
3093	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3094	struct drm_device *dev = &dev_priv->drm;
3095	struct intel_encoder *intel_encoder;
3096	struct intel_digital_port *intel_dig_port;
3097	struct drm_connector *connector;
3098	struct drm_connector_list_iter conn_iter;
3099
3100	drm_connector_list_iter_begin(dev, &conn_iter);
3101	drm_for_each_connector_iter(connector, &conn_iter) {
3102		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3103			continue;
3104
3105		intel_encoder = intel_attached_encoder(connector);
3106		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3107			continue;
3108
3109		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3110		if (!intel_dig_port->dp.can_mst)
3111			continue;
3112
3113		seq_printf(m, "MST Source Port %c\n",
3114			   port_name(intel_dig_port->base.port));
3115		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3116	}
3117	drm_connector_list_iter_end(&conn_iter);
3118
3119	return 0;
3120}
3121
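/*
 * DP compliance "test active" knob. Writing 1 arms the compliance
 * handling on each connected DP connector; only the literal value 1 is
 * accepted to avoid accidental activation. A sketch of intended use
 * (node name assumed from the debugfs registration table later in
 * this file):
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_dp_test_active
 */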
3122static ssize_t i915_displayport_test_active_write(struct file *file,
3123						  const char __user *ubuf,
3124						  size_t len, loff_t *offp)
3125{
3126	char *input_buffer;
3127	int status = 0;
3128	struct drm_device *dev;
3129	struct drm_connector *connector;
3130	struct drm_connector_list_iter conn_iter;
3131	struct intel_dp *intel_dp;
3132	int val = 0;
3133
3134	dev = ((struct seq_file *)file->private_data)->private;
3135
3136	if (len == 0)
3137		return 0;
3138
3139	input_buffer = memdup_user_nul(ubuf, len);
3140	if (IS_ERR(input_buffer))
3141		return PTR_ERR(input_buffer);
3142
3143	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3144
3145	drm_connector_list_iter_begin(dev, &conn_iter);
3146	drm_for_each_connector_iter(connector, &conn_iter) {
3147		struct intel_encoder *encoder;
3148
3149		if (connector->connector_type !=
3150		    DRM_MODE_CONNECTOR_DisplayPort)
3151			continue;
3152
3153		encoder = to_intel_encoder(connector->encoder);
3154		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3155			continue;
3156
3157		if (encoder && connector->status == connector_status_connected) {
3158			intel_dp = enc_to_intel_dp(&encoder->base);
3159			status = kstrtoint(input_buffer, 10, &val);
3160			if (status < 0)
3161				break;
3162			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3163			/* To prevent erroneous activation of the compliance
3164			 * testing code, only accept an actual value of 1 here
3165			 */
3166			if (val == 1)
3167				intel_dp->compliance.test_active = 1;
3168			else
3169				intel_dp->compliance.test_active = 0;
3170		}
3171	}
3172	drm_connector_list_iter_end(&conn_iter);
3173	kfree(input_buffer);
3174	if (status < 0)
3175		return status;
3176
3177	*offp += len;
3178	return len;
3179}
3180
static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_active)
				seq_puts(m, "1");
			else
				seq_puts(m, "0");
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static int i915_displayport_test_active_open(struct inode *inode,
					     struct file *file)
{
	return single_open(file, i915_displayport_test_active_show,
			   inode->i_private);
}

static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};

static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);

static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);

static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values are in 0.5us units
		 * - latencies are in us on gen9+/vlv/chv/g4x
		 */
		if (INTEL_GEN(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}

static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev_priv);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev_priv);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev_priv);
}

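/*
 * A hedged usage sketch for the i915_*_wm_latency files, assuming a part
 * with five watermark levels (see wm_latency_show() for the units):
 *
 *   # echo "12 4 16 32 64" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 *
 * Exactly num_levels values must be supplied or the write fails with
 * -EINVAL.
 */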
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	u16 new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}

static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};

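/*
 * i915_wedged: reading reports 1 if the GT is terminally wedged, 0
 * otherwise. Writing an engine mask declares those engines hung and
 * triggers error handling; a hedged sketch (all-ones mask = all engines):
 *
 *   # echo 0xffffffffffffffff > /sys/kernel/debug/dri/0/i915_wedged
 */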
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;
	int ret = intel_gt_terminally_wedged(&i915->gt);

	switch (ret) {
	case -EIO:
		*val = 1;
		return 0;
	case 0:
		*val = 0;
		return 0;
	default:
		return ret;
	}
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* Flush any previous reset before applying for a new one */
	wait_event(i915->gt.reset.queue,
		   !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));

	intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
			      "Manually set wedged engine mask = %llx", val);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");

#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_RESET_ACTIVE	BIT(7)
#define DROP_RESET_SEQNO	BIT(8)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL | \
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO)
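
/*
 * A hedged usage sketch for i915_gem_drop_caches (bit values as defined
 * above; reading the file reports DROP_ALL):
 *
 *   # echo 0x3 > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 *
 * drops both the unbound and bound object caches.
 */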
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(&i915->gt),
		     I915_IDLE_ENGINES_TIMEOUT))
		intel_gt_set_wedged(&i915->gt);

	/* No need to check and wait for gpu resets; only libdrm auto-restarts
	 * ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_IDLE | DROP_RETIRE | DROP_RESET_SEQNO)) {
		int ret;

		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
		if (ret)
			return ret;

		/*
		 * To finish the flush of the idle_worker, we must complete
		 * the switch-to-kernel-context, which requires a double
		 * pass through wait_for_idle: first queues the switch,
		 * second waits for the switch.
		 */
		if (ret == 0 && val & (DROP_IDLE | DROP_ACTIVE))
			ret = i915_gem_wait_for_idle(i915,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED,
						     MAX_SCHEDULE_TIMEOUT);

		if (ret == 0 && val & DROP_IDLE)
			ret = i915_gem_wait_for_idle(i915,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED,
						     MAX_SCHEDULE_TIMEOUT);

		if (val & DROP_RETIRE)
			i915_retire_requests(i915);

		mutex_unlock(&i915->drm.struct_mutex);

		if (ret == 0 && val & DROP_IDLE)
			ret = intel_gt_pm_wait_for_idle(&i915->gt);
	}

	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(&i915->gt))
		intel_gt_handle_error(&i915->gt, ALL_ENGINES, 0, NULL);

	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_IDLE) {
		flush_delayed_work(&i915->gem.retire_work);
		flush_work(&i915->gem.idle_work);
	}

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");

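/*
 * i915_cache_sharing: uncore snoop policy (MBCUNIT_SNPCR) on GEN6/7 only;
 * accepted values are 0..3. A hedged example:
 *
 *   # echo 1 > /sys/kernel/debug/dri/0/i915_cache_sharing
 */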
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;
	u32 snpcr = 0;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		u32 snpcr;

		/* Update the cache sharing policy here as well */
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
		snpcr &= ~GEN6_MBC_SNPCR_MASK;
		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
	}

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");

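/*
 * The *_sseu_device_status() helpers below read the hardware power-gating
 * signal/ACK registers to reconstruct which slices, subslices and EUs are
 * currently powered up. A set power-gate bit means the unit is gated off,
 * and EUs are tracked in pairs, hence the "? 0 : 2" terms and the
 * "2 * hweight32()" counting.
 */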
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}

static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and reads only valid
		 * bits for those registers, excluding reserved, although this
		 * seems wrong because it would leave many subslices without
		 * ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}

static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2 * s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[s] |= BIT(ss);
			}

			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}

static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice =
			RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			sseu->subslice_mask[s] =
				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
		}
		sseu->eu_total = sseu->eu_per_subslice *
				 intel_sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu =
				RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}

static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   intel_sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
			   s, intel_sseu_subslices_per_slice(sseu, s));
	}
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}

static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct sseu_dev_info sseu;
	intel_wakeref_t wakeref;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
	sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
	sseu.max_eus_per_subslice =
		RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_sseu_device_status(dev_priv, &sseu);
		else if (IS_BROADWELL(dev_priv))
			broadwell_sseu_device_status(dev_priv, &sseu);
		else if (IS_GEN(dev_priv, 9))
			gen9_sseu_device_status(dev_priv, &sseu);
		else if (INTEL_GEN(dev_priv) >= 10)
			gen10_sseu_device_status(dev_priv, &sseu);
	}

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}

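/*
 * Hedged usage note: holding i915_forcewake_user open pins a runtime-pm
 * wakeref and user forcewake for the lifetime of the file descriptor:
 *
 *   # exec 3</sys/kernel/debug/dri/0/i915_forcewake_user    # grab
 *   # exec 3<&-                                             # release
 */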
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	if (INTEL_GEN(i915) < 6)
		return 0;

	file->private_data =
		(void *)(uintptr_t)intel_runtime_pm_get(&i915->runtime_pm);
	intel_uncore_forcewake_user_get(&i915->uncore);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	if (INTEL_GEN(i915) < 6)
		return 0;

	intel_uncore_forcewake_user_put(&i915->uncore);
	intel_runtime_pm_put(&i915->runtime_pm,
			     (intel_wakeref_t)(uintptr_t)file->private_data);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	/* Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet
	 */
	intel_synchronize_irq(dev_priv);
	flush_work(&dev_priv->hotplug.dig_port_work);
	flush_delayed_work(&dev_priv->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}

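/*
 * i915_hpd_storm_ctl accepts a decimal threshold, 0 to disable detection,
 * or "reset" to restore HPD_STORM_DEFAULT_THRESHOLD. A hedged example:
 *
 *   # echo 5 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 */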
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
			      new_threshold);
	else
		DRM_DEBUG_KMS("Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}

static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}

static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};

static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Enabled: %s\n",
		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));

	return 0;
}

static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}

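/*
 * Like i915_hpd_storm_ctl, but the write below takes a boolean ("0"/"1")
 * or "reset", which re-derives the platform default from HAS_DP_MST().
 */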
static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	char *newline;
	char tmp[16];
	int i;
	bool new_state;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	/* Reset to the "default" state for this system */
	if (strcmp(tmp, "reset") == 0)
		new_state = !HAS_DP_MST(dev_priv);
	else if (kstrtobool(tmp, &new_state) != 0)
		return -EINVAL;

	DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
		      new_state ? "En" : "Dis");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_short_storm_enabled = new_state;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}

static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};

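/*
 * i915_drrs_ctl: a hedged example; any non-zero value enables eDP DRRS on
 * all active DRRS-capable CRTCs, zero disables it:
 *
 *   # echo 1 > /sys/kernel/debug/dri/0/i915_drrs_ctl
 */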
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		if (!crtc_state->base.active ||
		    !crtc_state->has_drrs)
			goto out;

		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			if (!(crtc_state->base.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(connector);
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
					 val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");

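/*
 * i915_fifo_underrun_reset: writing a truthy value ("1"/"y") re-arms FIFO
 * underrun reporting on every active pipe and resets the FBC underrun
 * state; anything falsy is accepted but does nothing.
 */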
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}

static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};

static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};

int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int i;

	debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
			    to_i915(minor->dev), &i915_forcewake_fops);

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		debugfs_create_file(i915_debugfs_files[i].name,
				    S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    to_i915(minor->dev),
				    i915_debugfs_files[i].fops);
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};

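/*
 * A hedged read example for the per-connector i915_dpcd file (the
 * connector directory name is illustrative):
 *
 *   # cat /sys/kernel/debug/dri/0/DP-1/i915_dpcd
 *
 * Each block from i915_dpcd_debug[] prints as "<offset>: <hex bytes>".
 */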
static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	u8 buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err < 0)
			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
		else
			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_dpcd);

static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_panel);

static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_connector *intel_connector = to_intel_connector(connector);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	/* HDCP is supported by the connector only when a shim is registered */
	if (!intel_connector->hdcp.shim)
		return -EINVAL;

	seq_printf(m, "%s:%d HDCP version: ", connector->name,
		   connector->base.id);
	intel_hdcp_info(m, intel_connector);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);

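/*
 * i915_dsc_fec_support (registered on gen10+ DP/eDP connectors): reading
 * reports DSC/FEC sink support and current state; writing a boolean forces
 * DSC enable. A hedged example (connector name illustrative):
 *
 *   # echo 1 > /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 */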
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc_params.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   yesno(intel_dp->force_dsc_en));
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

static ssize_t i915_dsc_fec_support_write(struct file *file,
					  const char __user *ubuf,
					  size_t len, loff_t *offp)
{
	bool dsc_enable = false;
	int ret;
	struct drm_connector *connector =
		((struct seq_file *)file->private_data)->private;
	struct intel_encoder *encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (len == 0)
		return 0;

	DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
			 len);

	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
	if (ret < 0)
		return ret;

	DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
			 (dsc_enable) ? "true" : "false");
	intel_dp->force_dsc_en = dsc_enable;

	*offp += len;
	return len;
}

static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}

static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};

/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root,
				    connector, &i915_dpcd_fops);

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);
		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
				    connector, &i915_psr_sink_status_fops);
	}

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
				    connector, &i915_hdcp_sink_capability_fops);
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
				    connector, &i915_dsc_fec_support_fops);

	return 0;
}