/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define DRM_I915_RING_DEBUG 1

#if defined(CONFIG_DEBUG_FS)

enum {
	ACTIVE_LIST,
	FLUSHING_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
	DEFERRED_FREE_LIST,
};

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

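/*
 * Dump the device feature flags from intel_device_info; the B() macro
 * inside prints one "flag: yes/no" line per field.
 */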
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
	B(is_mobile);
	B(is_i85x);
	B(is_i915g);
	B(is_i945gm);
	B(is_g33);
	B(need_gfx_hws);
	B(is_g4x);
	B(is_pineview);
	B(is_broadwater);
	B(is_crestline);
	B(has_fbc);
	B(has_pipe_cxsr);
	B(has_hotplug);
	B(cursor_needs_physical);
	B(has_overlay);
	B(overlay_needs_physical);
	B(supports_tv);
	B(has_bsd_ring);
	B(has_blt_ring);
#undef B

	return 0;
}

static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (obj->pin_count > 0)
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static const char *cache_level_str(int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return " snooped (LLC)";
	case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
	default: return "";
	}
}

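/*
 * Print a one-line summary of a GEM object: pointer, pin/tiling flags,
 * size, read/write domains and last seqnos, followed by optional name,
 * fence, GTT placement, mappability and owning-ring annotations.
 */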
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   obj->base.size,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_rendering_seqno,
		   obj->last_fenced_seqno,
		   cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	if (obj->gtt_space != NULL)
		seq_printf(m, " (gtt offset: %08x, size: %08x)",
			   obj->gtt_offset, (unsigned int)obj->gtt_space->size);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}

static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	switch (list) {
	case ACTIVE_LIST:
		seq_printf(m, "Active:\n");
		head = &dev_priv->mm.active_list;
		break;
	case INACTIVE_LIST:
		seq_printf(m, "Inactive:\n");
		head = &dev_priv->mm.inactive_list;
		break;
	case PINNED_LIST:
		seq_printf(m, "Pinned:\n");
		head = &dev_priv->mm.pinned_list;
		break;
	case FLUSHING_LIST:
		seq_printf(m, "Flushing:\n");
		head = &dev_priv->mm.flushing_list;
		break;
	case DEFERRED_FREE_LIST:
		seq_printf(m, "Deferred free:\n");
		head = &dev_priv->mm.deferred_free_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, head, mm_list) {
		seq_printf(m, "   ");
		describe_obj(m, obj);
		seq_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

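/*
 * Accumulate per-list totals into the caller's local variables: the
 * macro references obj, count, size, mappable_count and mappable_size
 * directly, so callers must declare all of them.
 */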
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += obj->gtt_space->size; \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += obj->gtt_space->size; \
			++mappable_count; \
		} \
	} \
} while (0)

static int i915_gem_object_info(struct seq_file *m, void* data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count;
	size_t size, mappable_size;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.gtt_list, gtt_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.active_list, mm_list);
	count_objects(&dev_priv->mm.flushing_list, mm_list);
	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.pinned_list, mm_list);
	seq_printf(m, "  %u [%u] pinned objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.inactive_list, mm_list);
	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.deferred_free_list, mm_list);
	seq_printf(m, "  %u [%u] freed objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		if (obj->fault_mappable) {
			size += obj->gtt_space->size;
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += obj->gtt_space->size;
			++mappable_count;
		}
	}
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%zu] gtt total\n",
		   dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void* data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		seq_printf(m, "   ");
		describe_obj(m, obj);
		seq_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}

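/*
 * For each CRTC, report whether a page flip is queued or still waiting
 * for vsync, along with the old and new framebuffer GTT offsets.
 */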
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (!work->pending) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_printf(m, "Stall check enabled, ");
			else
				seq_printf(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", work->pending);

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
				if (obj)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;
				if (obj)
					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *gem_request;
	int ret, count;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	if (!list_empty(&dev_priv->ring[RCS].request_list)) {
		seq_printf(m, "Render requests:\n");
		list_for_each_entry(gem_request,
				    &dev_priv->ring[RCS].request_list,
				    list) {
			seq_printf(m, "    %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	if (!list_empty(&dev_priv->ring[VCS].request_list)) {
		seq_printf(m, "BSD requests:\n");
		list_for_each_entry(gem_request,
				    &dev_priv->ring[VCS].request_list,
				    list) {
			seq_printf(m, "    %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	if (!list_empty(&dev_priv->ring[BCS].request_list)) {
		seq_printf(m, "BLT requests:\n");
		list_for_each_entry(gem_request,
				    &dev_priv->ring[BCS].request_list,
				    list) {
			seq_printf(m, "    %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_printf(m, "No requests\n");

	return 0;
}

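/*
 * Print the ring's current, waited-on and IRQ-posted sequence numbers,
 * provided the ring implements get_seqno.
 */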
static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_ring_buffer *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %d\n",
			   ring->name, ring->get_seqno(ring));
		seq_printf(m, "Waiter sequence (%s):  %d\n",
			   ring->name, ring->waiting_seqno);
		seq_printf(m, "IRQ sequence (%s):     %d\n",
			   ring->name, ring->irq_seqno);
	}
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 0; i < I915_NUM_RINGS; i++)
		i915_ring_seqno_info(m, &dev_priv->ring[i]);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}


static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}
	seq_printf(m, "Interrupts received: %d\n",
		   atomic_read(&dev_priv->irq_received));
	for (i = 0; i < I915_NUM_RINGS; i++) {
		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			seq_printf(m, "Graphics Interrupt mask (%s):	%08x\n",
				   dev_priv->ring[i].name,
				   I915_READ_IMR(&dev_priv->ring[i]));
		}
		i915_ring_seqno_info(m, &dev_priv->ring[i]);
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fenced object[%2d] = ", i);
		if (obj == NULL)
			seq_printf(m, "unused");
		else
			describe_obj(m, obj);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

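/*
 * Hex dump of the ring's hardware status page, four dwords per line;
 * the loop only covers the first 256 dwords of the 4KiB page.
 */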
static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const volatile u32 __iomem *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = (volatile u32 __iomem *)ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}

static void i915_dump_object(struct seq_file *m,
			     struct io_mapping *mapping,
			     struct drm_i915_gem_object *obj)
{
	int page, page_count, i;

	page_count = obj->base.size / PAGE_SIZE;
	for (page = 0; page < page_count; page++) {
		u32 *mem = io_mapping_map_wc(mapping,
					     obj->gtt_offset + page * PAGE_SIZE);
		for (i = 0; i < PAGE_SIZE; i += 4)
			seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
		io_mapping_unmap(mem);
	}
}

static int i915_batchbuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
			i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
		}
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_ringbuffer_data(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	if (!ring->obj) {
		seq_printf(m, "No ringbuffer setup\n");
	} else {
		const u8 __iomem *virt = ring->virtual_start;
		uint32_t off;

		for (off = 0; off < ring->size; off += 4) {
			uint32_t *ptr = (uint32_t *)(virt + off);
			seq_printf(m, "%08x :  %08x\n", off, *ptr);
		}
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_ringbuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	if (ring->size == 0)
		return 0;

	seq_printf(m, "Ring %s:\n", ring->name);
	seq_printf(m, "  Head :    %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
	seq_printf(m, "  Tail :    %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
	seq_printf(m, "  Size :    %08x\n", ring->size);
	seq_printf(m, "  Active :  %08x\n", intel_ring_get_active_head(ring));
	seq_printf(m, "  NOPID :   %08x\n", I915_READ_NOPID(ring));
	if (IS_GEN6(dev)) {
		seq_printf(m, "  Sync 0 :   %08x\n", I915_READ_SYNC_0(ring));
		seq_printf(m, "  Sync 1 :   %08x\n", I915_READ_SYNC_1(ring));
	}
	seq_printf(m, "  Control : %08x\n", I915_READ_CTL(ring));
	seq_printf(m, "  Start :   %08x\n", I915_READ_START(ring));

	return 0;
}

static const char *ring_str(int ring)
{
	switch (ring) {
	case RING_RENDER: return " render";
	case RING_BSD: return " bsd";
	case RING_BLT: return " blt";
	default: return "";
	}
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}

static void print_error_buffers(struct seq_file *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	seq_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		seq_printf(m, "  %08x %8u %04x %04x %08x%s%s%s%s%s%s",
			   err->gtt_offset,
			   err->size,
			   err->read_domains,
			   err->write_domain,
			   err->seqno,
			   pin_flag(err->pinned),
			   tiling_flag(err->tiling),
			   dirty_flag(err->dirty),
			   purgeable_flag(err->purgeable),
			   ring_str(err->ring),
			   cache_level_str(err->cache_level));

		if (err->name)
			seq_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			seq_printf(m, " (fence: %d)", err->fence_reg);

		seq_printf(m, "\n");
		err++;
	}
}

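/*
 * Dump the error state captured at the last GPU hang: per-ring register
 * snapshots, fence values, the recorded active/pinned buffer lists, and
 * full hex dumps of the captured batch and ring buffers.
 */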
static int i915_error_state(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, page, offset, elt;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (!dev_priv->first_error) {
		seq_printf(m, "no error state collected\n");
		goto out;
	}

	error = dev_priv->first_error;

	seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
	seq_printf(m, "EIR: 0x%08x\n", error->eir);
	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "ERROR: 0x%08x\n", error->error);
		seq_printf(m, "Blitter command stream:\n");
		seq_printf(m, "  ACTHD:    0x%08x\n", error->bcs_acthd);
		seq_printf(m, "  IPEIR:    0x%08x\n", error->bcs_ipeir);
		seq_printf(m, "  IPEHR:    0x%08x\n", error->bcs_ipehr);
		seq_printf(m, "  INSTDONE: 0x%08x\n", error->bcs_instdone);
		seq_printf(m, "  seqno:    0x%08x\n", error->bcs_seqno);
		seq_printf(m, "Video (BSD) command stream:\n");
		seq_printf(m, "  ACTHD:    0x%08x\n", error->vcs_acthd);
		seq_printf(m, "  IPEIR:    0x%08x\n", error->vcs_ipeir);
		seq_printf(m, "  IPEHR:    0x%08x\n", error->vcs_ipehr);
		seq_printf(m, "  INSTDONE: 0x%08x\n", error->vcs_instdone);
		seq_printf(m, "  seqno:    0x%08x\n", error->vcs_seqno);
	}
	seq_printf(m, "Render command stream:\n");
	seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
	seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir);
	seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
	seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
	if (INTEL_INFO(dev)->gen >= 4) {
		seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
	}
	seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm);
	seq_printf(m, "  seqno: 0x%08x\n", error->seqno);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		seq_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);

	if (error->active_bo)
		print_error_buffers(m, "Active",
				    error->active_bo,
				    error->active_bo_count);

	if (error->pinned_bo)
		print_error_buffers(m, "Pinned",
				    error->pinned_bo,
				    error->pinned_bo_count);

	for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
		if (error->batchbuffer[i]) {
			struct drm_i915_error_object *obj = error->batchbuffer[i];

			seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
					offset += 4;
				}
			}
		}
	}

	for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) {
		if (error->ringbuffer[i]) {
			struct drm_i915_error_object *obj = error->ringbuffer[i];
			seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					seq_printf(m, "%08x :  %08x\n",
						   offset,
						   obj->pages[page][elt]);
					offset += 4;
				}
			}
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	return 0;
}

static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 crstanddelay = I915_READ16(CRSTANDVID);

	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}

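/*
 * Report the current and available render P-states. Ironlake reads
 * MEMSWCTL/MEMSTAT_ILK directly; Gen6+ samples the RPS counters under
 * forcewake, since RPSTAT1 lives in the GT power well.
 */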
static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpstat;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		gen6_gt_force_wake_get(dev_priv);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);

		gen6_gt_force_wake_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
						GEN6_CAGF_SHIFT) * 50);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * 50);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * 50);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * 50);
	} else {
		seq_printf(m, "no P-state info available\n");
	}

	return 0;
}

static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 delayfreq;
	int i;

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}

	return 0;
}

static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 inttoext;
	int i;

	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
	}

	return 0;
}

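/*
 * Decode MEMMODECTL/RSTDBYCTL: boost settings, whether frequency
 * switching is under hardware or software control, and the current
 * render standby (RSX) state.
 */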
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u32 rstdbyctl = I915_READ(RSTDBYCTL);
	u16 crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		   "yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	seq_printf(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_printf(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_printf(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_printf(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_printf(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_printf(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_printf(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_printf(m, "unknown\n");
		break;
	}

	return 0;
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!I915_HAS_FBC(dev)) {
		seq_printf(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	if (intel_fbc_enabled(dev)) {
		seq_printf(m, "FBC enabled\n");
	} else {
		seq_printf(m, "FBC disabled: ");
		switch (dev_priv->no_fbc_reason) {
		case FBC_NO_OUTPUT:
			seq_printf(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_printf(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_printf(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_printf(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_printf(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_printf(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_printf(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_printf(m, "disabled per module param (default off)");
			break;
		default:
			seq_printf(m, "unknown reason");
		}
		seq_printf(m, "\n");
	}
	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}

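/*
 * For each GPU frequency step between min_delay and max_delay, query
 * the PCU through the pcode mailbox for the matching effective CPU
 * frequency and print the pair in MHz.
 */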
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		seq_printf(m, "unsupported on this chipset\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");

	for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
	     gpu_freq++) {
		I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
			   GEN6_PCODE_READ_MIN_FREQ_TABLE);
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			      GEN6_PCODE_READY) == 0, 10)) {
			DRM_ERROR("pcode read of freq table timed out\n");
			continue;
		}
		ia_freq = I915_READ(GEN6_PCODE_DATA);
		seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gfxec(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

	return 0;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_fbdev *ifbdev;
	struct intel_framebuffer *fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel);
	describe_obj(m, fb->obj);
	seq_printf(m, "\n");

	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (&fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel);
		describe_obj(m, fb->obj);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	if (dev_priv->pwrctx) {
		seq_printf(m, "power context ");
		describe_obj(m, dev_priv->pwrctx);
		seq_printf(m, "\n");
	}

	if (dev_priv->renderctx) {
		seq_printf(m, "render context ");
		describe_obj(m, dev_priv->renderctx);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	seq_printf(m, "forcewake count = %d\n",
		   atomic_read(&dev_priv->forcewake_count));

	return 0;
}

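/*
 * The i915_wedged debugfs file: reads report the current wedged status,
 * writes inject a simulated GPU hang via i915_handle_error(), e.g.
 * (path may vary by minor): echo 1 > /sys/kernel/debug/dri/0/i915_wedged
 */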
static int
i915_wedged_open(struct inode *inode,
		 struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

static ssize_t
i915_wedged_read(struct file *filp,
		 char __user *ubuf,
		 size_t max,
		 loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	int len;

	len = snprintf(buf, sizeof (buf),
		       "wedged :  %d\n",
		       atomic_read(&dev_priv->mm.wedged));

	if (len > sizeof (buf))
		len = sizeof (buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_wedged_write(struct file *filp,
		  const char __user *ubuf,
		  size_t cnt,
		  loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	char buf[20];
	int val = 1;

	if (cnt > 0) {
		if (cnt > sizeof (buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	DRM_INFO("Manually setting wedged to %d\n", val);
	i915_handle_error(dev, val);

	return cnt;
}

static const struct file_operations i915_wedged_fops = {
	.owner = THIS_MODULE,
	.open = i915_wedged_open,
	.read = i915_wedged_read,
	.write = i915_wedged_write,
	.llseek = default_llseek,
};

static int
i915_max_freq_open(struct inode *inode,
		   struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

static ssize_t
i915_max_freq_read(struct file *filp,
		   char __user *ubuf,
		   size_t max,
		   loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	int len;

	len = snprintf(buf, sizeof (buf),
		       "max freq: %d\n", dev_priv->max_delay * 50);

	if (len > sizeof (buf))
		len = sizeof (buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_max_freq_write(struct file *filp,
		    const char __user *ubuf,
		    size_t cnt,
		    loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	char buf[20];
	int val = 1;

	if (cnt > 0) {
		if (cnt > sizeof (buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	dev_priv->max_delay = val / 50;

	gen6_set_rps(dev, val / 50);

	return cnt;
}

static const struct file_operations i915_max_freq_fops = {
	.owner = THIS_MODULE,
	.open = i915_max_freq_open,
	.read = i915_max_freq_read,
	.write = i915_max_freq_write,
	.llseek = default_llseek,
};

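/*
 * The i915_cache_sharing debugfs file exposes the uncore snoop policy
 * in GEN6_MBCUNIT_SNPCR: reads print the current value, writes accept
 * a policy in the range [0, 3].
 */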
static int
i915_cache_sharing_open(struct inode *inode,
			struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

static ssize_t
i915_cache_sharing_read(struct file *filp,
			char __user *ubuf,
			size_t max,
			loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	u32 snpcr;
	int len;

	mutex_lock(&dev_priv->dev->struct_mutex);
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	mutex_unlock(&dev_priv->dev->struct_mutex);

	len = snprintf(buf, sizeof (buf),
		       "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
		       GEN6_MBC_SNPCR_SHIFT);

	if (len > sizeof (buf))
		len = sizeof (buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_cache_sharing_write(struct file *filp,
			 const char __user *ubuf,
			 size_t cnt,
			 loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	char buf[20];
	u32 snpcr;
	int val = 1;

	if (cnt > 0) {
		if (cnt > sizeof (buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	if (val < 0 || val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	return cnt;
}

static const struct file_operations i915_cache_sharing_fops = {
	.owner = THIS_MODULE,
	.open = i915_cache_sharing_open,
	.read = i915_cache_sharing_read,
	.write = i915_cache_sharing_write,
	.llseek = default_llseek,
};

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;
	list_add(&node->list, &minor->debugfs_nodes.list);

	return 0;
}

static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_wedged",
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  &i915_wedged_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
}

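/*
 * i915_forcewake_user holds a forcewake reference for as long as the
 * file is kept open, keeping the GT awake so its registers stay
 * readable from userspace; the reference is dropped on release.
 */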
1528static int i915_forcewake_open(struct inode *inode, struct file *file)
1529{
1530	struct drm_device *dev = inode->i_private;
1531	struct drm_i915_private *dev_priv = dev->dev_private;
1532	int ret;
1533
1534	if (!IS_GEN6(dev))
1535		return 0;
1536
1537	ret = mutex_lock_interruptible(&dev->struct_mutex);
1538	if (ret)
1539		return ret;
1540	gen6_gt_force_wake_get(dev_priv);
1541	mutex_unlock(&dev->struct_mutex);
1542
1543	return 0;
1544}
1545
1546int i915_forcewake_release(struct inode *inode, struct file *file)
1547{
1548	struct drm_device *dev = inode->i_private;
1549	struct drm_i915_private *dev_priv = dev->dev_private;
1550
1551	if (!IS_GEN6(dev))
1552		return 0;
1553
1554	/*
1555	 * It's bad that we can potentially hang userspace if struct_mutex gets
1556	 * forever stuck.  However, if we cannot acquire this lock it means that
1557	 * almost certainly the driver has hung, is not unload-able. Therefore
1558	 * hanging here is probably a minor inconvenience not to be seen my
1559	 * almost every user.
1560	 */
1561	mutex_lock(&dev->struct_mutex);
1562	gen6_gt_force_wake_put(dev_priv);
1563	mutex_unlock(&dev->struct_mutex);
1564
1565	return 0;
1566}
1567
1568static const struct file_operations i915_forcewake_fops = {
1569	.owner = THIS_MODULE,
1570	.open = i915_forcewake_open,
1571	.release = i915_forcewake_release,
1572};
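/*
 * Editorial usage sketch (not part of the original source): since
 * i915_forcewake_open() takes the forcewake reference and
 * i915_forcewake_release() drops it, userspace can keep the GT awake for a
 * register-poking session simply by holding the file open. Assuming debugfs
 * is mounted at /sys/kernel/debug and the device is minor 0:
 *
 *	exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user  # forcewake get
 *	# ... inspect GT registers while the GT cannot power down ...
 *	exec 3<&-                                            # forcewake put
 */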
1573
1574static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
1575{
1576	struct drm_device *dev = minor->dev;
1577	struct dentry *ent;
1578
1579	ent = debugfs_create_file("i915_forcewake_user",
1580				  S_IRUSR,
1581				  root, dev,
1582				  &i915_forcewake_fops);
1583	if (IS_ERR(ent))
1584		return PTR_ERR(ent);
1585
1586	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
1587}
1588
1589static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor)
1590{
1591	struct drm_device *dev = minor->dev;
1592	struct dentry *ent;
1593
1594	ent = debugfs_create_file("i915_max_freq",
1595				  S_IRUGO | S_IWUSR,
1596				  root, dev,
1597				  &i915_max_freq_fops);
1598	if (IS_ERR(ent))
1599		return PTR_ERR(ent);
1600
1601	return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops);
1602}
1603
1604static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor)
1605{
1606	struct drm_device *dev = minor->dev;
1607	struct dentry *ent;
1608
1609	ent = debugfs_create_file("i915_cache_sharing",
1610				  S_IRUGO | S_IWUSR,
1611				  root, dev,
1612				  &i915_cache_sharing_fops);
1613	if (IS_ERR(ent))
1614		return PTR_ERR(ent);
1615
1616	return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops);
1617}
1618
1619static struct drm_info_list i915_debugfs_list[] = {
1620	{"i915_capabilities", i915_capabilities, 0},
1621	{"i915_gem_objects", i915_gem_object_info, 0},
1622	{"i915_gem_gtt", i915_gem_gtt_info, 0},
1623	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
1624	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
1625	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
1626	{"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
1627	{"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
1628	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
1629	{"i915_gem_request", i915_gem_request_info, 0},
1630	{"i915_gem_seqno", i915_gem_seqno_info, 0},
1631	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
1632	{"i915_gem_interrupt", i915_interrupt_info, 0},
1633	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
1634	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
1635	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
1636	{"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
1637	{"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
1638	{"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
1639	{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
1640	{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
1641	{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
1642	{"i915_batchbuffers", i915_batchbuffer_info, 0},
1643	{"i915_error_state", i915_error_state, 0},
1644	{"i915_rstdby_delays", i915_rstdby_delays, 0},
1645	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
1646	{"i915_delayfreq_table", i915_delayfreq_table, 0},
1647	{"i915_inttoext_table", i915_inttoext_table, 0},
1648	{"i915_drpc_info", i915_drpc_info, 0},
1649	{"i915_emon_status", i915_emon_status, 0},
1650	{"i915_ring_freq_table", i915_ring_freq_table, 0},
1651	{"i915_gfxec", i915_gfxec, 0},
1652	{"i915_fbc_status", i915_fbc_status, 0},
1653	{"i915_sr_status", i915_sr_status, 0},
1654	{"i915_opregion", i915_opregion, 0},
1655	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
1656	{"i915_context_status", i915_context_status, 0},
1657	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
1658};
1659#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
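/*
 * Editorial note: drm_debugfs_create_files() (called below) turns each
 * drm_info_list entry above into a read-only seq_file under the DRM minor's
 * debugfs directory, conventionally /sys/kernel/debug/dri/<minor>. A sketch
 * of reading one entry (mount point assumed):
 *
 *	cat /sys/kernel/debug/dri/0/i915_capabilities
 */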
1660
1661int i915_debugfs_init(struct drm_minor *minor)
1662{
1663	int ret;
1664
1665	ret = i915_wedged_create(minor->debugfs_root, minor);
1666	if (ret)
1667		return ret;
1668
1669	ret = i915_forcewake_create(minor->debugfs_root, minor);
1670	if (ret)
1671		return ret;
1672	ret = i915_max_freq_create(minor->debugfs_root, minor);
1673	if (ret)
1674		return ret;
1675	ret = i915_cache_sharing_create(minor->debugfs_root, minor);
1676	if (ret)
1677		return ret;
1678
1679	return drm_debugfs_create_files(i915_debugfs_list,
1680					I915_DEBUGFS_ENTRIES,
1681					minor->debugfs_root, minor);
1682}
1683
1684void i915_debugfs_cleanup(struct drm_minor *minor)
1685{
1686	drm_debugfs_remove_files(i915_debugfs_list,
1687				 I915_DEBUGFS_ENTRIES, minor);
1688	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
1689				 1, minor);
1690	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
1691				 1, minor);
1692	drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
1693				 1, minor);
1694	drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
1695				 1, minor);
1696}
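/*
 * Editorial note: the (struct drm_info_list *) casts above are deliberate.
 * drm_add_fake_info_node() stored each fops pointer as the node's info_ent
 * key, so passing the same pointer back lets drm_debugfs_remove_files()
 * match and free the fake nodes it created.
 */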
1697
1698#endif /* CONFIG_DEBUG_FS */
v4.6
  29#include <linux/seq_file.h>
  30#include <linux/circ_buf.h>
  31#include <linux/ctype.h>
  32#include <linux/debugfs.h>
  33#include <linux/slab.h>
  34#include <linux/export.h>
  35#include <linux/list_sort.h>
  36#include <asm/msr-index.h>
  37#include <drm/drmP.h>
  38#include "intel_drv.h"
  39#include "intel_ringbuffer.h"
  40#include <drm/i915_drm.h>
  41#include "i915_drv.h"
  42
  43enum {
  44	ACTIVE_LIST,
  45	INACTIVE_LIST,
  46	PINNED_LIST,
  47};
  48
  49/* As the drm_debugfs_init() routines are called before dev->dev_private is
  50 * allocated we need to hook into the minor for release. */
  51static int
  52drm_add_fake_info_node(struct drm_minor *minor,
  53		       struct dentry *ent,
  54		       const void *key)
  55{
  56	struct drm_info_node *node;
  57
  58	node = kmalloc(sizeof(*node), GFP_KERNEL);
  59	if (node == NULL) {
  60		debugfs_remove(ent);
  61		return -ENOMEM;
  62	}
  63
  64	node->minor = minor;
  65	node->dent = ent;
  66	node->info_ent = (void *) key;
  67
  68	mutex_lock(&minor->debugfs_lock);
  69	list_add(&node->list, &minor->debugfs_list);
  70	mutex_unlock(&minor->debugfs_lock);
  71
  72	return 0;
  73}
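/*
 * Editorial sketch of the typical pairing (the file name and fops below are
 * placeholders, not from this source): a driver-private fops file is first
 * created with debugfs_create_file() and then hooked into the minor so that
 * drm_debugfs_remove_files() can find and release it later:
 *
 *	ent = debugfs_create_file("i915_example", S_IRUSR,
 *				  minor->debugfs_root, dev, &example_fops);
 *	if (IS_ERR(ent))
 *		return PTR_ERR(ent);
 *	return drm_add_fake_info_node(minor, ent, &example_fops);
 */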
  74
  75static int i915_capabilities(struct seq_file *m, void *data)
  76{
  77	struct drm_info_node *node = m->private;
  78	struct drm_device *dev = node->minor->dev;
  79	const struct intel_device_info *info = INTEL_INFO(dev);
  80
  81	seq_printf(m, "gen: %d\n", info->gen);
  82	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
  83#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
  84#define SEP_SEMICOLON ;
  85	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
  86#undef PRINT_FLAG
  87#undef SEP_SEMICOLON
  88
  89	return 0;
  90}
  91
  92static const char *get_pin_flag(struct drm_i915_gem_object *obj)
  93{
  94	if (obj->pin_display)
  95		return "p";
  96	else
  97		return " ";
  98}
  99
 100static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
 101{
 102	switch (obj->tiling_mode) {
 103	default:
 104	case I915_TILING_NONE: return " ";
 105	case I915_TILING_X: return "X";
 106	case I915_TILING_Y: return "Y";
 107	}
 108}
 109
 110static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
 111{
 112	return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
 113}
 114
 115static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
 116{
 117	u64 size = 0;
 118	struct i915_vma *vma;
 119
 120	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 121		if (vma->is_ggtt && drm_mm_node_allocated(&vma->node))
 122			size += vma->node.size;
 123	}
 124
 125	return size;
 126}
 127
 128static void
 129describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 130{
 131	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 132	struct intel_engine_cs *ring;
 133	struct i915_vma *vma;
 134	int pin_count = 0;
 135	int i;
 136
 137	seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
 138		   &obj->base,
 139		   obj->active ? "*" : " ",
 140		   get_pin_flag(obj),
 141		   get_tiling_flag(obj),
 142		   get_global_flag(obj),
 143		   obj->base.size / 1024,
 144		   obj->base.read_domains,
 145		   obj->base.write_domain);
 146	for_each_ring(ring, dev_priv, i)
 147		seq_printf(m, "%x ",
 148				i915_gem_request_get_seqno(obj->last_read_req[i]));
 149	seq_printf(m, "] %x %x%s%s%s",
 150		   i915_gem_request_get_seqno(obj->last_write_req),
 151		   i915_gem_request_get_seqno(obj->last_fenced_req),
 152		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
 153		   obj->dirty ? " dirty" : "",
 154		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 155	if (obj->base.name)
 156		seq_printf(m, " (name: %d)", obj->base.name);
 157	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 158		if (vma->pin_count > 0)
 159			pin_count++;
 160	}
 161	seq_printf(m, " (pinned x %d)", pin_count);
 162	if (obj->pin_display)
 163		seq_printf(m, " (display)");
 164	if (obj->fence_reg != I915_FENCE_REG_NONE)
 165		seq_printf(m, " (fence: %d)", obj->fence_reg);
 166	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 167		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
 168			   vma->is_ggtt ? "g" : "pp",
 169			   vma->node.start, vma->node.size);
 170		if (vma->is_ggtt)
 171			seq_printf(m, ", type: %u", vma->ggtt_view.type);
 172		seq_puts(m, ")");
 173	}
 174	if (obj->stolen)
 175		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
 176	if (obj->pin_display || obj->fault_mappable) {
 177		char s[3], *t = s;
 178		if (obj->pin_display)
 179			*t++ = 'p';
 180		if (obj->fault_mappable)
 181			*t++ = 'f';
 182		*t = '\0';
 183		seq_printf(m, " (%s mappable)", s);
 184	}
 185	if (obj->last_write_req != NULL)
 186		seq_printf(m, " (%s)",
 187			   i915_gem_request_get_ring(obj->last_write_req)->name);
 188	if (obj->frontbuffer_bits)
 189		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
 190}
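/*
 * Editorial legend for the line emitted above: the flag columns are, in
 * order, active ('*'), pinned for display ('p'), tiling ('X'/'Y') and bound
 * in the global GTT ('g'), followed by the object size, read/write domains
 * and the per-ring last-read seqnos between the brackets.
 */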
 191
 192static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
 193{
 194	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
 195	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
 196	seq_putc(m, ' ');
 197}
 198
 199static int i915_gem_object_list_info(struct seq_file *m, void *data)
 200{
 201	struct drm_info_node *node = m->private;
 202	uintptr_t list = (uintptr_t) node->info_ent->data;
 203	struct list_head *head;
 204	struct drm_device *dev = node->minor->dev;
 205	struct drm_i915_private *dev_priv = dev->dev_private;
 206	struct i915_address_space *vm = &dev_priv->gtt.base;
 207	struct i915_vma *vma;
 208	u64 total_obj_size, total_gtt_size;
 209	int count, ret;
 210
 211	ret = mutex_lock_interruptible(&dev->struct_mutex);
 212	if (ret)
 213		return ret;
 214
 215	/* FIXME: the user of this interface might want more than just GGTT */
 216	switch (list) {
 217	case ACTIVE_LIST:
 218		seq_puts(m, "Active:\n");
 219		head = &vm->active_list;
 220		break;
 221	case INACTIVE_LIST:
 222		seq_puts(m, "Inactive:\n");
 223		head = &vm->inactive_list;
 224		break;
 225	default:
 226		mutex_unlock(&dev->struct_mutex);
 227		return -EINVAL;
 228	}
 229
 230	total_obj_size = total_gtt_size = count = 0;
 231	list_for_each_entry(vma, head, vm_link) {
 232		seq_printf(m, "   ");
 233		describe_obj(m, vma->obj);
 234		seq_printf(m, "\n");
 235		total_obj_size += vma->obj->base.size;
 236		total_gtt_size += vma->node.size;
 237		count++;
 238	}
 239	mutex_unlock(&dev->struct_mutex);
 240
 241	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
 242		   count, total_obj_size, total_gtt_size);
 243	return 0;
 244}
 245
 246static int obj_rank_by_stolen(void *priv,
 247			      struct list_head *A, struct list_head *B)
 248{
 249	struct drm_i915_gem_object *a =
 250		container_of(A, struct drm_i915_gem_object, obj_exec_link);
 251	struct drm_i915_gem_object *b =
 252		container_of(B, struct drm_i915_gem_object, obj_exec_link);
 253
 254	if (a->stolen->start < b->stolen->start)
 255		return -1;
 256	if (a->stolen->start > b->stolen->start)
 257		return 1;
 258	return 0;
 259}
 260
 261static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 262{
 263	struct drm_info_node *node = m->private;
 264	struct drm_device *dev = node->minor->dev;
 265	struct drm_i915_private *dev_priv = dev->dev_private;
 266	struct drm_i915_gem_object *obj;
 267	u64 total_obj_size, total_gtt_size;
 268	LIST_HEAD(stolen);
 269	int count, ret;
 270
 271	ret = mutex_lock_interruptible(&dev->struct_mutex);
 272	if (ret)
 273		return ret;
 274
 275	total_obj_size = total_gtt_size = count = 0;
 276	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 277		if (obj->stolen == NULL)
 278			continue;
 279
 280		list_add(&obj->obj_exec_link, &stolen);
 281
 282		total_obj_size += obj->base.size;
 283		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
 284		count++;
 285	}
 286	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
 287		if (obj->stolen == NULL)
 288			continue;
 289
 290		list_add(&obj->obj_exec_link, &stolen);
 291
 292		total_obj_size += obj->base.size;
 293		count++;
 294	}
 295	list_sort(NULL, &stolen, obj_rank_by_stolen);
 296	seq_puts(m, "Stolen:\n");
 297	while (!list_empty(&stolen)) {
 298		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
 299		seq_puts(m, "   ");
 300		describe_obj(m, obj);
 301		seq_putc(m, '\n');
 302		list_del_init(&obj->obj_exec_link);
 303	}
 304	mutex_unlock(&dev->struct_mutex);
 305
 306	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
 307		   count, total_obj_size, total_gtt_size);
 308	return 0;
 309}
 310
 311#define count_objects(list, member) do { \
 312	list_for_each_entry(obj, list, member) { \
 313		size += i915_gem_obj_total_ggtt_size(obj); \
 314		++count; \
 315		if (obj->map_and_fenceable) { \
 316			mappable_size += i915_gem_obj_ggtt_size(obj); \
 317			++mappable_count; \
 318		} \
 319	} \
 320} while (0)
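/*
 * Editorial note: count_objects() is a statement macro, not a function; it
 * expects size, count, mappable_size, mappable_count and obj to already be
 * declared in the calling scope, as i915_gem_object_info() does below.
 */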
 321
 322struct file_stats {
 323	struct drm_i915_file_private *file_priv;
 324	unsigned long count;
 325	u64 total, unbound;
 326	u64 global, shared;
 327	u64 active, inactive;
 328};
 329
 330static int per_file_stats(int id, void *ptr, void *data)
 331{
 332	struct drm_i915_gem_object *obj = ptr;
 333	struct file_stats *stats = data;
 334	struct i915_vma *vma;
 335
 336	stats->count++;
 337	stats->total += obj->base.size;
 338
 339	if (obj->base.name || obj->base.dma_buf)
 340		stats->shared += obj->base.size;
 341
 342	if (USES_FULL_PPGTT(obj->base.dev)) {
 343		list_for_each_entry(vma, &obj->vma_list, obj_link) {
 344			struct i915_hw_ppgtt *ppgtt;
 345
 346			if (!drm_mm_node_allocated(&vma->node))
 347				continue;
 348
 349			if (vma->is_ggtt) {
 350				stats->global += obj->base.size;
 351				continue;
 352			}
 353
 354			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
 355			if (ppgtt->file_priv != stats->file_priv)
 356				continue;
 357
 358			if (obj->active) /* XXX per-vma statistic */
 359				stats->active += obj->base.size;
 360			else
 361				stats->inactive += obj->base.size;
 362
 363			return 0;
 364		}
 365	} else {
 366		if (i915_gem_obj_ggtt_bound(obj)) {
 367			stats->global += obj->base.size;
 368			if (obj->active)
 369				stats->active += obj->base.size;
 370			else
 371				stats->inactive += obj->base.size;
 372			return 0;
 373		}
 374	}
 375
 376	if (!list_empty(&obj->global_list))
 377		stats->unbound += obj->base.size;
 378
 379	return 0;
 380}
 381
 382#define print_file_stats(m, name, stats) do { \
 383	if (stats.count) \
 384		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
 385			   name, \
 386			   stats.count, \
 387			   stats.total, \
 388			   stats.active, \
 389			   stats.inactive, \
 390			   stats.global, \
 391			   stats.shared, \
 392			   stats.unbound); \
 393} while (0)
 394
 395static void print_batch_pool_stats(struct seq_file *m,
 396				   struct drm_i915_private *dev_priv)
 397{
 398	struct drm_i915_gem_object *obj;
 399	struct file_stats stats;
 400	struct intel_engine_cs *ring;
 401	int i, j;
 402
 403	memset(&stats, 0, sizeof(stats));
 404
 405	for_each_ring(ring, dev_priv, i) {
 406		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
 407			list_for_each_entry(obj,
 408					    &ring->batch_pool.cache_list[j],
 409					    batch_pool_link)
 410				per_file_stats(0, obj, &stats);
 411		}
 412	}
 413
 414	print_file_stats(m, "[k]batch pool", stats);
 415}
 416
 417#define count_vmas(list, member) do { \
 418	list_for_each_entry(vma, list, member) { \
 419		size += i915_gem_obj_total_ggtt_size(vma->obj); \
 420		++count; \
 421		if (vma->obj->map_and_fenceable) { \
 422			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
 423			++mappable_count; \
 424		} \
 425	} \
 426} while (0)
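/*
 * Editorial note: like count_objects() above, count_vmas() relies on size,
 * count, mappable_size, mappable_count and vma existing in the caller.
 */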
 427
 428static int i915_gem_object_info(struct seq_file *m, void* data)
 429{
 430	struct drm_info_node *node = m->private;
 431	struct drm_device *dev = node->minor->dev;
 432	struct drm_i915_private *dev_priv = dev->dev_private;
 433	u32 count, mappable_count, purgeable_count;
 434	u64 size, mappable_size, purgeable_size;
 435	struct drm_i915_gem_object *obj;
 436	struct i915_address_space *vm = &dev_priv->gtt.base;
 437	struct drm_file *file;
 438	struct i915_vma *vma;
 439	int ret;
 440
 441	ret = mutex_lock_interruptible(&dev->struct_mutex);
 442	if (ret)
 443		return ret;
 444
 445	seq_printf(m, "%u objects, %zu bytes\n",
 446		   dev_priv->mm.object_count,
 447		   dev_priv->mm.object_memory);
 448
 449	size = count = mappable_size = mappable_count = 0;
 450	count_objects(&dev_priv->mm.bound_list, global_list);
 451	seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n",
 452		   count, mappable_count, size, mappable_size);
 453
 454	size = count = mappable_size = mappable_count = 0;
 455	count_vmas(&vm->active_list, vm_link);
 456	seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
 457		   count, mappable_count, size, mappable_size);
 458
 459	size = count = mappable_size = mappable_count = 0;
 460	count_vmas(&vm->inactive_list, vm_link);
 461	seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
 462		   count, mappable_count, size, mappable_size);
 463
 464	size = count = purgeable_size = purgeable_count = 0;
 465	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
 466		size += obj->base.size, ++count;
 467		if (obj->madv == I915_MADV_DONTNEED)
 468			purgeable_size += obj->base.size, ++purgeable_count;
 469	}
 470	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
 471
 472	size = count = mappable_size = mappable_count = 0;
 473	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 474		if (obj->fault_mappable) {
 475			size += i915_gem_obj_ggtt_size(obj);
 476			++count;
 477		}
 478		if (obj->pin_display) {
 479			mappable_size += i915_gem_obj_ggtt_size(obj);
 480			++mappable_count;
 481		}
 482		if (obj->madv == I915_MADV_DONTNEED) {
 483			purgeable_size += obj->base.size;
 484			++purgeable_count;
 485		}
 486	}
 487	seq_printf(m, "%u purgeable objects, %llu bytes\n",
 488		   purgeable_count, purgeable_size);
 489	seq_printf(m, "%u pinned mappable objects, %llu bytes\n",
 490		   mappable_count, mappable_size);
 491	seq_printf(m, "%u fault mappable objects, %llu bytes\n",
 492		   count, size);
 493
 494	seq_printf(m, "%llu [%llu] gtt total\n",
 495		   dev_priv->gtt.base.total,
 496		   (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
 497
 498	seq_putc(m, '\n');
 499	print_batch_pool_stats(m, dev_priv);
 500	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 501		struct file_stats stats;
 502		struct task_struct *task;
 503
 504		memset(&stats, 0, sizeof(stats));
 505		stats.file_priv = file->driver_priv;
 506		spin_lock(&file->table_lock);
 507		idr_for_each(&file->object_idr, per_file_stats, &stats);
 508		spin_unlock(&file->table_lock);
 509		/*
 510		 * Although we have a valid reference on file->pid, that does
 511		 * not guarantee that the task_struct who called get_pid() is
 512		 * still alive (e.g. get_pid(current) => fork() => exit()).
 513		 * Therefore, we need to protect this ->comm access using RCU.
 514		 */
 515		rcu_read_lock();
 516		task = pid_task(file->pid, PIDTYPE_PID);
 517		print_file_stats(m, task ? task->comm : "<unknown>", stats);
 518		rcu_read_unlock();
 519	}
 520
 521	mutex_unlock(&dev->struct_mutex);
 522
 523	return 0;
 524}
 525
 526static int i915_gem_gtt_info(struct seq_file *m, void *data)
 527{
 528	struct drm_info_node *node = m->private;
 529	struct drm_device *dev = node->minor->dev;
 530	uintptr_t list = (uintptr_t) node->info_ent->data;
 531	struct drm_i915_private *dev_priv = dev->dev_private;
 532	struct drm_i915_gem_object *obj;
 533	u64 total_obj_size, total_gtt_size;
 534	int count, ret;
 535
 536	ret = mutex_lock_interruptible(&dev->struct_mutex);
 537	if (ret)
 538		return ret;
 539
 540	total_obj_size = total_gtt_size = count = 0;
 541	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 542		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
 543			continue;
 544
 545		seq_puts(m, "   ");
 546		describe_obj(m, obj);
 547		seq_putc(m, '\n');
 548		total_obj_size += obj->base.size;
 549		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
 550		count++;
 551	}
 552
 553	mutex_unlock(&dev->struct_mutex);
 554
 555	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
 556		   count, total_obj_size, total_gtt_size);
 557
 558	return 0;
 559}
 560
 561static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 562{
 563	struct drm_info_node *node = m->private;
 564	struct drm_device *dev = node->minor->dev;
 565	struct drm_i915_private *dev_priv = dev->dev_private;
 566	struct intel_crtc *crtc;
 567	int ret;
 568
 569	ret = mutex_lock_interruptible(&dev->struct_mutex);
 570	if (ret)
 571		return ret;
 572
 573	for_each_intel_crtc(dev, crtc) {
 574		const char pipe = pipe_name(crtc->pipe);
 575		const char plane = plane_name(crtc->plane);
 576		struct intel_unpin_work *work;
 577
 578		spin_lock_irq(&dev->event_lock);
 579		work = crtc->unpin_work;
 580		if (work == NULL) {
 581			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
 582				   pipe, plane);
 583		} else {
 584			u32 addr;
 585
 586			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
 587				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
 588					   pipe, plane);
 589			} else {
 590				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
 591					   pipe, plane);
 592			}
 593			if (work->flip_queued_req) {
 594				struct intel_engine_cs *ring =
 595					i915_gem_request_get_ring(work->flip_queued_req);
 596
 597				seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
 598					   ring->name,
 599					   i915_gem_request_get_seqno(work->flip_queued_req),
 600					   dev_priv->next_seqno,
 601					   ring->get_seqno(ring, true),
 602					   i915_gem_request_completed(work->flip_queued_req, true));
 603			} else
 604				seq_printf(m, "Flip not associated with any ring\n");
 605			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
 606				   work->flip_queued_vblank,
 607				   work->flip_ready_vblank,
 608				   drm_crtc_vblank_count(&crtc->base));
 609			if (work->enable_stall_check)
 610				seq_puts(m, "Stall check enabled, ");
 611			else
 612				seq_puts(m, "Stall check waiting for page flip ioctl, ");
 613			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
 614
 615			if (INTEL_INFO(dev)->gen >= 4)
 616				addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
 617			else
 618				addr = I915_READ(DSPADDR(crtc->plane));
 619			seq_printf(m, "Current scanout address 0x%08x\n", addr);
 620
 621			if (work->pending_flip_obj) {
 622				seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
 623				seq_printf(m, "MMIO update completed? %d\n",  addr == work->gtt_offset);
 624			}
 625		}
 626		spin_unlock_irq(&dev->event_lock);
 627	}
 628
 629	mutex_unlock(&dev->struct_mutex);
 630
 631	return 0;
 632}
 633
 634static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 635{
 636	struct drm_info_node *node = m->private;
 637	struct drm_device *dev = node->minor->dev;
 638	struct drm_i915_private *dev_priv = dev->dev_private;
 639	struct drm_i915_gem_object *obj;
 640	struct intel_engine_cs *ring;
 641	int total = 0;
 642	int ret, i, j;
 643
 644	ret = mutex_lock_interruptible(&dev->struct_mutex);
 645	if (ret)
 646		return ret;
 647
 648	for_each_ring(ring, dev_priv, i) {
 649		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
 650			int count;
 651
 652			count = 0;
 653			list_for_each_entry(obj,
 654					    &ring->batch_pool.cache_list[j],
 655					    batch_pool_link)
 656				count++;
 657			seq_printf(m, "%s cache[%d]: %d objects\n",
 658				   ring->name, j, count);
 659
 660			list_for_each_entry(obj,
 661					    &ring->batch_pool.cache_list[j],
 662					    batch_pool_link) {
 663				seq_puts(m, "   ");
 664				describe_obj(m, obj);
 665				seq_putc(m, '\n');
 666			}
 667
 668			total += count;
 669		}
 670	}
 671
 672	seq_printf(m, "total: %d\n", total);
 673
 674	mutex_unlock(&dev->struct_mutex);
 675
 676	return 0;
 677}
 678
 679static int i915_gem_request_info(struct seq_file *m, void *data)
 680{
 681	struct drm_info_node *node = m->private;
 682	struct drm_device *dev = node->minor->dev;
 683	struct drm_i915_private *dev_priv = dev->dev_private;
 684	struct intel_engine_cs *ring;
 685	struct drm_i915_gem_request *req;
 686	int ret, any, i;
 687
 688	ret = mutex_lock_interruptible(&dev->struct_mutex);
 689	if (ret)
 690		return ret;
 691
 692	any = 0;
 693	for_each_ring(ring, dev_priv, i) {
 694		int count;
 695
 696		count = 0;
 697		list_for_each_entry(req, &ring->request_list, list)
 698			count++;
 699		if (count == 0)
 700			continue;
 701
 702		seq_printf(m, "%s requests: %d\n", ring->name, count);
 703		list_for_each_entry(req, &ring->request_list, list) {
 704			struct task_struct *task;
 705
 706			rcu_read_lock();
 707			task = NULL;
 708			if (req->pid)
 709				task = pid_task(req->pid, PIDTYPE_PID);
 710			seq_printf(m, "    %x @ %d: %s [%d]\n",
 711				   req->seqno,
 712				   (int) (jiffies - req->emitted_jiffies),
 713				   task ? task->comm : "<unknown>",
 714				   task ? task->pid : -1);
 715			rcu_read_unlock();
 716		}
 717
 718		any++;
 719	}
 720	mutex_unlock(&dev->struct_mutex);
 721
 722	if (any == 0)
 723		seq_puts(m, "No requests\n");
 724
 725	return 0;
 726}
 727
 728static void i915_ring_seqno_info(struct seq_file *m,
 729				 struct intel_engine_cs *ring)
 730{
 731	if (ring->get_seqno) {
 732		seq_printf(m, "Current sequence (%s): %x\n",
 733			   ring->name, ring->get_seqno(ring, false));
 734	}
 735}
 736
 737static int i915_gem_seqno_info(struct seq_file *m, void *data)
 738{
 739	struct drm_info_node *node = m->private;
 740	struct drm_device *dev = node->minor->dev;
 741	struct drm_i915_private *dev_priv = dev->dev_private;
 742	struct intel_engine_cs *ring;
 743	int ret, i;
 744
 745	ret = mutex_lock_interruptible(&dev->struct_mutex);
 746	if (ret)
 747		return ret;
 748	intel_runtime_pm_get(dev_priv);
 749
 750	for_each_ring(ring, dev_priv, i)
 751		i915_ring_seqno_info(m, ring);
 752
 753	intel_runtime_pm_put(dev_priv);
 754	mutex_unlock(&dev->struct_mutex);
 755
 756	return 0;
 757}
 758
 759
 760static int i915_interrupt_info(struct seq_file *m, void *data)
 761{
 762	struct drm_info_node *node = m->private;
 763	struct drm_device *dev = node->minor->dev;
 764	struct drm_i915_private *dev_priv = dev->dev_private;
 765	struct intel_engine_cs *ring;
 766	int ret, i, pipe;
 767
 768	ret = mutex_lock_interruptible(&dev->struct_mutex);
 769	if (ret)
 770		return ret;
 771	intel_runtime_pm_get(dev_priv);
 772
 773	if (IS_CHERRYVIEW(dev)) {
 774		seq_printf(m, "Master Interrupt Control:\t%08x\n",
 775			   I915_READ(GEN8_MASTER_IRQ));
 776
 777		seq_printf(m, "Display IER:\t%08x\n",
 778			   I915_READ(VLV_IER));
 779		seq_printf(m, "Display IIR:\t%08x\n",
 780			   I915_READ(VLV_IIR));
 781		seq_printf(m, "Display IIR_RW:\t%08x\n",
 782			   I915_READ(VLV_IIR_RW));
 783		seq_printf(m, "Display IMR:\t%08x\n",
 784			   I915_READ(VLV_IMR));
 785		for_each_pipe(dev_priv, pipe)
 786			seq_printf(m, "Pipe %c stat:\t%08x\n",
 787				   pipe_name(pipe),
 788				   I915_READ(PIPESTAT(pipe)));
 789
 790		seq_printf(m, "Port hotplug:\t%08x\n",
 791			   I915_READ(PORT_HOTPLUG_EN));
 792		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
 793			   I915_READ(VLV_DPFLIPSTAT));
 794		seq_printf(m, "DPINVGTT:\t%08x\n",
 795			   I915_READ(DPINVGTT));
 796
 797		for (i = 0; i < 4; i++) {
 798			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
 799				   i, I915_READ(GEN8_GT_IMR(i)));
 800			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
 801				   i, I915_READ(GEN8_GT_IIR(i)));
 802			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
 803				   i, I915_READ(GEN8_GT_IER(i)));
 804		}
 805
 806		seq_printf(m, "PCU interrupt mask:\t%08x\n",
 807			   I915_READ(GEN8_PCU_IMR));
 808		seq_printf(m, "PCU interrupt identity:\t%08x\n",
 809			   I915_READ(GEN8_PCU_IIR));
 810		seq_printf(m, "PCU interrupt enable:\t%08x\n",
 811			   I915_READ(GEN8_PCU_IER));
 812	} else if (INTEL_INFO(dev)->gen >= 8) {
 813		seq_printf(m, "Master Interrupt Control:\t%08x\n",
 814			   I915_READ(GEN8_MASTER_IRQ));
 815
 816		for (i = 0; i < 4; i++) {
 817			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
 818				   i, I915_READ(GEN8_GT_IMR(i)));
 819			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
 820				   i, I915_READ(GEN8_GT_IIR(i)));
 821			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
 822				   i, I915_READ(GEN8_GT_IER(i)));
 823		}
 824
 825		for_each_pipe(dev_priv, pipe) {
 826			enum intel_display_power_domain power_domain;
 827
 828			power_domain = POWER_DOMAIN_PIPE(pipe);
 829			if (!intel_display_power_get_if_enabled(dev_priv,
 830								power_domain)) {
 831				seq_printf(m, "Pipe %c power disabled\n",
 832					   pipe_name(pipe));
 833				continue;
 834			}
 835			seq_printf(m, "Pipe %c IMR:\t%08x\n",
 836				   pipe_name(pipe),
 837				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
 838			seq_printf(m, "Pipe %c IIR:\t%08x\n",
 839				   pipe_name(pipe),
 840				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
 841			seq_printf(m, "Pipe %c IER:\t%08x\n",
 842				   pipe_name(pipe),
 843				   I915_READ(GEN8_DE_PIPE_IER(pipe)));
 844
 845			intel_display_power_put(dev_priv, power_domain);
 846		}
 847
 848		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
 849			   I915_READ(GEN8_DE_PORT_IMR));
 850		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
 851			   I915_READ(GEN8_DE_PORT_IIR));
 852		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
 853			   I915_READ(GEN8_DE_PORT_IER));
 854
 855		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
 856			   I915_READ(GEN8_DE_MISC_IMR));
 857		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
 858			   I915_READ(GEN8_DE_MISC_IIR));
 859		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
 860			   I915_READ(GEN8_DE_MISC_IER));
 861
 862		seq_printf(m, "PCU interrupt mask:\t%08x\n",
 863			   I915_READ(GEN8_PCU_IMR));
 864		seq_printf(m, "PCU interrupt identity:\t%08x\n",
 865			   I915_READ(GEN8_PCU_IIR));
 866		seq_printf(m, "PCU interrupt enable:\t%08x\n",
 867			   I915_READ(GEN8_PCU_IER));
 868	} else if (IS_VALLEYVIEW(dev)) {
 869		seq_printf(m, "Display IER:\t%08x\n",
 870			   I915_READ(VLV_IER));
 871		seq_printf(m, "Display IIR:\t%08x\n",
 872			   I915_READ(VLV_IIR));
 873		seq_printf(m, "Display IIR_RW:\t%08x\n",
 874			   I915_READ(VLV_IIR_RW));
 875		seq_printf(m, "Display IMR:\t%08x\n",
 876			   I915_READ(VLV_IMR));
 877		for_each_pipe(dev_priv, pipe)
 878			seq_printf(m, "Pipe %c stat:\t%08x\n",
 879				   pipe_name(pipe),
 880				   I915_READ(PIPESTAT(pipe)));
 881
 882		seq_printf(m, "Master IER:\t%08x\n",
 883			   I915_READ(VLV_MASTER_IER));
 884
 885		seq_printf(m, "Render IER:\t%08x\n",
 886			   I915_READ(GTIER));
 887		seq_printf(m, "Render IIR:\t%08x\n",
 888			   I915_READ(GTIIR));
 889		seq_printf(m, "Render IMR:\t%08x\n",
 890			   I915_READ(GTIMR));
 891
 892		seq_printf(m, "PM IER:\t\t%08x\n",
 893			   I915_READ(GEN6_PMIER));
 894		seq_printf(m, "PM IIR:\t\t%08x\n",
 895			   I915_READ(GEN6_PMIIR));
 896		seq_printf(m, "PM IMR:\t\t%08x\n",
 897			   I915_READ(GEN6_PMIMR));
 898
 899		seq_printf(m, "Port hotplug:\t%08x\n",
 900			   I915_READ(PORT_HOTPLUG_EN));
 901		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
 902			   I915_READ(VLV_DPFLIPSTAT));
 903		seq_printf(m, "DPINVGTT:\t%08x\n",
 904			   I915_READ(DPINVGTT));
 905
 906	} else if (!HAS_PCH_SPLIT(dev)) {
 907		seq_printf(m, "Interrupt enable:    %08x\n",
 908			   I915_READ(IER));
 909		seq_printf(m, "Interrupt identity:  %08x\n",
 910			   I915_READ(IIR));
 911		seq_printf(m, "Interrupt mask:      %08x\n",
 912			   I915_READ(IMR));
 913		for_each_pipe(dev_priv, pipe)
 914			seq_printf(m, "Pipe %c stat:         %08x\n",
 915				   pipe_name(pipe),
 916				   I915_READ(PIPESTAT(pipe)));
 917	} else {
 918		seq_printf(m, "North Display Interrupt enable:		%08x\n",
 919			   I915_READ(DEIER));
 920		seq_printf(m, "North Display Interrupt identity:	%08x\n",
 921			   I915_READ(DEIIR));
 922		seq_printf(m, "North Display Interrupt mask:		%08x\n",
 923			   I915_READ(DEIMR));
 924		seq_printf(m, "South Display Interrupt enable:		%08x\n",
 925			   I915_READ(SDEIER));
 926		seq_printf(m, "South Display Interrupt identity:	%08x\n",
 927			   I915_READ(SDEIIR));
 928		seq_printf(m, "South Display Interrupt mask:		%08x\n",
 929			   I915_READ(SDEIMR));
 930		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
 931			   I915_READ(GTIER));
 932		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
 933			   I915_READ(GTIIR));
 934		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
 935			   I915_READ(GTIMR));
 936	}
 937	for_each_ring(ring, dev_priv, i) {
 938		if (INTEL_INFO(dev)->gen >= 6) {
 939			seq_printf(m,
 940				   "Graphics Interrupt mask (%s):	%08x\n",
 941				   ring->name, I915_READ_IMR(ring));
 942		}
 943		i915_ring_seqno_info(m, ring);
 944	}
 945	intel_runtime_pm_put(dev_priv);
 946	mutex_unlock(&dev->struct_mutex);
 947
 948	return 0;
 949}
 950
 951static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 952{
 953	struct drm_info_node *node = m->private;
 954	struct drm_device *dev = node->minor->dev;
 955	struct drm_i915_private *dev_priv = dev->dev_private;
 956	int i, ret;
 957
 958	ret = mutex_lock_interruptible(&dev->struct_mutex);
 959	if (ret)
 960		return ret;
 961
 962	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
 963	for (i = 0; i < dev_priv->num_fence_regs; i++) {
 964		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
 965
 966		seq_printf(m, "Fence %d, pin count = %d, object = ",
 967			   i, dev_priv->fence_regs[i].pin_count);
 968		if (obj == NULL)
 969			seq_puts(m, "unused");
 970		else
 971			describe_obj(m, obj);
 972		seq_putc(m, '\n');
 973	}
 974
 975	mutex_unlock(&dev->struct_mutex);
 976	return 0;
 977}
 978
 979static int i915_hws_info(struct seq_file *m, void *data)
 980{
 981	struct drm_info_node *node = m->private;
 982	struct drm_device *dev = node->minor->dev;
 983	struct drm_i915_private *dev_priv = dev->dev_private;
 984	struct intel_engine_cs *ring;
 985	const u32 *hws;
 986	int i;
 987
 988	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
 989	hws = ring->status_page.page_addr;
 990	if (hws == NULL)
 991		return 0;
 992
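	/* Editorial note: this dumps only the first quarter (1 KiB) of the
	 * 4 KiB status page, four dwords per row. */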
 993	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
 994		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
 995			   i * 4,
 996			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
 997	}
 998	return 0;
 999}
1000
1001static ssize_t
1002i915_error_state_write(struct file *filp,
1003		       const char __user *ubuf,
1004		       size_t cnt,
1005		       loff_t *ppos)
1006{
1007	struct i915_error_state_file_priv *error_priv = filp->private_data;
1008	struct drm_device *dev = error_priv->dev;
1009	int ret;
1010
1011	DRM_DEBUG_DRIVER("Resetting error state\n");
1012
1013	ret = mutex_lock_interruptible(&dev->struct_mutex);
1014	if (ret)
1015		return ret;
1016
1017	i915_destroy_error_state(dev);
1018	mutex_unlock(&dev->struct_mutex);
1019
1020	return cnt;
1021}
1022
1023static int i915_error_state_open(struct inode *inode, struct file *file)
1024{
1025	struct drm_device *dev = inode->i_private;
1026	struct i915_error_state_file_priv *error_priv;
1027
1028	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
1029	if (!error_priv)
1030		return -ENOMEM;
1031
1032	error_priv->dev = dev;
1033
1034	i915_error_state_get(dev, error_priv);
1035
1036	file->private_data = error_priv;
1037
1038	return 0;
1039}
1040
1041static int i915_error_state_release(struct inode *inode, struct file *file)
1042{
1043	struct i915_error_state_file_priv *error_priv = file->private_data;
1044
1045	i915_error_state_put(error_priv);
1046	kfree(error_priv);
1047
1048	return 0;
1049}
1050
1051static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
1052				     size_t count, loff_t *pos)
1053{
1054	struct i915_error_state_file_priv *error_priv = file->private_data;
1055	struct drm_i915_error_state_buf error_str;
1056	loff_t tmp_pos = 0;
1057	ssize_t ret_count = 0;
1058	int ret;
1059
1060	ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
1061	if (ret)
1062		return ret;
1063
1064	ret = i915_error_state_to_str(&error_str, error_priv);
1065	if (ret)
1066		goto out;
1067
1068	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
1069					    error_str.buf,
1070					    error_str.bytes);
1071
1072	if (ret_count < 0)
1073		ret = ret_count;
1074	else
1075		*pos = error_str.start + ret_count;
1076out:
1077	i915_error_state_buf_release(&error_str);
1078	return ret ?: ret_count;
1079}
1080
1081static const struct file_operations i915_error_state_fops = {
1082	.owner = THIS_MODULE,
1083	.open = i915_error_state_open,
1084	.read = i915_error_state_read,
1085	.write = i915_error_state_write,
1086	.llseek = default_llseek,
1087	.release = i915_error_state_release,
1088};
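/*
 * Editorial usage sketch (mount point assumed): reading the file dumps the
 * most recently captured GPU error state; writing anything to it resets the
 * captured state, as i915_error_state_write() above ignores the payload.
 *
 *	cat /sys/kernel/debug/dri/0/i915_error_state > error.txt
 *	echo 1 > /sys/kernel/debug/dri/0/i915_error_state  # clear
 */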
1089
1090static int
1091i915_next_seqno_get(void *data, u64 *val)
1092{
1093	struct drm_device *dev = data;
1094	struct drm_i915_private *dev_priv = dev->dev_private;
1095	int ret;
1096
1097	ret = mutex_lock_interruptible(&dev->struct_mutex);
1098	if (ret)
1099		return ret;
1100
1101	*val = dev_priv->next_seqno;
1102	mutex_unlock(&dev->struct_mutex);
1103
1104	return 0;
1105}
1106
1107static int
1108i915_next_seqno_set(void *data, u64 val)
1109{
1110	struct drm_device *dev = data;
1111	int ret;
1112
1113	ret = mutex_lock_interruptible(&dev->struct_mutex);
1114	if (ret)
1115		return ret;
1116
1117	ret = i915_gem_set_seqno(dev, val);
1118	mutex_unlock(&dev->struct_mutex);
1119
1120	return ret;
1121}
1122
1123DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
1124			i915_next_seqno_get, i915_next_seqno_set,
1125			"0x%llx\n");
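/*
 * Editorial note: DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations
 * boilerplate around the get/set pair above; with the "0x%llx\n" format,
 * reads print the next seqno in hex and writes accept a plain or
 * 0x-prefixed number. Sketch (mount point assumed):
 *
 *	cat /sys/kernel/debug/dri/0/i915_next_seqno
 *	echo 0x1000 > /sys/kernel/debug/dri/0/i915_next_seqno
 */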
1126
1127static int i915_frequency_info(struct seq_file *m, void *unused)
1128{
1129	struct drm_info_node *node = m->private;
1130	struct drm_device *dev = node->minor->dev;
1131	struct drm_i915_private *dev_priv = dev->dev_private;
1132	int ret = 0;
1133
1134	intel_runtime_pm_get(dev_priv);
1135
1136	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1137
1138	if (IS_GEN5(dev)) {
1139		u16 rgvswctl = I915_READ16(MEMSWCTL);
1140		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1141
1142		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1143		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1144		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1145			   MEMSTAT_VID_SHIFT);
1146		seq_printf(m, "Current P-state: %d\n",
1147			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
1148	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
1149		u32 freq_sts;
1150
1151		mutex_lock(&dev_priv->rps.hw_lock);
1152		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1153		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1154		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1155
1156		seq_printf(m, "actual GPU freq: %d MHz\n",
1157			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1158
1159		seq_printf(m, "current GPU freq: %d MHz\n",
1160			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
1161
1162		seq_printf(m, "max GPU freq: %d MHz\n",
1163			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
1164
1165		seq_printf(m, "min GPU freq: %d MHz\n",
1166			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
1167
1168		seq_printf(m, "idle GPU freq: %d MHz\n",
1169			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
1170
1171		seq_printf(m,
1172			   "efficient (RPe) frequency: %d MHz\n",
1173			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
1174		mutex_unlock(&dev_priv->rps.hw_lock);
1175	} else if (INTEL_INFO(dev)->gen >= 6) {
1176		u32 rp_state_limits;
1177		u32 gt_perf_status;
1178		u32 rp_state_cap;
1179		u32 rpmodectl, rpinclimit, rpdeclimit;
1180		u32 rpstat, cagf, reqf;
1181		u32 rpupei, rpcurup, rpprevup;
1182		u32 rpdownei, rpcurdown, rpprevdown;
1183		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
1184		int max_freq;
1185
1186		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
1187		if (IS_BROXTON(dev)) {
1188			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1189			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1190		} else {
1191			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1192			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1193		}
1194
1195		/* RPSTAT1 is in the GT power well */
1196		ret = mutex_lock_interruptible(&dev->struct_mutex);
1197		if (ret)
1198			goto out;
1199
1200		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1201
1202		reqf = I915_READ(GEN6_RPNSWREQ);
1203		if (IS_GEN9(dev))
1204			reqf >>= 23;
1205		else {
1206			reqf &= ~GEN6_TURBO_DISABLE;
1207			if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1208				reqf >>= 24;
1209			else
1210				reqf >>= 25;
1211		}
1212		reqf = intel_gpu_freq(dev_priv, reqf);
1213
1214		rpmodectl = I915_READ(GEN6_RP_CONTROL);
1215		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1216		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1217
1218		rpstat = I915_READ(GEN6_RPSTAT1);
1219		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
1220		rpcurup = I915_READ(GEN6_RP_CUR_UP);
1221		rpprevup = I915_READ(GEN6_RP_PREV_UP);
1222		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
1223		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
1224		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
1225		if (IS_GEN9(dev))
1226			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
1227		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1228			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
1229		else
1230			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
1231		cagf = intel_gpu_freq(dev_priv, cagf);
1232
1233		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1234		mutex_unlock(&dev->struct_mutex);
1235
1236		if (IS_GEN6(dev) || IS_GEN7(dev)) {
1237			pm_ier = I915_READ(GEN6_PMIER);
1238			pm_imr = I915_READ(GEN6_PMIMR);
1239			pm_isr = I915_READ(GEN6_PMISR);
1240			pm_iir = I915_READ(GEN6_PMIIR);
1241			pm_mask = I915_READ(GEN6_PMINTRMSK);
1242		} else {
1243			pm_ier = I915_READ(GEN8_GT_IER(2));
1244			pm_imr = I915_READ(GEN8_GT_IMR(2));
1245			pm_isr = I915_READ(GEN8_GT_ISR(2));
1246			pm_iir = I915_READ(GEN8_GT_IIR(2));
1247			pm_mask = I915_READ(GEN6_PMINTRMSK);
1248		}
1249		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
1250			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
1251		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
1252		seq_printf(m, "Render p-state ratio: %d\n",
1253			   (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
1254		seq_printf(m, "Render p-state VID: %d\n",
1255			   gt_perf_status & 0xff);
1256		seq_printf(m, "Render p-state limit: %d\n",
1257			   rp_state_limits & 0xff);
1258		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1259		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1260		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1261		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
1262		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
1263		seq_printf(m, "CAGF: %dMHz\n", cagf);
1264		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
1265			   GEN6_CURICONT_MASK);
1266		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
1267			   GEN6_CURBSYTAVG_MASK);
1268		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
1269			   GEN6_CURBSYTAVG_MASK);
1270		seq_printf(m, "Up threshold: %d%%\n",
1271			   dev_priv->rps.up_threshold);
1272
1273		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
1274			   GEN6_CURIAVG_MASK);
1275		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
1276			   GEN6_CURBSYTAVG_MASK);
1277		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
1278			   GEN6_CURBSYTAVG_MASK);
1279		seq_printf(m, "Down threshold: %d%%\n",
1280			   dev_priv->rps.down_threshold);
1281
1282		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
1283			    rp_state_cap >> 16) & 0xff;
1284		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
1285			     GEN9_FREQ_SCALER : 1);
1286		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
1287			   intel_gpu_freq(dev_priv, max_freq));
1288
1289		max_freq = (rp_state_cap & 0xff00) >> 8;
1290		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
1291			     GEN9_FREQ_SCALER : 1);
1292		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
1293			   intel_gpu_freq(dev_priv, max_freq));
1294
1295		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
1296			    rp_state_cap >> 0) & 0xff;
1297		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
1298			     GEN9_FREQ_SCALER : 1);
1299		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
1300			   intel_gpu_freq(dev_priv, max_freq));
1301		seq_printf(m, "Max overclocked frequency: %dMHz\n",
1302			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
1303
1304		seq_printf(m, "Current freq: %d MHz\n",
1305			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
1306		seq_printf(m, "Actual freq: %d MHz\n", cagf);
1307		seq_printf(m, "Idle freq: %d MHz\n",
1308			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
1309		seq_printf(m, "Min freq: %d MHz\n",
1310			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
1311		seq_printf(m, "Max freq: %d MHz\n",
1312			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
1313		seq_printf(m,
1314			   "efficient (RPe) frequency: %d MHz\n",
1315			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
1316	} else {
1317		seq_puts(m, "no P-state info available\n");
1318	}
1319
1320	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk_freq);
1321	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1322	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1323
1324out:
1325	intel_runtime_pm_put(dev_priv);
1326	return ret;
1327}
1328
1329static int i915_hangcheck_info(struct seq_file *m, void *unused)
1330{
1331	struct drm_info_node *node = m->private;
1332	struct drm_device *dev = node->minor->dev;
1333	struct drm_i915_private *dev_priv = dev->dev_private;
1334	struct intel_engine_cs *ring;
1335	u64 acthd[I915_NUM_RINGS];
1336	u32 seqno[I915_NUM_RINGS];
1337	u32 instdone[I915_NUM_INSTDONE_REG];
1338	int i, j;
1339
1340	if (!i915.enable_hangcheck) {
1341		seq_printf(m, "Hangcheck disabled\n");
1342		return 0;
1343	}
1344
1345	intel_runtime_pm_get(dev_priv);
1346
1347	for_each_ring(ring, dev_priv, i) {
1348		seqno[i] = ring->get_seqno(ring, false);
1349		acthd[i] = intel_ring_get_active_head(ring);
1350	}
1351
1352	i915_get_extra_instdone(dev, instdone);
1353
1354	intel_runtime_pm_put(dev_priv);
1355
1356	if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
1357		seq_printf(m, "Hangcheck active, fires in %dms\n",
1358			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1359					    jiffies));
1360	} else
1361		seq_printf(m, "Hangcheck inactive\n");
1362
1363	for_each_ring(ring, dev_priv, i) {
1364		seq_printf(m, "%s:\n", ring->name);
1365		seq_printf(m, "\tseqno = %x [current %x]\n",
1366			   ring->hangcheck.seqno, seqno[i]);
1367		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
1368			   (long long)ring->hangcheck.acthd,
1369			   (long long)acthd[i]);
1370		seq_printf(m, "\tmax ACTHD = 0x%08llx\n",
1371			   (long long)ring->hangcheck.max_acthd);
1372		seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
1373		seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
1374
1375		if (ring->id == RCS) {
1376			seq_puts(m, "\tinstdone read =");
1377
1378			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
1379				seq_printf(m, " 0x%08x", instdone[j]);
1380
1381			seq_puts(m, "\n\tinstdone accu =");
1382
1383			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
1384				seq_printf(m, " 0x%08x",
1385					   ring->hangcheck.instdone[j]);
1386
1387			seq_puts(m, "\n");
1388		}
1389	}
1390
1391	return 0;
1392}
1393
1394static int ironlake_drpc_info(struct seq_file *m)
1395{
1396	struct drm_info_node *node = m->private;
1397	struct drm_device *dev = node->minor->dev;
1398	struct drm_i915_private *dev_priv = dev->dev_private;
1399	u32 rgvmodectl, rstdbyctl;
1400	u16 crstandvid;
1401	int ret;
1402
1403	ret = mutex_lock_interruptible(&dev->struct_mutex);
1404	if (ret)
1405		return ret;
1406	intel_runtime_pm_get(dev_priv);
1407
1408	rgvmodectl = I915_READ(MEMMODECTL);
1409	rstdbyctl = I915_READ(RSTDBYCTL);
1410	crstandvid = I915_READ16(CRSTANDVID);
1411
1412	intel_runtime_pm_put(dev_priv);
1413	mutex_unlock(&dev->struct_mutex);
1414
1415	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1416	seq_printf(m, "Boost freq: %d\n",
1417		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1418		   MEMMODE_BOOST_FREQ_SHIFT);
1419	seq_printf(m, "HW control enabled: %s\n",
1420		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1421	seq_printf(m, "SW control enabled: %s\n",
1422		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1423	seq_printf(m, "Gated voltage change: %s\n",
1424		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1425	seq_printf(m, "Starting frequency: P%d\n",
1426		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1427	seq_printf(m, "Max P-state: P%d\n",
1428		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1429	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1430	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1431	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1432	seq_printf(m, "Render standby enabled: %s\n",
1433		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
1434	seq_puts(m, "Current RS state: ");
1435	switch (rstdbyctl & RSX_STATUS_MASK) {
1436	case RSX_STATUS_ON:
1437		seq_puts(m, "on\n");
1438		break;
1439	case RSX_STATUS_RC1:
1440		seq_puts(m, "RC1\n");
1441		break;
1442	case RSX_STATUS_RC1E:
1443		seq_puts(m, "RC1E\n");
1444		break;
1445	case RSX_STATUS_RS1:
1446		seq_puts(m, "RS1\n");
1447		break;
1448	case RSX_STATUS_RS2:
1449		seq_puts(m, "RS2 (RC6)\n");
1450		break;
1451	case RSX_STATUS_RS3:
1452		seq_puts(m, "RS3 (RC6+)\n");
1453		break;
1454	default:
1455		seq_puts(m, "unknown\n");
1456		break;
1457	}
1458
1459	return 0;
1460}
1461
1462static int i915_forcewake_domains(struct seq_file *m, void *data)
1463{
1464	struct drm_info_node *node = m->private;
1465	struct drm_device *dev = node->minor->dev;
1466	struct drm_i915_private *dev_priv = dev->dev_private;
1467	struct intel_uncore_forcewake_domain *fw_domain;
1468	int i;
1469
1470	spin_lock_irq(&dev_priv->uncore.lock);
1471	for_each_fw_domain(fw_domain, dev_priv, i) {
1472		seq_printf(m, "%s.wake_count = %u\n",
1473			   intel_uncore_forcewake_domain_to_str(i),
1474			   fw_domain->wake_count);
1475	}
1476	spin_unlock_irq(&dev_priv->uncore.lock);
1477
1478	return 0;
1479}
1480
1481static int vlv_drpc_info(struct seq_file *m)
1482{
1483	struct drm_info_node *node = m->private;
1484	struct drm_device *dev = node->minor->dev;
1485	struct drm_i915_private *dev_priv = dev->dev_private;
1486	u32 rpmodectl1, rcctl1, pw_status;
1487
1488	intel_runtime_pm_get(dev_priv);
1489
1490	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1491	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1492	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1493
1494	intel_runtime_pm_put(dev_priv);
1495
1496	seq_printf(m, "Video Turbo Mode: %s\n",
1497		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1498	seq_printf(m, "Turbo enabled: %s\n",
1499		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
1500	seq_printf(m, "HW control enabled: %s\n",
1501		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
1502	seq_printf(m, "SW control enabled: %s\n",
1503		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1504			  GEN6_RP_MEDIA_SW_MODE));
1505	seq_printf(m, "RC6 Enabled: %s\n",
1506		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1507					GEN6_RC_CTL_EI_MODE(1))));
1508	seq_printf(m, "Render Power Well: %s\n",
1509		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1510	seq_printf(m, "Media Power Well: %s\n",
1511		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1512
1513	seq_printf(m, "Render RC6 residency since boot: %u\n",
1514		   I915_READ(VLV_GT_RENDER_RC6));
1515	seq_printf(m, "Media RC6 residency since boot: %u\n",
1516		   I915_READ(VLV_GT_MEDIA_RC6));
1517
1518	return i915_forcewake_domains(m, NULL);
1519}
1520
1521static int gen6_drpc_info(struct seq_file *m)
1522{
1523	struct drm_info_node *node = m->private;
1524	struct drm_device *dev = node->minor->dev;
1525	struct drm_i915_private *dev_priv = dev->dev_private;
1526	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
1527	unsigned forcewake_count;
1528	int count = 0, ret;
1529
1530	ret = mutex_lock_interruptible(&dev->struct_mutex);
1531	if (ret)
1532		return ret;
1533	intel_runtime_pm_get(dev_priv);
1534
1535	spin_lock_irq(&dev_priv->uncore.lock);
1536	forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
1537	spin_unlock_irq(&dev_priv->uncore.lock);
1538
1539	if (forcewake_count) {
1540		seq_puts(m, "RC information inaccurate because somebody "
1541			    "holds a forcewake reference\n");
1542	} else {
1543		/* NB: we cannot use forcewake, else we read the wrong values */
1544		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
1545			udelay(10);
1546		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
1547	}
1548
1549	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
1550	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1551
1552	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1553	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1554	mutex_unlock(&dev->struct_mutex);
1555	mutex_lock(&dev_priv->rps.hw_lock);
1556	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
1557	mutex_unlock(&dev_priv->rps.hw_lock);
1558
1559	intel_runtime_pm_put(dev_priv);
1560
1561	seq_printf(m, "Video Turbo Mode: %s\n",
1562		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1563	seq_printf(m, "HW control enabled: %s\n",
1564		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
1565	seq_printf(m, "SW control enabled: %s\n",
1566		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1567			  GEN6_RP_MEDIA_SW_MODE));
1568	seq_printf(m, "RC1e Enabled: %s\n",
1569		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1570	seq_printf(m, "RC6 Enabled: %s\n",
1571		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1572	seq_printf(m, "Deep RC6 Enabled: %s\n",
1573		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1574	seq_printf(m, "Deepest RC6 Enabled: %s\n",
1575		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1576	seq_puts(m, "Current RC state: ");
1577	switch (gt_core_status & GEN6_RCn_MASK) {
1578	case GEN6_RC0:
1579		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1580			seq_puts(m, "Core Power Down\n");
1581		else
1582			seq_puts(m, "on\n");
1583		break;
1584	case GEN6_RC3:
1585		seq_puts(m, "RC3\n");
1586		break;
1587	case GEN6_RC6:
1588		seq_puts(m, "RC6\n");
1589		break;
1590	case GEN6_RC7:
1591		seq_puts(m, "RC7\n");
1592		break;
1593	default:
1594		seq_puts(m, "Unknown\n");
1595		break;
1596	}
1597
1598	seq_printf(m, "Core Power Down: %s\n",
1599		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1600
1601	/* Not exactly sure what this is */
1602	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
1603		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
1604	seq_printf(m, "RC6 residency since boot: %u\n",
1605		   I915_READ(GEN6_GT_GFX_RC6));
1606	seq_printf(m, "RC6+ residency since boot: %u\n",
1607		   I915_READ(GEN6_GT_GFX_RC6p));
1608	seq_printf(m, "RC6++ residency since boot: %u\n",
1609		   I915_READ(GEN6_GT_GFX_RC6pp));
1610
1611	seq_printf(m, "RC6   voltage: %dmV\n",
1612		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1613	seq_printf(m, "RC6+  voltage: %dmV\n",
1614		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1615	seq_printf(m, "RC6++ voltage: %dmV\n",
1616		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1617	return 0;
1618}
1619
1620static int i915_drpc_info(struct seq_file *m, void *unused)
1621{
1622	struct drm_info_node *node = m->private;
1623	struct drm_device *dev = node->minor->dev;
1624
1625	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
1626		return vlv_drpc_info(m);
1627	else if (INTEL_INFO(dev)->gen >= 6)
1628		return gen6_drpc_info(m);
1629	else
1630		return ironlake_drpc_info(m);
1631}
1632
1633static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1634{
1635	struct drm_info_node *node = m->private;
1636	struct drm_device *dev = node->minor->dev;
1637	struct drm_i915_private *dev_priv = dev->dev_private;
1638
1639	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1640		   dev_priv->fb_tracking.busy_bits);
1641
1642	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1643		   dev_priv->fb_tracking.flip_bits);
1644
1645	return 0;
1646}
1647
1648static int i915_fbc_status(struct seq_file *m, void *unused)
1649{
1650	struct drm_info_node *node = m->private;
1651	struct drm_device *dev = node->minor->dev;
1652	struct drm_i915_private *dev_priv = dev->dev_private;
1653
1654	if (!HAS_FBC(dev)) {
1655		seq_puts(m, "FBC unsupported on this chipset\n");
1656		return 0;
1657	}
1658
1659	intel_runtime_pm_get(dev_priv);
1660	mutex_lock(&dev_priv->fbc.lock);
1661
1662	if (intel_fbc_is_active(dev_priv))
1663		seq_puts(m, "FBC enabled\n");
1664	else
1665		seq_printf(m, "FBC disabled: %s\n",
1666			   dev_priv->fbc.no_fbc_reason);
1667
1668	if (INTEL_INFO(dev_priv)->gen >= 7)
1669		seq_printf(m, "Compressing: %s\n",
1670			   yesno(I915_READ(FBC_STATUS2) &
1671				 FBC_COMPRESSION_MASK));
1672
1673	mutex_unlock(&dev_priv->fbc.lock);
1674	intel_runtime_pm_put(dev_priv);
1675
1676	return 0;
1677}
1678
1679static int i915_fbc_fc_get(void *data, u64 *val)
1680{
1681	struct drm_device *dev = data;
1682	struct drm_i915_private *dev_priv = dev->dev_private;
1683
1684	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
1685		return -ENODEV;
1686
1687	*val = dev_priv->fbc.false_color;
1688
1689	return 0;
1690}
1691
1692static int i915_fbc_fc_set(void *data, u64 val)
1693{
1694	struct drm_device *dev = data;
1695	struct drm_i915_private *dev_priv = dev->dev_private;
1696	u32 reg;
1697
1698	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
1699		return -ENODEV;
1700
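	/*
	 * False color renders compressed framebuffer segments in an
	 * alternate color, so FBC compression can be verified visually.
	 */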
1701	mutex_lock(&dev_priv->fbc.lock);
1702
1703	reg = I915_READ(ILK_DPFC_CONTROL);
1704	dev_priv->fbc.false_color = val;
1705
1706	I915_WRITE(ILK_DPFC_CONTROL, val ?
1707		   (reg | FBC_CTL_FALSE_COLOR) :
1708		   (reg & ~FBC_CTL_FALSE_COLOR));
1709
1710	mutex_unlock(&dev_priv->fbc.lock);
1711	return 0;
1712}
1713
1714DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
1715			i915_fbc_fc_get, i915_fbc_fc_set,
1716			"%llu\n");
1717
1718static int i915_ips_status(struct seq_file *m, void *unused)
1719{
1720	struct drm_info_node *node = m->private;
1721	struct drm_device *dev = node->minor->dev;
1722	struct drm_i915_private *dev_priv = dev->dev_private;
1723
1724	if (!HAS_IPS(dev)) {
1725		seq_puts(m, "not supported\n");
1726		return 0;
1727	}
1728
1729	intel_runtime_pm_get(dev_priv);
1730
1731	seq_printf(m, "Enabled by kernel parameter: %s\n",
1732		   yesno(i915.enable_ips));
1733
1734	if (INTEL_INFO(dev)->gen >= 8) {
1735		seq_puts(m, "Currently: unknown\n");
1736	} else {
1737		if (I915_READ(IPS_CTL) & IPS_ENABLE)
1738			seq_puts(m, "Currently: enabled\n");
1739		else
1740			seq_puts(m, "Currently: disabled\n");
1741	}
1742
1743	intel_runtime_pm_put(dev_priv);
1744
1745	return 0;
1746}
1747
1748static int i915_sr_status(struct seq_file *m, void *unused)
1749{
1750	struct drm_info_node *node = m->private;
1751	struct drm_device *dev = node->minor->dev;
1752	struct drm_i915_private *dev_priv = dev->dev_private;
1753	bool sr_enabled = false;
1754
1755	intel_runtime_pm_get(dev_priv);
1756
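	/*
	 * The self-refresh enable bit lives in a different watermark
	 * register on each platform generation.
	 */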
1757	if (HAS_PCH_SPLIT(dev))
1758		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1759	else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
1760		 IS_I945G(dev) || IS_I945GM(dev))
1761		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1762	else if (IS_I915GM(dev))
1763		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1764	else if (IS_PINEVIEW(dev))
1765		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1766	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
1767		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1768
1769	intel_runtime_pm_put(dev_priv);
1770
1771	seq_printf(m, "self-refresh: %s\n",
1772		   sr_enabled ? "enabled" : "disabled");
1773
1774	return 0;
1775}
1776
1777static int i915_emon_status(struct seq_file *m, void *unused)
1778{
1779	struct drm_info_node *node = m->private;
1780	struct drm_device *dev = node->minor->dev;
1781	struct drm_i915_private *dev_priv = dev->dev_private;
1782	unsigned long temp, chipset, gfx;
1783	int ret;
1784
1785	if (!IS_GEN5(dev))
1786		return -ENODEV;
1787
1788	ret = mutex_lock_interruptible(&dev->struct_mutex);
1789	if (ret)
1790		return ret;
1791
1792	temp = i915_mch_val(dev_priv);
1793	chipset = i915_chipset_val(dev_priv);
1794	gfx = i915_gfx_val(dev_priv);
1795	mutex_unlock(&dev->struct_mutex);
1796
1797	seq_printf(m, "GMCH temp: %ld\n", temp);
1798	seq_printf(m, "Chipset power: %ld\n", chipset);
1799	seq_printf(m, "GFX power: %ld\n", gfx);
1800	seq_printf(m, "Total power: %ld\n", chipset + gfx);
1801
1802	return 0;
1803}
1804
1805static int i915_ring_freq_table(struct seq_file *m, void *unused)
1806{
1807	struct drm_info_node *node = m->private;
1808	struct drm_device *dev = node->minor->dev;
1809	struct drm_i915_private *dev_priv = dev->dev_private;
1810	int ret = 0;
1811	int gpu_freq, ia_freq;
1812	unsigned int max_gpu_freq, min_gpu_freq;
1813
1814	if (!HAS_CORE_RING_FREQ(dev)) {
1815		seq_puts(m, "unsupported on this chipset\n");
1816		return 0;
1817	}
1818
1819	intel_runtime_pm_get(dev_priv);
1820
1821	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1822
1823	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1824	if (ret)
1825		goto out;
1826
1827	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1828		/* Convert GT frequency to 50 MHz units */
1829		min_gpu_freq =
1830			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
1831		max_gpu_freq =
1832			dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
1833	} else {
1834		min_gpu_freq = dev_priv->rps.min_freq_softlimit;
1835		max_gpu_freq = dev_priv->rps.max_freq_softlimit;
1836	}
1837
1838	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1839
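	/*
	 * For each GPU frequency the pcode mailbox reply carries the
	 * effective IA frequency in bits 7:0 and the effective ring
	 * frequency in bits 15:8, both in 100 MHz units.
	 */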
1840	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1841		ia_freq = gpu_freq;
1842		sandybridge_pcode_read(dev_priv,
1843				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
1844				       &ia_freq);
1845		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1846			   intel_gpu_freq(dev_priv, (gpu_freq *
1847				(IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
1848				 GEN9_FREQ_SCALER : 1))),
1849			   ((ia_freq >> 0) & 0xff) * 100,
1850			   ((ia_freq >> 8) & 0xff) * 100);
1851	}
1852
1853	mutex_unlock(&dev_priv->rps.hw_lock);
1854
1855out:
1856	intel_runtime_pm_put(dev_priv);
1857	return ret;
1858}
1859
1860static int i915_opregion(struct seq_file *m, void *unused)
1861{
1862	struct drm_info_node *node = m->private;
1863	struct drm_device *dev = node->minor->dev;
1864	struct drm_i915_private *dev_priv = dev->dev_private;
1865	struct intel_opregion *opregion = &dev_priv->opregion;
1866	int ret;
1867
1868	ret = mutex_lock_interruptible(&dev->struct_mutex);
1869	if (ret)
1870		goto out;
1871
1872	if (opregion->header)
1873		seq_write(m, opregion->header, OPREGION_SIZE);
1874
1875	mutex_unlock(&dev->struct_mutex);
1876
1877out:
1878	return 0;
1879}
1880
1881static int i915_vbt(struct seq_file *m, void *unused)
1882{
1883	struct drm_info_node *node = m->private;
1884	struct drm_device *dev = node->minor->dev;
1885	struct drm_i915_private *dev_priv = dev->dev_private;
1886	struct intel_opregion *opregion = &dev_priv->opregion;
1887
1888	if (opregion->vbt)
1889		seq_write(m, opregion->vbt, opregion->vbt_size);
1890
1891	return 0;
1892}
1893
1894static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1895{
1896	struct drm_info_node *node = m->private;
1897	struct drm_device *dev = node->minor->dev;
1898	struct intel_framebuffer *fbdev_fb = NULL;
1899	struct drm_framebuffer *drm_fb;
1900
1901#ifdef CONFIG_DRM_FBDEV_EMULATION
1902	if (to_i915(dev)->fbdev) {
1903		fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
1904
1905		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1906			   fbdev_fb->base.width,
1907			   fbdev_fb->base.height,
1908			   fbdev_fb->base.depth,
1909			   fbdev_fb->base.bits_per_pixel,
1910			   fbdev_fb->base.modifier[0],
1911			   atomic_read(&fbdev_fb->base.refcount.refcount));
1912		describe_obj(m, fbdev_fb->obj);
1913		seq_putc(m, '\n');
1914	}
1915#endif
1916
1917	mutex_lock(&dev->mode_config.fb_lock);
1918	drm_for_each_fb(drm_fb, dev) {
1919		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1920		if (fb == fbdev_fb)
1921			continue;
1922
1923		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1924			   fb->base.width,
1925			   fb->base.height,
1926			   fb->base.depth,
1927			   fb->base.bits_per_pixel,
1928			   fb->base.modifier[0],
1929			   atomic_read(&fb->base.refcount.refcount));
1930		describe_obj(m, fb->obj);
1931		seq_putc(m, '\n');
1932	}
1933	mutex_unlock(&dev->mode_config.fb_lock);
1934
1935	return 0;
1936}
1937
1938static void describe_ctx_ringbuf(struct seq_file *m,
1939				 struct intel_ringbuffer *ringbuf)
1940{
1941	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
1942		   ringbuf->space, ringbuf->head, ringbuf->tail,
1943		   ringbuf->last_retired_head);
1944}
1945
1946static int i915_context_status(struct seq_file *m, void *unused)
1947{
1948	struct drm_info_node *node = m->private;
1949	struct drm_device *dev = node->minor->dev;
1950	struct drm_i915_private *dev_priv = dev->dev_private;
1951	struct intel_engine_cs *ring;
1952	struct intel_context *ctx;
1953	int ret, i;
1954
1955	ret = mutex_lock_interruptible(&dev->struct_mutex);
1956	if (ret)
1957		return ret;
1958
1959	list_for_each_entry(ctx, &dev_priv->context_list, link) {
1960		if (!i915.enable_execlists &&
1961		    ctx->legacy_hw_ctx.rcs_state == NULL)
1962			continue;
1963
1964		seq_puts(m, "HW context ");
1965		describe_ctx(m, ctx);
1966		if (ctx == dev_priv->kernel_context)
1967			seq_printf(m, "(kernel context) ");
1968
1969		if (i915.enable_execlists) {
1970			seq_putc(m, '\n');
1971			for_each_ring(ring, dev_priv, i) {
1972				struct drm_i915_gem_object *ctx_obj =
1973					ctx->engine[i].state;
1974				struct intel_ringbuffer *ringbuf =
1975					ctx->engine[i].ringbuf;
1976
1977				seq_printf(m, "%s: ", ring->name);
1978				if (ctx_obj)
1979					describe_obj(m, ctx_obj);
1980				if (ringbuf)
1981					describe_ctx_ringbuf(m, ringbuf);
1982				seq_putc(m, '\n');
1983			}
1984		} else {
1985			describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
1986		}
1987
1988		seq_putc(m, '\n');
1989	}
1990
1991	mutex_unlock(&dev->struct_mutex);
1992
1993	return 0;
1994}
1995
1996static void i915_dump_lrc_obj(struct seq_file *m,
1997			      struct intel_context *ctx,
1998			      struct intel_engine_cs *ring)
1999{
2000	struct page *page;
2001	uint32_t *reg_state;
2002	int j;
2003	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
2004	unsigned long ggtt_offset = 0;
2005
2006	if (ctx_obj == NULL) {
2007		seq_printf(m, "Context on %s with no gem object\n",
2008			   ring->name);
2009		return;
2010	}
2011
2012	seq_printf(m, "CONTEXT: %s %u\n", ring->name,
2013		   intel_execlists_ctx_id(ctx, ring));
2014
2015	if (!i915_gem_obj_ggtt_bound(ctx_obj))
2016		seq_puts(m, "\tNot bound in GGTT\n");
2017	else
2018		ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
2019
2020	if (i915_gem_object_get_pages(ctx_obj)) {
2021		seq_puts(m, "\tFailed to get pages for context object\n");
2022		return;
2023	}
2024
2025	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
2026	if (!WARN_ON(page == NULL)) {
2027		reg_state = kmap_atomic(page);
2028
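		/*
		 * Dump the start of the register state page (LRC_STATE_PN),
		 * four dwords per line, addressed relative to the context's
		 * GGTT offset.
		 */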
2029		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
2030			seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
2031				   ggtt_offset + 4096 + (j * 4),
2032				   reg_state[j], reg_state[j + 1],
2033				   reg_state[j + 2], reg_state[j + 3]);
2034		}
2035		kunmap_atomic(reg_state);
2036	}
2037
2038	seq_putc(m, '\n');
2039}
2040
2041static int i915_dump_lrc(struct seq_file *m, void *unused)
2042{
2043	struct drm_info_node *node = (struct drm_info_node *) m->private;
2044	struct drm_device *dev = node->minor->dev;
2045	struct drm_i915_private *dev_priv = dev->dev_private;
2046	struct intel_engine_cs *ring;
2047	struct intel_context *ctx;
2048	int ret, i;
2049
2050	if (!i915.enable_execlists) {
2051		seq_puts(m, "Logical Ring Contexts are disabled\n");
2052		return 0;
2053	}
2054
2055	ret = mutex_lock_interruptible(&dev->struct_mutex);
2056	if (ret)
2057		return ret;
2058
2059	list_for_each_entry(ctx, &dev_priv->context_list, link)
2060		if (ctx != dev_priv->kernel_context)
2061			for_each_ring(ring, dev_priv, i)
2062				i915_dump_lrc_obj(m, ctx, ring);
2063
2064	mutex_unlock(&dev->struct_mutex);
2065
2066	return 0;
2067}
2068
2069static int i915_execlists(struct seq_file *m, void *data)
2070{
2071	struct drm_info_node *node = (struct drm_info_node *)m->private;
2072	struct drm_device *dev = node->minor->dev;
2073	struct drm_i915_private *dev_priv = dev->dev_private;
2074	struct intel_engine_cs *ring;
2075	u32 status_pointer;
2076	u8 read_pointer;
2077	u8 write_pointer;
2078	u32 status;
2079	u32 ctx_id;
2080	struct list_head *cursor;
2081	int ring_id, i;
2082	int ret;
2083
2084	if (!i915.enable_execlists) {
2085		seq_puts(m, "Logical Ring Contexts are disabled\n");
2086		return 0;
2087	}
2088
2089	ret = mutex_lock_interruptible(&dev->struct_mutex);
2090	if (ret)
2091		return ret;
2092
2093	intel_runtime_pm_get(dev_priv);
2094
2095	for_each_ring(ring, dev_priv, ring_id) {
2096		struct drm_i915_gem_request *head_req = NULL;
2097		int count = 0;
2098		unsigned long flags;
2099
2100		seq_printf(m, "%s\n", ring->name);
2101
2102		status = I915_READ(RING_EXECLIST_STATUS_LO(ring));
2103		ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(ring));
2104		seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
2105			   status, ctx_id);
2106
2107		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
2108		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
2109
2110		read_pointer = ring->next_context_status_buffer;
2111		write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
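		/*
		 * The CSB is a circular buffer of GEN8_CSB_ENTRIES slots;
		 * unwrap the write pointer so it never appears behind the
		 * read pointer.
		 */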
2112		if (read_pointer > write_pointer)
2113			write_pointer += GEN8_CSB_ENTRIES;
2114		seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
2115			   read_pointer, write_pointer);
2116
2117		for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
2118			status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
2119			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));
2120
2121			seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
2122				   i, status, ctx_id);
2123		}
2124
2125		spin_lock_irqsave(&ring->execlist_lock, flags);
2126		list_for_each(cursor, &ring->execlist_queue)
2127			count++;
2128		head_req = list_first_entry_or_null(&ring->execlist_queue,
2129				struct drm_i915_gem_request, execlist_link);
2130		spin_unlock_irqrestore(&ring->execlist_lock, flags);
2131
2132		seq_printf(m, "\t%d requests in queue\n", count);
2133		if (head_req) {
2134			seq_printf(m, "\tHead request id: %u\n",
2135				   intel_execlists_ctx_id(head_req->ctx, ring));
2136			seq_printf(m, "\tHead request tail: %u\n",
2137				   head_req->tail);
2138		}
2139
2140		seq_putc(m, '\n');
2141	}
2142
2143	intel_runtime_pm_put(dev_priv);
2144	mutex_unlock(&dev->struct_mutex);
2145
2146	return 0;
2147}
2148
2149static const char *swizzle_string(unsigned swizzle)
2150{
2151	switch (swizzle) {
2152	case I915_BIT_6_SWIZZLE_NONE:
2153		return "none";
2154	case I915_BIT_6_SWIZZLE_9:
2155		return "bit9";
2156	case I915_BIT_6_SWIZZLE_9_10:
2157		return "bit9/bit10";
2158	case I915_BIT_6_SWIZZLE_9_11:
2159		return "bit9/bit11";
2160	case I915_BIT_6_SWIZZLE_9_10_11:
2161		return "bit9/bit10/bit11";
2162	case I915_BIT_6_SWIZZLE_9_17:
2163		return "bit9/bit17";
2164	case I915_BIT_6_SWIZZLE_9_10_17:
2165		return "bit9/bit10/bit17";
2166	case I915_BIT_6_SWIZZLE_UNKNOWN:
2167		return "unknown";
2168	}
2169
2170	return "bug";
2171}
2172
2173static int i915_swizzle_info(struct seq_file *m, void *data)
2174{
2175	struct drm_info_node *node = m->private;
2176	struct drm_device *dev = node->minor->dev;
2177	struct drm_i915_private *dev_priv = dev->dev_private;
2178	int ret;
2179
2180	ret = mutex_lock_interruptible(&dev->struct_mutex);
2181	if (ret)
2182		return ret;
2183	intel_runtime_pm_get(dev_priv);
2184
2185	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
2186		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
2187	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
2188		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
2189
2190	if (IS_GEN3(dev) || IS_GEN4(dev)) {
2191		seq_printf(m, "DCC = 0x%08x\n",
2192			   I915_READ(DCC));
2193		seq_printf(m, "DCC2 = 0x%08x\n",
2194			   I915_READ(DCC2));
2195		seq_printf(m, "C0DRB3 = 0x%04x\n",
2196			   I915_READ16(C0DRB3));
2197		seq_printf(m, "C1DRB3 = 0x%04x\n",
2198			   I915_READ16(C1DRB3));
2199	} else if (INTEL_INFO(dev)->gen >= 6) {
2200		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
2201			   I915_READ(MAD_DIMM_C0));
2202		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
2203			   I915_READ(MAD_DIMM_C1));
2204		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
2205			   I915_READ(MAD_DIMM_C2));
2206		seq_printf(m, "TILECTL = 0x%08x\n",
2207			   I915_READ(TILECTL));
2208		if (INTEL_INFO(dev)->gen >= 8)
2209			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
2210				   I915_READ(GAMTARBMODE));
2211		else
2212			seq_printf(m, "ARB_MODE = 0x%08x\n",
2213				   I915_READ(ARB_MODE));
2214		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
2215			   I915_READ(DISP_ARB_CTL));
2216	}
2217
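	/*
	 * QUIRK_PIN_SWIZZLED_PAGES marks asymmetric ("L-shaped") memory
	 * configurations, where bit17 swizzling differs across the range.
	 */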
2218	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2219		seq_puts(m, "L-shaped memory detected\n");
2220
2221	intel_runtime_pm_put(dev_priv);
2222	mutex_unlock(&dev->struct_mutex);
2223
2224	return 0;
2225}
2226
2227static int per_file_ctx(int id, void *ptr, void *data)
2228{
2229	struct intel_context *ctx = ptr;
2230	struct seq_file *m = data;
2231	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2232
2233	if (!ppgtt) {
2234		seq_printf(m, "  no ppgtt for context %d\n",
2235			   ctx->user_handle);
2236		return 0;
2237	}
2238
2239	if (i915_gem_context_is_default(ctx))
2240		seq_puts(m, "  default context:\n");
2241	else
2242		seq_printf(m, "  context %d:\n", ctx->user_handle);
2243	ppgtt->debug_dump(ppgtt, m);
2244
2245	return 0;
2246}
2247
2248static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2249{
2250	struct drm_i915_private *dev_priv = dev->dev_private;
2251	struct intel_engine_cs *ring;
2252	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2253	int unused, i;
2254
2255	if (!ppgtt)
2256		return;
2257
2258	for_each_ring(ring, dev_priv, unused) {
2259		seq_printf(m, "%s\n", ring->name);
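		/*
		 * Four page-directory pointers per ring, each split across
		 * an upper/lower dword register pair.
		 */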
2260		for (i = 0; i < 4; i++) {
2261			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(ring, i));
2262			pdp <<= 32;
2263			pdp |= I915_READ(GEN8_RING_PDP_LDW(ring, i));
2264			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2265		}
2266	}
2267}
2268
2269static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2270{
2271	struct drm_i915_private *dev_priv = dev->dev_private;
2272	struct intel_engine_cs *ring;
2273	int i;
2274
2275	if (INTEL_INFO(dev)->gen == 6)
2276		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2277
2278	for_each_ring(ring, dev_priv, i) {
2279		seq_printf(m, "%s\n", ring->name);
2280		if (INTEL_INFO(dev)->gen == 7)
2281			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
2282		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
2283		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
2284		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
2285	}
2286	if (dev_priv->mm.aliasing_ppgtt) {
2287		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2288
2289		seq_puts(m, "aliasing PPGTT:\n");
2290		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
2291
2292		ppgtt->debug_dump(ppgtt, m);
2293	}
2294
2295	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
2296}
2297
2298static int i915_ppgtt_info(struct seq_file *m, void *data)
2299{
2300	struct drm_info_node *node = m->private;
2301	struct drm_device *dev = node->minor->dev;
2302	struct drm_i915_private *dev_priv = dev->dev_private;
2303	struct drm_file *file;
2304
2305	int ret = mutex_lock_interruptible(&dev->struct_mutex);
2306	if (ret)
2307		return ret;
2308	intel_runtime_pm_get(dev_priv);
2309
2310	if (INTEL_INFO(dev)->gen >= 8)
2311		gen8_ppgtt_info(m, dev);
2312	else if (INTEL_INFO(dev)->gen >= 6)
2313		gen6_ppgtt_info(m, dev);
2314
2315	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2316		struct drm_i915_file_private *file_priv = file->driver_priv;
2317		struct task_struct *task;
2318
2319		task = get_pid_task(file->pid, PIDTYPE_PID);
2320		if (!task) {
2321			ret = -ESRCH;
2322			goto out_put;
2323		}
2324		seq_printf(m, "\nproc: %s\n", task->comm);
2325		put_task_struct(task);
2326		idr_for_each(&file_priv->context_idr, per_file_ctx,
2327			     (void *)(unsigned long)m);
2328	}
2329
2330out_put:
2331	intel_runtime_pm_put(dev_priv);
2332	mutex_unlock(&dev->struct_mutex);
2333
2334	return ret;
2335}
2336
2337static int count_irq_waiters(struct drm_i915_private *i915)
2338{
2339	struct intel_engine_cs *ring;
2340	int count = 0;
2341	int i;
2342
2343	for_each_ring(ring, i915, i)
2344		count += ring->irq_refcount;
2345
2346	return count;
2347}
2348
2349static int i915_rps_boost_info(struct seq_file *m, void *data)
2350{
2351	struct drm_info_node *node = m->private;
2352	struct drm_device *dev = node->minor->dev;
2353	struct drm_i915_private *dev_priv = dev->dev_private;
2354	struct drm_file *file;
2355
2356	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
2357	seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy);
2358	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
2359	seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
2360		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
2361		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
2362		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
2363		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
2364		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
2365	spin_lock(&dev_priv->rps.client_lock);
2366	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2367		struct drm_i915_file_private *file_priv = file->driver_priv;
2368		struct task_struct *task;
2369
2370		rcu_read_lock();
2371		task = pid_task(file->pid, PIDTYPE_PID);
2372		seq_printf(m, "%s [%d]: %d boosts%s\n",
2373			   task ? task->comm : "<unknown>",
2374			   task ? task->pid : -1,
2375			   file_priv->rps.boosts,
2376			   list_empty(&file_priv->rps.link) ? "" : ", active");
2377		rcu_read_unlock();
2378	}
2379	seq_printf(m, "Semaphore boosts: %d%s\n",
2380		   dev_priv->rps.semaphores.boosts,
2381		   list_empty(&dev_priv->rps.semaphores.link) ? "" : ", active");
2382	seq_printf(m, "MMIO flip boosts: %d%s\n",
2383		   dev_priv->rps.mmioflips.boosts,
2384		   list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active");
2385	seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
2386	spin_unlock(&dev_priv->rps.client_lock);
2387
2388	return 0;
2389}
2390
2391static int i915_llc(struct seq_file *m, void *data)
2392{
2393	struct drm_info_node *node = m->private;
2394	struct drm_device *dev = node->minor->dev;
2395	struct drm_i915_private *dev_priv = dev->dev_private;
2396
2397	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
2398	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
2399	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
2400
2401	return 0;
2402}
2403
2404static int i915_guc_load_status_info(struct seq_file *m, void *data)
2405{
2406	struct drm_info_node *node = m->private;
2407	struct drm_i915_private *dev_priv = node->minor->dev->dev_private;
2408	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
2409	u32 tmp, i;
2410
2411	if (!HAS_GUC_UCODE(dev_priv->dev))
2412		return 0;
2413
2414	seq_printf(m, "GuC firmware status:\n");
2415	seq_printf(m, "\tpath: %s\n",
2416		guc_fw->guc_fw_path);
2417	seq_printf(m, "\tfetch: %s\n",
2418		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
2419	seq_printf(m, "\tload: %s\n",
2420		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
2421	seq_printf(m, "\tversion wanted: %d.%d\n",
2422		guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
2423	seq_printf(m, "\tversion found: %d.%d\n",
2424		guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
2425	seq_printf(m, "\theader: offset is %d; size = %d\n",
2426		guc_fw->header_offset, guc_fw->header_size);
2427	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
2428		guc_fw->ucode_offset, guc_fw->ucode_size);
2429	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
2430		guc_fw->rsa_offset, guc_fw->rsa_size);
2431
2432	tmp = I915_READ(GUC_STATUS);
2433
2434	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2435	seq_printf(m, "\tBootrom status = 0x%x\n",
2436		(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2437	seq_printf(m, "\tuKernel status = 0x%x\n",
2438		(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2439	seq_printf(m, "\tMIA Core status = 0x%x\n",
2440		(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2441	seq_puts(m, "\nScratch registers:\n");
2442	for (i = 0; i < 16; i++)
2443		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
2444
2445	return 0;
2446}
2447
2448static void i915_guc_client_info(struct seq_file *m,
2449				 struct drm_i915_private *dev_priv,
2450				 struct i915_guc_client *client)
2451{
2452	struct intel_engine_cs *ring;
2453	uint64_t tot = 0;
2454	uint32_t i;
2455
2456	seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
2457		client->priority, client->ctx_index, client->proc_desc_offset);
2458	seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
2459		client->doorbell_id, client->doorbell_offset, client->cookie);
2460	seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
2461		client->wq_size, client->wq_offset, client->wq_tail);
2462
2463	seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
2464	seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
2465	seq_printf(m, "\tLast submission result: %d\n", client->retcode);
2466
2467	for_each_ring(ring, dev_priv, i) {
2468		seq_printf(m, "\tSubmissions: %llu %s\n",
2469				client->submissions[ring->guc_id],
2470				ring->name);
2471		tot += client->submissions[ring->guc_id];
2472	}
2473	seq_printf(m, "\tTotal: %llu\n", tot);
2474}
2475
2476static int i915_guc_info(struct seq_file *m, void *data)
2477{
2478	struct drm_info_node *node = m->private;
2479	struct drm_device *dev = node->minor->dev;
2480	struct drm_i915_private *dev_priv = dev->dev_private;
2481	struct intel_guc guc;
2482	struct i915_guc_client client = {};
2483	struct intel_engine_cs *ring;
2484	enum intel_ring_id i;
2485	u64 total = 0;
2486
2487	if (!HAS_GUC_SCHED(dev_priv->dev))
2488		return 0;
2489
2490	if (mutex_lock_interruptible(&dev->struct_mutex))
2491		return 0;
2492
2493	/* Take a local copy of the GuC data, so we can dump it at leisure */
2494	guc = dev_priv->guc;
2495	if (guc.execbuf_client)
2496		client = *guc.execbuf_client;
2497
2498	mutex_unlock(&dev->struct_mutex);
2499
2500	seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
2501	seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
2502	seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
2503	seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status);
2504	seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
2505
2506	seq_printf(m, "\nGuC submissions:\n");
2507	for_each_ring(ring, dev_priv, i) {
2508		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
2509			ring->name, guc.submissions[ring->guc_id],
2510			guc.last_seqno[ring->guc_id]);
2511		total += guc.submissions[ring->guc_id];
2512	}
2513	seq_printf(m, "\t%s: %llu\n", "Total", total);
2514
2515	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
2516	i915_guc_client_info(m, dev_priv, &client);
2517
2518	/* Add more as required ... */
2519
2520	return 0;
2521}
2522
2523static int i915_guc_log_dump(struct seq_file *m, void *data)
2524{
2525	struct drm_info_node *node = m->private;
2526	struct drm_device *dev = node->minor->dev;
2527	struct drm_i915_private *dev_priv = dev->dev_private;
2528	struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
2529	u32 *log;
2530	int i = 0, pg;
2531
2532	if (!log_obj)
2533		return 0;
2534
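	/* Dump every page of the GuC log object, four dwords per line. */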
2535	for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) {
2536		log = kmap_atomic(i915_gem_object_get_page(log_obj, pg));
2537
2538		for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
2539			seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2540				   *(log + i), *(log + i + 1),
2541				   *(log + i + 2), *(log + i + 3));
2542
2543		kunmap_atomic(log);
2544	}
2545
2546	seq_putc(m, '\n');
2547
2548	return 0;
2549}
2550
2551static int i915_edp_psr_status(struct seq_file *m, void *data)
2552{
2553	struct drm_info_node *node = m->private;
2554	struct drm_device *dev = node->minor->dev;
2555	struct drm_i915_private *dev_priv = dev->dev_private;
2556	u32 psrperf = 0;
2557	u32 stat[3];
2558	enum pipe pipe;
2559	bool enabled = false;
2560
2561	if (!HAS_PSR(dev)) {
2562		seq_puts(m, "PSR not supported\n");
2563		return 0;
2564	}
2565
2566	intel_runtime_pm_get(dev_priv);
2567
2568	mutex_lock(&dev_priv->psr.lock);
2569	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
2570	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
2571	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
2572	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
2573	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2574		   dev_priv->psr.busy_frontbuffer_bits);
2575	seq_printf(m, "Re-enable work scheduled: %s\n",
2576		   yesno(work_busy(&dev_priv->psr.work.work)));
2577
2578	if (HAS_DDI(dev))
2579		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
2580	else {
2581		for_each_pipe(dev_priv, pipe) {
2582			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
2583				VLV_EDP_PSR_CURR_STATE_MASK;
2584			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2585			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2586				enabled = true;
2587		}
2588	}
2589
2590	seq_printf(m, "Main link in standby mode: %s\n",
2591		   yesno(dev_priv->psr.link_standby));
2592
2593	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
2594
2595	if (!HAS_DDI(dev))
2596		for_each_pipe(dev_priv, pipe) {
2597			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2598			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2599				seq_printf(m, " pipe %c", pipe_name(pipe));
2600		}
2601	seq_puts(m, "\n");
2602
2603	/*
2604	 * VLV/CHV PSR has no performance counter of any kind.
2605	 * The SKL+ counter is reset to 0 every time a DC state is entered.
2606	 */
2607	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2608		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
2609			EDP_PSR_PERF_CNT_MASK;
2610
2611		seq_printf(m, "Performance_Counter: %u\n", psrperf);
2612	}
2613	mutex_unlock(&dev_priv->psr.lock);
2614
2615	intel_runtime_pm_put(dev_priv);
2616	return 0;
2617}
2618
2619static int i915_sink_crc(struct seq_file *m, void *data)
2620{
2621	struct drm_info_node *node = m->private;
2622	struct drm_device *dev = node->minor->dev;
2623	struct intel_encoder *encoder;
2624	struct intel_connector *connector;
2625	struct intel_dp *intel_dp = NULL;
2626	int ret;
2627	u8 crc[6];
2628
2629	drm_modeset_lock_all(dev);
2630	for_each_intel_connector(dev, connector) {
2631
2632		if (connector->base.dpms != DRM_MODE_DPMS_ON)
2633			continue;
2634
2635		if (!connector->base.encoder)
2636			continue;
2637
2638		encoder = to_intel_encoder(connector->base.encoder);
2639		if (encoder->type != INTEL_OUTPUT_EDP)
2640			continue;
2641
2642		intel_dp = enc_to_intel_dp(&encoder->base);
2643
2644		ret = intel_dp_sink_crc(intel_dp, crc);
2645		if (ret)
2646			goto out;
2647
2648		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
2649			   crc[0], crc[1], crc[2],
2650			   crc[3], crc[4], crc[5]);
2651		goto out;
2652	}
2653	ret = -ENODEV;
2654out:
2655	drm_modeset_unlock_all(dev);
2656	return ret;
2657}
2658
2659static int i915_energy_uJ(struct seq_file *m, void *data)
2660{
2661	struct drm_info_node *node = m->private;
2662	struct drm_device *dev = node->minor->dev;
2663	struct drm_i915_private *dev_priv = dev->dev_private;
2664	u64 power;
2665	u32 units;
2666
2667	if (INTEL_INFO(dev)->gen < 6)
2668		return -ENODEV;
2669
2670	intel_runtime_pm_get(dev_priv);
2671
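	/*
	 * Bits 12:8 of MSR_RAPL_POWER_UNIT give the energy status unit as
	 * 1/2^ESU Joules; precompute microjoules per hardware count.
	 */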
2672	rdmsrl(MSR_RAPL_POWER_UNIT, power);
2673	power = (power & 0x1f00) >> 8;
2674	units = 1000000 / (1 << power); /* convert to uJ */
2675	power = I915_READ(MCH_SECP_NRG_STTS);
2676	power *= units;
2677
2678	intel_runtime_pm_put(dev_priv);
2679
2680	seq_printf(m, "%llu", (unsigned long long)power);
2681
2682	return 0;
2683}
2684
2685static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2686{
2687	struct drm_info_node *node = m->private;
2688	struct drm_device *dev = node->minor->dev;
2689	struct drm_i915_private *dev_priv = dev->dev_private;
2690
2691	if (!HAS_RUNTIME_PM(dev)) {
2692		seq_puts(m, "not supported\n");
2693		return 0;
2694	}
2695
2696	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
2697	seq_printf(m, "IRQs disabled: %s\n",
2698		   yesno(!intel_irqs_enabled(dev_priv)));
2699#ifdef CONFIG_PM
2700	seq_printf(m, "Usage count: %d\n",
2701		   atomic_read(&dev->dev->power.usage_count));
2702#else
2703	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2704#endif
2705
2706	return 0;
2707}
2708
2709static int i915_power_domain_info(struct seq_file *m, void *unused)
2710{
2711	struct drm_info_node *node = m->private;
2712	struct drm_device *dev = node->minor->dev;
2713	struct drm_i915_private *dev_priv = dev->dev_private;
2714	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2715	int i;
2716
2717	mutex_lock(&power_domains->lock);
2718
2719	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2720	for (i = 0; i < power_domains->power_well_count; i++) {
2721		struct i915_power_well *power_well;
2722		enum intel_display_power_domain power_domain;
2723
2724		power_well = &power_domains->power_wells[i];
2725		seq_printf(m, "%-25s %d\n", power_well->name,
2726			   power_well->count);
2727
2728		for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
2729		     power_domain++) {
2730			if (!(BIT(power_domain) & power_well->domains))
2731				continue;
2732
2733			seq_printf(m, "  %-23s %d\n",
2734				 intel_display_power_domain_str(power_domain),
2735				 power_domains->domain_use_count[power_domain]);
2736		}
2737	}
2738
2739	mutex_unlock(&power_domains->lock);
2740
2741	return 0;
2742}
2743
2744static int i915_dmc_info(struct seq_file *m, void *unused)
2745{
2746	struct drm_info_node *node = m->private;
2747	struct drm_device *dev = node->minor->dev;
2748	struct drm_i915_private *dev_priv = dev->dev_private;
2749	struct intel_csr *csr;
2750
2751	if (!HAS_CSR(dev)) {
2752		seq_puts(m, "not supported\n");
2753		return 0;
2754	}
2755
2756	csr = &dev_priv->csr;
2757
2758	intel_runtime_pm_get(dev_priv);
2759
2760	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2761	seq_printf(m, "path: %s\n", csr->fw_path);
2762
2763	if (!csr->dmc_payload)
2764		goto out;
2765
2766	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2767		   CSR_VERSION_MINOR(csr->version));
2768
2769	if (IS_SKYLAKE(dev) && csr->version >= CSR_VERSION(1, 6)) {
2770		seq_printf(m, "DC3 -> DC5 count: %d\n",
2771			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
2772		seq_printf(m, "DC5 -> DC6 count: %d\n",
2773			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
2774	} else if (IS_BROXTON(dev) && csr->version >= CSR_VERSION(1, 4)) {
2775		seq_printf(m, "DC3 -> DC5 count: %d\n",
2776			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
2777	}
2778
2779out:
2780	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2781	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2782	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2783
2784	intel_runtime_pm_put(dev_priv);
2785
2786	return 0;
2787}
2788
2789static void intel_seq_print_mode(struct seq_file *m, int tabs,
2790				 struct drm_display_mode *mode)
2791{
2792	int i;
2793
2794	for (i = 0; i < tabs; i++)
2795		seq_putc(m, '\t');
2796
2797	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2798		   mode->base.id, mode->name,
2799		   mode->vrefresh, mode->clock,
2800		   mode->hdisplay, mode->hsync_start,
2801		   mode->hsync_end, mode->htotal,
2802		   mode->vdisplay, mode->vsync_start,
2803		   mode->vsync_end, mode->vtotal,
2804		   mode->type, mode->flags);
2805}
2806
2807static void intel_encoder_info(struct seq_file *m,
2808			       struct intel_crtc *intel_crtc,
2809			       struct intel_encoder *intel_encoder)
2810{
2811	struct drm_info_node *node = m->private;
2812	struct drm_device *dev = node->minor->dev;
2813	struct drm_crtc *crtc = &intel_crtc->base;
2814	struct intel_connector *intel_connector;
2815	struct drm_encoder *encoder;
2816
2817	encoder = &intel_encoder->base;
2818	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2819		   encoder->base.id, encoder->name);
2820	for_each_connector_on_encoder(dev, encoder, intel_connector) {
2821		struct drm_connector *connector = &intel_connector->base;
2822		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2823			   connector->base.id,
2824			   connector->name,
2825			   drm_get_connector_status_name(connector->status));
2826		if (connector->status == connector_status_connected) {
2827			struct drm_display_mode *mode = &crtc->mode;
2828			seq_printf(m, ", mode:\n");
2829			intel_seq_print_mode(m, 2, mode);
2830		} else {
2831			seq_putc(m, '\n');
2832		}
2833	}
2834}
2835
2836static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2837{
2838	struct drm_info_node *node = m->private;
2839	struct drm_device *dev = node->minor->dev;
2840	struct drm_crtc *crtc = &intel_crtc->base;
2841	struct intel_encoder *intel_encoder;
2842	struct drm_plane_state *plane_state = crtc->primary->state;
2843	struct drm_framebuffer *fb = plane_state->fb;
2844
2845	if (fb)
2846		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2847			   fb->base.id, plane_state->src_x >> 16,
2848			   plane_state->src_y >> 16, fb->width, fb->height);
2849	else
2850		seq_puts(m, "\tprimary plane disabled\n");
2851	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2852		intel_encoder_info(m, intel_crtc, intel_encoder);
2853}
2854
2855static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2856{
2857	struct drm_display_mode *mode = panel->fixed_mode;
2858
2859	seq_printf(m, "\tfixed mode:\n");
2860	intel_seq_print_mode(m, 2, mode);
2861}
2862
2863static void intel_dp_info(struct seq_file *m,
2864			  struct intel_connector *intel_connector)
2865{
2866	struct intel_encoder *intel_encoder = intel_connector->encoder;
2867	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2868
2869	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2870	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2871	if (intel_encoder->type == INTEL_OUTPUT_EDP)
2872		intel_panel_info(m, &intel_connector->panel);
2873}
2874
2875static void intel_hdmi_info(struct seq_file *m,
2876			    struct intel_connector *intel_connector)
2877{
2878	struct intel_encoder *intel_encoder = intel_connector->encoder;
2879	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2880
2881	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2882}
2883
2884static void intel_lvds_info(struct seq_file *m,
2885			    struct intel_connector *intel_connector)
2886{
2887	intel_panel_info(m, &intel_connector->panel);
2888}
2889
2890static void intel_connector_info(struct seq_file *m,
2891				 struct drm_connector *connector)
2892{
2893	struct intel_connector *intel_connector = to_intel_connector(connector);
2894	struct intel_encoder *intel_encoder = intel_connector->encoder;
2895	struct drm_display_mode *mode;
2896
2897	seq_printf(m, "connector %d: type %s, status: %s\n",
2898		   connector->base.id, connector->name,
2899		   drm_get_connector_status_name(connector->status));
2900	if (connector->status == connector_status_connected) {
2901		seq_printf(m, "\tname: %s\n", connector->display_info.name);
2902		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2903			   connector->display_info.width_mm,
2904			   connector->display_info.height_mm);
2905		seq_printf(m, "\tsubpixel order: %s\n",
2906			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2907		seq_printf(m, "\tCEA rev: %d\n",
2908			   connector->display_info.cea_rev);
2909	}
2910	if (intel_encoder) {
2911		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
2912		    intel_encoder->type == INTEL_OUTPUT_EDP)
2913			intel_dp_info(m, intel_connector);
2914		else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
2915			intel_hdmi_info(m, intel_connector);
2916		else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2917			intel_lvds_info(m, intel_connector);
2918	}
2919
2920	seq_printf(m, "\tmodes:\n");
2921	list_for_each_entry(mode, &connector->modes, head)
2922		intel_seq_print_mode(m, 2, mode);
2923}
2924
2925static bool cursor_active(struct drm_device *dev, int pipe)
2926{
2927	struct drm_i915_private *dev_priv = dev->dev_private;
2928	u32 state;
2929
2930	if (IS_845G(dev) || IS_I865G(dev))
2931		state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
2932	else
2933		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
2934
2935	return state;
2936}
2937
2938static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
2939{
2940	struct drm_i915_private *dev_priv = dev->dev_private;
2941	u32 pos;
2942
2943	pos = I915_READ(CURPOS(pipe));
2944
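	/* CURPOS packs X/Y as sign-magnitude fields; decode each axis. */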
2945	*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
2946	if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
2947		*x = -*x;
2948
2949	*y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
2950	if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
2951		*y = -*y;
2952
2953	return cursor_active(dev, pipe);
2954}
2955
2956static const char *plane_type(enum drm_plane_type type)
2957{
2958	switch (type) {
2959	case DRM_PLANE_TYPE_OVERLAY:
2960		return "OVL";
2961	case DRM_PLANE_TYPE_PRIMARY:
2962		return "PRI";
2963	case DRM_PLANE_TYPE_CURSOR:
2964		return "CUR";
2965	/*
2966	 * Deliberately omitting default: to generate compiler warnings
2967	 * when a new drm_plane_type gets added.
2968	 */
2969	}
2970
2971	return "unknown";
2972}
2973
2974static const char *plane_rotation(unsigned int rotation)
2975{
2976	static char buf[48];
2977	/*
2978	 * According to the docs only one DRM_ROTATE_ value is allowed, but
2979	 * print them all so misused values are easy to spot.
2980	 */
2981	snprintf(buf, sizeof(buf),
2982		 "%s%s%s%s%s%s(0x%08x)",
2983		 (rotation & BIT(DRM_ROTATE_0)) ? "0 " : "",
2984		 (rotation & BIT(DRM_ROTATE_90)) ? "90 " : "",
2985		 (rotation & BIT(DRM_ROTATE_180)) ? "180 " : "",
2986		 (rotation & BIT(DRM_ROTATE_270)) ? "270 " : "",
2987		 (rotation & BIT(DRM_REFLECT_X)) ? "FLIPX " : "",
2988		 (rotation & BIT(DRM_REFLECT_Y)) ? "FLIPY " : "",
2989		 rotation);
2990
2991	return buf;
2992}
2993
2994static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2995{
2996	struct drm_info_node *node = m->private;
2997	struct drm_device *dev = node->minor->dev;
2998	struct intel_plane *intel_plane;
2999
3000	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3001		struct drm_plane_state *state;
3002		struct drm_plane *plane = &intel_plane->base;
3003
3004		if (!plane->state) {
3005			seq_puts(m, "plane->state is NULL!\n");
3006			continue;
3007		}
3008
3009		state = plane->state;
3010
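		/*
		 * src_* coordinates are 16.16 fixed point; (frac * 15625) >> 10
		 * scales the fractional bits to millionths (x * 10^6 / 2^16).
		 */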
3011		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3012			   plane->base.id,
3013			   plane_type(intel_plane->base.type),
3014			   state->crtc_x, state->crtc_y,
3015			   state->crtc_w, state->crtc_h,
3016			   (state->src_x >> 16),
3017			   ((state->src_x & 0xffff) * 15625) >> 10,
3018			   (state->src_y >> 16),
3019			   ((state->src_y & 0xffff) * 15625) >> 10,
3020			   (state->src_w >> 16),
3021			   ((state->src_w & 0xffff) * 15625) >> 10,
3022			   (state->src_h >> 16),
3023			   ((state->src_h & 0xffff) * 15625) >> 10,
3024			   state->fb ? drm_get_format_name(state->fb->pixel_format) : "N/A",
3025			   plane_rotation(state->rotation));
3026	}
3027}
3028
3029static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3030{
3031	struct intel_crtc_state *pipe_config;
3032	int num_scalers = intel_crtc->num_scalers;
3033	int i;
3034
3035	pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3036
3037	/* Not all platforms have a scaler */
3038	if (num_scalers) {
3039		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3040			   num_scalers,
3041			   pipe_config->scaler_state.scaler_users,
3042			   pipe_config->scaler_state.scaler_id);
3043
3044		for (i = 0; i < SKL_NUM_SCALERS; i++) {
3045			struct intel_scaler *sc =
3046					&pipe_config->scaler_state.scalers[i];
3047
3048			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3049				   i, yesno(sc->in_use), sc->mode);
3050		}
3051		seq_puts(m, "\n");
3052	} else {
3053		seq_puts(m, "\tNo scalers available on this platform\n");
3054	}
3055}
3056
3057static int i915_display_info(struct seq_file *m, void *unused)
3058{
3059	struct drm_info_node *node = m->private;
3060	struct drm_device *dev = node->minor->dev;
3061	struct drm_i915_private *dev_priv = dev->dev_private;
3062	struct intel_crtc *crtc;
3063	struct drm_connector *connector;
3064
3065	intel_runtime_pm_get(dev_priv);
3066	drm_modeset_lock_all(dev);
3067	seq_printf(m, "CRTC info\n");
3068	seq_printf(m, "---------\n");
3069	for_each_intel_crtc(dev, crtc) {
3070		bool active;
3071		struct intel_crtc_state *pipe_config;
3072		int x, y;
3073
3074		pipe_config = to_intel_crtc_state(crtc->base.state);
3075
3076		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3077			   crtc->base.base.id, pipe_name(crtc->pipe),
3078			   yesno(pipe_config->base.active),
3079			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3080			   yesno(pipe_config->dither), pipe_config->pipe_bpp);
3081
3082		if (pipe_config->base.active) {
3083			intel_crtc_info(m, crtc);
3084
3085			active = cursor_position(dev, crtc->pipe, &x, &y);
3086			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
3087				   yesno(crtc->cursor_base),
3088				   x, y, crtc->base.cursor->state->crtc_w,
3089				   crtc->base.cursor->state->crtc_h,
3090				   crtc->cursor_addr, yesno(active));
3091			intel_scaler_info(m, crtc);
3092			intel_plane_info(m, crtc);
3093		}
3094
3095		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
3096			   yesno(!crtc->cpu_fifo_underrun_disabled),
3097			   yesno(!crtc->pch_fifo_underrun_disabled));
3098	}
3099
3100	seq_printf(m, "\n");
3101	seq_printf(m, "Connector info\n");
3102	seq_printf(m, "--------------\n");
3103	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3104		intel_connector_info(m, connector);
3105	}
3106	drm_modeset_unlock_all(dev);
3107	intel_runtime_pm_put(dev_priv);
3108
3109	return 0;
3110}
3111
3112static int i915_semaphore_status(struct seq_file *m, void *unused)
3113{
3114	struct drm_info_node *node = (struct drm_info_node *) m->private;
3115	struct drm_device *dev = node->minor->dev;
3116	struct drm_i915_private *dev_priv = dev->dev_private;
3117	struct intel_engine_cs *ring;
3118	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
3119	int i, j, ret;
3120
3121	if (!i915_semaphore_is_enabled(dev)) {
3122		seq_puts(m, "Semaphores are disabled\n");
3123		return 0;
3124	}
3125
3126	ret = mutex_lock_interruptible(&dev->struct_mutex);
3127	if (ret)
3128		return ret;
3129	intel_runtime_pm_get(dev_priv);
3130
3131	if (IS_BROADWELL(dev)) {
3132		struct page *page;
3133		uint64_t *seqno;
3134
3135		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
3136
3137		seqno = (uint64_t *)kmap_atomic(page);
3138		for_each_ring(ring, dev_priv, i) {
3139			uint64_t offset;
3140
3141			seq_printf(m, "%s\n", ring->name);
3142
3143			seq_puts(m, "  Last signal:");
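			/*
			 * seqno[] is a num_rings x num_rings matrix:
			 * row = signalling ring, column = waiting ring
			 * (transposed for the wait table below).
			 */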
3144			for (j = 0; j < num_rings; j++) {
3145				offset = i * I915_NUM_RINGS + j;
3146				seq_printf(m, "0x%08llx (0x%02llx) ",
3147					   seqno[offset], offset * 8);
3148			}
3149			seq_putc(m, '\n');
3150
3151			seq_puts(m, "  Last wait:  ");
3152			for (j = 0; j < num_rings; j++) {
3153				offset = i + (j * I915_NUM_RINGS);
3154				seq_printf(m, "0x%08llx (0x%02llx) ",
3155					   seqno[offset], offset * 8);
3156			}
3157			seq_putc(m, '\n');
3158
3159		}
3160		kunmap_atomic(seqno);
3161	} else {
3162		seq_puts(m, "  Last signal:");
3163		for_each_ring(ring, dev_priv, i)
3164			for (j = 0; j < num_rings; j++)
3165				seq_printf(m, "0x%08x\n",
3166					   I915_READ(ring->semaphore.mbox.signal[j]));
3167		seq_putc(m, '\n');
3168	}
3169
3170	seq_puts(m, "\nSync seqno:\n");
3171	for_each_ring(ring, dev_priv, i) {
3172		for (j = 0; j < num_rings; j++) {
3173			seq_printf(m, "  0x%08x ", ring->semaphore.sync_seqno[j]);
3174		}
3175		seq_putc(m, '\n');
3176	}
3177	seq_putc(m, '\n');
3178
3179	intel_runtime_pm_put(dev_priv);
3180	mutex_unlock(&dev->struct_mutex);
3181	return 0;
3182}
3183
3184static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3185{
3186	struct drm_info_node *node = (struct drm_info_node *) m->private;
3187	struct drm_device *dev = node->minor->dev;
3188	struct drm_i915_private *dev_priv = dev->dev_private;
3189	int i;
3190
3191	drm_modeset_lock_all(dev);
3192	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3193		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3194
3195		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
3196		seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n",
3197			   pll->config.crtc_mask, pll->active, yesno(pll->on));
3198		seq_printf(m, " tracked hardware state:\n");
3199		seq_printf(m, " dpll:    0x%08x\n", pll->config.hw_state.dpll);
3200		seq_printf(m, " dpll_md: 0x%08x\n",
3201			   pll->config.hw_state.dpll_md);
3202		seq_printf(m, " fp0:     0x%08x\n", pll->config.hw_state.fp0);
3203		seq_printf(m, " fp1:     0x%08x\n", pll->config.hw_state.fp1);
3204		seq_printf(m, " wrpll:   0x%08x\n", pll->config.hw_state.wrpll);
3205	}
3206	drm_modeset_unlock_all(dev);
3207
3208	return 0;
3209}
3210
3211static int i915_wa_registers(struct seq_file *m, void *unused)
3212{
3213	int i;
3214	int ret;
3215	struct intel_engine_cs *ring;
3216	struct drm_info_node *node = (struct drm_info_node *) m->private;
3217	struct drm_device *dev = node->minor->dev;
3218	struct drm_i915_private *dev_priv = dev->dev_private;
3219	struct i915_workarounds *workarounds = &dev_priv->workarounds;
3220
3221	ret = mutex_lock_interruptible(&dev->struct_mutex);
3222	if (ret)
3223		return ret;
3224
3225	intel_runtime_pm_get(dev_priv);
3226
3227	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
3228	for_each_ring(ring, dev_priv, i)
3229		seq_printf(m, "HW whitelist count for %s: %d\n",
3230			   ring->name, workarounds->hw_whitelist_count[i]);
3231	for (i = 0; i < workarounds->count; ++i) {
3232		i915_reg_t addr;
3233		u32 mask, value, read;
3234		bool ok;
3235
3236		addr = workarounds->reg[i].addr;
3237		mask = workarounds->reg[i].mask;
3238		value = workarounds->reg[i].value;
3239		read = I915_READ(addr);
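		/* A workaround holds if the masked bits read back as programmed. */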
3240		ok = (value & mask) == (read & mask);
3241		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
3242			   i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
3243	}
3244
3245	intel_runtime_pm_put(dev_priv);
3246	mutex_unlock(&dev->struct_mutex);
3247
3248	return 0;
3249}
3250
3251static int i915_ddb_info(struct seq_file *m, void *unused)
3252{
3253	struct drm_info_node *node = m->private;
3254	struct drm_device *dev = node->minor->dev;
3255	struct drm_i915_private *dev_priv = dev->dev_private;
3256	struct skl_ddb_allocation *ddb;
3257	struct skl_ddb_entry *entry;
3258	enum pipe pipe;
3259	int plane;
3260
3261	if (INTEL_INFO(dev)->gen < 9)
3262		return 0;
3263
3264	drm_modeset_lock_all(dev);
3265
3266	ddb = &dev_priv->wm.skl_hw.ddb;
3267
3268	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3269
3270	for_each_pipe(dev_priv, pipe) {
3271		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3272
3273		for_each_plane(dev_priv, pipe, plane) {
3274			entry = &ddb->plane[pipe][plane];
3275			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
3276				   entry->start, entry->end,
3277				   skl_ddb_entry_size(entry));
3278		}
3279
3280		entry = &ddb->plane[pipe][PLANE_CURSOR];
3281		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
3282			   entry->end, skl_ddb_entry_size(entry));
3283	}
3284
3285	drm_modeset_unlock_all(dev);
3286
3287	return 0;
3288}
3289
3290static void drrs_status_per_crtc(struct seq_file *m,
3291		struct drm_device *dev, struct intel_crtc *intel_crtc)
3292{
3293	struct intel_encoder *intel_encoder;
3294	struct drm_i915_private *dev_priv = dev->dev_private;
3295	struct i915_drrs *drrs = &dev_priv->drrs;
3296	int vrefresh = 0;
3297
3298	for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) {
3299		/* Encoder connected on this CRTC */
3300		switch (intel_encoder->type) {
3301		case INTEL_OUTPUT_EDP:
3302			seq_puts(m, "eDP:\n");
3303			break;
3304		case INTEL_OUTPUT_DSI:
3305			seq_puts(m, "DSI:\n");
3306			break;
3307		case INTEL_OUTPUT_HDMI:
3308			seq_puts(m, "HDMI:\n");
3309			break;
3310		case INTEL_OUTPUT_DISPLAYPORT:
3311			seq_puts(m, "DP:\n");
3312			break;
3313		default:
3314			seq_printf(m, "Other encoder (id=%d).\n",
3315						intel_encoder->type);
3316			return;
3317		}
3318	}
3319
3320	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3321		seq_puts(m, "\tVBT: DRRS_type: Static");
3322	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3323		seq_puts(m, "\tVBT: DRRS_type: Seamless");
3324	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3325		seq_puts(m, "\tVBT: DRRS_type: None");
3326	else
3327		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3328
3329	seq_puts(m, "\n\n");
3330
3331	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
3332		struct intel_panel *panel;
3333
3334		mutex_lock(&drrs->mutex);
3335		/* DRRS Supported */
3336		seq_puts(m, "\tDRRS Supported: Yes\n");
3337
3338		/* disable_drrs() will make drrs->dp NULL */
3339		if (!drrs->dp) {
3340			seq_puts(m, "Idleness DRRS: Disabled");
3341			mutex_unlock(&drrs->mutex);
3342			return;
3343		}
3344
3345		panel = &drrs->dp->attached_connector->panel;
3346		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3347					drrs->busy_frontbuffer_bits);
3348
3349		seq_puts(m, "\n\t\t");
3350		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3351			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3352			vrefresh = panel->fixed_mode->vrefresh;
3353		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3354			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3355			vrefresh = panel->downclock_mode->vrefresh;
3356		} else {
3357			seq_printf(m, "DRRS_State: Unknown(%d)\n",
3358						drrs->refresh_rate_type);
3359			mutex_unlock(&drrs->mutex);
3360			return;
3361		}
3362		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3363
3364		seq_puts(m, "\n\t\t");
3365		mutex_unlock(&drrs->mutex);
3366	} else {
3367		/* DRRS not supported. Print the VBT parameter. */
3368		seq_puts(m, "\tDRRS Supported: No");
3369	}
3370	seq_puts(m, "\n");
3371}
3372
3373static int i915_drrs_status(struct seq_file *m, void *unused)
3374{
3375	struct drm_info_node *node = m->private;
3376	struct drm_device *dev = node->minor->dev;
3377	struct intel_crtc *intel_crtc;
3378	int active_crtc_cnt = 0;
3379
3380	for_each_intel_crtc(dev, intel_crtc) {
3381		drm_modeset_lock(&intel_crtc->base.mutex, NULL);
3382
3383		if (intel_crtc->base.state->active) {
3384			active_crtc_cnt++;
3385			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3386
3387			drrs_status_per_crtc(m, dev, intel_crtc);
3388		}
3389
3390		drm_modeset_unlock(&intel_crtc->base.mutex);
3391	}
3392
3393	if (!active_crtc_cnt)
3394		seq_puts(m, "No active crtc found\n");
3395
3396	return 0;
3397}
3398
3399struct pipe_crc_info {
3400	const char *name;
3401	struct drm_device *dev;
3402	enum pipe pipe;
3403};
3404
3405static int i915_dp_mst_info(struct seq_file *m, void *unused)
3406{
3407	struct drm_info_node *node = (struct drm_info_node *) m->private;
3408	struct drm_device *dev = node->minor->dev;
3409	struct drm_encoder *encoder;
3410	struct intel_encoder *intel_encoder;
3411	struct intel_digital_port *intel_dig_port;
3412	drm_modeset_lock_all(dev);
3413	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3414		intel_encoder = to_intel_encoder(encoder);
3415		if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
3416			continue;
3417		intel_dig_port = enc_to_dig_port(encoder);
3418		if (!intel_dig_port->dp.can_mst)
3419			continue;
3420
3421		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3422	}
3423	drm_modeset_unlock_all(dev);
3424	return 0;
3425}
3426
3427static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
3428{
3429	struct pipe_crc_info *info = inode->i_private;
3430	struct drm_i915_private *dev_priv = info->dev->dev_private;
3431	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3432
3433	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
3434		return -ENODEV;
3435
3436	spin_lock_irq(&pipe_crc->lock);
3437
3438	if (pipe_crc->opened) {
3439		spin_unlock_irq(&pipe_crc->lock);
3440		return -EBUSY; /* already open */
3441	}
3442
3443	pipe_crc->opened = true;
3444	filep->private_data = inode->i_private;
3445
3446	spin_unlock_irq(&pipe_crc->lock);
3447
3448	return 0;
3449}
3450
3451static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
3452{
3453	struct pipe_crc_info *info = inode->i_private;
3454	struct drm_i915_private *dev_priv = info->dev->dev_private;
3455	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3456
3457	spin_lock_irq(&pipe_crc->lock);
3458	pipe_crc->opened = false;
3459	spin_unlock_irq(&pipe_crc->lock);
3460
3461	return 0;
3462}
3463
3464/* (6 fields, 8 chars each, space separated (5) + '\n') */
3465#define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
3466/* account for the terminating '\0' */
3467#define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)
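/*
 * Worked example of the sizing above (a sketch derived from the format
 * string used in i915_pipe_crc_read() below): a line such as
 *     "    1234 89abcdef 00000000 00000000 00000000 00000000\n"
 * is six 8-character fields plus five separating spaces and a newline,
 * i.e. 6 * 8 + 5 + 1 = 54 bytes; the buffer reserves one extra byte
 * for the '\0' that snprintf() appends.
 */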
3468
3469static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
3470{
3471	assert_spin_locked(&pipe_crc->lock);
3472	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
3473			INTEL_PIPE_CRC_ENTRIES_NR);
3474}
3475
3476static ssize_t
3477i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
3478		   loff_t *pos)
3479{
3480	struct pipe_crc_info *info = filep->private_data;
3481	struct drm_device *dev = info->dev;
3482	struct drm_i915_private *dev_priv = dev->dev_private;
3483	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3484	char buf[PIPE_CRC_BUFFER_LEN];
3485	int n_entries;
3486	ssize_t bytes_read;
3487
3488	/*
3489	 * Don't allow user space to provide buffers not big enough to hold
3490	 * a line of data.
3491	 */
3492	if (count < PIPE_CRC_LINE_LEN)
3493		return -EINVAL;
3494
3495	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
3496		return 0;
3497
3498	/* nothing to read */
3499	spin_lock_irq(&pipe_crc->lock);
3500	while (pipe_crc_data_count(pipe_crc) == 0) {
3501		int ret;
3502
3503		if (filep->f_flags & O_NONBLOCK) {
3504			spin_unlock_irq(&pipe_crc->lock);
3505			return -EAGAIN;
3506		}
3507
3508		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
3509				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
3510		if (ret) {
3511			spin_unlock_irq(&pipe_crc->lock);
3512			return ret;
3513		}
3514	}
3515
3516	/* We now have one or more entries to read */
3517	n_entries = count / PIPE_CRC_LINE_LEN;
3518
3519	bytes_read = 0;
3520	while (n_entries > 0) {
3521		struct intel_pipe_crc_entry *entry =
3522			&pipe_crc->entries[pipe_crc->tail];
3523		int ret;
3524
3525		if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
3526			     INTEL_PIPE_CRC_ENTRIES_NR) < 1)
3527			break;
3528
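		/*
		 * INTEL_PIPE_CRC_ENTRIES_NR must be a power of two (the
		 * BUILD_BUG_ON below enforces it), so advancing the tail
		 * can wrap with a cheap bitwise AND instead of a modulo.
		 */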
3529		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
3530		pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
3531
3532		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
3533				       "%8u %8x %8x %8x %8x %8x\n",
3534				       entry->frame, entry->crc[0],
3535				       entry->crc[1], entry->crc[2],
3536				       entry->crc[3], entry->crc[4]);
3537
3538		spin_unlock_irq(&pipe_crc->lock);
3539
3540		ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN);
3541		if (ret == PIPE_CRC_LINE_LEN)
3542			return -EFAULT;
3543
3544		user_buf += PIPE_CRC_LINE_LEN;
3545		n_entries--;
3546
3547		spin_lock_irq(&pipe_crc->lock);
3548	}
3549
3550	spin_unlock_irq(&pipe_crc->lock);
3551
3552	return bytes_read;
3553}
3554
3555static const struct file_operations i915_pipe_crc_fops = {
3556	.owner = THIS_MODULE,
3557	.open = i915_pipe_crc_open,
3558	.read = i915_pipe_crc_read,
3559	.release = i915_pipe_crc_release,
3560};
3561
3562static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
3563	{
3564		.name = "i915_pipe_A_crc",
3565		.pipe = PIPE_A,
3566	},
3567	{
3568		.name = "i915_pipe_B_crc",
3569		.pipe = PIPE_B,
3570	},
3571	{
3572		.name = "i915_pipe_C_crc",
3573		.pipe = PIPE_C,
3574	},
3575};
3576
3577static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
3578				enum pipe pipe)
3579{
3580	struct drm_device *dev = minor->dev;
3581	struct dentry *ent;
3582	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
3583
3584	info->dev = dev;
3585	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
3586				  &i915_pipe_crc_fops);
3587	if (!ent)
3588		return -ENOMEM;
3589
3590	return drm_add_fake_info_node(minor, ent, info);
3591}
3592
3593static const char * const pipe_crc_sources[] = {
3594	"none",
3595	"plane1",
3596	"plane2",
3597	"pf",
3598	"pipe",
3599	"TV",
3600	"DP-B",
3601	"DP-C",
3602	"DP-D",
3603	"auto",
3604};
3605
3606static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
3607{
3608	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
3609	return pipe_crc_sources[source];
3610}
3611
3612static int display_crc_ctl_show(struct seq_file *m, void *data)
3613{
3614	struct drm_device *dev = m->private;
3615	struct drm_i915_private *dev_priv = dev->dev_private;
3616	int i;
3617
3618	for (i = 0; i < I915_MAX_PIPES; i++)
3619		seq_printf(m, "%c %s\n", pipe_name(i),
3620			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));
3621
3622	return 0;
3623}
3624
3625static int display_crc_ctl_open(struct inode *inode, struct file *file)
3626{
3627	struct drm_device *dev = inode->i_private;
3628
3629	return single_open(file, display_crc_ctl_show, dev);
3630}
3631
3632static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
3633				 uint32_t *val)
3634{
3635	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3636		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
3637
3638	switch (*source) {
3639	case INTEL_PIPE_CRC_SOURCE_PIPE:
3640		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
3641		break;
3642	case INTEL_PIPE_CRC_SOURCE_NONE:
3643		*val = 0;
3644		break;
3645	default:
3646		return -EINVAL;
3647	}
3648
3649	return 0;
3650}
3651
3652static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
3653				     enum intel_pipe_crc_source *source)
3654{
3655	struct intel_encoder *encoder;
3656	struct intel_crtc *crtc;
3657	struct intel_digital_port *dig_port;
3658	int ret = 0;
3659
3660	*source = INTEL_PIPE_CRC_SOURCE_PIPE;
3661
3662	drm_modeset_lock_all(dev);
3663	for_each_intel_encoder(dev, encoder) {
3664		if (!encoder->base.crtc)
3665			continue;
3666
3667		crtc = to_intel_crtc(encoder->base.crtc);
3668
3669		if (crtc->pipe != pipe)
3670			continue;
3671
3672		switch (encoder->type) {
3673		case INTEL_OUTPUT_TVOUT:
3674			*source = INTEL_PIPE_CRC_SOURCE_TV;
3675			break;
3676		case INTEL_OUTPUT_DISPLAYPORT:
3677		case INTEL_OUTPUT_EDP:
3678			dig_port = enc_to_dig_port(&encoder->base);
3679			switch (dig_port->port) {
3680			case PORT_B:
3681				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
3682				break;
3683			case PORT_C:
3684				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
3685				break;
3686			case PORT_D:
3687				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
3688				break;
3689			default:
3690				WARN(1, "nonexisting DP port %c\n",
3691				     port_name(dig_port->port));
3692				break;
3693			}
3694			break;
3695		default:
3696			break;
3697		}
3698	}
3699	drm_modeset_unlock_all(dev);
3700
3701	return ret;
3702}
3703
3704static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
3705				enum pipe pipe,
3706				enum intel_pipe_crc_source *source,
3707				uint32_t *val)
3708{
3709	struct drm_i915_private *dev_priv = dev->dev_private;
3710	bool need_stable_symbols = false;
3711
3712	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
3713		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
3714		if (ret)
3715			return ret;
3716	}
3717
3718	switch (*source) {
3719	case INTEL_PIPE_CRC_SOURCE_PIPE:
3720		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
3721		break;
3722	case INTEL_PIPE_CRC_SOURCE_DP_B:
3723		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
3724		need_stable_symbols = true;
3725		break;
3726	case INTEL_PIPE_CRC_SOURCE_DP_C:
3727		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
3728		need_stable_symbols = true;
3729		break;
3730	case INTEL_PIPE_CRC_SOURCE_DP_D:
3731		if (!IS_CHERRYVIEW(dev))
3732			return -EINVAL;
3733		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
3734		need_stable_symbols = true;
3735		break;
3736	case INTEL_PIPE_CRC_SOURCE_NONE:
3737		*val = 0;
3738		break;
3739	default:
3740		return -EINVAL;
3741	}
3742
3743	/*
3744	 * When the pipe CRC tap point is after the transcoders we need
3745	 * to tweak symbol-level features to produce a deterministic series of
3746	 * symbols for a given frame. We need to reset those features only once
3747	 * a frame (instead of every nth symbol):
3748	 *   - DC-balance: used to ensure a better clock recovery from the data
3749	 *     link (SDVO)
3750	 *   - DisplayPort scrambling: used for EMI reduction
3751	 */
3752	if (need_stable_symbols) {
3753		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3754
3755		tmp |= DC_BALANCE_RESET_VLV;
3756		switch (pipe) {
3757		case PIPE_A:
3758			tmp |= PIPE_A_SCRAMBLE_RESET;
3759			break;
3760		case PIPE_B:
3761			tmp |= PIPE_B_SCRAMBLE_RESET;
3762			break;
3763		case PIPE_C:
3764			tmp |= PIPE_C_SCRAMBLE_RESET;
3765			break;
3766		default:
3767			return -EINVAL;
3768		}
3769		I915_WRITE(PORT_DFT2_G4X, tmp);
3770	}
3771
3772	return 0;
3773}
3774
3775static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
3776				 enum pipe pipe,
3777				 enum intel_pipe_crc_source *source,
3778				 uint32_t *val)
3779{
3780	struct drm_i915_private *dev_priv = dev->dev_private;
3781	bool need_stable_symbols = false;
3782
3783	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
3784		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
3785		if (ret)
3786			return ret;
3787	}
3788
3789	switch (*source) {
3790	case INTEL_PIPE_CRC_SOURCE_PIPE:
3791		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
3792		break;
3793	case INTEL_PIPE_CRC_SOURCE_TV:
3794		if (!SUPPORTS_TV(dev))
3795			return -EINVAL;
3796		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
3797		break;
3798	case INTEL_PIPE_CRC_SOURCE_DP_B:
3799		if (!IS_G4X(dev))
3800			return -EINVAL;
3801		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
3802		need_stable_symbols = true;
3803		break;
3804	case INTEL_PIPE_CRC_SOURCE_DP_C:
3805		if (!IS_G4X(dev))
3806			return -EINVAL;
3807		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
3808		need_stable_symbols = true;
3809		break;
3810	case INTEL_PIPE_CRC_SOURCE_DP_D:
3811		if (!IS_G4X(dev))
3812			return -EINVAL;
3813		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
3814		need_stable_symbols = true;
3815		break;
3816	case INTEL_PIPE_CRC_SOURCE_NONE:
3817		*val = 0;
3818		break;
3819	default:
3820		return -EINVAL;
3821	}
3822
3823	/*
3824	 * When the pipe CRC tap point is after the transcoders we need
3825	 * to tweak symbol-level features to produce a deterministic series of
3826	 * symbols for a given frame. We need to reset those features only once
3827	 * a frame (instead of every nth symbol):
3828	 *   - DC-balance: used to ensure a better clock recovery from the data
3829	 *     link (SDVO)
3830	 *   - DisplayPort scrambling: used for EMI reduction
3831	 */
3832	if (need_stable_symbols) {
3833		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3834
3835		WARN_ON(!IS_G4X(dev));
3836
3837		I915_WRITE(PORT_DFT_I9XX,
3838			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
3839
3840		if (pipe == PIPE_A)
3841			tmp |= PIPE_A_SCRAMBLE_RESET;
3842		else
3843			tmp |= PIPE_B_SCRAMBLE_RESET;
3844
3845		I915_WRITE(PORT_DFT2_G4X, tmp);
3846	}
3847
3848	return 0;
3849}
3850
3851static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
3852					 enum pipe pipe)
3853{
3854	struct drm_i915_private *dev_priv = dev->dev_private;
3855	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3856
3857	switch (pipe) {
3858	case PIPE_A:
3859		tmp &= ~PIPE_A_SCRAMBLE_RESET;
3860		break;
3861	case PIPE_B:
3862		tmp &= ~PIPE_B_SCRAMBLE_RESET;
3863		break;
3864	case PIPE_C:
3865		tmp &= ~PIPE_C_SCRAMBLE_RESET;
3866		break;
3867	default:
3868		return;
3869	}
3870	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
3871		tmp &= ~DC_BALANCE_RESET_VLV;
3872	I915_WRITE(PORT_DFT2_G4X, tmp);
3873
3874}
3875
3876static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
3877					 enum pipe pipe)
3878{
3879	struct drm_i915_private *dev_priv = dev->dev_private;
3880	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3881
3882	if (pipe == PIPE_A)
3883		tmp &= ~PIPE_A_SCRAMBLE_RESET;
3884	else
3885		tmp &= ~PIPE_B_SCRAMBLE_RESET;
3886	I915_WRITE(PORT_DFT2_G4X, tmp);
3887
3888	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
3889		I915_WRITE(PORT_DFT_I9XX,
3890			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
3891	}
3892}
3893
3894static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
3895				uint32_t *val)
3896{
3897	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3898		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
3899
3900	switch (*source) {
3901	case INTEL_PIPE_CRC_SOURCE_PLANE1:
3902		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
3903		break;
3904	case INTEL_PIPE_CRC_SOURCE_PLANE2:
3905		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
3906		break;
3907	case INTEL_PIPE_CRC_SOURCE_PIPE:
3908		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
3909		break;
3910	case INTEL_PIPE_CRC_SOURCE_NONE:
3911		*val = 0;
3912		break;
3913	default:
3914		return -EINVAL;
3915	}
3916
3917	return 0;
3918}
3919
3920static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
3921{
3922	struct drm_i915_private *dev_priv = dev->dev_private;
3923	struct intel_crtc *crtc =
3924		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
3925	struct intel_crtc_state *pipe_config;
3926	struct drm_atomic_state *state;
3927	int ret = 0;
3928
3929	drm_modeset_lock_all(dev);
3930	state = drm_atomic_state_alloc(dev);
3931	if (!state) {
3932		ret = -ENOMEM;
3933		goto out;
3934	}
3935
3936	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
3937	pipe_config = intel_atomic_get_crtc_state(state, crtc);
3938	if (IS_ERR(pipe_config)) {
3939		ret = PTR_ERR(pipe_config);
3940		goto out;
3941	}
3942
3943	pipe_config->pch_pfit.force_thru = enable;
3944	if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
3945	    pipe_config->pch_pfit.enabled != enable)
3946		pipe_config->base.connectors_changed = true;
3947
3948	ret = drm_atomic_commit(state);
3949out:
3950	drm_modeset_unlock_all(dev);
3951	WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
3952	if (ret)
3953		drm_atomic_state_free(state);
3954}
3955
3956static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
3957				enum pipe pipe,
3958				enum intel_pipe_crc_source *source,
3959				uint32_t *val)
3960{
3961	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3962		*source = INTEL_PIPE_CRC_SOURCE_PF;
3963
3964	switch (*source) {
3965	case INTEL_PIPE_CRC_SOURCE_PLANE1:
3966		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
3967		break;
3968	case INTEL_PIPE_CRC_SOURCE_PLANE2:
3969		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
3970		break;
3971	case INTEL_PIPE_CRC_SOURCE_PF:
3972		if (IS_HASWELL(dev) && pipe == PIPE_A)
3973			hsw_trans_edp_pipe_A_crc_wa(dev, true);
3974
3975		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
3976		break;
3977	case INTEL_PIPE_CRC_SOURCE_NONE:
3978		*val = 0;
3979		break;
3980	default:
3981		return -EINVAL;
3982	}
3983
3984	return 0;
3985}
3986
3987static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
3988			       enum intel_pipe_crc_source source)
3989{
3990	struct drm_i915_private *dev_priv = dev->dev_private;
3991	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
3992	struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
3993									pipe));
3994	enum intel_display_power_domain power_domain;
3995	u32 val = 0; /* shut up gcc */
3996	int ret;
3997
3998	if (pipe_crc->source == source)
3999		return 0;
4000
4001	/* forbid changing the source without going back to 'none' */
4002	if (pipe_crc->source && source)
4003		return -EINVAL;
4004
4005	power_domain = POWER_DOMAIN_PIPE(pipe);
4006	if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
4007		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
4008		return -EIO;
4009	}
4010
4011	if (IS_GEN2(dev))
4012		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
4013	else if (INTEL_INFO(dev)->gen < 5)
4014		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
4015	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
4016		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
4017	else if (IS_GEN5(dev) || IS_GEN6(dev))
4018		ret = ilk_pipe_crc_ctl_reg(&source, &val);
4019	else
4020		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
4021
4022	if (ret != 0)
4023		goto out;
4024
4025	/* none -> real source transition */
4026	if (source) {
4027		struct intel_pipe_crc_entry *entries;
4028
4029		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
4030				 pipe_name(pipe), pipe_crc_source_name(source));
4031
4032		entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
4033				  sizeof(pipe_crc->entries[0]),
4034				  GFP_KERNEL);
4035		if (!entries) {
4036			ret = -ENOMEM;
4037			goto out;
4038		}
4039
4040		/*
4041		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
4042		 * enabled and disabled dynamically based on package C states,
4043		 * user space can't make reliable use of the CRCs, so let's just
4044		 * completely disable it.
4045		 */
4046		hsw_disable_ips(crtc);
4047
4048		spin_lock_irq(&pipe_crc->lock);
4049		kfree(pipe_crc->entries);
4050		pipe_crc->entries = entries;
4051		pipe_crc->head = 0;
4052		pipe_crc->tail = 0;
4053		spin_unlock_irq(&pipe_crc->lock);
4054	}
4055
4056	pipe_crc->source = source;
4057
4058	I915_WRITE(PIPE_CRC_CTL(pipe), val);
4059	POSTING_READ(PIPE_CRC_CTL(pipe));
4060
4061	/* real source -> none transition */
4062	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
4063		struct intel_pipe_crc_entry *entries;
4064		struct intel_crtc *crtc =
4065			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
4066
4067		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
4068				 pipe_name(pipe));
4069
4070		drm_modeset_lock(&crtc->base.mutex, NULL);
4071		if (crtc->base.state->active)
4072			intel_wait_for_vblank(dev, pipe);
4073		drm_modeset_unlock(&crtc->base.mutex);
4074
4075		spin_lock_irq(&pipe_crc->lock);
4076		entries = pipe_crc->entries;
4077		pipe_crc->entries = NULL;
4078		pipe_crc->head = 0;
4079		pipe_crc->tail = 0;
4080		spin_unlock_irq(&pipe_crc->lock);
4081
4082		kfree(entries);
4083
4084		if (IS_G4X(dev))
4085			g4x_undo_pipe_scramble_reset(dev, pipe);
4086		else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
4087			vlv_undo_pipe_scramble_reset(dev, pipe);
4088		else if (IS_HASWELL(dev) && pipe == PIPE_A)
4089			hsw_trans_edp_pipe_A_crc_wa(dev, false);
4090
4091		hsw_enable_ips(crtc);
4092	}
4093
4094	ret = 0;
4095
4096out:
4097	intel_display_power_put(dev_priv, power_domain);
4098
4099	return ret;
4100}
4101
4102/*
4103 * Parse pipe CRC command strings:
4104 *   command: wsp* object wsp+ name wsp+ source wsp*
4105 *   object: 'pipe'
4106 *   name: (A | B | C)
4107 *   source: (none | plane1 | plane2 | pf | pipe | TV | DP-B | DP-C | DP-D | auto)
4108 *   wsp: (#0x20 | #0x9 | #0xA)+
4109 *
4110 * eg.:
4111 *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
4112 *  "pipe A none"    ->  Stop CRC
4113 */
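/*
 * Usage sketch (assuming debugfs is mounted at the conventional
 * /sys/kernel/debug and the device is DRM minor 0):
 *
 *   # echo "pipe A pf" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 *   # cat /sys/kernel/debug/dri/0/i915_pipe_A_crc
 *   # echo "pipe A none" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 *
 * Each line read back is one entry in the format produced by
 * i915_pipe_crc_read(): a frame counter followed by five CRC words.
 */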
4114static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
4115{
4116	int n_words = 0;
4117
4118	while (*buf) {
4119		char *end;
4120
4121		/* skip leading white space */
4122		buf = skip_spaces(buf);
4123		if (!*buf)
4124			break;	/* end of buffer */
4125
4126		/* find end of word */
4127		for (end = buf; *end && !isspace(*end); end++)
4128			;
4129
4130		if (n_words == max_words) {
4131			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
4132					 max_words);
4133			return -EINVAL;	/* ran out of words[] before bytes */
4134		}
4135
4136		if (*end)
4137			*end++ = '\0';
4138		words[n_words++] = buf;
4139		buf = end;
4140	}
4141
4142	return n_words;
4143}
4144
4145enum intel_pipe_crc_object {
4146	PIPE_CRC_OBJECT_PIPE,
4147};
4148
4149static const char * const pipe_crc_objects[] = {
4150	"pipe",
4151};
4152
4153static int
4154display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
4155{
4156	int i;
4157
4158	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
4159		if (!strcmp(buf, pipe_crc_objects[i])) {
4160			*o = i;
4161			return 0;
4162		}
4163
4164	return -EINVAL;
4165}
4166
4167static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
4168{
4169	const char name = buf[0];
4170
4171	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
4172		return -EINVAL;
4173
4174	*pipe = name - 'A';
4175
4176	return 0;
4177}
4178
4179static int
4180display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
4181{
4182	int i;
4183
4184	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
4185		if (!strcmp(buf, pipe_crc_sources[i])) {
4186			*s = i;
4187			return 0;
4188		}
4189
4190	return -EINVAL;
4191}
4192
4193static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
4194{
4195#define N_WORDS 3
4196	int n_words;
4197	char *words[N_WORDS];
4198	enum pipe pipe;
4199	enum intel_pipe_crc_object object;
4200	enum intel_pipe_crc_source source;
4201
4202	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
4203	if (n_words != N_WORDS) {
4204		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
4205				 N_WORDS);
4206		return -EINVAL;
4207	}
4208
4209	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
4210		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
4211		return -EINVAL;
4212	}
4213
4214	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
4215		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
4216		return -EINVAL;
4217	}
4218
4219	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
4220		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
4221		return -EINVAL;
4222	}
4223
4224	return pipe_crc_set_source(dev, pipe, source);
4225}
4226
4227static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
4228				     size_t len, loff_t *offp)
4229{
4230	struct seq_file *m = file->private_data;
4231	struct drm_device *dev = m->private;
4232	char *tmpbuf;
4233	int ret;
4234
4235	if (len == 0)
4236		return 0;
4237
4238	if (len > PAGE_SIZE - 1) {
4239		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
4240				 PAGE_SIZE);
4241		return -E2BIG;
4242	}
4243
4244	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
4245	if (!tmpbuf)
4246		return -ENOMEM;
4247
4248	if (copy_from_user(tmpbuf, ubuf, len)) {
4249		ret = -EFAULT;
4250		goto out;
4251	}
4252	tmpbuf[len] = '\0';
4253
4254	ret = display_crc_ctl_parse(dev, tmpbuf, len);
4255
4256out:
4257	kfree(tmpbuf);
4258	if (ret < 0)
4259		return ret;
4260
4261	*offp += len;
4262	return len;
4263}
4264
4265static const struct file_operations i915_display_crc_ctl_fops = {
4266	.owner = THIS_MODULE,
4267	.open = display_crc_ctl_open,
4268	.read = seq_read,
4269	.llseek = seq_lseek,
4270	.release = single_release,
4271	.write = display_crc_ctl_write
4272};
4273
4274static ssize_t i915_displayport_test_active_write(struct file *file,
4275					    const char __user *ubuf,
4276					    size_t len, loff_t *offp)
4277{
4278	char *input_buffer;
4279	int status = 0;
4280	struct drm_device *dev;
4281	struct drm_connector *connector;
4282	struct list_head *connector_list;
4283	struct intel_dp *intel_dp;
4284	int val = 0;
4285
4286	dev = ((struct seq_file *)file->private_data)->private;
4287
4288	connector_list = &dev->mode_config.connector_list;
4289
4290	if (len == 0)
4291		return 0;
4292
4293	input_buffer = kmalloc(len + 1, GFP_KERNEL);
4294	if (!input_buffer)
4295		return -ENOMEM;
4296
4297	if (copy_from_user(input_buffer, ubuf, len)) {
4298		status = -EFAULT;
4299		goto out;
4300	}
4301
4302	input_buffer[len] = '\0';
4303	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
4304
4305	list_for_each_entry(connector, connector_list, head) {
4306
4307		if (connector->connector_type !=
4308		    DRM_MODE_CONNECTOR_DisplayPort)
4309			continue;
4310
4311		if (connector->status == connector_status_connected &&
4312		    connector->encoder != NULL) {
4313			intel_dp = enc_to_intel_dp(connector->encoder);
4314			status = kstrtoint(input_buffer, 10, &val);
4315			if (status < 0)
4316				goto out;
4317			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
4318			/* To prevent erroneous activation of the compliance
4319			 * testing code, only accept an actual value of 1 here
4320			 */
4321			if (val == 1)
4322				intel_dp->compliance_test_active = 1;
4323			else
4324				intel_dp->compliance_test_active = 0;
4325		}
4326	}
4327out:
4328	kfree(input_buffer);
4329	if (status < 0)
4330		return status;
4331
4332	*offp += len;
4333	return len;
4334}
4335
4336static int i915_displayport_test_active_show(struct seq_file *m, void *data)
4337{
4338	struct drm_device *dev = m->private;
4339	struct drm_connector *connector;
4340	struct list_head *connector_list = &dev->mode_config.connector_list;
4341	struct intel_dp *intel_dp;
4342
4343	list_for_each_entry(connector, connector_list, head) {
4344
4345		if (connector->connector_type !=
4346		    DRM_MODE_CONNECTOR_DisplayPort)
4347			continue;
4348
4349		if (connector->status == connector_status_connected &&
4350		    connector->encoder != NULL) {
4351			intel_dp = enc_to_intel_dp(connector->encoder);
4352			if (intel_dp->compliance_test_active)
4353				seq_puts(m, "1");
4354			else
4355				seq_puts(m, "0");
4356		} else
4357			seq_puts(m, "0");
4358	}
4359
4360	return 0;
4361}
4362
4363static int i915_displayport_test_active_open(struct inode *inode,
4364				       struct file *file)
4365{
4366	struct drm_device *dev = inode->i_private;
4367
4368	return single_open(file, i915_displayport_test_active_show, dev);
4369}
4370
4371static const struct file_operations i915_displayport_test_active_fops = {
4372	.owner = THIS_MODULE,
4373	.open = i915_displayport_test_active_open,
4374	.read = seq_read,
4375	.llseek = seq_lseek,
4376	.release = single_release,
4377	.write = i915_displayport_test_active_write
4378};
4379
4380static int i915_displayport_test_data_show(struct seq_file *m, void *data)
4381{
4382	struct drm_device *dev = m->private;
4383	struct drm_connector *connector;
4384	struct list_head *connector_list = &dev->mode_config.connector_list;
4385	struct intel_dp *intel_dp;
4386
4387	list_for_each_entry(connector, connector_list, head) {
4388
4389		if (connector->connector_type !=
4390		    DRM_MODE_CONNECTOR_DisplayPort)
4391			continue;
4392
4393		if (connector->status == connector_status_connected &&
4394		    connector->encoder != NULL) {
4395			intel_dp = enc_to_intel_dp(connector->encoder);
4396			seq_printf(m, "%lx", intel_dp->compliance_test_data);
4397		} else
4398			seq_puts(m, "0");
4399	}
4400
4401	return 0;
4402}
4403static int i915_displayport_test_data_open(struct inode *inode,
4404				       struct file *file)
4405{
4406	struct drm_device *dev = inode->i_private;
4407
4408	return single_open(file, i915_displayport_test_data_show, dev);
4409}
4410
4411static const struct file_operations i915_displayport_test_data_fops = {
4412	.owner = THIS_MODULE,
4413	.open = i915_displayport_test_data_open,
4414	.read = seq_read,
4415	.llseek = seq_lseek,
4416	.release = single_release
4417};
4418
4419static int i915_displayport_test_type_show(struct seq_file *m, void *data)
4420{
4421	struct drm_device *dev = m->private;
4422	struct drm_connector *connector;
4423	struct list_head *connector_list = &dev->mode_config.connector_list;
4424	struct intel_dp *intel_dp;
4425
4426	list_for_each_entry(connector, connector_list, head) {
4427
4428		if (connector->connector_type !=
4429		    DRM_MODE_CONNECTOR_DisplayPort)
4430			continue;
4431
4432		if (connector->status == connector_status_connected &&
4433		    connector->encoder != NULL) {
4434			intel_dp = enc_to_intel_dp(connector->encoder);
4435			seq_printf(m, "%02lx", intel_dp->compliance_test_type);
4436		} else
4437			seq_puts(m, "0");
4438	}
4439
4440	return 0;
4441}
4442
4443static int i915_displayport_test_type_open(struct inode *inode,
4444				       struct file *file)
4445{
4446	struct drm_device *dev = inode->i_private;
4447
4448	return single_open(file, i915_displayport_test_type_show, dev);
4449}
4450
4451static const struct file_operations i915_displayport_test_type_fops = {
4452	.owner = THIS_MODULE,
4453	.open = i915_displayport_test_type_open,
4454	.read = seq_read,
4455	.llseek = seq_lseek,
4456	.release = single_release
4457};
4458
4459static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
4460{
4461	struct drm_device *dev = m->private;
4462	int level;
4463	int num_levels;
4464
4465	if (IS_CHERRYVIEW(dev))
4466		num_levels = 3;
4467	else if (IS_VALLEYVIEW(dev))
4468		num_levels = 1;
4469	else
4470		num_levels = ilk_wm_max_level(dev) + 1;
4471
4472	drm_modeset_lock_all(dev);
4473
4474	for (level = 0; level < num_levels; level++) {
4475		unsigned int latency = wm[level];
4476
4477		/*
4478		 * - WM1+ latency values in 0.5us units
4479		 * - latencies are in us on gen9/vlv/chv
4480		 */
4481		if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev) ||
4482		    IS_CHERRYVIEW(dev))
4483			latency *= 10;
4484		else if (level > 0)
4485			latency *= 5;
4486
4487		seq_printf(m, "WM%d %u (%u.%u usec)\n",
4488			   level, wm[level], latency / 10, latency % 10);
4489	}
4490
4491	drm_modeset_unlock_all(dev);
4492}
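/*
 * Worked example of the conversion above: on a pre-gen9 platform that
 * is neither VLV nor CHV, a raw WM1 value of 4 is in 0.5us units, so
 * latency = 4 * 5 = 20 tenths of a microsecond and the line prints as
 * "WM1 4 (2.0 usec)".
 */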
4493
4494static int pri_wm_latency_show(struct seq_file *m, void *data)
4495{
4496	struct drm_device *dev = m->private;
4497	struct drm_i915_private *dev_priv = dev->dev_private;
4498	const uint16_t *latencies;
4499
4500	if (INTEL_INFO(dev)->gen >= 9)
4501		latencies = dev_priv->wm.skl_latency;
4502	else
4503		latencies = to_i915(dev)->wm.pri_latency;
4504
4505	wm_latency_show(m, latencies);
4506
4507	return 0;
4508}
4509
4510static int spr_wm_latency_show(struct seq_file *m, void *data)
4511{
4512	struct drm_device *dev = m->private;
4513	struct drm_i915_private *dev_priv = dev->dev_private;
4514	const uint16_t *latencies;
4515
4516	if (INTEL_INFO(dev)->gen >= 9)
4517		latencies = dev_priv->wm.skl_latency;
4518	else
4519		latencies = to_i915(dev)->wm.spr_latency;
4520
4521	wm_latency_show(m, latencies);
4522
4523	return 0;
4524}
4525
4526static int cur_wm_latency_show(struct seq_file *m, void *data)
4527{
4528	struct drm_device *dev = m->private;
4529	struct drm_i915_private *dev_priv = dev->dev_private;
4530	const uint16_t *latencies;
4531
4532	if (INTEL_INFO(dev)->gen >= 9)
4533		latencies = dev_priv->wm.skl_latency;
4534	else
4535		latencies = to_i915(dev)->wm.cur_latency;
4536
4537	wm_latency_show(m, latencies);
4538
4539	return 0;
4540}
4541
4542static int pri_wm_latency_open(struct inode *inode, struct file *file)
4543{
4544	struct drm_device *dev = inode->i_private;
4545
4546	if (INTEL_INFO(dev)->gen < 5)
4547		return -ENODEV;
4548
4549	return single_open(file, pri_wm_latency_show, dev);
4550}
4551
4552static int spr_wm_latency_open(struct inode *inode, struct file *file)
4553{
4554	struct drm_device *dev = inode->i_private;
4555
4556	if (HAS_GMCH_DISPLAY(dev))
4557		return -ENODEV;
4558
4559	return single_open(file, spr_wm_latency_show, dev);
4560}
4561
4562static int cur_wm_latency_open(struct inode *inode, struct file *file)
4563{
4564	struct drm_device *dev = inode->i_private;
4565
4566	if (HAS_GMCH_DISPLAY(dev))
4567		return -ENODEV;
4568
4569	return single_open(file, cur_wm_latency_show, dev);
4570}
4571
4572static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
4573				size_t len, loff_t *offp, uint16_t wm[8])
4574{
4575	struct seq_file *m = file->private_data;
4576	struct drm_device *dev = m->private;
4577	uint16_t new[8] = { 0 };
4578	int num_levels;
4579	int level;
4580	int ret;
4581	char tmp[32];
4582
4583	if (IS_CHERRYVIEW(dev))
4584		num_levels = 3;
4585	else if (IS_VALLEYVIEW(dev))
4586		num_levels = 1;
4587	else
4588		num_levels = ilk_wm_max_level(dev) + 1;
4589
4590	if (len >= sizeof(tmp))
4591		return -EINVAL;
4592
4593	if (copy_from_user(tmp, ubuf, len))
4594		return -EFAULT;
4595
4596	tmp[len] = '\0';
4597
4598	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
4599		     &new[0], &new[1], &new[2], &new[3],
4600		     &new[4], &new[5], &new[6], &new[7]);
4601	if (ret != num_levels)
4602		return -EINVAL;
4603
4604	drm_modeset_lock_all(dev);
4605
4606	for (level = 0; level < num_levels; level++)
4607		wm[level] = new[level];
4608
4609	drm_modeset_unlock_all(dev);
4610
4611	return len;
4612}
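/*
 * Usage sketch: the write handler above expects exactly num_levels
 * space-separated raw values, e.g. for a platform with five watermark
 * levels (assuming the standard debugfs mount point):
 *
 *   # echo "12 4 5 6 7" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 *
 * The values are raw hardware units; wm_latency_show() above documents
 * how they translate to microseconds when read back.
 */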
4613
4614
4615static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
4616				    size_t len, loff_t *offp)
4617{
4618	struct seq_file *m = file->private_data;
4619	struct drm_device *dev = m->private;
4620	struct drm_i915_private *dev_priv = dev->dev_private;
4621	uint16_t *latencies;
4622
4623	if (INTEL_INFO(dev)->gen >= 9)
4624		latencies = dev_priv->wm.skl_latency;
4625	else
4626		latencies = to_i915(dev)->wm.pri_latency;
4627
4628	return wm_latency_write(file, ubuf, len, offp, latencies);
4629}
4630
4631static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
4632				    size_t len, loff_t *offp)
4633{
4634	struct seq_file *m = file->private_data;
4635	struct drm_device *dev = m->private;
4636	struct drm_i915_private *dev_priv = dev->dev_private;
4637	uint16_t *latencies;
4638
4639	if (INTEL_INFO(dev)->gen >= 9)
4640		latencies = dev_priv->wm.skl_latency;
4641	else
4642		latencies = to_i915(dev)->wm.spr_latency;
4643
4644	return wm_latency_write(file, ubuf, len, offp, latencies);
4645}
4646
4647static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
4648				    size_t len, loff_t *offp)
4649{
4650	struct seq_file *m = file->private_data;
4651	struct drm_device *dev = m->private;
4652	struct drm_i915_private *dev_priv = dev->dev_private;
4653	uint16_t *latencies;
4654
4655	if (INTEL_INFO(dev)->gen >= 9)
4656		latencies = dev_priv->wm.skl_latency;
4657	else
4658		latencies = to_i915(dev)->wm.cur_latency;
4659
4660	return wm_latency_write(file, ubuf, len, offp, latencies);
4661}
4662
4663static const struct file_operations i915_pri_wm_latency_fops = {
4664	.owner = THIS_MODULE,
4665	.open = pri_wm_latency_open,
4666	.read = seq_read,
4667	.llseek = seq_lseek,
4668	.release = single_release,
4669	.write = pri_wm_latency_write
4670};
4671
4672static const struct file_operations i915_spr_wm_latency_fops = {
4673	.owner = THIS_MODULE,
4674	.open = spr_wm_latency_open,
4675	.read = seq_read,
4676	.llseek = seq_lseek,
4677	.release = single_release,
4678	.write = spr_wm_latency_write
4679};
4680
4681static const struct file_operations i915_cur_wm_latency_fops = {
4682	.owner = THIS_MODULE,
4683	.open = cur_wm_latency_open,
4684	.read = seq_read,
4685	.llseek = seq_lseek,
4686	.release = single_release,
4687	.write = cur_wm_latency_write
4688};
4689
4690static int
4691i915_wedged_get(void *data, u64 *val)
4692{
4693	struct drm_device *dev = data;
4694	struct drm_i915_private *dev_priv = dev->dev_private;
4695
4696	*val = atomic_read(&dev_priv->gpu_error.reset_counter);
4697
4698	return 0;
4699}
4700
4701static int
4702i915_wedged_set(void *data, u64 val)
4703{
4704	struct drm_device *dev = data;
4705	struct drm_i915_private *dev_priv = dev->dev_private;
4706
4707	/*
4708	 * There is no safeguard against this debugfs entry racing
4709	 * with hangcheck calling the same i915_handle_error() in
4710	 * parallel, causing an explosion. For now we assume that the
4711	 * test harness is responsible enough not to inject GPU hangs
4712	 * while it is writing to 'i915_wedged'.
4713	 */
4714
4715	if (i915_reset_in_progress(&dev_priv->gpu_error))
4716		return -EAGAIN;
4717
4718	intel_runtime_pm_get(dev_priv);
4719
4720	i915_handle_error(dev, val,
4721			  "Manually setting wedged to %llu", val);
4722
4723	intel_runtime_pm_put(dev_priv);
4724
4725	return 0;
4726}
4727
4728DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4729			i915_wedged_get, i915_wedged_set,
4730			"%llu\n");
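/*
 * Usage sketch: writing any value triggers the driver's error handling
 * as if a hang had been detected (assuming the standard debugfs path):
 *
 *   # echo 1 > /sys/kernel/debug/dri/0/i915_wedged
 *
 * Reading the file back returns the current reset counter.
 */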
4731
4732static int
4733i915_ring_stop_get(void *data, u64 *val)
4734{
4735	struct drm_device *dev = data;
4736	struct drm_i915_private *dev_priv = dev->dev_private;
4737
4738	*val = dev_priv->gpu_error.stop_rings;
4739
4740	return 0;
4741}
4742
4743static int
4744i915_ring_stop_set(void *data, u64 val)
4745{
4746	struct drm_device *dev = data;
4747	struct drm_i915_private *dev_priv = dev->dev_private;
4748	int ret;
4749
4750	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
4751
4752	ret = mutex_lock_interruptible(&dev->struct_mutex);
4753	if (ret)
4754		return ret;
4755
4756	dev_priv->gpu_error.stop_rings = val;
4757	mutex_unlock(&dev->struct_mutex);
4758
4759	return 0;
4760}
4761
4762DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
4763			i915_ring_stop_get, i915_ring_stop_set,
4764			"0x%08llx\n");
4765
4766static int
4767i915_ring_missed_irq_get(void *data, u64 *val)
4768{
4769	struct drm_device *dev = data;
4770	struct drm_i915_private *dev_priv = dev->dev_private;
4771
4772	*val = dev_priv->gpu_error.missed_irq_rings;
4773	return 0;
4774}
4775
4776static int
4777i915_ring_missed_irq_set(void *data, u64 val)
4778{
4779	struct drm_device *dev = data;
4780	struct drm_i915_private *dev_priv = dev->dev_private;
4781	int ret;
4782
4783	/* Lock against concurrent debugfs callers */
4784	ret = mutex_lock_interruptible(&dev->struct_mutex);
4785	if (ret)
4786		return ret;
4787	dev_priv->gpu_error.missed_irq_rings = val;
4788	mutex_unlock(&dev->struct_mutex);
4789
4790	return 0;
4791}
4792
4793DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4794			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4795			"0x%08llx\n");
4796
4797static int
4798i915_ring_test_irq_get(void *data, u64 *val)
4799{
4800	struct drm_device *dev = data;
4801	struct drm_i915_private *dev_priv = dev->dev_private;
4802
4803	*val = dev_priv->gpu_error.test_irq_rings;
4804
4805	return 0;
4806}
4807
4808static int
4809i915_ring_test_irq_set(void *data, u64 val)
4810{
4811	struct drm_device *dev = data;
4812	struct drm_i915_private *dev_priv = dev->dev_private;
4813	int ret;
4814
4815	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
4816
4817	/* Lock against concurrent debugfs callers */
4818	ret = mutex_lock_interruptible(&dev->struct_mutex);
4819	if (ret)
4820		return ret;
4821
4822	dev_priv->gpu_error.test_irq_rings = val;
4823	mutex_unlock(&dev->struct_mutex);
4824
4825	return 0;
4826}
4827
4828DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4829			i915_ring_test_irq_get, i915_ring_test_irq_set,
4830			"0x%08llx\n");
4831
4832#define DROP_UNBOUND 0x1
4833#define DROP_BOUND 0x2
4834#define DROP_RETIRE 0x4
4835#define DROP_ACTIVE 0x8
4836#define DROP_ALL (DROP_UNBOUND | \
4837		  DROP_BOUND | \
4838		  DROP_RETIRE | \
4839		  DROP_ACTIVE)
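/*
 * Usage sketch: the DROP_* flags above combine into a bitmask written
 * to the i915_gem_drop_caches file (assuming the standard debugfs
 * path); 0xf is DROP_ALL, i.e. idle the GPU, retire requests and
 * shrink both bound and unbound objects:
 *
 *   # echo 0xf > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 */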
4840static int
4841i915_drop_caches_get(void *data, u64 *val)
4842{
4843	*val = DROP_ALL;
4844
4845	return 0;
4846}
4847
4848static int
4849i915_drop_caches_set(void *data, u64 val)
4850{
4851	struct drm_device *dev = data;
4852	struct drm_i915_private *dev_priv = dev->dev_private;
4853	int ret;
4854
4855	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
4856
4857	/* No need to check and wait for GPU resets; libdrm only
4858	 * auto-restarts ioctls on -EAGAIN. */
4859	ret = mutex_lock_interruptible(&dev->struct_mutex);
4860	if (ret)
4861		return ret;
4862
4863	if (val & DROP_ACTIVE) {
4864		ret = i915_gpu_idle(dev);
4865		if (ret)
4866			goto unlock;
4867	}
4868
4869	if (val & (DROP_RETIRE | DROP_ACTIVE))
4870		i915_gem_retire_requests(dev);
4871
4872	if (val & DROP_BOUND)
4873		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
4874
4875	if (val & DROP_UNBOUND)
4876		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
4877
4878unlock:
4879	mutex_unlock(&dev->struct_mutex);
4880
4881	return ret;
4882}
4883
4884DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4885			i915_drop_caches_get, i915_drop_caches_set,
4886			"0x%08llx\n");
4887
4888static int
4889i915_max_freq_get(void *data, u64 *val)
4890{
4891	struct drm_device *dev = data;
4892	struct drm_i915_private *dev_priv = dev->dev_private;
4893	int ret;
4894
4895	if (INTEL_INFO(dev)->gen < 6)
4896		return -ENODEV;
4897
4898	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4899
4900	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4901	if (ret)
4902		return ret;
4903
4904	*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
4905	mutex_unlock(&dev_priv->rps.hw_lock);
4906
4907	return 0;
4908}
4909
4910static int
4911i915_max_freq_set(void *data, u64 val)
4912{
4913	struct drm_device *dev = data;
4914	struct drm_i915_private *dev_priv = dev->dev_private;
4915	u32 hw_max, hw_min;
4916	int ret;
4917
4918	if (INTEL_INFO(dev)->gen < 6)
4919		return -ENODEV;
4920
4921	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4922
4923	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
4924
4925	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4926	if (ret)
4927		return ret;
4928
4929	/*
4930	 * Turbo will still be enabled, but won't go above the set value.
4931	 */
4932	val = intel_freq_opcode(dev_priv, val);
4933
4934	hw_max = dev_priv->rps.max_freq;
4935	hw_min = dev_priv->rps.min_freq;
4936
4937	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
4938		mutex_unlock(&dev_priv->rps.hw_lock);
4939		return -EINVAL;
4940	}
4941
4942	dev_priv->rps.max_freq_softlimit = val;
4943
4944	intel_set_rps(dev, val);
4945
4946	mutex_unlock(&dev_priv->rps.hw_lock);
4947
4948	return 0;
4949}
4950
4951DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
4952			i915_max_freq_get, i915_max_freq_set,
4953			"%llu\n");
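/*
 * Usage sketch: i915_max_freq (and i915_min_freq below) take values in
 * MHz, which intel_freq_opcode() translates into the hardware unit
 * (assuming the standard debugfs path):
 *
 *   # echo 900 > /sys/kernel/debug/dri/0/i915_max_freq
 *
 * caps the RPS softlimit so turbo will not clock above 900 MHz.
 */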
4954
4955static int
4956i915_min_freq_get(void *data, u64 *val)
4957{
4958	struct drm_device *dev = data;
4959	struct drm_i915_private *dev_priv = dev->dev_private;
4960	int ret;
4961
4962	if (INTEL_INFO(dev)->gen < 6)
4963		return -ENODEV;
4964
4965	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4966
4967	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4968	if (ret)
4969		return ret;
4970
4971	*val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
4972	mutex_unlock(&dev_priv->rps.hw_lock);
4973
4974	return 0;
4975}
4976
4977static int
4978i915_min_freq_set(void *data, u64 val)
4979{
4980	struct drm_device *dev = data;
4981	struct drm_i915_private *dev_priv = dev->dev_private;
4982	u32 hw_max, hw_min;
4983	int ret;
4984
4985	if (INTEL_INFO(dev)->gen < 6)
4986		return -ENODEV;
4987
4988	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4989
4990	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
4991
4992	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4993	if (ret)
4994		return ret;
4995
4996	/*
4997	 * Turbo will still be enabled, but won't go below the set value.
4998	 */
4999	val = intel_freq_opcode(dev_priv, val);
5000
5001	hw_max = dev_priv->rps.max_freq;
5002	hw_min = dev_priv->rps.min_freq;
5003
5004	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
5005		mutex_unlock(&dev_priv->rps.hw_lock);
5006		return -EINVAL;
5007	}
5008
5009	dev_priv->rps.min_freq_softlimit = val;
5010
5011	intel_set_rps(dev, val);
5012
5013	mutex_unlock(&dev_priv->rps.hw_lock);
5014
5015	return 0;
5016}
5017
5018DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
5019			i915_min_freq_get, i915_min_freq_set,
5020			"%llu\n");
5021
5022static int
5023i915_cache_sharing_get(void *data, u64 *val)
5024{
5025	struct drm_device *dev = data;
5026	struct drm_i915_private *dev_priv = dev->dev_private;
5027	u32 snpcr;
5028	int ret;
5029
5030	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
5031		return -ENODEV;
5032
5033	ret = mutex_lock_interruptible(&dev->struct_mutex);
5034	if (ret)
5035		return ret;
5036	intel_runtime_pm_get(dev_priv);
5037
5038	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
5039
5040	intel_runtime_pm_put(dev_priv);
5041	mutex_unlock(&dev_priv->dev->struct_mutex);
5042
5043	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
5044
5045	return 0;
5046}
5047
5048static int
5049i915_cache_sharing_set(void *data, u64 val)
5050{
5051	struct drm_device *dev = data;
5052	struct drm_i915_private *dev_priv = dev->dev_private;
5053	u32 snpcr;
5054
5055	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
5056		return -ENODEV;
5057
5058	if (val > 3)
5059		return -EINVAL;
5060
5061	intel_runtime_pm_get(dev_priv);
5062	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
5063
5064	/* Update the cache sharing policy here as well */
5065	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
5066	snpcr &= ~GEN6_MBC_SNPCR_MASK;
5067	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
5068	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
5069
5070	intel_runtime_pm_put(dev_priv);
5071	return 0;
5072}
5073
5074DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
5075			i915_cache_sharing_get, i915_cache_sharing_set,
5076			"%llu\n");
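/*
 * Usage sketch: the value written lands in the GEN6_MBC_SNPCR field,
 * so only 0-3 are accepted (assuming the standard debugfs path):
 *
 *   # echo 3 > /sys/kernel/debug/dri/0/i915_cache_sharing
 */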
5077
5078struct sseu_dev_status {
5079	unsigned int slice_total;
5080	unsigned int subslice_total;
5081	unsigned int subslice_per_slice;
5082	unsigned int eu_total;
5083	unsigned int eu_per_subslice;
5084};
5085
5086static void cherryview_sseu_device_status(struct drm_device *dev,
5087					  struct sseu_dev_status *stat)
5088{
5089	struct drm_i915_private *dev_priv = dev->dev_private;
5090	int ss_max = 2;
5091	int ss;
5092	u32 sig1[ss_max], sig2[ss_max];
5093
5094	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
5095	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
5096	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
5097	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
5098
5099	for (ss = 0; ss < ss_max; ss++) {
5100		unsigned int eu_cnt;
5101
5102		if (sig1[ss] & CHV_SS_PG_ENABLE)
5103			/* skip disabled subslice */
5104			continue;
5105
5106		stat->slice_total = 1;
5107		stat->subslice_per_slice++;
5108		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
5109			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
5110			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
5111			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
5112		stat->eu_total += eu_cnt;
5113		stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt);
5114	}
5115	stat->subslice_total = stat->subslice_per_slice;
5116}
5117
5118static void gen9_sseu_device_status(struct drm_device *dev,
5119				    struct sseu_dev_status *stat)
5120{
5121	struct drm_i915_private *dev_priv = dev->dev_private;
5122	int s_max = 3, ss_max = 4;
5123	int s, ss;
5124	u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
5125
5126	/* BXT has a single slice and at most 3 subslices. */
5127	if (IS_BROXTON(dev)) {
5128		s_max = 1;
5129		ss_max = 3;
5130	}
5131
5132	for (s = 0; s < s_max; s++) {
5133		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
5134		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
5135		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
5136	}
5137
5138	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
5139		     GEN9_PGCTL_SSA_EU19_ACK |
5140		     GEN9_PGCTL_SSA_EU210_ACK |
5141		     GEN9_PGCTL_SSA_EU311_ACK;
5142	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
5143		     GEN9_PGCTL_SSB_EU19_ACK |
5144		     GEN9_PGCTL_SSB_EU210_ACK |
5145		     GEN9_PGCTL_SSB_EU311_ACK;
5146
5147	for (s = 0; s < s_max; s++) {
5148		unsigned int ss_cnt = 0;
5149
5150		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
5151			/* skip disabled slice */
5152			continue;
5153
5154		stat->slice_total++;
5155
5156		if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
5157			ss_cnt = INTEL_INFO(dev)->subslice_per_slice;
5158
5159		for (ss = 0; ss < ss_max; ss++) {
5160			unsigned int eu_cnt;
5161
5162			if (IS_BROXTON(dev) &&
5163			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
5164				/* skip disabled subslice */
5165				continue;
5166
5167			if (IS_BROXTON(dev))
5168				ss_cnt++;
5169
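			/*
			 * Each ACK bit in eu_mask covers a pair of EUs,
			 * hence the factor of two on the popcount.
			 */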
5170			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
5171					       eu_mask[ss%2]);
5172			stat->eu_total += eu_cnt;
5173			stat->eu_per_subslice = max(stat->eu_per_subslice,
5174						    eu_cnt);
5175		}
5176
5177		stat->subslice_total += ss_cnt;
5178		stat->subslice_per_slice = max(stat->subslice_per_slice,
5179					       ss_cnt);
5180	}
5181}
5182
5183static void broadwell_sseu_device_status(struct drm_device *dev,
5184					 struct sseu_dev_status *stat)
5185{
5186	struct drm_i915_private *dev_priv = dev->dev_private;
5187	int s;
5188	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
5189
5190	stat->slice_total = hweight32(slice_info & GEN8_LSLICESTAT_MASK);
5191
5192	if (stat->slice_total) {
5193		stat->subslice_per_slice = INTEL_INFO(dev)->subslice_per_slice;
5194		stat->subslice_total = stat->slice_total *
5195				       stat->subslice_per_slice;
5196		stat->eu_per_subslice = INTEL_INFO(dev)->eu_per_subslice;
5197		stat->eu_total = stat->eu_per_subslice * stat->subslice_total;
5198
5199		/* subtract fused off EU(s) from enabled slice(s) */
5200		for (s = 0; s < stat->slice_total; s++) {
5201			u8 subslice_7eu = INTEL_INFO(dev)->subslice_7eu[s];
5202
5203			stat->eu_total -= hweight8(subslice_7eu);
5204		}
5205	}
5206}
5207
5208static int i915_sseu_status(struct seq_file *m, void *unused)
5209{
5210	struct drm_info_node *node = (struct drm_info_node *) m->private;
5211	struct drm_device *dev = node->minor->dev;
5212	struct sseu_dev_status stat;
5213
5214	if (INTEL_INFO(dev)->gen < 8)
5215		return -ENODEV;
5216
5217	seq_puts(m, "SSEU Device Info\n");
5218	seq_printf(m, "  Available Slice Total: %u\n",
5219		   INTEL_INFO(dev)->slice_total);
5220	seq_printf(m, "  Available Subslice Total: %u\n",
5221		   INTEL_INFO(dev)->subslice_total);
5222	seq_printf(m, "  Available Subslice Per Slice: %u\n",
5223		   INTEL_INFO(dev)->subslice_per_slice);
5224	seq_printf(m, "  Available EU Total: %u\n",
5225		   INTEL_INFO(dev)->eu_total);
5226	seq_printf(m, "  Available EU Per Subslice: %u\n",
5227		   INTEL_INFO(dev)->eu_per_subslice);
5228	seq_printf(m, "  Has Slice Power Gating: %s\n",
5229		   yesno(INTEL_INFO(dev)->has_slice_pg));
5230	seq_printf(m, "  Has Subslice Power Gating: %s\n",
5231		   yesno(INTEL_INFO(dev)->has_subslice_pg));
5232	seq_printf(m, "  Has EU Power Gating: %s\n",
5233		   yesno(INTEL_INFO(dev)->has_eu_pg));
5234
5235	seq_puts(m, "SSEU Device Status\n");
5236	memset(&stat, 0, sizeof(stat));
5237	if (IS_CHERRYVIEW(dev)) {
5238		cherryview_sseu_device_status(dev, &stat);
5239	} else if (IS_BROADWELL(dev)) {
5240		broadwell_sseu_device_status(dev, &stat);
5241	} else if (INTEL_INFO(dev)->gen >= 9) {
5242		gen9_sseu_device_status(dev, &stat);
5243	}
5244	seq_printf(m, "  Enabled Slice Total: %u\n",
5245		   stat.slice_total);
5246	seq_printf(m, "  Enabled Subslice Total: %u\n",
5247		   stat.subslice_total);
5248	seq_printf(m, "  Enabled Subslice Per Slice: %u\n",
5249		   stat.subslice_per_slice);
5250	seq_printf(m, "  Enabled EU Total: %u\n",
5251		   stat.eu_total);
5252	seq_printf(m, "  Enabled EU Per Subslice: %u\n",
5253		   stat.eu_per_subslice);
5254
5255	return 0;
5256}
5257
5258static int i915_forcewake_open(struct inode *inode, struct file *file)
5259{
5260	struct drm_device *dev = inode->i_private;
5261	struct drm_i915_private *dev_priv = dev->dev_private;
5262
5263	if (INTEL_INFO(dev)->gen < 6)
5264		return 0;
5265
5266	intel_runtime_pm_get(dev_priv);
5267	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5268
5269	return 0;
5270}
5271
5272static int i915_forcewake_release(struct inode *inode, struct file *file)
5273{
5274	struct drm_device *dev = inode->i_private;
5275	struct drm_i915_private *dev_priv = dev->dev_private;
5276
5277	if (INTEL_INFO(dev)->gen < 6)
5278		return 0;
5279
5280	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5281	intel_runtime_pm_put(dev_priv);
5282
5283	return 0;
5284}
5285
5286static const struct file_operations i915_forcewake_fops = {
5287	.owner = THIS_MODULE,
5288	.open = i915_forcewake_open,
5289	.release = i915_forcewake_release,
5290};
5291
5292static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
5293{
5294	struct drm_device *dev = minor->dev;
5295	struct dentry *ent;
5296
5297	ent = debugfs_create_file("i915_forcewake_user",
5298				  S_IRUSR,
5299				  root, dev,
5300				  &i915_forcewake_fops);
5301	if (!ent)
5302		return -ENOMEM;
5303
5304	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
5305}
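/*
 * Usage sketch: keeping i915_forcewake_user open holds all forcewake
 * domains, so MMIO reads from user space (e.g. via the intel_reg tool)
 * do not come back as zeros while the GT is power gated. Assuming the
 * standard debugfs path:
 *
 *   # exec 3</sys/kernel/debug/dri/0/i915_forcewake_user
 *   ... poke registers ...
 *   # exec 3<&-
 */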
5306
5307static int i915_debugfs_create(struct dentry *root,
5308			       struct drm_minor *minor,
5309			       const char *name,
5310			       const struct file_operations *fops)
5311{
5312	struct drm_device *dev = minor->dev;
5313	struct dentry *ent;
5314
5315	ent = debugfs_create_file(name,
5316				  S_IRUGO | S_IWUSR,
5317				  root, dev,
5318				  fops);
5319	if (!ent)
5320		return -ENOMEM;
5321
5322	return drm_add_fake_info_node(minor, ent, fops);
5323}
5324
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_stolen", i915_gem_stolen_list_info, 0},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_dump_lrc", i915_dump_lrc, 0},
	{"i915_execlists", i915_execlists, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_semaphore_status", i915_semaphore_status, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

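/*
 * Writable entries: unlike i915_debugfs_list above, each of these
 * supplies its own file_operations so it can accept writes, e.g.
 * (the dri card index may differ):
 *
 *	echo 1 > /sys/kernel/debug/dri/0/i915_wedged
 */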
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_stop", &i915_ring_stop_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	{"i915_error_state", &i915_error_state_fops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_fc_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops}
};

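/*
 * Initialize the per-pipe CRC bookkeeping (lock, wait queue, opened
 * flag) so the i915_pipe_crc_* files created in i915_debugfs_init()
 * can rely on it from their first open.
 */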
void intel_display_crc_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];

		pipe_crc->opened = false;
		spin_lock_init(&pipe_crc->lock);
		init_waitqueue_head(&pipe_crc->wq);
	}
}

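/*
 * Create every i915 debugfs file: the forcewake handle, one CRC file
 * per pipe, the writable i915_debugfs_files[] entries and, last, the
 * read-only i915_debugfs_list[] entries via the drm core helper.
 */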
int i915_debugfs_init(struct drm_minor *minor)
{
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

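/*
 * Undo i915_debugfs_init(): the drm core entries go first, then each
 * fake info node (forcewake, CRC and writable files) is removed via
 * the file_operations pointer it was registered under.
 */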
void i915_debugfs_cleanup(struct drm_minor *minor)
{
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);

	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *)&i915_pipe_crc_data[i];

		drm_debugfs_remove_files(info_list, 1, minor);
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *) i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}

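/*
 * Per-connector DPCD dumping: each dpcd_block below names a range of
 * DisplayPort Configuration Data registers to read back over the AUX
 * channel for the connector's i915_dpcd file.
 */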
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};

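/*
 * The per-block dump size resolves in priority order: .end when set
 * (an inclusive range, hence the +1 below), then .size, then a single
 * byte. Reads are capped at sizeof(buf), i.e. 16 bytes, for now.
 */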
static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	uint8_t buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err <= 0) {
			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
				  size, b->offset, err);
			continue;
		}

		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
	}

	return 0;
}

static int i915_dpcd_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_dpcd_show, inode->i_private);
}

static const struct file_operations i915_dpcd_fops = {
	.owner = THIS_MODULE,
	.open = i915_dpcd_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on failure.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root, connector,
				    &i915_dpcd_fops);

	return 0;
}