   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
   5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
   6 *
   7 * Permission is hereby granted, free of charge, to any person obtaining a
   8 * copy of this software and associated documentation files (the
   9 * "Software"), to deal in the Software without restriction, including
  10 * without limitation the rights to use, copy, modify, merge, publish,
  11 * distribute, sub license, and/or sell copies of the Software, and to
  12 * permit persons to whom the Software is furnished to do so, subject to
  13 * the following conditions:
  14 *
  15 * The above copyright notice and this permission notice (including the
  16 * next paragraph) shall be included in all copies or substantial portions
  17 * of the Software.
  18 *
  19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  26 *
  27 **************************************************************************/
  28#include "vmwgfx_kms.h"
  29
  30#include "vmwgfx_bo.h"
  31#include "vmwgfx_vkms.h"
  32#include "vmw_surface_cache.h"
  33
  34#include <drm/drm_atomic.h>
  35#include <drm/drm_atomic_helper.h>
  36#include <drm/drm_damage_helper.h>
  37#include <drm/drm_fourcc.h>
  38#include <drm/drm_rect.h>
  39#include <drm/drm_sysfs.h>
  40#include <drm/drm_edid.h>
  41
  42void vmw_du_init(struct vmw_display_unit *du)
  43{
  44	vmw_vkms_crtc_init(&du->crtc);
  45}
  46
  47void vmw_du_cleanup(struct vmw_display_unit *du)
  48{
  49	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
  50
  51	vmw_vkms_crtc_cleanup(&du->crtc);
  52	drm_plane_cleanup(&du->primary);
  53	if (vmw_cmd_supported(dev_priv))
  54		drm_plane_cleanup(&du->cursor.base);
  55
  56	drm_connector_unregister(&du->connector);
  57	drm_crtc_cleanup(&du->crtc);
  58	drm_encoder_cleanup(&du->encoder);
  59	drm_connector_cleanup(&du->connector);
  60}
  61
  62/*
  63 * Display Unit Cursor functions
  64 */
  65
  66static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
  67static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
  68				  struct vmw_plane_state *vps,
  69				  u32 *image, u32 width, u32 height,
  70				  u32 hotspotX, u32 hotspotY);
  71
  72struct vmw_svga_fifo_cmd_define_cursor {
  73	u32 cmd;
  74	SVGAFifoCmdDefineAlphaCursor cursor;
  75};
  76
  77/**
  78 * vmw_send_define_cursor_cmd - queue a define cursor command
  79 * @dev_priv: the private driver struct
  80 * @image: buffer which holds the cursor image
  81 * @width: width of the mouse cursor image
  82 * @height: height of the mouse cursor image
  83 * @hotspotX: the horizontal position of the mouse hotspot
  84 * @hotspotY: the vertical position of the mouse hotspot
  85 */
  86static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
  87				       u32 *image, u32 width, u32 height,
  88				       u32 hotspotX, u32 hotspotY)
  89{
  90	struct vmw_svga_fifo_cmd_define_cursor *cmd;
  91	const u32 image_size = width * height * sizeof(*image);
  92	const u32 cmd_size = sizeof(*cmd) + image_size;
  93
  94	/* Try to reserve fifocmd space and swallow any failures;
  95	   such reservations cannot be left unconsumed for long
  96	   under the risk of clogging other fifocmd users, so
  97	   we treat reservations separately from the way we treat
  98	   other fallible KMS-atomic resources at prepare_fb */
  99	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
 100
 101	if (unlikely(!cmd))
 102		return;
 103
 104	memset(cmd, 0, sizeof(*cmd));
 105
 106	memcpy(&cmd[1], image, image_size);
 107
 108	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
 109	cmd->cursor.id = 0;
 110	cmd->cursor.width = width;
 111	cmd->cursor.height = height;
 112	cmd->cursor.hotspotX = hotspotX;
 113	cmd->cursor.hotspotY = hotspotY;
 114
 115	vmw_cmd_commit_flush(dev_priv, cmd_size);
 116}
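
/*
 * Note on the layout above: the reservation is variable-length. In FIFO
 * memory it is
 *
 *	u32 cmd;				SVGA_CMD_DEFINE_ALPHA_CURSOR
 *	SVGAFifoCmdDefineAlphaCursor cursor;	id, size and hotspot
 *	u32 pixels[width * height];		copied to &cmd[1]
 *
 * which is why only sizeof(*cmd) bytes are zeroed, the pixel payload is
 * copied separately, and cmd_size rather than sizeof(*cmd) is committed.
 */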
 117
 118/**
 119 * vmw_cursor_update_image - update the cursor image on the provided plane
 120 * @dev_priv: the private driver struct
 121 * @vps: the plane state of the cursor plane
 122 * @image: buffer which holds the cursor image
 123 * @width: width of the mouse cursor image
 124 * @height: height of the mouse cursor image
 125 * @hotspotX: the horizontal position of the mouse hotspot
 126 * @hotspotY: the vertical position of the mouse hotspot
 127 */
 128static void vmw_cursor_update_image(struct vmw_private *dev_priv,
 129				    struct vmw_plane_state *vps,
 130				    u32 *image, u32 width, u32 height,
 131				    u32 hotspotX, u32 hotspotY)
 132{
 133	if (vps->cursor.bo)
 134		vmw_cursor_update_mob(dev_priv, vps, image,
 135				      vps->base.crtc_w, vps->base.crtc_h,
 136				      hotspotX, hotspotY);
 137
 138	else
 139		vmw_send_define_cursor_cmd(dev_priv, image, width, height,
 140					   hotspotX, hotspotY);
 141}
 142
 143
 144/**
 145 * vmw_cursor_update_mob - Update cursor via the CursorMob mechanism
 146 *
 147 * Called from inside vmw_du_cursor_plane_atomic_update to actually
 148 * make the cursor-image live.
 149 *
 150 * @dev_priv: device to work with
 151 * @vps: the plane state of the cursor plane
 152 * @image: cursor source data to fill the MOB with
 153 * @width: source data width
 154 * @height: source data height
 155 * @hotspotX: cursor hotspot x
 156 * @hotspotY: cursor hotspot y
 157 */
 158static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
 159				  struct vmw_plane_state *vps,
 160				  u32 *image, u32 width, u32 height,
 161				  u32 hotspotX, u32 hotspotY)
 162{
 163	SVGAGBCursorHeader *header;
 164	SVGAGBAlphaCursorHeader *alpha_header;
 165	const u32 image_size = width * height * sizeof(*image);
 166
 167	header = vmw_bo_map_and_cache(vps->cursor.bo);
 168	alpha_header = &header->header.alphaHeader;
 169
 170	memset(header, 0, sizeof(*header));
 171
 172	header->type = SVGA_ALPHA_CURSOR;
 173	header->sizeInBytes = image_size;
 174
 175	alpha_header->hotspotX = hotspotX;
 176	alpha_header->hotspotY = hotspotY;
 177	alpha_header->width = width;
 178	alpha_header->height = height;
 179
 180	memcpy(header + 1, image, image_size);
 181	vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
 182		  vps->cursor.bo->tbo.resource->start);
 183}
 184
 185
 186static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
 187{
 188	return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
 189}
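
/*
 * Example (illustrative arithmetic only): for the 64x64 ARGB cursor that
 * vmw_du_cursor_plane_atomic_check() enforces, this is
 * 64 * 64 * 4 + sizeof(SVGAGBCursorHeader): 16 KiB of pixel data plus a
 * small device-defined header.
 */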
 190
 191/**
 192 * vmw_du_cursor_plane_acquire_image -- Acquire the image data
 193 * @vps: cursor plane state
 194 */
 195static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
 196{
 197	struct vmw_surface *surf;
 198
 199	if (vmw_user_object_is_null(&vps->uo))
 200		return NULL;
 201
 202	surf = vmw_user_object_surface(&vps->uo);
 203	if (surf && !vmw_user_object_is_mapped(&vps->uo))
 204		return surf->snooper.image;
 205
 206	return vmw_user_object_map(&vps->uo);
 207}
 208
 209static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
 210					    struct vmw_plane_state *new_vps)
 211{
 212	void *old_image;
 213	void *new_image;
 214	u32 size;
 215	bool changed;
 216
 217	if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
 218	    old_vps->base.crtc_h != new_vps->base.crtc_h)
 219		return true;
 220
 221	if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
 222	    old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
 223		return true;
 224
 225	size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
 226
 227	old_image = vmw_du_cursor_plane_acquire_image(old_vps);
 228	new_image = vmw_du_cursor_plane_acquire_image(new_vps);
 229
 230	changed = false;
 231	if (old_image && new_image && old_image != new_image)
 232		changed = memcmp(old_image, new_image, size) != 0;
 233
 234	return changed;
 235}
 236
 237static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
 238{
 239	if (!(*vbo))
 240		return;
 241
 242	ttm_bo_unpin(&(*vbo)->tbo);
 243	vmw_bo_unreference(vbo);
 244}
 245
 246static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
 247				  struct vmw_plane_state *vps)
 248{
 249	u32 i;
 250
 251	if (!vps->cursor.bo)
 252		return;
 253
 254	vmw_du_cursor_plane_unmap_cm(vps);
 255
 256	/* Look for a free slot to return this mob to the cache. */
 257	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
 258		if (!vcp->cursor_mobs[i]) {
 259			vcp->cursor_mobs[i] = vps->cursor.bo;
 260			vps->cursor.bo = NULL;
 261			return;
 262		}
 263	}
 264
 265	/* Cache is full: See if this mob is bigger than an existing mob. */
 266	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
 267		if (vcp->cursor_mobs[i]->tbo.base.size <
 268		    vps->cursor.bo->tbo.base.size) {
 269			vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
 270			vcp->cursor_mobs[i] = vps->cursor.bo;
 271			vps->cursor.bo = NULL;
 272			return;
 273		}
 274	}
 275
 276	/* Destroy it if it's not worth caching. */
 277	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
 278}
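
/*
 * The put path above implements a simple eviction policy: a retired cursor
 * MOB is (1) parked in a free cursor_mobs[] slot, else (2) swapped in for a
 * strictly smaller cached MOB, which is destroyed instead, else
 * (3) destroyed outright. The cache thus keeps the largest MOBs seen,
 * bounding reallocation churn as the cursor size changes.
 */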
 279
 280static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
 281				 struct vmw_plane_state *vps)
 282{
 283	struct vmw_private *dev_priv = vmw_priv(vcp->base.dev);
 284	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
 285	u32 i;
 286	u32 cursor_max_dim, mob_max_size;
 287	struct vmw_fence_obj *fence = NULL;
 288	int ret;
 289
 290	if (!dev_priv->has_mob ||
 291	    (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
 292		return -EINVAL;
 293
 294	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
 295	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
 296
 297	if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
 298	    vps->base.crtc_h > cursor_max_dim)
 299		return -EINVAL;
 300
 301	if (vps->cursor.bo) {
 302		if (vps->cursor.bo->tbo.base.size >= size)
 303			return 0;
 304		vmw_du_put_cursor_mob(vcp, vps);
 305	}
 306
 307	/* Look for an unused mob in the cache. */
 308	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
 309		if (vcp->cursor_mobs[i] &&
 310		    vcp->cursor_mobs[i]->tbo.base.size >= size) {
 311			vps->cursor.bo = vcp->cursor_mobs[i];
 312			vcp->cursor_mobs[i] = NULL;
 313			return 0;
 314		}
 315	}
 316	/* Create a new mob if we can't find an existing one. */
 317	ret = vmw_bo_create_and_populate(dev_priv, size,
 318					 VMW_BO_DOMAIN_MOB,
 319					 &vps->cursor.bo);
 320
 321	if (ret != 0)
 322		return ret;
 323
 324	/* Fence the mob creation so we are guaranteed to have the mob */
 325	ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
 326	if (ret != 0)
 327		goto teardown;
 328
 329	ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 330	if (ret != 0) {
 331		ttm_bo_unreserve(&vps->cursor.bo->tbo);
 332		goto teardown;
 333	}
 334
 335	dma_fence_wait(&fence->base, false);
 336	dma_fence_put(&fence->base);
 337
 338	ttm_bo_unreserve(&vps->cursor.bo->tbo);
 339	return 0;
 340
 341teardown:
 342	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
 343	return ret;
 344}
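
/*
 * The synchronous dma_fence_wait() above is what "fence the mob creation"
 * means in practice: the new MOB is guaranteed to exist on the device
 * before vmw_cursor_update_mob() writes a cursor header into it.
 */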
 345
 346
 347static void vmw_cursor_update_position(struct vmw_private *dev_priv,
 348				       bool show, int x, int y)
 349{
 350	const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
 351					     : SVGA_CURSOR_ON_HIDE;
 352	uint32_t count;
 353
 354	spin_lock(&dev_priv->cursor_lock);
 355	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
 356		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
 357		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
 358		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
 359		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
 360		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
 361	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
 362		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
 363		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
 364		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
 365		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
 366		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
 367	} else {
 368		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
 369		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
 370		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
 371	}
 372	spin_unlock(&dev_priv->cursor_lock);
 373}
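
/*
 * The three branches above run from newest to oldest device interface:
 * SVGA_REG_CURSOR4_* (SVGA_CAP2_EXTRA_REGS), then the cursor-bypass-3 FIFO
 * fields, then the legacy SVGA_REG_CURSOR* registers. Only the CURSOR4 path
 * carries a screen id, and it is left at SVGA3D_INVALID_ID here so the
 * position applies globally.
 */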
 374
 375void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 376			  struct ttm_object_file *tfile,
 377			  struct ttm_buffer_object *bo,
 378			  SVGA3dCmdHeader *header)
 379{
 380	struct ttm_bo_kmap_obj map;
 381	unsigned long kmap_offset;
 382	unsigned long kmap_num;
 383	SVGA3dCopyBox *box;
 384	unsigned box_count;
 385	void *virtual;
 386	bool is_iomem;
 387	struct vmw_dma_cmd {
 388		SVGA3dCmdHeader header;
 389		SVGA3dCmdSurfaceDMA dma;
 390	} *cmd;
 391	int i, ret;
 392	const struct SVGA3dSurfaceDesc *desc =
 393		vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
 394	const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
 395
 396	cmd = container_of(header, struct vmw_dma_cmd, header);
 397
 398	/* No snooper installed, nothing to copy */
 399	if (!srf->snooper.image)
 400		return;
 401
 402	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
 403		DRM_ERROR("face and mipmap for cursors should never != 0\n");
 404		return;
 405	}
 406
 407	if (cmd->header.size < 64) {
 408		DRM_ERROR("at least one full copy box must be given\n");
 409		return;
 410	}
 411
 412	box = (SVGA3dCopyBox *)&cmd[1];
 413	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
 414			sizeof(SVGA3dCopyBox);
 415
 416	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
 417	    box->x != 0    || box->y != 0    || box->z != 0    ||
 418	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
 419	    box->d != 1    || box_count != 1 ||
 420	    box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
 421		/* TODO handle non-page-aligned offsets */
 422		/* TODO handle dst & src offsets != 0 */
 423		/* TODO handle more than one copy box */
 424		DRM_ERROR("Can't snoop dma request for cursor!\n");
 425		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
 426			  box->srcx, box->srcy, box->srcz,
 427			  box->x, box->y, box->z,
 428			  box->w, box->h, box->d, box_count,
 429			  cmd->dma.guest.ptr.offset);
 430		return;
 431	}
 432
 433	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
 434	kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
 435
 436	ret = ttm_bo_reserve(bo, true, false, NULL);
 437	if (unlikely(ret != 0)) {
 438		DRM_ERROR("reserve failed\n");
 439		return;
 440	}
 441
 442	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
 443	if (unlikely(ret != 0))
 444		goto err_unreserve;
 445
 446	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 447
 448	if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
 449		memcpy(srf->snooper.image, virtual,
 450		       VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
 451	} else {
 452		/* Image is unsigned pointer. */
 453		for (i = 0; i < box->h; i++)
 454			memcpy(srf->snooper.image + i * image_pitch,
 455			       virtual + i * cmd->dma.guest.pitch,
 456			       box->w * desc->pitchBytesPerBlock);
 457	}
 458
 459	srf->snooper.age++;
 460
 461	ttm_bo_kunmap(&map);
 462err_unreserve:
 463	ttm_bo_unreserve(bo);
 464}
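
/*
 * Snooping keeps a CPU copy of the cursor image so that
 * vmw_kms_cursor_post_execbuf() below can re-issue a define-cursor command
 * whenever snooper.age shows the surface changed; the strict checks above
 * simply reject any SURFACE_DMA that the copy loop cannot represent.
 */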
 465
 466/**
 467 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 468 *
 469 * @dev_priv: Pointer to the device private struct.
 470 *
 471 * Clears all legacy hotspots.
 472 */
 473void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
 474{
 475	struct drm_device *dev = &dev_priv->drm;
 476	struct vmw_display_unit *du;
 477	struct drm_crtc *crtc;
 478
 479	drm_modeset_lock_all(dev);
 480	drm_for_each_crtc(crtc, dev) {
 481		du = vmw_crtc_to_du(crtc);
 482
 483		du->hotspot_x = 0;
 484		du->hotspot_y = 0;
 485	}
 486	drm_modeset_unlock_all(dev);
 487}
 488
 489void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
 490{
 491	struct drm_device *dev = &dev_priv->drm;
 492	struct vmw_display_unit *du;
 493	struct drm_crtc *crtc;
 494
 495	mutex_lock(&dev->mode_config.mutex);
 496
 497	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 498		du = vmw_crtc_to_du(crtc);
 499		if (!du->cursor_surface ||
 500		    du->cursor_age == du->cursor_surface->snooper.age ||
 501		    !du->cursor_surface->snooper.image)
 502			continue;
 503
 504		du->cursor_age = du->cursor_surface->snooper.age;
 505		vmw_send_define_cursor_cmd(dev_priv,
 506					   du->cursor_surface->snooper.image,
 507					   VMW_CURSOR_SNOOP_WIDTH,
 508					   VMW_CURSOR_SNOOP_HEIGHT,
 509					   du->hotspot_x + du->core_hotspot_x,
 510					   du->hotspot_y + du->core_hotspot_y);
 511	}
 512
 513	mutex_unlock(&dev->mode_config.mutex);
 514}
 515
 516
 517void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
 518{
 519	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
 520	u32 i;
 521
 522	vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0);
 523
 524	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
 525		vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
 526
 527	drm_plane_cleanup(plane);
 528}
 529
 530
 531void vmw_du_primary_plane_destroy(struct drm_plane *plane)
 532{
 533	drm_plane_cleanup(plane);
 534
 535	/* Planes are static in our case so we don't free them */
 536}
 537
 538
 539/**
 540 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
 541 *
 542 * @vps: plane state associated with the display surface
 543 */
 544void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps)
 545{
 546	struct vmw_surface *surf = vmw_user_object_surface(&vps->uo);
 547
 548	if (surf) {
 549		if (vps->pinned) {
 550			vmw_resource_unpin(&surf->res);
 551			vps->pinned--;
 552		}
 553	}
 554}
 555
 556
 557/**
 558 * vmw_du_plane_cleanup_fb - Unpins the plane surface
 559 *
 560 * @plane:  display plane
 561 * @old_state: Contains the FB to clean up
 562 *
 563 * Unpins the framebuffer surface
 564 *
 565 * Called from the atomic helpers as the plane's cleanup_fb hook.
 566 */
 567void
 568vmw_du_plane_cleanup_fb(struct drm_plane *plane,
 569			struct drm_plane_state *old_state)
 570{
 571	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
 572
 573	vmw_du_plane_unpin_surf(vps);
 574}
 575
 576
 577/**
 578 * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
 579 *
 580 * @vps: plane_state
 581 *
 582 * Returns 0 on success
 583 */
 584
 585static int
 586vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
 587{
 588	int ret;
 589	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
 590	struct ttm_buffer_object *bo;
 591
 592	if (!vps->cursor.bo)
 593		return -EINVAL;
 594
 595	bo = &vps->cursor.bo->tbo;
 596
 597	if (bo->base.size < size)
 598		return -EINVAL;
 599
 600	if (vps->cursor.bo->map.virtual)
 601		return 0;
 602
 603	ret = ttm_bo_reserve(bo, false, false, NULL);
 604	if (unlikely(ret != 0))
 605		return -ENOMEM;
 606
 607	vmw_bo_map_and_cache(vps->cursor.bo);
 608
 609	ttm_bo_unreserve(bo);
 610
 611	if (unlikely(ret != 0))
 612		return -ENOMEM;
 613
 614	return 0;
 615}
 616
 617
 618/**
 619 * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
 620 *
 621 * @vps: state of the cursor plane
 622 *
 623 * Returns 0 on success
 624 */
 625
 626static int
 627vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
 628{
 629	int ret = 0;
 630	struct vmw_bo *vbo = vps->cursor.bo;
 631
 632	if (!vbo || !vbo->map.virtual)
 633		return 0;
 634
 635	ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
 636	if (likely(ret == 0)) {
 637		vmw_bo_unmap(vbo);
 638		ttm_bo_unreserve(&vbo->tbo);
 639	}
 640
 641	return ret;
 642}
 643
 644
 645/**
 646 * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
 647 *
 648 * @plane: cursor plane
 649 * @old_state: contains the state to clean up
 650 *
 651 * Unmaps all cursor bo mappings and unpins the cursor surface
 652 *
 653 * Called from the atomic helpers as the cursor plane's cleanup_fb hook.
 654 */
 655void
 656vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
 657			       struct drm_plane_state *old_state)
 658{
 659	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
 660	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
 661
 662	if (!vmw_user_object_is_null(&vps->uo))
 663		vmw_user_object_unmap(&vps->uo);
 664
 665	vmw_du_cursor_plane_unmap_cm(vps);
 666	vmw_du_put_cursor_mob(vcp, vps);
 667
 668	vmw_du_plane_unpin_surf(vps);
 669	vmw_user_object_unref(&vps->uo);
 670}
 671
 672
 673/**
 674 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 675 *
 676 * @plane:  display plane
 677 * @new_state: info on the new plane state, including the FB
 678 *
 679 * Returns 0 on success
 680 */
 681int
 682vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
 683			       struct drm_plane_state *new_state)
 684{
 685	struct drm_framebuffer *fb = new_state->fb;
 686	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
 687	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
 688	struct vmw_bo *bo = NULL;
 689	int ret = 0;
 690
 691	if (!vmw_user_object_is_null(&vps->uo)) {
 692		vmw_user_object_unmap(&vps->uo);
 693		vmw_user_object_unref(&vps->uo);
 694	}
 695
 696	if (fb) {
 697		if (vmw_framebuffer_to_vfb(fb)->bo) {
 698			vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
 699			vps->uo.surface = NULL;
 700		} else {
 701			memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
 702		}
 703		vmw_user_object_ref(&vps->uo);
 704	}
 705
 706	bo = vmw_user_object_buffer(&vps->uo);
 707	if (bo) {
 708		struct ttm_operation_ctx ctx = {false, false};
 709
 710		ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
 711		if (ret != 0)
 712			return -ENOMEM;
 713
 714		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 715		if (ret != 0) {
			/* Don't leak the reservation taken above on failure. */
			ttm_bo_unreserve(&bo->tbo);
 716			return -ENOMEM;
		}
 717
 718		vmw_bo_pin_reserved(bo, true);
 719		if (vmw_framebuffer_to_vfb(fb)->bo) {
 720			const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
 721
 722			(void)vmw_bo_map_and_cache_size(bo, size);
 723		} else {
 724			vmw_bo_map_and_cache(bo);
 725		}
 726		ttm_bo_unreserve(&bo->tbo);
 727	}
 728
 729	if (!vmw_user_object_is_null(&vps->uo)) {
 730		vmw_du_get_cursor_mob(vcp, vps);
 731		vmw_du_cursor_plane_map_cm(vps);
 732	}
 733
 734	return 0;
 735}
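
/*
 * All fallible work -- pinning, mapping and cursor-MOB setup -- happens in
 * prepare_fb above, following the KMS-atomic rule that atomic_update itself
 * must not fail; the FIFO reservation discussed in
 * vmw_send_define_cursor_cmd() is the one tolerated exception.
 */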
 736
 737
 738void
 739vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
 740				  struct drm_atomic_state *state)
 741{
 742	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 743									   plane);
 744	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
 745									   plane);
 746	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
 747	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
 748	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 749	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
 750	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
 751	struct vmw_bo *old_bo = NULL;
 752	struct vmw_bo *new_bo = NULL;
 753	struct ww_acquire_ctx ctx;
 754	s32 hotspot_x, hotspot_y;
 755	int ret;
 756
 757	hotspot_x = du->hotspot_x + new_state->hotspot_x;
 758	hotspot_y = du->hotspot_y + new_state->hotspot_y;
 759
 760	du->cursor_surface = vmw_user_object_surface(&vps->uo);
 761
 762	if (vmw_user_object_is_null(&vps->uo)) {
 763		vmw_cursor_update_position(dev_priv, false, 0, 0);
 764		return;
 765	}
 766
 767	vps->cursor.hotspot_x = hotspot_x;
 768	vps->cursor.hotspot_y = hotspot_y;
 769
 770	if (du->cursor_surface)
 771		du->cursor_age = du->cursor_surface->snooper.age;
 772
 773	ww_acquire_init(&ctx, &reservation_ww_class);
 774
 775	if (!vmw_user_object_is_null(&old_vps->uo)) {
 776		old_bo = vmw_user_object_buffer(&old_vps->uo);
 777		ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
 778		if (ret != 0)
 779			return;
 780	}
 781
 782	if (!vmw_user_object_is_null(&vps->uo)) {
 783		new_bo = vmw_user_object_buffer(&vps->uo);
 784		if (old_bo != new_bo) {
 785			ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
 786			if (ret != 0) {
 787				if (old_bo) {
 788					ttm_bo_unreserve(&old_bo->tbo);
 789					ww_acquire_fini(&ctx);
 790				}
 791				return;
 792			}
 793		} else {
 794			new_bo = NULL;
 795		}
 796	}
 797	if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
 798		/*
 799		 * If it hasn't changed, avoid making the device do extra
 800		 * work by keeping the old cursor active.
 801		 */
 802		struct vmw_cursor_plane_state tmp = old_vps->cursor;
 803		old_vps->cursor = vps->cursor;
 804		vps->cursor = tmp;
 805	} else {
 806		void *image = vmw_du_cursor_plane_acquire_image(vps);
 807		if (image)
 808			vmw_cursor_update_image(dev_priv, vps, image,
 809						new_state->crtc_w,
 810						new_state->crtc_h,
 811						hotspot_x, hotspot_y);
 812	}
 813
 814	if (new_bo)
 815		ttm_bo_unreserve(&new_bo->tbo);
 816	if (old_bo)
 817		ttm_bo_unreserve(&old_bo->tbo);
 818
 819	ww_acquire_fini(&ctx);
 820
 821	du->cursor_x = new_state->crtc_x + du->set_gui_x;
 822	du->cursor_y = new_state->crtc_y + du->set_gui_y;
 823
 824	vmw_cursor_update_position(dev_priv, true,
 825				   du->cursor_x + hotspot_x,
 826				   du->cursor_y + hotspot_y);
 827
 828	du->core_hotspot_x = hotspot_x - du->hotspot_x;
 829	du->core_hotspot_y = hotspot_y - du->hotspot_y;
 830}
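
/*
 * Note the swap above: when the image is unchanged, the new state inherits
 * the cursor sub-state (MOB + hotspot) that is already live on the device,
 * while the old state receives the freshly prepared one, which cleanup_fb
 * then returns to the MOB cache. No redundant upload reaches the device.
 */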
 831
 832
 833/**
 834 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 835 *
 836 * @plane: display plane
 837 * @state: info on the new plane state, including the FB
 838 *
 839 * Check if the new state is settable given the current state.  Other
 840 * than what the atomic helper checks, we care about crtc fitting
 841 * the FB and maintaining one active framebuffer.
 842 *
 843 * Returns 0 on success
 844 */
 845int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
 846				      struct drm_atomic_state *state)
 847{
 848	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 849									   plane);
 850	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
 851									   plane);
 852	struct drm_crtc_state *crtc_state = NULL;
 853	struct drm_framebuffer *new_fb = new_state->fb;
 854	struct drm_framebuffer *old_fb = old_state->fb;
 855	int ret;
 856
 857	/*
 858	 * Ignore damage clips if the framebuffer attached to the plane's state
 859	 * has changed since the last plane update (page-flip). In this case, a
 860	 * full plane update should happen because uploads are done per-buffer.
 861	 */
 862	if (old_fb != new_fb)
 863		new_state->ignore_damage_clips = true;
 864
 865	if (new_state->crtc)
 866		crtc_state = drm_atomic_get_new_crtc_state(state,
 867							   new_state->crtc);
 868
 869	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
 870						  DRM_PLANE_NO_SCALING,
 871						  DRM_PLANE_NO_SCALING,
 872						  false, true);
 873	return ret;
 874}
 875
 876
 877/**
 878 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 879 *
 880 * @plane: cursor plane
 881 * @state: info on the new plane state
 882 *
 883 * This is a chance to fail if the new cursor state does not fit
 884 * our requirements.
 885 *
 886 * Returns 0 on success
 887 */
 888int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
 889				     struct drm_atomic_state *state)
 890{
 891	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 892									   plane);
 893	int ret = 0;
 894	struct drm_crtc_state *crtc_state = NULL;
 895	struct vmw_surface *surface = NULL;
 896	struct drm_framebuffer *fb = new_state->fb;
 897
 898	if (new_state->crtc)
 899		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
 900							   new_state->crtc);
 901
 902	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
 903						  DRM_PLANE_NO_SCALING,
 904						  DRM_PLANE_NO_SCALING,
 905						  true, true);
 906	if (ret)
 907		return ret;
 908
 909	/* Turning off */
 910	if (!fb)
 911		return 0;
 912
 913	/* A lot of the code assumes this */
 914	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
 915		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
 916			  new_state->crtc_w, new_state->crtc_h);
 917		return -EINVAL;
 918	}
 919
 920	if (!vmw_framebuffer_to_vfb(fb)->bo) {
 921		surface = vmw_user_object_surface(&vmw_framebuffer_to_vfbs(fb)->uo);
 922
 923		WARN_ON(!surface);
 924
 925		if (!surface ||
 926		    (!surface->snooper.image && !surface->res.guest_memory_bo)) {
 927			DRM_ERROR("surface not suitable for cursor\n");
 928			return -EINVAL;
 929		}
 930	}
 931
 932	return 0;
 933}
 934
 935
 936int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
 937			     struct drm_atomic_state *state)
 938{
 939	struct vmw_private *vmw = vmw_priv(crtc->dev);
 940	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
 941									 crtc);
 942	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
 943	int connector_mask = drm_connector_mask(&du->connector);
 944	bool has_primary = new_state->plane_mask &
 945			   drm_plane_mask(crtc->primary);
 946
 947	/*
 948	 * This is fine in general, but broken userspace might expect
 949	 * some actual rendering, so give a clue as to why it's blank.
 950	 */
 951	if (new_state->enable && !has_primary)
 952		drm_dbg_driver(&vmw->drm,
 953			       "CRTC without a primary plane will be blank.\n");
 954
 955
 956	if (new_state->connector_mask != connector_mask &&
 957	    new_state->connector_mask != 0) {
 958		DRM_ERROR("Invalid connectors configuration\n");
 959		return -EINVAL;
 960	}
 961
 962	/*
 963	 * Our virtual device does not have a dot clock, so use the logical
 964	 * clock value as the dot clock.
 965	 */
 966	if (new_state->mode.crtc_clock == 0)
 967		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;
 968
 969	return 0;
 970}
 971
 972
 973void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
 974			      struct drm_atomic_state *state)
 975{
 976	vmw_vkms_crtc_atomic_begin(crtc, state);
 977}
 978
 979/**
 980 * vmw_du_crtc_duplicate_state - duplicate crtc state
 981 * @crtc: DRM crtc
 982 *
 983 * Allocates and returns a copy of the crtc state (both common and
 984 * vmw-specific) for the specified crtc.
 985 *
 986 * Returns: The newly allocated crtc state, or NULL on failure.
 987 */
 988struct drm_crtc_state *
 989vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
 990{
 991	struct drm_crtc_state *state;
 992	struct vmw_crtc_state *vcs;
 993
 994	if (WARN_ON(!crtc->state))
 995		return NULL;
 996
 997	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
 998
 999	if (!vcs)
1000		return NULL;
1001
1002	state = &vcs->base;
1003
1004	__drm_atomic_helper_crtc_duplicate_state(crtc, state);
1005
1006	return state;
1007}
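
/*
 * This is the usual DRM pattern for subclassed atomic state:
 * struct vmw_crtc_state embeds drm_crtc_state as its base member, so
 * kmemdup() of the wrapper copies both parts and
 * __drm_atomic_helper_crtc_duplicate_state() then fixes up the reference
 * counts held through the embedded base.
 */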
1008
1009
1010/**
1011 * vmw_du_crtc_reset - creates a blank vmw crtc state
1012 * @crtc: DRM crtc
1013 *
1014 * Resets the atomic state for @crtc by freeing the state pointer (which
1015 * might be NULL, e.g. at driver load time) and allocating a new empty state
1016 * object.
1017 */
1018void vmw_du_crtc_reset(struct drm_crtc *crtc)
1019{
1020	struct vmw_crtc_state *vcs;
1021
1022
1023	if (crtc->state) {
1024		__drm_atomic_helper_crtc_destroy_state(crtc->state);
1025
1026		kfree(vmw_crtc_state_to_vcs(crtc->state));
1027	}
1028
1029	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1030
1031	if (!vcs) {
1032		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
1033		return;
1034	}
1035
1036	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
1037}
1038
1039
1040/**
1041 * vmw_du_crtc_destroy_state - destroy crtc state
1042 * @crtc: DRM crtc
1043 * @state: state object to destroy
1044 *
1045 * Destroys the crtc state (both common and vmw-specific) for the
1046 * specified crtc.
1047 */
1048void
1049vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
1050			  struct drm_crtc_state *state)
1051{
1052	drm_atomic_helper_crtc_destroy_state(crtc, state);
1053}
1054
1055
1056/**
1057 * vmw_du_plane_duplicate_state - duplicate plane state
1058 * @plane: drm plane
1059 *
1060 * Allocates and returns a copy of the plane state (both common and
1061 * vmw-specific) for the specified plane.
1062 *
1063 * Returns: The newly allocated plane state, or NULL on failure.
1064 */
1065struct drm_plane_state *
1066vmw_du_plane_duplicate_state(struct drm_plane *plane)
1067{
1068	struct drm_plane_state *state;
1069	struct vmw_plane_state *vps;
1070
1071	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
1072
1073	if (!vps)
1074		return NULL;
1075
1076	vps->pinned = 0;
1077	vps->cpp = 0;
1078
1079	memset(&vps->cursor, 0, sizeof(vps->cursor));
1080
1081	/* Each ref counted resource needs to be acquired again */
1082	vmw_user_object_ref(&vps->uo);
1083	state = &vps->base;
1084
1085	__drm_atomic_helper_plane_duplicate_state(plane, state);
1086
1087	return state;
1088}
1089
1090
1091/**
1092 * vmw_du_plane_reset - creates a blank vmw plane state
1093 * @plane: drm plane
1094 *
1095 * Resets the atomic state for @plane by freeing the state pointer (which might
1096 * be NULL, e.g. at driver load time) and allocating a new empty state object.
1097 */
1098void vmw_du_plane_reset(struct drm_plane *plane)
1099{
1100	struct vmw_plane_state *vps;
1101
1102	if (plane->state)
1103		vmw_du_plane_destroy_state(plane, plane->state);
1104
1105	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
1106
1107	if (!vps) {
1108		DRM_ERROR("Cannot allocate vmw_plane_state\n");
1109		return;
1110	}
1111
1112	__drm_atomic_helper_plane_reset(plane, &vps->base);
1113}
1114
1115
1116/**
1117 * vmw_du_plane_destroy_state - destroy plane state
1118 * @plane: DRM plane
1119 * @state: state object to destroy
1120 *
1121 * Destroys the plane state (both common and vmw-specific) for the
1122 * specified plane.
1123 */
1124void
1125vmw_du_plane_destroy_state(struct drm_plane *plane,
1126			   struct drm_plane_state *state)
1127{
1128	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
1129
1130	/* Should have been freed by cleanup_fb */
1131	vmw_user_object_unref(&vps->uo);
1132
1133	drm_atomic_helper_plane_destroy_state(plane, state);
1134}
1135
1136
1137/**
1138 * vmw_du_connector_duplicate_state - duplicate connector state
1139 * @connector: DRM connector
1140 *
1141 * Allocates and returns a copy of the connector state (both common and
1142 * vmw-specific) for the specified connector.
1143 *
1144 * Returns: The newly allocated connector state, or NULL on failure.
1145 */
1146struct drm_connector_state *
1147vmw_du_connector_duplicate_state(struct drm_connector *connector)
1148{
1149	struct drm_connector_state *state;
1150	struct vmw_connector_state *vcs;
1151
1152	if (WARN_ON(!connector->state))
1153		return NULL;
1154
1155	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
1156
1157	if (!vcs)
1158		return NULL;
1159
1160	state = &vcs->base;
1161
1162	__drm_atomic_helper_connector_duplicate_state(connector, state);
1163
1164	return state;
1165}
1166
1167
1168/**
1169 * vmw_du_connector_reset - creates a blank vmw connector state
1170 * @connector: DRM connector
1171 *
1172 * Resets the atomic state for @connector by freeing the state pointer (which
1173 * might be NULL, e.g. at driver load time) and allocating a new empty state
1174 * object.
1175 */
1176void vmw_du_connector_reset(struct drm_connector *connector)
1177{
1178	struct vmw_connector_state *vcs;
1179
1180
1181	if (connector->state) {
1182		__drm_atomic_helper_connector_destroy_state(connector->state);
1183
1184		kfree(vmw_connector_state_to_vcs(connector->state));
1185	}
1186
1187	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1188
1189	if (!vcs) {
1190		DRM_ERROR("Cannot allocate vmw_connector_state\n");
1191		return;
1192	}
1193
1194	__drm_atomic_helper_connector_reset(connector, &vcs->base);
1195}
1196
1197
1198/**
1199 * vmw_du_connector_destroy_state - destroy connector state
1200 * @connector: DRM connector
1201 * @state: state object to destroy
1202 *
1203 * Destroys the connector state (both common and vmw-specific) for the
1204 * specified connector.
1205 */
1206void
1207vmw_du_connector_destroy_state(struct drm_connector *connector,
1208			  struct drm_connector_state *state)
1209{
1210	drm_atomic_helper_connector_destroy_state(connector, state);
1211}
1212/*
1213 * Generic framebuffer code
1214 */
1215
1216/*
1217 * Surface framebuffer code
1218 */
1219
1220static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
1221{
1222	struct vmw_framebuffer_surface *vfbs =
1223		vmw_framebuffer_to_vfbs(framebuffer);
1224
1225	drm_framebuffer_cleanup(framebuffer);
1226	vmw_user_object_unref(&vfbs->uo);
1227
1228	kfree(vfbs);
1229}
1230
1231/**
1232 * vmw_kms_readback - Perform a readback from the screen system to
1233 * a buffer-object backed framebuffer.
1234 *
1235 * @dev_priv: Pointer to the device private structure.
1236 * @file_priv: Pointer to a struct drm_file identifying the caller.
1237 * Must be set to NULL if @user_fence_rep is NULL.
1238 * @vfb: Pointer to the buffer-object backed framebuffer.
1239 * @user_fence_rep: User-space provided structure for fence information.
1240 * Must be set to non-NULL if @file_priv is non-NULL.
1241 * @vclips: Array of clip rects.
1242 * @num_clips: Number of clip rects in @vclips.
1243 *
1244 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
1245 * interrupted.
1246 */
1247int vmw_kms_readback(struct vmw_private *dev_priv,
1248		     struct drm_file *file_priv,
1249		     struct vmw_framebuffer *vfb,
1250		     struct drm_vmw_fence_rep __user *user_fence_rep,
1251		     struct drm_vmw_rect *vclips,
1252		     uint32_t num_clips)
1253{
1254	switch (dev_priv->active_display_unit) {
1255	case vmw_du_screen_object:
1256		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
1257					    user_fence_rep, vclips, num_clips,
1258					    NULL);
1259	case vmw_du_screen_target:
1260		return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
1261					     user_fence_rep, NULL, vclips, num_clips,
1262					     1, NULL);
1263	default:
1264		WARN_ONCE(true,
1265			  "Readback called with invalid display system.\n");
1266	}
1267
1268	return -ENOSYS;
1269}
1270
1271static int vmw_framebuffer_surface_create_handle(struct drm_framebuffer *fb,
1272						 struct drm_file *file_priv,
1273						 unsigned int *handle)
1274{
1275	struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(fb);
1276	struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
1277
1278	if (WARN_ON(!bo))
1279		return -EINVAL;
1280	return drm_gem_handle_create(file_priv, &bo->tbo.base, handle);
1281}
1282
1283static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
1284	.create_handle = vmw_framebuffer_surface_create_handle,
1285	.destroy = vmw_framebuffer_surface_destroy,
1286	.dirty = drm_atomic_helper_dirtyfb,
1287};
1288
1289static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
1290					   struct vmw_user_object *uo,
1291					   struct vmw_framebuffer **out,
1292					   const struct drm_mode_fb_cmd2
1293					   *mode_cmd)
1294
1295{
1296	struct drm_device *dev = &dev_priv->drm;
1297	struct vmw_framebuffer_surface *vfbs;
1298	struct vmw_surface *surface;
1299	int ret;
1300
1301	/* 3D is only supported on HWv8 and newer hosts */
1302	if (dev_priv->active_display_unit == vmw_du_legacy)
1303		return -ENOSYS;
1304
1305	surface = vmw_user_object_surface(uo);
1306
1307	/*
1308	 * Sanity checks.
1309	 */
1310
1311	if (!drm_any_plane_has_format(&dev_priv->drm,
1312				      mode_cmd->pixel_format,
1313				      mode_cmd->modifier[0])) {
1314		drm_dbg(&dev_priv->drm,
1315			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1316			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1317		return -EINVAL;
1318	}
1319
1320	/* Surface must be marked as a scanout. */
1321	if (unlikely(!surface->metadata.scanout))
1322		return -EINVAL;
1323
1324	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
1325		     surface->metadata.num_sizes != 1 ||
1326		     surface->metadata.base_size.width < mode_cmd->width ||
1327		     surface->metadata.base_size.height < mode_cmd->height ||
1328		     surface->metadata.base_size.depth != 1)) {
1329		DRM_ERROR("Incompatible surface dimensions "
1330			  "for requested mode.\n");
1331		return -EINVAL;
1332	}
1333
1334	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
1335	if (!vfbs) {
1336		ret = -ENOMEM;
1337		goto out_err1;
1338	}
1339
1340	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
1341	memcpy(&vfbs->uo, uo, sizeof(vfbs->uo));
1342	vmw_user_object_ref(&vfbs->uo);
1343
1344	*out = &vfbs->base;
1345
1346	ret = drm_framebuffer_init(dev, &vfbs->base.base,
1347				   &vmw_framebuffer_surface_funcs);
1348	if (ret)
1349		goto out_err2;
1350
1351	return 0;
1352
1353out_err2:
1354	vmw_user_object_unref(&vfbs->uo);
1355	kfree(vfbs);
1356out_err1:
1357	return ret;
1358}
1359
1360/*
1361 * Buffer-object framebuffer code
1362 */
1363
1364static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
1365					    struct drm_file *file_priv,
1366					    unsigned int *handle)
1367{
1368	struct vmw_framebuffer_bo *vfbd =
1369			vmw_framebuffer_to_vfbd(fb);
1370	return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
1371}
1372
1373static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
1374{
1375	struct vmw_framebuffer_bo *vfbd =
1376		vmw_framebuffer_to_vfbd(framebuffer);
1377
1378	drm_framebuffer_cleanup(framebuffer);
1379	vmw_bo_unreference(&vfbd->buffer);
1380
1381	kfree(vfbd);
1382}
1383
1384static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
1385	.create_handle = vmw_framebuffer_bo_create_handle,
1386	.destroy = vmw_framebuffer_bo_destroy,
1387	.dirty = drm_atomic_helper_dirtyfb,
1388};
1389
1390static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
1391				      struct vmw_bo *bo,
1392				      struct vmw_framebuffer **out,
1393				      const struct drm_mode_fb_cmd2
1394				      *mode_cmd)
1395
1396{
1397	struct drm_device *dev = &dev_priv->drm;
1398	struct vmw_framebuffer_bo *vfbd;
1399	unsigned int requested_size;
1400	int ret;
1401
1402	requested_size = mode_cmd->height * mode_cmd->pitches[0];
1403	if (unlikely(requested_size > bo->tbo.base.size)) {
1404		DRM_ERROR("Screen buffer object size is too small "
1405			  "for requested mode.\n");
1406		return -EINVAL;
1407	}
1408
1409	if (!drm_any_plane_has_format(&dev_priv->drm,
1410				      mode_cmd->pixel_format,
1411				      mode_cmd->modifier[0])) {
1412		drm_dbg(&dev_priv->drm,
1413			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1414			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1415		return -EINVAL;
1416	}
1417
1418	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
1419	if (!vfbd) {
1420		ret = -ENOMEM;
1421		goto out_err1;
1422	}
1423
1424	vfbd->base.base.obj[0] = &bo->tbo.base;
1425	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
1426	vfbd->base.bo = true;
1427	vfbd->buffer = vmw_bo_reference(bo);
1428	*out = &vfbd->base;
1429
1430	ret = drm_framebuffer_init(dev, &vfbd->base.base,
1431				   &vmw_framebuffer_bo_funcs);
1432	if (ret)
1433		goto out_err2;
1434
1435	return 0;
1436
1437out_err2:
1438	vmw_bo_unreference(&bo);
1439	kfree(vfbd);
1440out_err1:
1441	return ret;
1442}
1443
1444
1445/**
1446 * vmw_kms_srf_ok - check if a surface can be created
1447 *
1448 * @dev_priv: Pointer to device private struct.
1449 * @width: requested width
1450 * @height: requested height
1451 *
1452 * Surfaces need to be less than texture size
1453 */
1454static bool
1455vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
1456{
1457	if (width  > dev_priv->texture_max_width ||
1458	    height > dev_priv->texture_max_height)
1459		return false;
1460
1461	return true;
1462}
1463
1464/**
1465 * vmw_kms_new_framebuffer - Create a new framebuffer.
1466 *
1467 * @dev_priv: Pointer to device private struct.
1468 * @uo: Pointer to user object to wrap the kms framebuffer around.
1469 * Either the buffer or surface inside the user object must be NULL.
1470 * @mode_cmd: Frame-buffer metadata.
1471 */
1472struct vmw_framebuffer *
1473vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1474			struct vmw_user_object *uo,
1475			const struct drm_mode_fb_cmd2 *mode_cmd)
1476{
1477	struct vmw_framebuffer *vfb = NULL;
1478	int ret;
1479
1480	/* Create the new framebuffer depending on what we have */
1481	if (vmw_user_object_surface(uo)) {
1482		ret = vmw_kms_new_framebuffer_surface(dev_priv, uo, &vfb,
1483						      mode_cmd);
1484	} else if (uo->buffer) {
1485		ret = vmw_kms_new_framebuffer_bo(dev_priv, uo->buffer, &vfb,
1486						 mode_cmd);
1487	} else {
1488		BUG();
1489	}
1490
1491	if (ret)
1492		return ERR_PTR(ret);
1493
1494	return vfb;
1495}
1496
1497/*
1498 * Generic Kernel modesetting functions
1499 */
1500
1501static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1502						 struct drm_file *file_priv,
1503						 const struct drm_mode_fb_cmd2 *mode_cmd)
1504{
1505	struct vmw_private *dev_priv = vmw_priv(dev);
1506	struct vmw_framebuffer *vfb = NULL;
1507	struct vmw_user_object uo = {0};
1508	int ret;
1509
1510	/* returns either a bo or surface */
1511	ret = vmw_user_object_lookup(dev_priv, file_priv, mode_cmd->handles[0],
1512				     &uo);
1513	if (ret) {
1514		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
1515			  mode_cmd->handles[0], mode_cmd->handles[0]);
1516		goto err_out;
1517	}
1518
1519
1520	if (vmw_user_object_surface(&uo) &&
1521	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
1522		DRM_ERROR("Surface size cannot exceed %dx%d\n",
1523			dev_priv->texture_max_width,
1524			dev_priv->texture_max_height);
1525		ret = -EINVAL;
1526		goto err_out;
1527	}
1528
1529
1530	vfb = vmw_kms_new_framebuffer(dev_priv, &uo, mode_cmd);
1531	if (IS_ERR(vfb)) {
1532		ret = PTR_ERR(vfb);
1533		goto err_out;
1534	}
1535
1536err_out:
1537	/* vmw_user_object_lookup takes one ref; so does the new framebuffer */
1538	vmw_user_object_unref(&uo);
1539
1540	if (ret) {
1541		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
1542		return ERR_PTR(ret);
1543	}
1544
1545	return &vfb->base;
1546}
1547
1548/**
1549 * vmw_kms_check_display_memory - Validates display memory required for a
1550 * topology
1551 * @dev: DRM device
1552 * @num_rects: number of drm_rect in rects
1553 * @rects: array of drm_rect representing the topology to validate indexed by
1554 * crtc index.
1555 *
1556 * Returns:
1557 * 0 on success otherwise negative error code
1558 */
1559static int vmw_kms_check_display_memory(struct drm_device *dev,
1560					uint32_t num_rects,
1561					struct drm_rect *rects)
1562{
1563	struct vmw_private *dev_priv = vmw_priv(dev);
1564	struct drm_rect bounding_box = {0};
1565	u64 total_pixels = 0, pixel_mem, bb_mem;
1566	int i;
1567
1568	for (i = 0; i < num_rects; i++) {
1569		/*
1570		 * For STDU only individual screen (screen target) is limited by
1571		 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
1572		 */
1573		if (dev_priv->active_display_unit == vmw_du_screen_target &&
1574		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
1575		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
1576			VMW_DEBUG_KMS("Screen size not supported.\n");
1577			return -EINVAL;
1578		}
1579
1580		/* Bounding box upper left is at (0,0). */
1581		if (rects[i].x2 > bounding_box.x2)
1582			bounding_box.x2 = rects[i].x2;
1583
1584		if (rects[i].y2 > bounding_box.y2)
1585			bounding_box.y2 = rects[i].y2;
1586
1587		total_pixels += (u64) drm_rect_width(&rects[i]) *
1588			(u64) drm_rect_height(&rects[i]);
1589	}
1590
1591	/* Virtual svga device primary limits are always in 32-bpp. */
1592	pixel_mem = total_pixels * 4;
1593
1594	/*
1595	 * For HV10 and below, prim_bb_mem is the VRAM size. When
1596	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the VRAM
1597	 * size is the limit on the primary bounding box.
1598	 */
1599	if (pixel_mem > dev_priv->max_primary_mem) {
1600		VMW_DEBUG_KMS("Combined output size too large.\n");
1601		return -EINVAL;
1602	}
1603
1604	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
1605	if (dev_priv->active_display_unit != vmw_du_screen_target ||
1606	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
1607		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
1608
1609		if (bb_mem > dev_priv->max_primary_mem) {
1610			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
1611			return -EINVAL;
1612		}
1613	}
1614
1615	return 0;
1616}
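
/*
 * Worked example (illustrative numbers only): two 1920x1080 outputs side by
 * side give total_pixels = 2 * 1920 * 1080, so pixel_mem is roughly
 * 15.8 MiB at the assumed 32bpp; the 3840x1080 bounding box costs about the
 * same. Both must fit in max_primary_mem unless SVGA_CAP_NO_BB_RESTRICTION
 * lifts the bounding-box check for screen targets.
 */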
1617
1618/**
1619 * vmw_crtc_state_and_lock - Return new or current crtc state with locked
1620 * crtc mutex
1621 * @state: The atomic state pointer containing the new atomic state
1622 * @crtc: The crtc
1623 *
1624 * This function returns the new crtc state if it's part of the state update.
1625 * Otherwise returns the current crtc state. It also makes sure that the
1626 * crtc mutex is locked.
1627 *
1628 * Returns: A valid crtc state pointer or NULL. It may also return a
1629 * pointer error, in particular -EDEADLK if locking needs to be rerun.
1630 */
1631static struct drm_crtc_state *
1632vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
1633{
1634	struct drm_crtc_state *crtc_state;
1635
1636	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1637	if (crtc_state) {
1638		lockdep_assert_held(&crtc->mutex.mutex.base);
1639	} else {
1640		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
1641
1642		if (ret != 0 && ret != -EALREADY)
1643			return ERR_PTR(ret);
1644
1645		crtc_state = crtc->state;
1646	}
1647
1648	return crtc_state;
1649}
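
/*
 * Returning ERR_PTR(-EDEADLK) here is the normal w/w locking backoff
 * signal: the atomic core is expected to drop all acquired modeset locks
 * and retry the whole check instead of deadlocking against a concurrent
 * committer.
 */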
1650
1651/**
1652 * vmw_kms_check_implicit - Verify that all implicit display units scan out
1653 * from the same fb after the new state is committed.
1654 * @dev: The drm_device.
1655 * @state: The new state to be checked.
1656 *
1657 * Returns:
1658 *   Zero on success,
1659 *   -EINVAL on invalid state,
1660 *   -EDEADLK if modeset locking needs to be rerun.
1661 */
1662static int vmw_kms_check_implicit(struct drm_device *dev,
1663				  struct drm_atomic_state *state)
1664{
1665	struct drm_framebuffer *implicit_fb = NULL;
1666	struct drm_crtc *crtc;
1667	struct drm_crtc_state *crtc_state;
1668	struct drm_plane_state *plane_state;
1669
1670	drm_for_each_crtc(crtc, dev) {
1671		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1672
1673		if (!du->is_implicit)
1674			continue;
1675
1676		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1677		if (IS_ERR(crtc_state))
1678			return PTR_ERR(crtc_state);
1679
1680		if (!crtc_state || !crtc_state->enable)
1681			continue;
1682
1683		/*
1684		 * Can't move primary planes across crtcs, so this is OK.
1685		 * It also means we don't need to take the plane mutex.
1686		 */
1687		plane_state = du->primary.state;
1688		if (plane_state->crtc != crtc)
1689			continue;
1690
1691		if (!implicit_fb)
1692			implicit_fb = plane_state->fb;
1693		else if (implicit_fb != plane_state->fb)
1694			return -EINVAL;
1695	}
1696
1697	return 0;
1698}
1699
1700/**
1701 * vmw_kms_check_topology - Validates topology in drm_atomic_state
1702 * @dev: DRM device
1703 * @state: the driver state object
1704 *
1705 * Returns:
1706 * 0 on success otherwise negative error code
1707 */
1708static int vmw_kms_check_topology(struct drm_device *dev,
1709				  struct drm_atomic_state *state)
1710{
1711	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1712	struct drm_rect *rects;
1713	struct drm_crtc *crtc;
1714	uint32_t i;
1715	int ret = 0;
1716
1717	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
1718			GFP_KERNEL);
1719	if (!rects)
1720		return -ENOMEM;
1721
1722	drm_for_each_crtc(crtc, dev) {
1723		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1724		struct drm_crtc_state *crtc_state;
1725
1726		i = drm_crtc_index(crtc);
1727
1728		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1729		if (IS_ERR(crtc_state)) {
1730			ret = PTR_ERR(crtc_state);
1731			goto clean;
1732		}
1733
1734		if (!crtc_state)
1735			continue;
1736
1737		if (crtc_state->enable) {
1738			rects[i].x1 = du->gui_x;
1739			rects[i].y1 = du->gui_y;
1740			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
1741			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
1742		} else {
1743			rects[i].x1 = 0;
1744			rects[i].y1 = 0;
1745			rects[i].x2 = 0;
1746			rects[i].y2 = 0;
1747		}
1748	}
1749
1750	/* Determine change to topology due to new atomic state */
1751	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
1752				      new_crtc_state, i) {
1753		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1754		struct drm_connector *connector;
1755		struct drm_connector_state *conn_state;
1756		struct vmw_connector_state *vmw_conn_state;
1757
1758		if (!du->pref_active && new_crtc_state->enable) {
1759			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
1760			ret = -EINVAL;
1761			goto clean;
1762		}
1763
1764		/*
1765		 * For vmwgfx each crtc has only one connector attached and it
1766		 * is not changed, so we don't really need to check the
1767		 * crtc->connector_mask and iterate over it.
1768		 */
1769		connector = &du->connector;
1770		conn_state = drm_atomic_get_connector_state(state, connector);
1771		if (IS_ERR(conn_state)) {
1772			ret = PTR_ERR(conn_state);
1773			goto clean;
1774		}
1775
1776		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
1777		vmw_conn_state->gui_x = du->gui_x;
1778		vmw_conn_state->gui_y = du->gui_y;
1779	}
1780
1781	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
1782					   rects);
1783
1784clean:
1785	kfree(rects);
1786	return ret;
1787}
1788
1789/**
1790 * vmw_kms_atomic_check_modeset- validate state object for modeset changes
1791 *
1792 * @dev: DRM device
1793 * @state: the driver state object
1794 *
1795 * This is a simple wrapper around drm_atomic_helper_check_modeset() for
1796 * us to assign a value to mode->crtc_clock so that
1797 * drm_calc_timestamping_constants() won't throw an error message
1798 *
1799 * Returns:
1800 * Zero for success or -errno
1801 */
1802static int
1803vmw_kms_atomic_check_modeset(struct drm_device *dev,
1804			     struct drm_atomic_state *state)
1805{
1806	struct drm_crtc *crtc;
1807	struct drm_crtc_state *crtc_state;
1808	bool need_modeset = false;
1809	int i, ret;
1810
1811	ret = drm_atomic_helper_check(dev, state);
1812	if (ret)
1813		return ret;
1814
1815	ret = vmw_kms_check_implicit(dev, state);
1816	if (ret) {
1817		VMW_DEBUG_KMS("Invalid implicit state\n");
1818		return ret;
1819	}
1820
1821	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1822		if (drm_atomic_crtc_needs_modeset(crtc_state))
1823			need_modeset = true;
1824	}
1825
1826	if (need_modeset)
1827		return vmw_kms_check_topology(dev, state);
1828
1829	return ret;
1830}
1831
1832static const struct drm_mode_config_funcs vmw_kms_funcs = {
1833	.fb_create = vmw_kms_fb_create,
1834	.atomic_check = vmw_kms_atomic_check_modeset,
1835	.atomic_commit = drm_atomic_helper_commit,
1836};
1837
1838static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1839				   struct drm_file *file_priv,
1840				   struct vmw_framebuffer *vfb,
1841				   struct vmw_surface *surface,
1842				   uint32_t sid,
1843				   int32_t destX, int32_t destY,
1844				   struct drm_vmw_rect *clips,
1845				   uint32_t num_clips)
1846{
1847	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1848					    &surface->res, destX, destY,
1849					    num_clips, 1, NULL, NULL);
1850}
1851
1852
1853int vmw_kms_present(struct vmw_private *dev_priv,
1854		    struct drm_file *file_priv,
1855		    struct vmw_framebuffer *vfb,
1856		    struct vmw_surface *surface,
1857		    uint32_t sid,
1858		    int32_t destX, int32_t destY,
1859		    struct drm_vmw_rect *clips,
1860		    uint32_t num_clips)
1861{
1862	int ret;
1863
1864	switch (dev_priv->active_display_unit) {
1865	case vmw_du_screen_target:
1866		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
1867						 &surface->res, destX, destY,
1868						 num_clips, 1, NULL, NULL);
1869		break;
1870	case vmw_du_screen_object:
1871		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
1872					      sid, destX, destY, clips,
1873					      num_clips);
1874		break;
1875	default:
1876		WARN_ONCE(true,
1877			  "Present called with invalid display system.\n");
1878		ret = -ENOSYS;
1879		break;
1880	}
1881	if (ret)
1882		return ret;
1883
1884	vmw_cmd_flush(dev_priv, false);
1885
1886	return 0;
1887}
1888
1889static void
1890vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
1891{
1892	if (dev_priv->hotplug_mode_update_property)
1893		return;
1894
1895	dev_priv->hotplug_mode_update_property =
1896		drm_property_create_range(&dev_priv->drm,
1897					  DRM_MODE_PROP_IMMUTABLE,
1898					  "hotplug_mode_update", 0, 1);
1899}
1900
1901static void
1902vmw_atomic_commit_tail(struct drm_atomic_state *old_state)
1903{
1904	struct vmw_private *vmw = vmw_priv(old_state->dev);
1905	struct drm_crtc *crtc;
1906	struct drm_crtc_state *old_crtc_state;
1907	int i;
1908
1909	drm_atomic_helper_commit_tail(old_state);
1910
1911	if (vmw->vkms_enabled) {
1912		for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
1913			struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1914			(void)old_crtc_state;
1915			flush_work(&du->vkms.crc_generator_work);
1916		}
1917	}
1918}
1919
1920static const struct drm_mode_config_helper_funcs vmw_mode_config_helpers = {
1921	.atomic_commit_tail = vmw_atomic_commit_tail,
1922};
1923
1924int vmw_kms_init(struct vmw_private *dev_priv)
1925{
1926	struct drm_device *dev = &dev_priv->drm;
1927	int ret;
1928	static const char *display_unit_names[] = {
1929		"Invalid",
1930		"Legacy",
1931		"Screen Object",
1932		"Screen Target",
1933		"Invalid (max)"
1934	};
1935
1936	drm_mode_config_init(dev);
1937	dev->mode_config.funcs = &vmw_kms_funcs;
1938	dev->mode_config.min_width = 1;
1939	dev->mode_config.min_height = 1;
1940	dev->mode_config.max_width = dev_priv->texture_max_width;
1941	dev->mode_config.max_height = dev_priv->texture_max_height;
1942	dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
1943	dev->mode_config.helper_private = &vmw_mode_config_helpers;
1944
1945	drm_mode_create_suggested_offset_properties(dev);
1946	vmw_kms_create_hotplug_mode_update_property(dev_priv);
1947
1948	ret = vmw_kms_stdu_init_display(dev_priv);
1949	if (ret) {
1950		ret = vmw_kms_sou_init_display(dev_priv);
1951		if (ret) /* Fallback */
1952			ret = vmw_kms_ldu_init_display(dev_priv);
1953	}
1954	BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
1955	drm_info(&dev_priv->drm, "%s display unit initialized\n",
1956		 display_unit_names[dev_priv->active_display_unit]);
1957
1958	return ret;
1959}
1960
1961int vmw_kms_close(struct vmw_private *dev_priv)
1962{
1963	int ret = 0;
1964
1965	/*
1966	 * Docs say we should take the lock before calling this function,
1967	 * but since it destroys encoders and our destructor calls
1968	 * drm_encoder_cleanup(), which takes the lock, we would deadlock.
1969	 */
1970	drm_mode_config_cleanup(&dev_priv->drm);
1971	if (dev_priv->active_display_unit == vmw_du_legacy)
1972		ret = vmw_kms_ldu_close_display(dev_priv);
1973
1974	return ret;
1975}
1976
1977int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
1978				struct drm_file *file_priv)
1979{
1980	struct drm_vmw_cursor_bypass_arg *arg = data;
1981	struct vmw_display_unit *du;
1982	struct drm_crtc *crtc;
1983	int ret = 0;
1984
1985	mutex_lock(&dev->mode_config.mutex);
1986	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
1987
1988		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1989			du = vmw_crtc_to_du(crtc);
1990			du->hotspot_x = arg->xhot;
1991			du->hotspot_y = arg->yhot;
1992		}
1993
1994		mutex_unlock(&dev->mode_config.mutex);
1995		return 0;
1996	}
1997
1998	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
1999	if (!crtc) {
2000		ret = -ENOENT;
2001		goto out;
2002	}
2003
2004	du = vmw_crtc_to_du(crtc);
2005
2006	du->hotspot_x = arg->xhot;
2007	du->hotspot_y = arg->yhot;
2008
2009out:
2010	mutex_unlock(&dev->mode_config.mutex);
2011
2012	return ret;
2013}
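
/*
 * Illustrative sketch (not part of the driver): how user space might set
 * the cursor hotspot on every CRTC through this ioctl. The fd and error
 * handling are assumed; the struct and flag come from vmwgfx_drm.h.
 *
 *	struct drm_vmw_cursor_bypass_arg arg = {
 *		.flags = DRM_VMW_CURSOR_BYPASS_ALL,
 *		.xhot = 8,	// hotspot 8,8 pixels into the image
 *		.yhot = 8,
 *	};
 *	drmCommandWrite(fd, DRM_VMW_CURSOR_BYPASS, &arg, sizeof(arg));
 */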
2014
2015int vmw_kms_write_svga(struct vmw_private *vmw_priv,
2016			unsigned width, unsigned height, unsigned pitch,
2017			unsigned bpp, unsigned depth)
2018{
2019	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
2020		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
2021	else if (vmw_fifo_have_pitchlock(vmw_priv))
2022		vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
2023	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
2024	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
2025	if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
2026		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
2027
2028	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
2029		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
2030			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
2031		return -EINVAL;
2032	}
2033
2034	return 0;
2035}
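
/*
 * Worked example (illustrative, values assumed): programming a
 * 1024x768, 32 bpp, depth-24 mode writes a pitch of 1024 * 4 = 4096
 * bytes to the pitchlock register and fails with -EINVAL if the host
 * reports a different depth:
 *
 *	ret = vmw_kms_write_svga(vmw_priv, 1024, 768, 4096, 32, 24);
 */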
2036
2037static
2038bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
2039				u64 pitch,
2040				u64 height)
2041{
2042	return (pitch * height) < (u64)dev_priv->vram_size;
2043}
2044
2045/**
2046 * vmw_du_update_layout - Update the display unit with topology from resolution
2047 * plugin and generate DRM uevent
2048 * @dev_priv: device private
2049 * @num_rects: number of drm_rect in rects
2050 * @rects: topology to update
2051 */
2052static int vmw_du_update_layout(struct vmw_private *dev_priv,
2053				unsigned int num_rects, struct drm_rect *rects)
2054{
2055	struct drm_device *dev = &dev_priv->drm;
2056	struct vmw_display_unit *du;
2057	struct drm_connector *con;
2058	struct drm_connector_list_iter conn_iter;
2059	struct drm_modeset_acquire_ctx ctx;
2060	struct drm_crtc *crtc;
2061	int ret;
2062
2063	/* Currently gui_x/y is protected with the crtc mutex */
2064	mutex_lock(&dev->mode_config.mutex);
2065	drm_modeset_acquire_init(&ctx, 0);
2066retry:
2067	drm_for_each_crtc(crtc, dev) {
2068		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2069		if (ret < 0) {
2070			if (ret == -EDEADLK) {
2071				drm_modeset_backoff(&ctx);
2072				goto retry;
2073			}
2074			goto out_fini;
2075		}
2076	}
2077
2078	drm_connector_list_iter_begin(dev, &conn_iter);
2079	drm_for_each_connector_iter(con, &conn_iter) {
2080		du = vmw_connector_to_du(con);
2081		if (num_rects > du->unit) {
2082			du->pref_width = drm_rect_width(&rects[du->unit]);
2083			du->pref_height = drm_rect_height(&rects[du->unit]);
2084			du->pref_active = true;
2085			du->gui_x = rects[du->unit].x1;
2086			du->gui_y = rects[du->unit].y1;
2087		} else {
2088			du->pref_width  = VMWGFX_MIN_INITIAL_WIDTH;
2089			du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
2090			du->pref_active = false;
2091			du->gui_x = 0;
2092			du->gui_y = 0;
2093		}
2094	}
2095	drm_connector_list_iter_end(&conn_iter);
2096
2097	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2098		du = vmw_connector_to_du(con);
2099		if (num_rects > du->unit) {
2100			drm_object_property_set_value
2101			  (&con->base, dev->mode_config.suggested_x_property,
2102			   du->gui_x);
2103			drm_object_property_set_value
2104			  (&con->base, dev->mode_config.suggested_y_property,
2105			   du->gui_y);
2106		} else {
2107			drm_object_property_set_value
2108			  (&con->base, dev->mode_config.suggested_x_property,
2109			   0);
2110			drm_object_property_set_value
2111			  (&con->base, dev->mode_config.suggested_y_property,
2112			   0);
2113		}
2114		con->status = vmw_du_connector_detect(con, true);
2115	}
2116out_fini:
2117	drm_modeset_drop_locks(&ctx);
2118	drm_modeset_acquire_fini(&ctx);
2119	mutex_unlock(&dev->mode_config.mutex);
2120
2121	drm_sysfs_hotplug_event(dev);
2122
2123	return 0;
2124}
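
/*
 * Illustrative sketch (assumption, not driver code): a two-head,
 * side-by-side topology in the form this function expects. Unit N
 * takes rects[N]; units beyond @num_rects are marked inactive and
 * fall back to the minimal initial size.
 *
 *	struct drm_rect rects[] = {
 *		{ .x1 = 0,    .y1 = 0, .x2 = 1024, .y2 = 768 },
 *		{ .x1 = 1024, .y1 = 0, .x2 = 2048, .y2 = 768 },
 *	};
 *	vmw_du_update_layout(dev_priv, ARRAY_SIZE(rects), rects);
 */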
2125
2126int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2127			  u16 *r, u16 *g, u16 *b,
2128			  uint32_t size,
2129			  struct drm_modeset_acquire_ctx *ctx)
2130{
2131	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2132	int i;
2133
2134	for (i = 0; i < size; i++) {
2135		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2136			  r[i], g[i], b[i]);
2137		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2138		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2139		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2140	}
2141
2142	return 0;
2143}
2144
2145int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2146{
2147	return 0;
2148}
2149
2150enum drm_connector_status
2151vmw_du_connector_detect(struct drm_connector *connector, bool force)
2152{
2153	uint32_t num_displays;
2154	struct drm_device *dev = connector->dev;
2155	struct vmw_private *dev_priv = vmw_priv(dev);
2156	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2157
2158	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2159
2160	return ((du->unit < num_displays &&
2161		 du->pref_active) ?
2162		connector_status_connected : connector_status_disconnected);
2163}
2164
2165/**
2166 * vmw_guess_mode_timing - Provide fake timings for a
2167 * 60Hz vrefresh mode.
2168 *
2169 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2170 * members filled in.
2171 */
2172void vmw_guess_mode_timing(struct drm_display_mode *mode)
2173{
2174	mode->hsync_start = mode->hdisplay + 50;
2175	mode->hsync_end = mode->hsync_start + 50;
2176	mode->htotal = mode->hsync_end + 50;
2177
2178	mode->vsync_start = mode->vdisplay + 50;
2179	mode->vsync_end = mode->vsync_start + 50;
2180	mode->vtotal = mode->vsync_end + 50;
2181
2182	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2183}
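
/*
 * Worked example: for a 1024x768 mode the fake timings above give
 * htotal = 1174 and vtotal = 918, so clock = 1174 * 918 / 100 * 6 =
 * 64662 kHz, i.e. a refresh rate of 64662000 / (1174 * 918) ~= 60 Hz,
 * matching the 60Hz vrefresh promised in the kernel-doc.
 */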
2184
2185
2186/**
2187 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2188 * @dev: drm device for the ioctl
2189 * @data: data pointer for the ioctl
2190 * @file_priv: drm file for the ioctl call
2191 *
2192 * Update the preferred topology of the display units as per the ioctl
2193 * request. The topology is expressed as an array of drm_vmw_rect,
2194 * e.g.
2195 * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2196 *
2197 * NOTE:
2198 * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0. Besides
2199 * the device limit on topology, x + w and y + h (lower right) cannot be greater
2200 * than INT_MAX, so a topology beyond these limits is rejected with an error.
2201 *
2202 * Returns:
2203 * Zero on success, negative errno on failure.
2204 */
2205int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2206				struct drm_file *file_priv)
2207{
2208	struct vmw_private *dev_priv = vmw_priv(dev);
2209	struct drm_mode_config *mode_config = &dev->mode_config;
2210	struct drm_vmw_update_layout_arg *arg =
2211		(struct drm_vmw_update_layout_arg *)data;
2212	const void __user *user_rects;
2213	struct drm_vmw_rect *rects;
2214	struct drm_rect *drm_rects;
2215	unsigned rects_size;
2216	int ret, i;
2217
2218	if (!arg->num_outputs) {
2219		struct drm_rect def_rect = {0, 0,
2220					    VMWGFX_MIN_INITIAL_WIDTH,
2221					    VMWGFX_MIN_INITIAL_HEIGHT};
2222		vmw_du_update_layout(dev_priv, 1, &def_rect);
2223		return 0;
2224	} else if (arg->num_outputs > VMWGFX_NUM_DISPLAY_UNITS) {
2225		return -E2BIG;
2226	}
2227
2228	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2229	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2230			GFP_KERNEL);
2231	if (unlikely(!rects))
2232		return -ENOMEM;
2233
2234	user_rects = (void __user *)(unsigned long)arg->rects;
2235	ret = copy_from_user(rects, user_rects, rects_size);
2236	if (unlikely(ret != 0)) {
2237		DRM_ERROR("Failed to get rects.\n");
2238		ret = -EFAULT;
2239		goto out_free;
2240	}
2241
2242	drm_rects = (struct drm_rect *)rects;
2243
2244	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2245	for (i = 0; i < arg->num_outputs; i++) {
2246		struct drm_vmw_rect curr_rect;
2247
2248		/* Verify user-space rects for overflow as the kernel uses drm_rect */
2249		if ((rects[i].x + rects[i].w > INT_MAX) ||
2250		    (rects[i].y + rects[i].h > INT_MAX)) {
2251			ret = -ERANGE;
2252			goto out_free;
2253		}
2254
2255		curr_rect = rects[i];
2256		drm_rects[i].x1 = curr_rect.x;
2257		drm_rects[i].y1 = curr_rect.y;
2258		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2259		drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2260
2261		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
2262			      drm_rects[i].x1, drm_rects[i].y1,
2263			      drm_rects[i].x2, drm_rects[i].y2);
2264
2265		/*
2266		 * Currently this check limits the topology to within
2267		 * mode_config->max_width/height (which really is the maximum
2268		 * texture size supported by the virtual device). The limit is
2269		 * here to accommodate window managers that create one big
2270		 * framebuffer covering the whole topology.
2271		 */
2272		if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
2273		    drm_rects[i].x2 > mode_config->max_width ||
2274		    drm_rects[i].y2 > mode_config->max_height) {
2275			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2276				      drm_rects[i].x1, drm_rects[i].y1,
2277				      drm_rects[i].x2, drm_rects[i].y2);
2278			ret = -EINVAL;
2279			goto out_free;
2280		}
2281	}
2282
2283	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2284
2285	if (ret == 0)
2286		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2287
2288out_free:
2289	kfree(rects);
2290	return ret;
2291}
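
/*
 * Illustrative sketch (not part of the driver): issuing
 * DRM_VMW_UPDATE_LAYOUT from user space for the two-output example in
 * the kernel-doc above. The fd is assumed; the structs come from
 * vmwgfx_drm.h, where @rects is a user pointer carried in a __u64.
 *
 *	struct drm_vmw_rect rects[2] = {
 *		{ .x = 0,   .y = 0, .w = 640, .h = 480 },
 *		{ .x = 640, .y = 0, .w = 800, .h = 600 },
 *	};
 *	struct drm_vmw_update_layout_arg arg = {
 *		.num_outputs = 2,
 *		.rects = (unsigned long)rects,
 *	};
 *	drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT, &arg, sizeof(arg));
 */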
2292
2293/**
2294 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2295 * on a set of cliprects and a set of display units.
2296 *
2297 * @dev_priv: Pointer to a device private structure.
2298 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2299 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
2300 * Cliprects are given in framebuffer coordinates.
2301 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2302 * be NULL. Cliprects are given in source coordinates.
2303 * @dest_x: X coordinate offset for the crtc / destination clip rects.
2304 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2305 * @num_clips: Number of cliprects in the @clips or @vclips array.
2306 * @increment: Integer with which to increment the clip counter when looping.
2307 * Used to skip a predetermined number of clip rects.
2308 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2309 */
2310int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2311			 struct vmw_framebuffer *framebuffer,
2312			 const struct drm_clip_rect *clips,
2313			 const struct drm_vmw_rect *vclips,
2314			 s32 dest_x, s32 dest_y,
2315			 int num_clips,
2316			 int increment,
2317			 struct vmw_kms_dirty *dirty)
2318{
2319	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2320	struct drm_crtc *crtc;
2321	u32 num_units = 0;
2322	u32 i, k;
2323
2324	dirty->dev_priv = dev_priv;
2325
2326	/* If crtc is passed, no need to iterate over other display units */
2327	if (dirty->crtc) {
2328		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2329	} else {
2330		list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
2331				    head) {
2332			struct drm_plane *plane = crtc->primary;
2333
2334			if (plane->state->fb == &framebuffer->base)
2335				units[num_units++] = vmw_crtc_to_du(crtc);
2336		}
2337	}
2338
2339	for (k = 0; k < num_units; k++) {
2340		struct vmw_display_unit *unit = units[k];
2341		s32 crtc_x = unit->crtc.x;
2342		s32 crtc_y = unit->crtc.y;
2343		s32 crtc_width = unit->crtc.mode.hdisplay;
2344		s32 crtc_height = unit->crtc.mode.vdisplay;
2345		const struct drm_clip_rect *clips_ptr = clips;
2346		const struct drm_vmw_rect *vclips_ptr = vclips;
2347
2348		dirty->unit = unit;
2349		if (dirty->fifo_reserve_size > 0) {
2350			dirty->cmd = VMW_CMD_RESERVE(dev_priv,
2351						      dirty->fifo_reserve_size);
2352			if (!dirty->cmd)
2353				return -ENOMEM;
2354
2355			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2356		}
2357		dirty->num_hits = 0;
2358		for (i = 0; i < num_clips; i++, clips_ptr += increment,
2359		       vclips_ptr += increment) {
2360			s32 clip_left;
2361			s32 clip_top;
2362
2363			/*
2364			 * Select clip array type. Note that integer type
2365			 * in @clips is unsigned short, whereas in @vclips
2366			 * it's 32-bit.
2367			 */
2368			if (clips) {
2369				dirty->fb_x = (s32) clips_ptr->x1;
2370				dirty->fb_y = (s32) clips_ptr->y1;
2371				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2372					crtc_x;
2373				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2374					crtc_y;
2375			} else {
2376				dirty->fb_x = vclips_ptr->x;
2377				dirty->fb_y = vclips_ptr->y;
2378				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2379					dest_x - crtc_x;
2380				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2381					dest_y - crtc_y;
2382			}
2383
2384			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2385			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2386
2387			/* Skip this clip if it's outside the crtc region */
2388			if (dirty->unit_x1 >= crtc_width ||
2389			    dirty->unit_y1 >= crtc_height ||
2390			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2391				continue;
2392
2393			/* Clip right and bottom to crtc limits */
2394			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2395					       crtc_width);
2396			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2397					       crtc_height);
2398
2399			/* Clip left and top to crtc limits */
2400			clip_left = min_t(s32, dirty->unit_x1, 0);
2401			clip_top = min_t(s32, dirty->unit_y1, 0);
2402			dirty->unit_x1 -= clip_left;
2403			dirty->unit_y1 -= clip_top;
2404			dirty->fb_x -= clip_left;
2405			dirty->fb_y -= clip_top;
2406
2407			dirty->clip(dirty);
2408		}
2409
2410		dirty->fifo_commit(dirty);
2411	}
2412
2413	return 0;
2414}
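
/*
 * Illustrative sketch (assumption, not driver code) of the closure
 * pattern vmw_kms_helper_dirty() expects; example_clip()/example_commit()
 * are hypothetical. Real callers build one device command per clip in
 * ->clip() and submit the reserved fifo space in ->fifo_commit().
 *
 *	static void example_clip(struct vmw_kms_dirty *dirty)
 *	{
 *		dirty->num_hits++;	// a command per rect would go here
 *	}
 *
 *	static void example_commit(struct vmw_kms_dirty *dirty)
 *	{
 *		vmw_cmd_commit(dirty->dev_priv, dirty->fifo_reserve_size);
 *	}
 *
 *	struct vmw_kms_dirty dirty = {
 *		.clip = example_clip,
 *		.fifo_commit = example_commit,
 *		.fifo_reserve_size = 0,	// nothing reserved in this sketch
 *	};
 *	vmw_kms_helper_dirty(dev_priv, vfb, clips, NULL, 0, 0,
 *			     num_clips, 1, &dirty);
 */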
2415
2416/**
2417 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
2418 * cleanup and fencing
2419 * @dev_priv: Pointer to the device-private struct
2420 * @file_priv: Pointer identifying the client when user-space fencing is used
2421 * @ctx: Pointer to the validation context
2422 * @out_fence: If non-NULL, returned refcounted fence-pointer
2423 * @user_fence_rep: If non-NULL, pointer to user-space address area
2424 * in which to copy user-space fence info
2425 */
2426void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2427				      struct drm_file *file_priv,
2428				      struct vmw_validation_context *ctx,
2429				      struct vmw_fence_obj **out_fence,
2430				      struct drm_vmw_fence_rep __user *
2431				      user_fence_rep)
2432{
2433	struct vmw_fence_obj *fence = NULL;
2434	uint32_t handle = 0;
2435	int ret = 0;
2436
2437	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2438	    out_fence)
2439		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2440						 file_priv ? &handle : NULL);
2441	vmw_validation_done(ctx, fence);
2442	if (file_priv)
2443		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2444					    ret, user_fence_rep, fence,
2445					    handle, -1);
2446	if (out_fence)
2447		*out_fence = fence;
2448	else
2449		vmw_fence_obj_unreference(&fence);
2450}
2451
2452/**
2453 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2454 * property.
2455 *
2456 * @dev_priv: Pointer to a device private struct.
2457 *
2458 * Sets up the implicit placement property unless it's already set up.
2459 */
2460void
2461vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
2462{
2463	if (dev_priv->implicit_placement_property)
2464		return;
2465
2466	dev_priv->implicit_placement_property =
2467		drm_property_create_range(&dev_priv->drm,
2468					  DRM_MODE_PROP_IMMUTABLE,
2469					  "implicit_placement", 0, 1);
2470}
2471
2472/**
2473 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2474 *
2475 * @dev: Pointer to the drm device
2476 * Return: 0 on success. Negative error code on failure.
2477 */
2478int vmw_kms_suspend(struct drm_device *dev)
2479{
2480	struct vmw_private *dev_priv = vmw_priv(dev);
2481
2482	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2483	if (IS_ERR(dev_priv->suspend_state)) {
2484		int ret = PTR_ERR(dev_priv->suspend_state);
2485
2486		DRM_ERROR("Failed kms suspend: %d\n", ret);
2487		dev_priv->suspend_state = NULL;
2488
2489		return ret;
2490	}
2491
2492	return 0;
2493}
2494
2495
2496/**
2497 * vmw_kms_resume - Re-enable modesetting and restore state
2498 *
2499 * @dev: Pointer to the drm device
2500 * Return: 0 on success. Negative error code on failure.
2501 *
2502 * State is resumed from a previous vmw_kms_suspend(). It's illegal
2503 * to call this function without a previous vmw_kms_suspend().
2504 */
2505int vmw_kms_resume(struct drm_device *dev)
2506{
2507	struct vmw_private *dev_priv = vmw_priv(dev);
2508	int ret;
2509
2510	if (WARN_ON(!dev_priv->suspend_state))
2511		return 0;
2512
2513	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
2514	dev_priv->suspend_state = NULL;
2515
2516	return ret;
2517}
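
/*
 * Illustrative pairing (assumed PM flow, not driver code): a suspend
 * must be matched by a resume, since vmw_kms_resume() replays the
 * state object saved by vmw_kms_suspend().
 *
 *	vmw_kms_suspend(&dev_priv->drm);	// freeze: atomic state saved
 *	// ... device powered down and back up ...
 *	vmw_kms_resume(&dev_priv->drm);		// restore: state committed again
 */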
2518
2519/**
2520 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
2521 *
2522 * @dev: Pointer to the drm device
2523 */
2524void vmw_kms_lost_device(struct drm_device *dev)
2525{
2526	drm_atomic_helper_shutdown(dev);
2527}
2528
2529/**
2530 * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
2531 * @update: The closure structure.
2532 *
2533 * Call this helper after setting the callbacks in &vmw_du_update_plane to do
2534 * a plane update on a display unit.
2535 *
2536 * Return: 0 on success or a negative error code on failure.
2537 */
2538int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
2539{
2540	struct drm_plane_state *state = update->plane->state;
2541	struct drm_plane_state *old_state = update->old_state;
2542	struct drm_atomic_helper_damage_iter iter;
2543	struct drm_rect clip;
2544	struct drm_rect bb;
2545	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
2546	uint32_t reserved_size = 0;
2547	uint32_t submit_size = 0;
2548	uint32_t curr_size = 0;
2549	uint32_t num_hits = 0;
2550	void *cmd_start;
2551	char *cmd_next;
2552	int ret;
2553
2554	/*
2555	 * Iterate in advance to check whether a plane update is really needed
2556	 * and to count the clips that fall within the plane src, for fifo allocation.
2557	 */
2558	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2559	drm_atomic_for_each_plane_damage(&iter, &clip)
2560		num_hits++;
2561
2562	if (num_hits == 0)
2563		return 0;
2564
2565	if (update->vfb->bo) {
2566		struct vmw_framebuffer_bo *vfbbo =
2567			container_of(update->vfb, typeof(*vfbbo), base);
2568
2569		/*
2570		 * For screen targets we want a mappable bo; for everything else we
2571		 * want an accelerated, i.e. host-backed (vram or gmr), bo. If the
2572		 * display unit is not a screen target then mobs shouldn't be available.
2573		 */
2574		if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
2575			vmw_bo_placement_set(vfbbo->buffer,
2576					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
2577					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
2578		} else {
2579			WARN_ON(update->dev_priv->has_mob);
2580			vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
2581		}
2582		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
2583	} else {
2584		struct vmw_framebuffer_surface *vfbs =
2585			container_of(update->vfb, typeof(*vfbs), base);
2586		struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo);
2587
2588		ret = vmw_validation_add_resource(&val_ctx, &surf->res,
2589						  0, VMW_RES_DIRTY_NONE, NULL,
2590						  NULL);
2591	}
2592
2593	if (ret)
2594		return ret;
2595
2596	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
2597	if (ret)
2598		goto out_unref;
2599
2600	reserved_size = update->calc_fifo_size(update, num_hits);
2601	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
2602	if (!cmd_start) {
2603		ret = -ENOMEM;
2604		goto out_revert;
2605	}
2606
2607	cmd_next = cmd_start;
2608
2609	if (update->post_prepare) {
2610		curr_size = update->post_prepare(update, cmd_next);
2611		cmd_next += curr_size;
2612		submit_size += curr_size;
2613	}
2614
2615	if (update->pre_clip) {
2616		curr_size = update->pre_clip(update, cmd_next, num_hits);
2617		cmd_next += curr_size;
2618		submit_size += curr_size;
2619	}
2620
2621	bb.x1 = INT_MAX;
2622	bb.y1 = INT_MAX;
2623	bb.x2 = INT_MIN;
2624	bb.y2 = INT_MIN;
2625
2626	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2627	drm_atomic_for_each_plane_damage(&iter, &clip) {
2628		uint32_t fb_x = clip.x1;
2629		uint32_t fb_y = clip.y1;
2630
2631		vmw_du_translate_to_crtc(state, &clip);
2632		if (update->clip) {
2633			curr_size = update->clip(update, cmd_next, &clip, fb_x,
2634						 fb_y);
2635			cmd_next += curr_size;
2636			submit_size += curr_size;
2637		}
2638		bb.x1 = min_t(int, bb.x1, clip.x1);
2639		bb.y1 = min_t(int, bb.y1, clip.y1);
2640		bb.x2 = max_t(int, bb.x2, clip.x2);
2641		bb.y2 = max_t(int, bb.y2, clip.y2);
2642	}
2643
2644	curr_size = update->post_clip(update, cmd_next, &bb);
2645	submit_size += curr_size;
2646
2647	if (reserved_size < submit_size)
2648		submit_size = 0;
2649
2650	vmw_cmd_commit(update->dev_priv, submit_size);
2651
2652	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
2653					 update->out_fence, NULL);
2654	return ret;
2655
2656out_revert:
2657	vmw_validation_revert(&val_ctx);
2658
2659out_unref:
2660	vmw_validation_unref_lists(&val_ctx);
2661	return ret;
2662}
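
/*
 * Illustrative sketch (assumption, not driver code): the minimum a
 * display unit fills in before calling vmw_du_helper_plane_update().
 * ->calc_fifo_size() and ->post_clip() are called unconditionally and
 * must be set; ->post_prepare(), ->pre_clip() and ->clip() are optional.
 * my_calc_fifo_size()/my_post_clip() are hypothetical callbacks.
 *
 *	struct vmw_du_update_plane update = {
 *		.plane = plane,
 *		.old_state = old_state,
 *		.dev_priv = dev_priv,
 *		.vfb = vmw_framebuffer_to_vfb(plane->state->fb),
 *		.mutex = NULL,		// or a mutex to hold while validating
 *		.intr = true,
 *		.calc_fifo_size = my_calc_fifo_size,
 *		.post_clip = my_post_clip,
 *	};
 *	int ret = vmw_du_helper_plane_update(&update);
 */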
2663
2664/**
2665 * vmw_connector_mode_valid - implements drm_connector_helper_funcs.mode_valid callback
2666 *
2667 * @connector: the drm connector, part of a DU container
2668 * @mode: drm mode to check
2669 *
2670 * Returns MODE_OK on success, or a drm_mode_status error code.
2671 */
2672enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
2673					      struct drm_display_mode *mode)
2674{
2675	enum drm_mode_status ret;
2676	struct drm_device *dev = connector->dev;
2677	struct vmw_private *dev_priv = vmw_priv(dev);
2678	u32 assumed_cpp = 4;
2679
2680	if (dev_priv->assume_16bpp)
2681		assumed_cpp = 2;
2682
2683	ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
2684				     dev_priv->texture_max_height);
2685	if (ret != MODE_OK)
2686		return ret;
2687
2688	if (!vmw_kms_validate_mode_vram(dev_priv,
2689					mode->hdisplay * assumed_cpp,
2690					mode->vdisplay))
2691		return MODE_MEM;
2692
2693	return MODE_OK;
2694}
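
/*
 * Worked example (illustrative): with the default assumed_cpp of 4, a
 * 2560x1440 mode needs 2560 * 4 * 1440 = 14745600 bytes (~14 MiB) of
 * VRAM to pass vmw_kms_validate_mode_vram(); otherwise the mode is
 * reported as MODE_MEM and filtered out of the connector's mode list.
 */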
2695
2696/**
2697 * vmw_connector_get_modes - implements drm_connector_helper_funcs.get_modes callback
2698 *
2699 * @connector: the drm connector, part of a DU container
2700 *
2701 * Returns the number of added modes.
2702 */
2703int vmw_connector_get_modes(struct drm_connector *connector)
2704{
2705	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2706	struct drm_device *dev = connector->dev;
2707	struct vmw_private *dev_priv = vmw_priv(dev);
2708	struct drm_display_mode *mode = NULL;
2709	struct drm_display_mode prefmode = { DRM_MODE("preferred",
2710		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2711		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2712		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2713	};
2714	u32 max_width;
2715	u32 max_height;
2716	u32 num_modes;
2717
2718	/* Add preferred mode */
2719	mode = drm_mode_duplicate(dev, &prefmode);
2720	if (!mode)
2721		return 0;
2722
2723	mode->hdisplay = du->pref_width;
2724	mode->vdisplay = du->pref_height;
2725	vmw_guess_mode_timing(mode);
2726	drm_mode_set_name(mode);
2727
2728	drm_mode_probed_add(connector, mode);
2729	drm_dbg_kms(dev, "preferred mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2730
2731	/* Probe connector for all modes not exceeding our geom limits */
2732	max_width  = dev_priv->texture_max_width;
2733	max_height = dev_priv->texture_max_height;
2734
2735	if (dev_priv->active_display_unit == vmw_du_screen_target) {
2736		max_width  = min(dev_priv->stdu_max_width,  max_width);
2737		max_height = min(dev_priv->stdu_max_height, max_height);
2738	}
2739
2740	num_modes = 1 + drm_add_modes_noedid(connector, max_width, max_height);
2741
2742	return num_modes;
2743}
2744
2745struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo)
2746{
2747	if (uo->buffer)
2748		vmw_user_bo_ref(uo->buffer);
2749	else if (uo->surface)
2750		vmw_surface_reference(uo->surface);
2751	return uo;
2752}
2753
2754void vmw_user_object_unref(struct vmw_user_object *uo)
2755{
2756	if (uo->buffer)
2757		vmw_user_bo_unref(&uo->buffer);
2758	else if (uo->surface)
2759		vmw_surface_unreference(&uo->surface);
2760}
2761
2762struct vmw_bo *
2763vmw_user_object_buffer(struct vmw_user_object *uo)
2764{
2765	if (uo->buffer)
2766		return uo->buffer;
2767	else if (uo->surface)
2768		return uo->surface->res.guest_memory_bo;
2769	return NULL;
2770}
2771
2772struct vmw_surface *
2773vmw_user_object_surface(struct vmw_user_object *uo)
2774{
2775	if (uo->buffer)
2776		return uo->buffer->dumb_surface;
2777	return uo->surface;
2778}
2779
2780void *vmw_user_object_map(struct vmw_user_object *uo)
2781{
2782	struct vmw_bo *bo = vmw_user_object_buffer(uo);
2783
2784	WARN_ON(!bo);
2785	return vmw_bo_map_and_cache(bo);
2786}
2787
2788void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size)
2789{
2790	struct vmw_bo *bo = vmw_user_object_buffer(uo);
2791
2792	WARN_ON(!bo);
2793	return vmw_bo_map_and_cache_size(bo, size);
2794}
2795
2796void vmw_user_object_unmap(struct vmw_user_object *uo)
2797{
2798	struct vmw_bo *bo = vmw_user_object_buffer(uo);
2799	int ret;
2800
2801	WARN_ON(!bo);
2802
2803	/* Fence the mob creation so we are guaranteed to have the mob */
2804	ret = ttm_bo_reserve(&bo->tbo, false, false, NULL);
2805	if (ret != 0)
2806		return;
2807
2808	vmw_bo_unmap(bo);
2809	vmw_bo_pin_reserved(bo, false);
2810
2811	ttm_bo_unreserve(&bo->tbo);
2812}
2813
2814bool vmw_user_object_is_mapped(struct vmw_user_object *uo)
2815{
2816	struct vmw_bo *bo;
2817
2818	if (!uo || vmw_user_object_is_null(uo))
2819		return false;
2820
2821	bo = vmw_user_object_buffer(uo);
2822
2823	if (WARN_ON(!bo))
2824		return false;
2825
2826	WARN_ON(bo->map.bo && !bo->map.virtual);
2827	return bo->map.virtual;
2828}
2829
2830bool vmw_user_object_is_null(struct vmw_user_object *uo)
2831{
2832	return !uo->buffer && !uo->surface;
2833}
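
/*
 * Illustrative lifecycle (assumption, not driver code) for the
 * vmw_user_object helpers above: take a reference, map the backing
 * buffer, use the mapping, then unmap and drop the reference.
 *
 *	struct vmw_user_object *uo = vmw_user_object_ref(&vfbs->uo);
 *	void *ptr = vmw_user_object_map(uo);
 *	if (ptr) {
 *		// read or write the backing store through ptr
 *		vmw_user_object_unmap(uo);
 *	}
 *	vmw_user_object_unref(uo);
 */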
v6.2
   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
 
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
 
  27
  28#include "vmwgfx_kms.h"
 
  29#include "vmw_surface_cache.h"
  30
  31#include <drm/drm_atomic.h>
  32#include <drm/drm_atomic_helper.h>
  33#include <drm/drm_damage_helper.h>
  34#include <drm/drm_fourcc.h>
  35#include <drm/drm_rect.h>
  36#include <drm/drm_sysfs.h>
 
 
 
 
 
 
  37
  38void vmw_du_cleanup(struct vmw_display_unit *du)
  39{
  40	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
 
 
  41	drm_plane_cleanup(&du->primary);
  42	if (vmw_cmd_supported(dev_priv))
  43		drm_plane_cleanup(&du->cursor.base);
  44
  45	drm_connector_unregister(&du->connector);
  46	drm_crtc_cleanup(&du->crtc);
  47	drm_encoder_cleanup(&du->encoder);
  48	drm_connector_cleanup(&du->connector);
  49}
  50
  51/*
  52 * Display Unit Cursor functions
  53 */
  54
  55static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
  56static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
  57				  struct vmw_plane_state *vps,
  58				  u32 *image, u32 width, u32 height,
  59				  u32 hotspotX, u32 hotspotY);
  60
  61struct vmw_svga_fifo_cmd_define_cursor {
  62	u32 cmd;
  63	SVGAFifoCmdDefineAlphaCursor cursor;
  64};
  65
  66/**
  67 * vmw_send_define_cursor_cmd - queue a define cursor command
  68 * @dev_priv: the private driver struct
  69 * @image: buffer which holds the cursor image
  70 * @width: width of the mouse cursor image
  71 * @height: height of the mouse cursor image
  72 * @hotspotX: the horizontal position of mouse hotspot
  73 * @hotspotY: the vertical position of mouse hotspot
  74 */
  75static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
  76				       u32 *image, u32 width, u32 height,
  77				       u32 hotspotX, u32 hotspotY)
  78{
  79	struct vmw_svga_fifo_cmd_define_cursor *cmd;
  80	const u32 image_size = width * height * sizeof(*image);
  81	const u32 cmd_size = sizeof(*cmd) + image_size;
  82
  83	/* Try to reserve fifocmd space and swallow any failures;
  84	   such reservations cannot be left unconsumed for long
  85	   under the risk of clogging other fifocmd users, so
  86	   we treat reservations separtely from the way we treat
  87	   other fallible KMS-atomic resources at prepare_fb */
  88	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
  89
  90	if (unlikely(!cmd))
  91		return;
  92
  93	memset(cmd, 0, sizeof(*cmd));
  94
  95	memcpy(&cmd[1], image, image_size);
  96
  97	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
  98	cmd->cursor.id = 0;
  99	cmd->cursor.width = width;
 100	cmd->cursor.height = height;
 101	cmd->cursor.hotspotX = hotspotX;
 102	cmd->cursor.hotspotY = hotspotY;
 103
 104	vmw_cmd_commit_flush(dev_priv, cmd_size);
 105}
 106
 107/**
 108 * vmw_cursor_update_image - update the cursor image on the provided plane
 109 * @dev_priv: the private driver struct
 110 * @vps: the plane state of the cursor plane
 111 * @image: buffer which holds the cursor image
 112 * @width: width of the mouse cursor image
 113 * @height: height of the mouse cursor image
 114 * @hotspotX: the horizontal position of mouse hotspot
 115 * @hotspotY: the vertical position of mouse hotspot
 116 */
 117static void vmw_cursor_update_image(struct vmw_private *dev_priv,
 118				    struct vmw_plane_state *vps,
 119				    u32 *image, u32 width, u32 height,
 120				    u32 hotspotX, u32 hotspotY)
 121{
 122	if (vps->cursor.bo)
 123		vmw_cursor_update_mob(dev_priv, vps, image,
 124				      vps->base.crtc_w, vps->base.crtc_h,
 125				      hotspotX, hotspotY);
 126
 127	else
 128		vmw_send_define_cursor_cmd(dev_priv, image, width, height,
 129					   hotspotX, hotspotY);
 130}
 131
 132
 133/**
 134 * vmw_cursor_update_mob - Update cursor vis CursorMob mechanism
 135 *
 136 * Called from inside vmw_du_cursor_plane_atomic_update to actually
 137 * make the cursor-image live.
 138 *
 139 * @dev_priv: device to work with
 140 * @vps: the plane state of the cursor plane
 141 * @image: cursor source data to fill the MOB with
 142 * @width: source data width
 143 * @height: source data height
 144 * @hotspotX: cursor hotspot x
 145 * @hotspotY: cursor hotspot Y
 146 */
 147static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
 148				  struct vmw_plane_state *vps,
 149				  u32 *image, u32 width, u32 height,
 150				  u32 hotspotX, u32 hotspotY)
 151{
 152	SVGAGBCursorHeader *header;
 153	SVGAGBAlphaCursorHeader *alpha_header;
 154	const u32 image_size = width * height * sizeof(*image);
 155	bool dummy;
 156
 157	header = ttm_kmap_obj_virtual(&vps->cursor.map, &dummy);
 158	alpha_header = &header->header.alphaHeader;
 159
 160	memset(header, 0, sizeof(*header));
 161
 162	header->type = SVGA_ALPHA_CURSOR;
 163	header->sizeInBytes = image_size;
 164
 165	alpha_header->hotspotX = hotspotX;
 166	alpha_header->hotspotY = hotspotY;
 167	alpha_header->width = width;
 168	alpha_header->height = height;
 169
 170	memcpy(header + 1, image, image_size);
 171	vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
 172		  vps->cursor.bo->resource->start);
 173}
 174
 175
 176static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
 177{
 178	return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
 179}
 180
 181/**
 182 * vmw_du_cursor_plane_acquire_image -- Acquire the image data
 183 * @vps: cursor plane state
 184 */
 185static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
 186{
 187	bool dummy;
 188	if (vps->surf) {
 189		if (vps->surf_mapped)
 190			return vmw_bo_map_and_cache(vps->surf->res.backup);
 191		return vps->surf->snooper.image;
 192	} else if (vps->bo)
 193		return ttm_kmap_obj_virtual(&vps->bo->map, &dummy);
 194	return NULL;
 
 
 195}
 196
 197static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
 198					    struct vmw_plane_state *new_vps)
 199{
 200	void *old_image;
 201	void *new_image;
 202	u32 size;
 203	bool changed;
 204
 205	if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
 206	    old_vps->base.crtc_h != new_vps->base.crtc_h)
 207	    return true;
 208
 209	if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
 210	    old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
 211	    return true;
 212
 213	size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
 214
 215	old_image = vmw_du_cursor_plane_acquire_image(old_vps);
 216	new_image = vmw_du_cursor_plane_acquire_image(new_vps);
 217
 218	changed = false;
 219	if (old_image && new_image)
 220		changed = memcmp(old_image, new_image, size) != 0;
 221
 222	return changed;
 223}
 224
 225static void vmw_du_destroy_cursor_mob(struct ttm_buffer_object **bo)
 226{
 227	if (!(*bo))
 228		return;
 229
 230	ttm_bo_unpin(*bo);
 231	ttm_bo_put(*bo);
 232	kfree(*bo);
 233	*bo = NULL;
 234}
 235
 236static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
 237				  struct vmw_plane_state *vps)
 238{
 239	u32 i;
 240
 241	if (!vps->cursor.bo)
 242		return;
 243
 244	vmw_du_cursor_plane_unmap_cm(vps);
 245
 246	/* Look for a free slot to return this mob to the cache. */
 247	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
 248		if (!vcp->cursor_mobs[i]) {
 249			vcp->cursor_mobs[i] = vps->cursor.bo;
 250			vps->cursor.bo = NULL;
 251			return;
 252		}
 253	}
 254
 255	/* Cache is full: See if this mob is bigger than an existing mob. */
 256	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
 257		if (vcp->cursor_mobs[i]->base.size <
 258		    vps->cursor.bo->base.size) {
 259			vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
 260			vcp->cursor_mobs[i] = vps->cursor.bo;
 261			vps->cursor.bo = NULL;
 262			return;
 263		}
 264	}
 265
 266	/* Destroy it if it's not worth caching. */
 267	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
 268}
 269
 270static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
 271				 struct vmw_plane_state *vps)
 272{
 273	struct vmw_private *dev_priv = vcp->base.dev->dev_private;
 274	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
 275	u32 i;
 276	u32 cursor_max_dim, mob_max_size;
 
 277	int ret;
 278
 279	if (!dev_priv->has_mob ||
 280	    (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
 281		return -EINVAL;
 282
 283	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
 284	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
 285
 286	if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
 287	    vps->base.crtc_h > cursor_max_dim)
 288		return -EINVAL;
 289
 290	if (vps->cursor.bo) {
 291		if (vps->cursor.bo->base.size >= size)
 292			return 0;
 293		vmw_du_put_cursor_mob(vcp, vps);
 294	}
 295
 296	/* Look for an unused mob in the cache. */
 297	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
 298		if (vcp->cursor_mobs[i] &&
 299		    vcp->cursor_mobs[i]->base.size >= size) {
 300			vps->cursor.bo = vcp->cursor_mobs[i];
 301			vcp->cursor_mobs[i] = NULL;
 302			return 0;
 303		}
 304	}
 305	/* Create a new mob if we can't find an existing one. */
 306	ret = vmw_bo_create_kernel(dev_priv, size, &vmw_mob_placement,
 307				   &vps->cursor.bo);
 
 308
 309	if (ret != 0)
 310		return ret;
 311
 312	/* Fence the mob creation so we are guarateed to have the mob */
 313	ret = ttm_bo_reserve(vps->cursor.bo, false, false, NULL);
 314	if (ret != 0)
 315		goto teardown;
 316
 317	vmw_bo_fence_single(vps->cursor.bo, NULL);
 318	ttm_bo_unreserve(vps->cursor.bo);
 
 
 
 
 
 
 
 
 319	return 0;
 320
 321teardown:
 322	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
 323	return ret;
 324}
 325
 326
 327static void vmw_cursor_update_position(struct vmw_private *dev_priv,
 328				       bool show, int x, int y)
 329{
 330	const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
 331					     : SVGA_CURSOR_ON_HIDE;
 332	uint32_t count;
 333
 334	spin_lock(&dev_priv->cursor_lock);
 335	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
 336		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
 337		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
 338		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
 339		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
 340		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
 341	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
 342		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
 343		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
 344		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
 345		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
 346		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
 347	} else {
 348		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
 349		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
 350		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
 351	}
 352	spin_unlock(&dev_priv->cursor_lock);
 353}
 354
 355void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 356			  struct ttm_object_file *tfile,
 357			  struct ttm_buffer_object *bo,
 358			  SVGA3dCmdHeader *header)
 359{
 360	struct ttm_bo_kmap_obj map;
 361	unsigned long kmap_offset;
 362	unsigned long kmap_num;
 363	SVGA3dCopyBox *box;
 364	unsigned box_count;
 365	void *virtual;
 366	bool dummy;
 367	struct vmw_dma_cmd {
 368		SVGA3dCmdHeader header;
 369		SVGA3dCmdSurfaceDMA dma;
 370	} *cmd;
 371	int i, ret;
 372	const struct SVGA3dSurfaceDesc *desc =
 373		vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
 374	const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
 375
 376	cmd = container_of(header, struct vmw_dma_cmd, header);
 377
 378	/* No snooper installed, nothing to copy */
 379	if (!srf->snooper.image)
 380		return;
 381
 382	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
 383		DRM_ERROR("face and mipmap for cursors should never != 0\n");
 384		return;
 385	}
 386
 387	if (cmd->header.size < 64) {
 388		DRM_ERROR("at least one full copy box must be given\n");
 389		return;
 390	}
 391
 392	box = (SVGA3dCopyBox *)&cmd[1];
 393	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
 394			sizeof(SVGA3dCopyBox);
 395
 396	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
 397	    box->x != 0    || box->y != 0    || box->z != 0    ||
 398	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
 399	    box->d != 1    || box_count != 1 ||
 400	    box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
 401		/* TODO handle none page aligned offsets */
 402		/* TODO handle more dst & src != 0 */
 403		/* TODO handle more then one copy */
 404		DRM_ERROR("Can't snoop dma request for cursor!\n");
 405		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
 406			  box->srcx, box->srcy, box->srcz,
 407			  box->x, box->y, box->z,
 408			  box->w, box->h, box->d, box_count,
 409			  cmd->dma.guest.ptr.offset);
 410		return;
 411	}
 412
 413	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
 414	kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
 415
 416	ret = ttm_bo_reserve(bo, true, false, NULL);
 417	if (unlikely(ret != 0)) {
 418		DRM_ERROR("reserve failed\n");
 419		return;
 420	}
 421
 422	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
 423	if (unlikely(ret != 0))
 424		goto err_unreserve;
 425
 426	virtual = ttm_kmap_obj_virtual(&map, &dummy);
 427
 428	if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
 429		memcpy(srf->snooper.image, virtual,
 430		       VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
 431	} else {
 432		/* Image is unsigned pointer. */
 433		for (i = 0; i < box->h; i++)
 434			memcpy(srf->snooper.image + i * image_pitch,
 435			       virtual + i * cmd->dma.guest.pitch,
 436			       box->w * desc->pitchBytesPerBlock);
 437	}
 438
 439	srf->snooper.age++;
 440
 441	ttm_bo_kunmap(&map);
 442err_unreserve:
 443	ttm_bo_unreserve(bo);
 444}
 445
 446/**
 447 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 448 *
 449 * @dev_priv: Pointer to the device private struct.
 450 *
 451 * Clears all legacy hotspots.
 452 */
 453void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
 454{
 455	struct drm_device *dev = &dev_priv->drm;
 456	struct vmw_display_unit *du;
 457	struct drm_crtc *crtc;
 458
 459	drm_modeset_lock_all(dev);
 460	drm_for_each_crtc(crtc, dev) {
 461		du = vmw_crtc_to_du(crtc);
 462
 463		du->hotspot_x = 0;
 464		du->hotspot_y = 0;
 465	}
 466	drm_modeset_unlock_all(dev);
 467}
 468
 469void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
 470{
 471	struct drm_device *dev = &dev_priv->drm;
 472	struct vmw_display_unit *du;
 473	struct drm_crtc *crtc;
 474
 475	mutex_lock(&dev->mode_config.mutex);
 476
 477	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 478		du = vmw_crtc_to_du(crtc);
 479		if (!du->cursor_surface ||
 480		    du->cursor_age == du->cursor_surface->snooper.age ||
 481		    !du->cursor_surface->snooper.image)
 482			continue;
 483
 484		du->cursor_age = du->cursor_surface->snooper.age;
 485		vmw_send_define_cursor_cmd(dev_priv,
 486					   du->cursor_surface->snooper.image,
 487					   VMW_CURSOR_SNOOP_WIDTH,
 488					   VMW_CURSOR_SNOOP_HEIGHT,
 489					   du->hotspot_x + du->core_hotspot_x,
 490					   du->hotspot_y + du->core_hotspot_y);
 491	}
 492
 493	mutex_unlock(&dev->mode_config.mutex);
 494}
 495
 496
 497void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
 498{
 499	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
 500	u32 i;
 501
 502	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
 503
 504	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
 505		vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
 506
 507	drm_plane_cleanup(plane);
 508}
 509
 510
 511void vmw_du_primary_plane_destroy(struct drm_plane *plane)
 512{
 513	drm_plane_cleanup(plane);
 514
 515	/* Planes are static in our case so we don't free it */
 516}
 517
 518
 519/**
 520 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
 521 *
 522 * @vps: plane state associated with the display surface
 523 * @unreference: true if we also want to unreference the display.
 524 */
 525void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
 526			     bool unreference)
 527{
 528	if (vps->surf) {
 
 
 529		if (vps->pinned) {
 530			vmw_resource_unpin(&vps->surf->res);
 531			vps->pinned--;
 532		}
 533
 534		if (unreference) {
 535			if (vps->pinned)
 536				DRM_ERROR("Surface still pinned\n");
 537			vmw_surface_unreference(&vps->surf);
 538		}
 539	}
 540}
 541
 542
 543/**
 544 * vmw_du_plane_cleanup_fb - Unpins the plane surface
 545 *
 546 * @plane:  display plane
 547 * @old_state: Contains the FB to clean up
 548 *
 549 * Unpins the framebuffer surface
 550 *
 551 * Returns 0 on success
 552 */
 553void
 554vmw_du_plane_cleanup_fb(struct drm_plane *plane,
 555			struct drm_plane_state *old_state)
 556{
 557	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
 558
 559	vmw_du_plane_unpin_surf(vps, false);
 560}
 561
 562
 563/**
 564 * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
 565 *
 566 * @vps: plane_state
 567 *
 568 * Returns 0 on success
 569 */
 570
 571static int
 572vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
 573{
 574	int ret;
 575	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
 576	struct ttm_buffer_object *bo = vps->cursor.bo;
 577
 578	if (!bo)
 579		return -EINVAL;
 580
 
 
 581	if (bo->base.size < size)
 582		return -EINVAL;
 583
 584	if (vps->cursor.mapped)
 585		return 0;
 586
 587	ret = ttm_bo_reserve(bo, false, false, NULL);
 588
 589	if (unlikely(ret != 0))
 590		return -ENOMEM;
 591
 592	ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vps->cursor.map);
 593
 594	/*
 595	 * We just want to try to get mob bind to finish
 596	 * so that the first write to SVGA_REG_CURSOR_MOBID
 597	 * is done with a buffer that the device has already
 598	 * seen
 599	 */
 600	(void) ttm_bo_wait(bo, false, false);
 601
 602	ttm_bo_unreserve(bo);
 603
 604	if (unlikely(ret != 0))
 605		return -ENOMEM;
 606
 607	vps->cursor.mapped = true;
 608
 609	return 0;
 610}
 611
 612
 613/**
 614 * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
 615 *
 616 * @vps: state of the cursor plane
 617 *
 618 * Returns 0 on success
 619 */
 620
 621static int
 622vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
 623{
 624	int ret = 0;
 625	struct ttm_buffer_object *bo = vps->cursor.bo;
 626
 627	if (!vps->cursor.mapped)
 628		return 0;
 629
 630	if (!bo)
 631		return 0;
 632
 633	ret = ttm_bo_reserve(bo, true, false, NULL);
 634	if (likely(ret == 0)) {
 635		ttm_bo_kunmap(&vps->cursor.map);
 636		ttm_bo_unreserve(bo);
 637		vps->cursor.mapped = false;
 638	}
 639
 640	return ret;
 641}
 642
 643
 644/**
 645 * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
 646 *
 647 * @plane: cursor plane
 648 * @old_state: contains the state to clean up
 649 *
 650 * Unmaps all cursor bo mappings and unpins the cursor surface
 651 *
 652 * Returns 0 on success
 653 */
 654void
 655vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
 656			       struct drm_plane_state *old_state)
 657{
 658	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
 659	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
 660	bool dummy;
 661
 662	if (vps->surf_mapped) {
 663		vmw_bo_unmap(vps->surf->res.backup);
 664		vps->surf_mapped = false;
 665	}
 666
 667	if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &dummy)) {
 668		const int ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
 669
 670		if (likely(ret == 0)) {
 671			if (atomic_read(&vps->bo->base_mapped_count) == 0)
 672			    ttm_bo_kunmap(&vps->bo->map);
 673			ttm_bo_unreserve(&vps->bo->base);
 674		}
 675	}
 676
 677	vmw_du_cursor_plane_unmap_cm(vps);
 678	vmw_du_put_cursor_mob(vcp, vps);
 679
 680	vmw_du_plane_unpin_surf(vps, false);
 681
 682	if (vps->surf) {
 683		vmw_surface_unreference(&vps->surf);
 684		vps->surf = NULL;
 685	}
 686
 687	if (vps->bo) {
 688		vmw_bo_unreference(&vps->bo);
 689		vps->bo = NULL;
 690	}
 691}
 692
 693
 694/**
 695 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 696 *
 697 * @plane:  display plane
 698 * @new_state: info on the new plane state, including the FB
 699 *
 700 * Returns 0 on success
 701 */
 702int
 703vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
 704			       struct drm_plane_state *new_state)
 705{
 706	struct drm_framebuffer *fb = new_state->fb;
 707	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
 708	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
 
 709	int ret = 0;
 710
 711	if (vps->surf) {
 712		vmw_surface_unreference(&vps->surf);
 713		vps->surf = NULL;
 714	}
 715
 716	if (vps->bo) {
 717		vmw_bo_unreference(&vps->bo);
 718		vps->bo = NULL;
 719	}
 720
 721	if (fb) {
 722		if (vmw_framebuffer_to_vfb(fb)->bo) {
 723			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
 724			vmw_bo_reference(vps->bo);
 725		} else {
 726			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
 727			vmw_surface_reference(vps->surf);
 728		}
 
 729	}
 730
 731	if (!vps->surf && vps->bo) {
 732		const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
 
 733
 734		/*
 735		 * Not using vmw_bo_map_and_cache() helper here as we need to
 736		 * reserve the ttm_buffer_object first which
 737		 * vmw_bo_map_and_cache() omits.
 738		 */
 739		ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
 740
 741		if (unlikely(ret != 0))
 
 742			return -ENOMEM;
 743
 744		ret = ttm_bo_kmap(&vps->bo->base, 0, PFN_UP(size), &vps->bo->map);
 
 
 745
 746		if (likely(ret == 0))
 747			atomic_inc(&vps->bo->base_mapped_count);
 748
 749		ttm_bo_unreserve(&vps->bo->base);
 750
 751		if (unlikely(ret != 0))
 752			return -ENOMEM;
 753	} else if (vps->surf && !vps->bo && vps->surf->res.backup) {
 754
 755		WARN_ON(vps->surf->snooper.image);
 756		ret = ttm_bo_reserve(&vps->surf->res.backup->base, true, false,
 757				     NULL);
 758		if (unlikely(ret != 0))
 759			return -ENOMEM;
 760		vmw_bo_map_and_cache(vps->surf->res.backup);
 761		ttm_bo_unreserve(&vps->surf->res.backup->base);
 762		vps->surf_mapped = true;
 763	}
 764
 765	if (vps->surf || vps->bo) {
 766		vmw_du_get_cursor_mob(vcp, vps);
 767		vmw_du_cursor_plane_map_cm(vps);
 768	}
 769
 770	return 0;
 771}
 772
 773
 774void
 775vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
 776				  struct drm_atomic_state *state)
 777{
 778	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 779									   plane);
 780	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
 781									   plane);
 782	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
 783	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
 784	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 785	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
 786	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
 
 
 
 787	s32 hotspot_x, hotspot_y;
 788	bool dummy;
 789
 790	hotspot_x = du->hotspot_x;
 791	hotspot_y = du->hotspot_y;
 792
 793	if (new_state->fb) {
 794		hotspot_x += new_state->fb->hot_x;
 795		hotspot_y += new_state->fb->hot_y;
 796	}
 797
 798	du->cursor_surface = vps->surf;
 799	du->cursor_bo = vps->bo;
 800
 801	if (!vps->surf && !vps->bo) {
 802		vmw_cursor_update_position(dev_priv, false, 0, 0);
 803		return;
 804	}
 805
 806	vps->cursor.hotspot_x = hotspot_x;
 807	vps->cursor.hotspot_y = hotspot_y;
 808
 809	if (vps->surf) {
 810		du->cursor_age = du->cursor_surface->snooper.age;
 
 
 
 
 
 
 
 
 811	}
 812
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 813	if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
 814		/*
 815		 * If it hasn't changed, avoid making the device do extra
 816		 * work by keeping the old cursor active.
 817		 */
 818		struct vmw_cursor_plane_state tmp = old_vps->cursor;
 819		old_vps->cursor = vps->cursor;
 820		vps->cursor = tmp;
 821	} else {
 822		void *image = vmw_du_cursor_plane_acquire_image(vps);
 823		if (image)
 824			vmw_cursor_update_image(dev_priv, vps, image,
 825						new_state->crtc_w,
 826						new_state->crtc_h,
 827						hotspot_x, hotspot_y);
 828	}
 829
 830	if (vps->bo) {
 831		if (ttm_kmap_obj_virtual(&vps->bo->map, &dummy))
 832			atomic_dec(&vps->bo->base_mapped_count);
 833	}
 
 
 834
 835	du->cursor_x = new_state->crtc_x + du->set_gui_x;
 836	du->cursor_y = new_state->crtc_y + du->set_gui_y;
 837
 838	vmw_cursor_update_position(dev_priv, true,
 839				   du->cursor_x + hotspot_x,
 840				   du->cursor_y + hotspot_y);
 841
 842	du->core_hotspot_x = hotspot_x - du->hotspot_x;
 843	du->core_hotspot_y = hotspot_y - du->hotspot_y;
 844}
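/*
 * Worked example (editor's illustration; the numbers are made up): with a
 * second display unit placed at set_gui_x = 1920, set_gui_y = 0, a commit
 * with crtc_x = 100, crtc_y = 50 and a framebuffer hotspot of (8, 8)
 * gives:
 *
 *	du->cursor_x = 100 + 1920 = 2020
 *	du->cursor_y =  50 +    0 =   50
 *	vmw_cursor_update_position(dev_priv, true, 2028, 58);
 *
 * i.e. the device cursor is programmed in global virtual-desktop
 * coordinates, offset by the hotspot supplied with the framebuffer.
 */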
 845
 846
 847/**
 848 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 849 *
 850 * @plane: display plane
 851 * @state: info on the new plane state, including the FB
 852 *
 853 * Check if the new state is settable given the current state.  Other
 854 * than what the atomic helper checks, we care about crtc fitting
 855 * the FB and maintaining one active framebuffer.
 856 *
 857 * Returns 0 on success
 858 */
 859int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
 860				      struct drm_atomic_state *state)
 861{
 862	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 863									   plane);
 864	struct drm_crtc_state *crtc_state = NULL;
 865	struct drm_framebuffer *new_fb = new_state->fb;
 866	int ret;
 867
 868	if (new_state->crtc)
 869		crtc_state = drm_atomic_get_new_crtc_state(state,
 870							   new_state->crtc);
 871
 872	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
 873						  DRM_PLANE_NO_SCALING,
 874						  DRM_PLANE_NO_SCALING,
 875						  false, true);
 876
 877	if (!ret && new_fb) {
 878		struct drm_crtc *crtc = new_state->crtc;
 879		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 880
 881		vmw_connector_state_to_vcs(du->connector.state);
 882	}
 883
 884
 885	return ret;
 886}
 887
 888
 889/**
 890 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 891 *
 892 * @plane: cursor plane
 893 * @state: info on the new plane state
 894 *
 895 * This is a chance to fail if the new cursor state does not fit
 896 * our requirements.
 897 *
 898 * Returns 0 on success
 899 */
 900int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
 901				     struct drm_atomic_state *state)
 902{
 903	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 904									   plane);
 905	int ret = 0;
 906	struct drm_crtc_state *crtc_state = NULL;
 907	struct vmw_surface *surface = NULL;
 908	struct drm_framebuffer *fb = new_state->fb;
 909
 910	if (new_state->crtc)
 911		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
 912							   new_state->crtc);
 913
 914	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
 915						  DRM_PLANE_NO_SCALING,
 916						  DRM_PLANE_NO_SCALING,
 917						  true, true);
 918	if (ret)
 919		return ret;
 920
 921	/* Turning off */
 922	if (!fb)
 923		return 0;
 924
 925	/* A lot of the code assumes this */
 926	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
 927		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
 928			  new_state->crtc_w, new_state->crtc_h);
 929		return -EINVAL;
 930	}
 931
 932	if (!vmw_framebuffer_to_vfb(fb)->bo) {
 933		surface = vmw_framebuffer_to_vfbs(fb)->surface;
 934
 935		WARN_ON(!surface);
 936
 937		if (!surface ||
 938		    (!surface->snooper.image && !surface->res.backup)) {
 939			DRM_ERROR("surface not suitable for cursor\n");
 940			return -EINVAL;
 941		}
 942	}
 943
 944	return 0;
 945}
 946
 947
 948int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
 949			     struct drm_atomic_state *state)
 950{
 951	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
 952									 crtc);
 953	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
 954	int connector_mask = drm_connector_mask(&du->connector);
 955	bool has_primary = new_state->plane_mask &
 956			   drm_plane_mask(crtc->primary);
 957
 958	/* We always want to have an active plane with an active CRTC */
 959	if (has_primary != new_state->enable)
 960		return -EINVAL;
 961
 962
 963	if (new_state->connector_mask != connector_mask &&
 964	    new_state->connector_mask != 0) {
 965		DRM_ERROR("Invalid connectors configuration\n");
 966		return -EINVAL;
 967	}
 968
 969	/*
 970	 * Our virtual device does not have a dot clock, so use the logical
 971	 * clock value as the dot clock.
 972	 */
 973	if (new_state->mode.crtc_clock == 0)
 974		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;
 975
 976	return 0;
 977}
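/*
 * Worked example (editor's illustration): for the builtin 1920x1080@60
 * mode further below, mode.clock is 173000 kHz while the virtual device
 * reports no dot clock, so crtc_clock would otherwise stay 0.  Copying
 * the logical clock lets drm_calc_timestamping_constants() derive a frame
 * duration of
 *
 *	htotal * vtotal / clock = 2576 * 1120 / 173000 kHz ~= 16.68 ms
 *
 * instead of logging an error about a zero clock.
 */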
 978
 979
 980void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
 981			      struct drm_atomic_state *state)
 982{
 983}
 984
 985
 986void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
 987			      struct drm_atomic_state *state)
 988{
 989}
 990
 991
 992/**
 993 * vmw_du_crtc_duplicate_state - duplicate crtc state
 994 * @crtc: DRM crtc
 995 *
 996 * Allocates and returns a copy of the crtc state (both common and
 997 * vmw-specific) for the specified crtc.
 998 *
 999 * Returns: The newly allocated crtc state, or NULL on failure.
1000 */
1001struct drm_crtc_state *
1002vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
1003{
1004	struct drm_crtc_state *state;
1005	struct vmw_crtc_state *vcs;
1006
1007	if (WARN_ON(!crtc->state))
1008		return NULL;
1009
1010	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
1011
1012	if (!vcs)
1013		return NULL;
1014
1015	state = &vcs->base;
1016
1017	__drm_atomic_helper_crtc_duplicate_state(crtc, state);
1018
1019	return state;
1020}
1021
1022
1023/**
1024 * vmw_du_crtc_reset - creates a blank vmw crtc state
1025 * @crtc: DRM crtc
1026 *
1027 * Resets the atomic state for @crtc by freeing the state pointer (which
1028 * might be NULL, e.g. at driver load time) and allocating a new empty state
1029 * object.
1030 */
1031void vmw_du_crtc_reset(struct drm_crtc *crtc)
1032{
1033	struct vmw_crtc_state *vcs;
1034
1035
1036	if (crtc->state) {
1037		__drm_atomic_helper_crtc_destroy_state(crtc->state);
1038
1039		kfree(vmw_crtc_state_to_vcs(crtc->state));
1040	}
1041
1042	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1043
1044	if (!vcs) {
1045		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
1046		return;
1047	}
1048
1049	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
1050}
1051
1052
1053/**
1054 * vmw_du_crtc_destroy_state - destroy crtc state
1055 * @crtc: DRM crtc
1056 * @state: state object to destroy
1057 *
1058 * Destroys the crtc state (both common and vmw-specific) for the
 1059 * specified crtc.
1060 */
1061void
1062vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
1063			  struct drm_crtc_state *state)
1064{
1065	drm_atomic_helper_crtc_destroy_state(crtc, state);
1066}
1067
1068
1069/**
1070 * vmw_du_plane_duplicate_state - duplicate plane state
1071 * @plane: drm plane
1072 *
1073 * Allocates and returns a copy of the plane state (both common and
1074 * vmw-specific) for the specified plane.
1075 *
1076 * Returns: The newly allocated plane state, or NULL on failure.
1077 */
1078struct drm_plane_state *
1079vmw_du_plane_duplicate_state(struct drm_plane *plane)
1080{
1081	struct drm_plane_state *state;
1082	struct vmw_plane_state *vps;
1083
1084	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
1085
1086	if (!vps)
1087		return NULL;
1088
1089	vps->pinned = 0;
1090	vps->cpp = 0;
1091
1092	memset(&vps->cursor, 0, sizeof(vps->cursor));
1093
1094	/* Each ref counted resource needs to be acquired again */
1095	if (vps->surf)
1096		(void) vmw_surface_reference(vps->surf);
1097
1098	if (vps->bo)
1099		(void) vmw_bo_reference(vps->bo);
1100
1101	state = &vps->base;
1102
1103	__drm_atomic_helper_plane_duplicate_state(plane, state);
1104
1105	return state;
1106}
1107
1108
1109/**
1110 * vmw_du_plane_reset - creates a blank vmw plane state
1111 * @plane: drm plane
1112 *
1113 * Resets the atomic state for @plane by freeing the state pointer (which might
1114 * be NULL, e.g. at driver load time) and allocating a new empty state object.
1115 */
1116void vmw_du_plane_reset(struct drm_plane *plane)
1117{
1118	struct vmw_plane_state *vps;
1119
1120	if (plane->state)
1121		vmw_du_plane_destroy_state(plane, plane->state);
1122
1123	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
1124
1125	if (!vps) {
1126		DRM_ERROR("Cannot allocate vmw_plane_state\n");
1127		return;
1128	}
1129
1130	__drm_atomic_helper_plane_reset(plane, &vps->base);
1131}
1132
1133
1134/**
1135 * vmw_du_plane_destroy_state - destroy plane state
1136 * @plane: DRM plane
1137 * @state: state object to destroy
1138 *
1139 * Destroys the plane state (both common and vmw-specific) for the
1140 * specified plane.
1141 */
1142void
1143vmw_du_plane_destroy_state(struct drm_plane *plane,
1144			   struct drm_plane_state *state)
1145{
1146	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
1147
1148	/* Should have been freed by cleanup_fb */
1149	if (vps->surf)
1150		vmw_surface_unreference(&vps->surf);
1151
1152	if (vps->bo)
1153		vmw_bo_unreference(&vps->bo);
1154
1155	drm_atomic_helper_plane_destroy_state(plane, state);
1156}
1157
1158
1159/**
1160 * vmw_du_connector_duplicate_state - duplicate connector state
1161 * @connector: DRM connector
1162 *
1163 * Allocates and returns a copy of the connector state (both common and
1164 * vmw-specific) for the specified connector.
1165 *
1166 * Returns: The newly allocated connector state, or NULL on failure.
1167 */
1168struct drm_connector_state *
1169vmw_du_connector_duplicate_state(struct drm_connector *connector)
1170{
1171	struct drm_connector_state *state;
1172	struct vmw_connector_state *vcs;
1173
1174	if (WARN_ON(!connector->state))
1175		return NULL;
1176
1177	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
1178
1179	if (!vcs)
1180		return NULL;
1181
1182	state = &vcs->base;
1183
1184	__drm_atomic_helper_connector_duplicate_state(connector, state);
1185
1186	return state;
1187}
1188
1189
1190/**
1191 * vmw_du_connector_reset - creates a blank vmw connector state
1192 * @connector: DRM connector
1193 *
1194 * Resets the atomic state for @connector by freeing the state pointer (which
1195 * might be NULL, e.g. at driver load time) and allocating a new empty state
1196 * object.
1197 */
1198void vmw_du_connector_reset(struct drm_connector *connector)
1199{
1200	struct vmw_connector_state *vcs;
1201
1202
1203	if (connector->state) {
1204		__drm_atomic_helper_connector_destroy_state(connector->state);
1205
1206		kfree(vmw_connector_state_to_vcs(connector->state));
1207	}
1208
1209	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1210
1211	if (!vcs) {
1212		DRM_ERROR("Cannot allocate vmw_connector_state\n");
1213		return;
1214	}
1215
1216	__drm_atomic_helper_connector_reset(connector, &vcs->base);
1217}
1218
1219
1220/**
1221 * vmw_du_connector_destroy_state - destroy connector state
1222 * @connector: DRM connector
1223 * @state: state object to destroy
1224 *
1225 * Destroys the connector state (both common and vmw-specific) for the
 1226 * specified connector.
1227 */
1228void
1229vmw_du_connector_destroy_state(struct drm_connector *connector,
1230			  struct drm_connector_state *state)
1231{
1232	drm_atomic_helper_connector_destroy_state(connector, state);
1233}
1234/*
1235 * Generic framebuffer code
1236 */
1237
1238/*
1239 * Surface framebuffer code
1240 */
1241
1242static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
1243{
1244	struct vmw_framebuffer_surface *vfbs =
1245		vmw_framebuffer_to_vfbs(framebuffer);
1246
1247	drm_framebuffer_cleanup(framebuffer);
1248	vmw_surface_unreference(&vfbs->surface);
1249
1250	kfree(vfbs);
1251}
1252
1253/**
1254 * vmw_kms_readback - Perform a readback from the screen system to
1255 * a buffer-object backed framebuffer.
1256 *
1257 * @dev_priv: Pointer to the device private structure.
1258 * @file_priv: Pointer to a struct drm_file identifying the caller.
1259 * Must be set to NULL if @user_fence_rep is NULL.
1260 * @vfb: Pointer to the buffer-object backed framebuffer.
1261 * @user_fence_rep: User-space provided structure for fence information.
1262 * Must be set to non-NULL if @file_priv is non-NULL.
1263 * @vclips: Array of clip rects.
1264 * @num_clips: Number of clip rects in @vclips.
1265 *
1266 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
1267 * interrupted.
1268 */
1269int vmw_kms_readback(struct vmw_private *dev_priv,
1270		     struct drm_file *file_priv,
1271		     struct vmw_framebuffer *vfb,
1272		     struct drm_vmw_fence_rep __user *user_fence_rep,
1273		     struct drm_vmw_rect *vclips,
1274		     uint32_t num_clips)
1275{
1276	switch (dev_priv->active_display_unit) {
1277	case vmw_du_screen_object:
1278		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
1279					    user_fence_rep, vclips, num_clips,
1280					    NULL);
1281	case vmw_du_screen_target:
1282		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
1283					user_fence_rep, NULL, vclips, num_clips,
1284					1, false, true, NULL);
1285	default:
1286		WARN_ONCE(true,
1287			  "Readback called with invalid display system.\n");
 1288	}
1289
1290	return -ENOSYS;
1291}
1292
1293
1294static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
1295	.destroy = vmw_framebuffer_surface_destroy,
1296	.dirty = drm_atomic_helper_dirtyfb,
1297};
1298
1299static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
1300					   struct vmw_surface *surface,
1301					   struct vmw_framebuffer **out,
1302					   const struct drm_mode_fb_cmd2
1303					   *mode_cmd,
1304					   bool is_bo_proxy)
1305
1306{
1307	struct drm_device *dev = &dev_priv->drm;
1308	struct vmw_framebuffer_surface *vfbs;
1309	enum SVGA3dSurfaceFormat format;
1310	int ret;
1311
1312	/* 3D is only supported on HWv8 and newer hosts */
1313	if (dev_priv->active_display_unit == vmw_du_legacy)
1314		return -ENOSYS;
1315
1316	/*
1317	 * Sanity checks.
1318	 */
1319
1320	if (!drm_any_plane_has_format(&dev_priv->drm,
1321				      mode_cmd->pixel_format,
1322				      mode_cmd->modifier[0])) {
1323		drm_dbg(&dev_priv->drm,
1324			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1325			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1326		return -EINVAL;
1327	}
1328
1329	/* Surface must be marked as a scanout. */
1330	if (unlikely(!surface->metadata.scanout))
1331		return -EINVAL;
1332
1333	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
1334		     surface->metadata.num_sizes != 1 ||
1335		     surface->metadata.base_size.width < mode_cmd->width ||
1336		     surface->metadata.base_size.height < mode_cmd->height ||
1337		     surface->metadata.base_size.depth != 1)) {
1338		DRM_ERROR("Incompatible surface dimensions "
1339			  "for requested mode.\n");
1340		return -EINVAL;
1341	}
1342
1343	switch (mode_cmd->pixel_format) {
1344	case DRM_FORMAT_ARGB8888:
1345		format = SVGA3D_A8R8G8B8;
1346		break;
1347	case DRM_FORMAT_XRGB8888:
1348		format = SVGA3D_X8R8G8B8;
1349		break;
1350	case DRM_FORMAT_RGB565:
1351		format = SVGA3D_R5G6B5;
1352		break;
1353	case DRM_FORMAT_XRGB1555:
1354		format = SVGA3D_A1R5G5B5;
1355		break;
1356	default:
1357		DRM_ERROR("Invalid pixel format: %p4cc\n",
1358			  &mode_cmd->pixel_format);
1359		return -EINVAL;
1360	}
1361
1362	/*
1363	 * For DX, surface format validation is done when surface->scanout
1364	 * is set.
1365	 */
1366	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
1367		DRM_ERROR("Invalid surface format for requested mode.\n");
1368		return -EINVAL;
1369	}
1370
1371	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
1372	if (!vfbs) {
1373		ret = -ENOMEM;
1374		goto out_err1;
1375	}
1376
1377	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
1378	vfbs->surface = vmw_surface_reference(surface);
1379	vfbs->base.user_handle = mode_cmd->handles[0];
1380	vfbs->is_bo_proxy = is_bo_proxy;
1381
1382	*out = &vfbs->base;
1383
1384	ret = drm_framebuffer_init(dev, &vfbs->base.base,
1385				   &vmw_framebuffer_surface_funcs);
1386	if (ret)
1387		goto out_err2;
1388
1389	return 0;
1390
1391out_err2:
1392	vmw_surface_unreference(&surface);
1393	kfree(vfbs);
1394out_err1:
1395	return ret;
1396}
1397
1398/*
1399 * Buffer-object framebuffer code
1400 */
1401
1402static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
1403					    struct drm_file *file_priv,
1404					    unsigned int *handle)
1405{
1406	struct vmw_framebuffer_bo *vfbd =
1407			vmw_framebuffer_to_vfbd(fb);
1408
1409	return drm_gem_handle_create(file_priv, &vfbd->buffer->base.base, handle);
1410}
1411
1412static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
1413{
1414	struct vmw_framebuffer_bo *vfbd =
1415		vmw_framebuffer_to_vfbd(framebuffer);
1416
1417	drm_framebuffer_cleanup(framebuffer);
1418	vmw_bo_unreference(&vfbd->buffer);
1419
1420	kfree(vfbd);
1421}
1422
1423static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
1424				    struct drm_file *file_priv,
1425				    unsigned int flags, unsigned int color,
1426				    struct drm_clip_rect *clips,
1427				    unsigned int num_clips)
1428{
1429	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
1430	struct vmw_framebuffer_bo *vfbd =
1431		vmw_framebuffer_to_vfbd(framebuffer);
1432	struct drm_clip_rect norect;
1433	int ret, increment = 1;
1434
1435	drm_modeset_lock_all(&dev_priv->drm);
1436
1437	if (!num_clips) {
1438		num_clips = 1;
1439		clips = &norect;
1440		norect.x1 = norect.y1 = 0;
1441		norect.x2 = framebuffer->width;
1442		norect.y2 = framebuffer->height;
1443	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
1444		num_clips /= 2;
1445		increment = 2;
1446	}
1447
1448	switch (dev_priv->active_display_unit) {
1449	case vmw_du_legacy:
1450		ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
1451					      clips, num_clips, increment);
1452		break;
1453	default:
1454		ret = -EINVAL;
1455		WARN_ONCE(true, "Dirty called with invalid display system.\n");
1456		break;
1457	}
1458
1459	vmw_cmd_flush(dev_priv, false);
1460
1461	drm_modeset_unlock_all(&dev_priv->drm);
1462
1463	return ret;
1464}
1465
1466static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
1467					struct drm_file *file_priv,
1468					unsigned int flags, unsigned int color,
1469					struct drm_clip_rect *clips,
1470					unsigned int num_clips)
1471{
1472	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
1473
1474	if (dev_priv->active_display_unit == vmw_du_legacy &&
1475	    vmw_cmd_supported(dev_priv))
1476		return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
1477						color, clips, num_clips);
1478
1479	return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color,
1480					 clips, num_clips);
1481}
1482
1483static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
1484	.create_handle = vmw_framebuffer_bo_create_handle,
1485	.destroy = vmw_framebuffer_bo_destroy,
1486	.dirty = vmw_framebuffer_bo_dirty_ext,
1487};
1488
1489/*
 1490 * Pin the buffer in a location suitable for access by the
1491 * display system.
1492 */
1493static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
1494{
1495	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
1496	struct vmw_buffer_object *buf;
1497	struct ttm_placement *placement;
1498	int ret;
1499
1500	buf = vfb->bo ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
1501		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
1502
1503	if (!buf)
1504		return 0;
1505
1506	switch (dev_priv->active_display_unit) {
1507	case vmw_du_legacy:
1508		vmw_overlay_pause_all(dev_priv);
1509		ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
1510		vmw_overlay_resume_all(dev_priv);
1511		break;
1512	case vmw_du_screen_object:
1513	case vmw_du_screen_target:
1514		if (vfb->bo) {
1515			if (dev_priv->capabilities & SVGA_CAP_3D) {
1516				/*
1517				 * Use surface DMA to get content to
 1518				 * screen target surface.
1519				 */
1520				placement = &vmw_vram_gmr_placement;
1521			} else {
1522				/* Use CPU blit. */
1523				placement = &vmw_sys_placement;
1524			}
1525		} else {
1526			/* Use surface / image update */
1527			placement = &vmw_mob_placement;
1528		}
1529
1530		return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
1531	default:
1532		return -EINVAL;
1533	}
1534
1535	return ret;
1536}
1537
1538static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
1539{
1540	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
1541	struct vmw_buffer_object *buf;
1542
1543	buf = vfb->bo ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
1544		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
1545
1546	if (WARN_ON(!buf))
1547		return 0;
1548
1549	return vmw_bo_unpin(dev_priv, buf, false);
1550}
1551
1552/**
1553 * vmw_create_bo_proxy - create a proxy surface for the buffer object
1554 *
1555 * @dev: DRM device
1556 * @mode_cmd: parameters for the new surface
1557 * @bo_mob: MOB backing the buffer object
1558 * @srf_out: newly created surface
1559 *
1560 * When the content FB is a buffer object, we create a surface as a proxy to the
1561 * same buffer.  This way we can do a surface copy rather than a surface DMA.
 1562 * This is a more efficient approach.
1563 *
1564 * RETURNS:
1565 * 0 on success, error code otherwise
1566 */
1567static int vmw_create_bo_proxy(struct drm_device *dev,
1568			       const struct drm_mode_fb_cmd2 *mode_cmd,
1569			       struct vmw_buffer_object *bo_mob,
1570			       struct vmw_surface **srf_out)
1571{
1572	struct vmw_surface_metadata metadata = {0};
1573	uint32_t format;
1574	struct vmw_resource *res;
1575	unsigned int bytes_pp;
1576	int ret;
1577
1578	switch (mode_cmd->pixel_format) {
1579	case DRM_FORMAT_ARGB8888:
1580	case DRM_FORMAT_XRGB8888:
1581		format = SVGA3D_X8R8G8B8;
1582		bytes_pp = 4;
1583		break;
1584
1585	case DRM_FORMAT_RGB565:
1586	case DRM_FORMAT_XRGB1555:
1587		format = SVGA3D_R5G6B5;
1588		bytes_pp = 2;
1589		break;
1590
1591	case 8:
1592		format = SVGA3D_P8;
1593		bytes_pp = 1;
1594		break;
1595
1596	default:
1597		DRM_ERROR("Invalid framebuffer format %p4cc\n",
1598			  &mode_cmd->pixel_format);
1599		return -EINVAL;
1600	}
1601
1602	metadata.format = format;
1603	metadata.mip_levels[0] = 1;
1604	metadata.num_sizes = 1;
1605	metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
1606	metadata.base_size.height =  mode_cmd->height;
1607	metadata.base_size.depth = 1;
1608	metadata.scanout = true;
1609
1610	ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
1611	if (ret) {
1612		DRM_ERROR("Failed to allocate proxy content buffer\n");
1613		return ret;
1614	}
1615
1616	res = &(*srf_out)->res;
1617
1618	/* Reserve and switch the backing mob. */
1619	mutex_lock(&res->dev_priv->cmdbuf_mutex);
1620	(void) vmw_resource_reserve(res, false, true);
1621	vmw_bo_unreference(&res->backup);
1622	res->backup = vmw_bo_reference(bo_mob);
1623	res->backup_offset = 0;
1624	vmw_resource_unreserve(res, false, false, false, NULL, 0);
1625	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1626
1627	return 0;
1628}
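/*
 * Worked example (editor's illustration): for a DRM_FORMAT_XRGB8888
 * buffer with pitches[0] = 4096 and height = 768, bytes_pp is 4, so the
 * proxy surface is defined as 4096 / 4 = 1024 pixels wide by 768 high.
 * Deriving the width from the pitch rather than from mode_cmd->width
 * keeps the surface rows aligned with the underlying buffer even when
 * the pitch includes padding.
 */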
1629
1630
1631
1632static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
1633				      struct vmw_buffer_object *bo,
1634				      struct vmw_framebuffer **out,
1635				      const struct drm_mode_fb_cmd2
1636				      *mode_cmd)
1637
1638{
1639	struct drm_device *dev = &dev_priv->drm;
1640	struct vmw_framebuffer_bo *vfbd;
1641	unsigned int requested_size;
1642	int ret;
1643
1644	requested_size = mode_cmd->height * mode_cmd->pitches[0];
1645	if (unlikely(requested_size > bo->base.base.size)) {
1646		DRM_ERROR("Screen buffer object size is too small "
1647			  "for requested mode.\n");
1648		return -EINVAL;
1649	}
1650
1651	if (!drm_any_plane_has_format(&dev_priv->drm,
1652				      mode_cmd->pixel_format,
1653				      mode_cmd->modifier[0])) {
1654		drm_dbg(&dev_priv->drm,
1655			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1656			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1657		return -EINVAL;
1658	}
1659
1660	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
1661	if (!vfbd) {
1662		ret = -ENOMEM;
1663		goto out_err1;
1664	}
1665
1666	vfbd->base.base.obj[0] = &bo->base.base;
1667	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
1668	vfbd->base.bo = true;
1669	vfbd->buffer = vmw_bo_reference(bo);
1670	vfbd->base.user_handle = mode_cmd->handles[0];
1671	*out = &vfbd->base;
1672
1673	ret = drm_framebuffer_init(dev, &vfbd->base.base,
1674				   &vmw_framebuffer_bo_funcs);
1675	if (ret)
1676		goto out_err2;
1677
1678	return 0;
1679
1680out_err2:
1681	vmw_bo_unreference(&bo);
1682	kfree(vfbd);
1683out_err1:
1684	return ret;
1685}
1686
1687
1688/**
1689 * vmw_kms_srf_ok - check if a surface can be created
1690 *
1691 * @dev_priv: Pointer to device private struct.
1692 * @width: requested width
1693 * @height: requested height
1694 *
 1695 * Surfaces must not exceed the maximum texture size.
1696 */
1697static bool
1698vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
1699{
1700	if (width  > dev_priv->texture_max_width ||
1701	    height > dev_priv->texture_max_height)
1702		return false;
1703
1704	return true;
1705}
1706
1707/**
1708 * vmw_kms_new_framebuffer - Create a new framebuffer.
1709 *
1710 * @dev_priv: Pointer to device private struct.
1711 * @bo: Pointer to buffer object to wrap the kms framebuffer around.
1712 * Either @bo or @surface must be NULL.
1713 * @surface: Pointer to a surface to wrap the kms framebuffer around.
1714 * Either @bo or @surface must be NULL.
1715 * @only_2d: No presents will occur to this buffer object based framebuffer.
1716 * This helps the code to do some important optimizations.
1717 * @mode_cmd: Frame-buffer metadata.
1718 */
1719struct vmw_framebuffer *
1720vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1721			struct vmw_buffer_object *bo,
1722			struct vmw_surface *surface,
1723			bool only_2d,
1724			const struct drm_mode_fb_cmd2 *mode_cmd)
1725{
1726	struct vmw_framebuffer *vfb = NULL;
1727	bool is_bo_proxy = false;
1728	int ret;
1729
1730	/*
 1731	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
1732	 * therefore, wrap the buffer object in a surface so we can use the
1733	 * SurfaceCopy command.
1734	 */
1735	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
1736	    bo && only_2d &&
1737	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
1738	    dev_priv->active_display_unit == vmw_du_screen_target) {
1739		ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
1740					  bo, &surface);
1741		if (ret)
1742			return ERR_PTR(ret);
1743
1744		is_bo_proxy = true;
1745	}
1746
 1747	/* Create the new framebuffer depending on what we have */
1748	if (surface) {
1749		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
1750						      mode_cmd,
1751						      is_bo_proxy);
1752		/*
1753		 * vmw_create_bo_proxy() adds a reference that is no longer
1754		 * needed
1755		 */
1756		if (is_bo_proxy)
1757			vmw_surface_unreference(&surface);
1758	} else if (bo) {
1759		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
1760						 mode_cmd);
1761	} else {
1762		BUG();
1763	}
1764
1765	if (ret)
1766		return ERR_PTR(ret);
1767
1768	vfb->pin = vmw_framebuffer_pin;
1769	vfb->unpin = vmw_framebuffer_unpin;
1770
1771	return vfb;
1772}
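/*
 * Example (editor's illustration): on a screen-target (STDU) device, a
 * 1024x768 buffer-object framebuffer created with only_2d = true takes
 * the proxy path: vmw_create_bo_proxy() wraps the buffer in a surface and
 * the surface branch above builds a vmw_framebuffer_surface with
 * is_bo_proxy = true.  A 64x64 cursor buffer, by contrast, fails the
 * "width > 64" test and gets a plain vmw_framebuffer_bo.
 */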
1773
1774/*
1775 * Generic Kernel modesetting functions
1776 */
1777
1778static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1779						 struct drm_file *file_priv,
1780						 const struct drm_mode_fb_cmd2 *mode_cmd)
1781{
1782	struct vmw_private *dev_priv = vmw_priv(dev);
1783	struct vmw_framebuffer *vfb = NULL;
1784	struct vmw_surface *surface = NULL;
1785	struct vmw_buffer_object *bo = NULL;
1786	int ret;
1787
1788	/* returns either a bo or surface */
1789	ret = vmw_user_lookup_handle(dev_priv, file_priv,
1790				     mode_cmd->handles[0],
1791				     &surface, &bo);
1792	if (ret) {
1793		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
1794			  mode_cmd->handles[0], mode_cmd->handles[0]);
1795		goto err_out;
1796	}
1797
1798
1799	if (!bo &&
1800	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
1801		DRM_ERROR("Surface size cannot exceed %dx%d\n",
1802			dev_priv->texture_max_width,
1803			dev_priv->texture_max_height);
		ret = -EINVAL;	/* otherwise err_out would return a NULL framebuffer */
1804		goto err_out;
1805	}
1806
1807
1808	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
1809				      !(dev_priv->capabilities & SVGA_CAP_3D),
1810				      mode_cmd);
1811	if (IS_ERR(vfb)) {
1812		ret = PTR_ERR(vfb);
1813		goto err_out;
1814	}
1815
1816err_out:
 1817	/* vmw_user_lookup_handle takes one ref, and so does new_fb */
1818	if (bo) {
1819		vmw_bo_unreference(&bo);
1820		drm_gem_object_put(&bo->base.base);
1821	}
1822	if (surface)
1823		vmw_surface_unreference(&surface);
1824
1825	if (ret) {
1826		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
1827		return ERR_PTR(ret);
1828	}
1829
1830	return &vfb->base;
1831}
1832
1833/**
1834 * vmw_kms_check_display_memory - Validates display memory required for a
1835 * topology
1836 * @dev: DRM device
1837 * @num_rects: number of drm_rect in rects
1838 * @rects: array of drm_rect representing the topology to validate indexed by
1839 * crtc index.
1840 *
1841 * Returns:
1842 * 0 on success otherwise negative error code
1843 */
1844static int vmw_kms_check_display_memory(struct drm_device *dev,
1845					uint32_t num_rects,
1846					struct drm_rect *rects)
1847{
1848	struct vmw_private *dev_priv = vmw_priv(dev);
1849	struct drm_rect bounding_box = {0};
1850	u64 total_pixels = 0, pixel_mem, bb_mem;
1851	int i;
1852
1853	for (i = 0; i < num_rects; i++) {
1854		/*
1855		 * For STDU only individual screen (screen target) is limited by
1856		 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
1857		 */
1858		if (dev_priv->active_display_unit == vmw_du_screen_target &&
1859		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
1860		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
1861			VMW_DEBUG_KMS("Screen size not supported.\n");
1862			return -EINVAL;
1863		}
1864
1865		/* Bounding box upper left is at (0,0). */
1866		if (rects[i].x2 > bounding_box.x2)
1867			bounding_box.x2 = rects[i].x2;
1868
1869		if (rects[i].y2 > bounding_box.y2)
1870			bounding_box.y2 = rects[i].y2;
1871
1872		total_pixels += (u64) drm_rect_width(&rects[i]) *
1873			(u64) drm_rect_height(&rects[i]);
1874	}
1875
1876	/* Virtual svga device primary limits are always in 32-bpp. */
1877	pixel_mem = total_pixels * 4;
1878
1879	/*
 1880	 * For HV10 and below, prim_bb_mem is the VRAM size. When
 1881	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the VRAM
 1882	 * size is the limit on the primary bounding box.
1883	 */
1884	if (pixel_mem > dev_priv->max_primary_mem) {
1885		VMW_DEBUG_KMS("Combined output size too large.\n");
1886		return -EINVAL;
1887	}
1888
1889	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
1890	if (dev_priv->active_display_unit != vmw_du_screen_target ||
1891	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
1892		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
1893
1894		if (bb_mem > dev_priv->max_primary_mem) {
1895			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
1896			return -EINVAL;
1897		}
1898	}
1899
1900	return 0;
1901}
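/*
 * Worked example (editor's illustration): two 1920x1080 outputs with a
 * horizontal gap, at (0, 0) and (2000, 0):
 *
 *	total_pixels = 2 * 1920 * 1080 = 4147200
 *	pixel_mem    = 4147200 * 4     = 16588800 bytes (~15.8 MiB)
 *	bounding box = 3920 x 1080     (upper left pinned at (0, 0))
 *	bb_mem       = 3920 * 1080 * 4 = 16934400 bytes (~16.2 MiB)
 *
 * A topology can therefore pass the pixel_mem check yet fail on bb_mem,
 * because gaps between outputs still count toward the bounding box.
 */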
1902
1903/**
1904 * vmw_crtc_state_and_lock - Return new or current crtc state with locked
1905 * crtc mutex
1906 * @state: The atomic state pointer containing the new atomic state
1907 * @crtc: The crtc
1908 *
1909 * This function returns the new crtc state if it's part of the state update.
1910 * Otherwise returns the current crtc state. It also makes sure that the
1911 * crtc mutex is locked.
1912 *
1913 * Returns: A valid crtc state pointer or NULL. It may also return a
1914 * pointer error, in particular -EDEADLK if locking needs to be rerun.
1915 */
1916static struct drm_crtc_state *
1917vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
1918{
1919	struct drm_crtc_state *crtc_state;
1920
1921	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1922	if (crtc_state) {
1923		lockdep_assert_held(&crtc->mutex.mutex.base);
1924	} else {
1925		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
1926
1927		if (ret != 0 && ret != -EALREADY)
1928			return ERR_PTR(ret);
1929
1930		crtc_state = crtc->state;
1931	}
1932
1933	return crtc_state;
1934}
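/*
 * Note on the -EDEADLK contract: callers inside the atomic check phase
 * simply propagate it, e.g.
 *
 *	crtc_state = vmw_crtc_state_and_lock(state, crtc);
 *	if (IS_ERR(crtc_state))
 *		return PTR_ERR(crtc_state);
 *
 * as vmw_kms_check_implicit() does below; the atomic core then drops the
 * held modeset locks and reruns the whole check sequence.
 */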
1935
1936/**
1937 * vmw_kms_check_implicit - Verify that all implicit display units scan out
1938 * from the same fb after the new state is committed.
1939 * @dev: The drm_device.
1940 * @state: The new state to be checked.
1941 *
1942 * Returns:
1943 *   Zero on success,
1944 *   -EINVAL on invalid state,
1945 *   -EDEADLK if modeset locking needs to be rerun.
1946 */
1947static int vmw_kms_check_implicit(struct drm_device *dev,
1948				  struct drm_atomic_state *state)
1949{
1950	struct drm_framebuffer *implicit_fb = NULL;
1951	struct drm_crtc *crtc;
1952	struct drm_crtc_state *crtc_state;
1953	struct drm_plane_state *plane_state;
1954
1955	drm_for_each_crtc(crtc, dev) {
1956		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1957
1958		if (!du->is_implicit)
1959			continue;
1960
1961		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1962		if (IS_ERR(crtc_state))
1963			return PTR_ERR(crtc_state);
1964
1965		if (!crtc_state || !crtc_state->enable)
1966			continue;
1967
1968		/*
1969		 * Can't move primary planes across crtcs, so this is OK.
1970		 * It also means we don't need to take the plane mutex.
1971		 */
1972		plane_state = du->primary.state;
1973		if (plane_state->crtc != crtc)
1974			continue;
1975
1976		if (!implicit_fb)
1977			implicit_fb = plane_state->fb;
1978		else if (implicit_fb != plane_state->fb)
1979			return -EINVAL;
1980	}
1981
1982	return 0;
1983}
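/*
 * Example (editor's illustration): with two implicit display units cloned
 * from the same buffer, both primary planes reference one framebuffer and
 * the loop above sees implicit_fb == plane_state->fb on every crtc.  If a
 * commit flips only one of them to a different framebuffer, the second
 * comparison fails and the state is rejected with -EINVAL.
 */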
1984
1985/**
1986 * vmw_kms_check_topology - Validates topology in drm_atomic_state
1987 * @dev: DRM device
1988 * @state: the driver state object
1989 *
1990 * Returns:
1991 * 0 on success otherwise negative error code
1992 */
1993static int vmw_kms_check_topology(struct drm_device *dev,
1994				  struct drm_atomic_state *state)
1995{
1996	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1997	struct drm_rect *rects;
1998	struct drm_crtc *crtc;
1999	uint32_t i;
2000	int ret = 0;
2001
2002	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
2003			GFP_KERNEL);
2004	if (!rects)
2005		return -ENOMEM;
2006
2007	drm_for_each_crtc(crtc, dev) {
2008		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
2009		struct drm_crtc_state *crtc_state;
2010
2011		i = drm_crtc_index(crtc);
2012
2013		crtc_state = vmw_crtc_state_and_lock(state, crtc);
2014		if (IS_ERR(crtc_state)) {
2015			ret = PTR_ERR(crtc_state);
2016			goto clean;
2017		}
2018
2019		if (!crtc_state)
2020			continue;
2021
2022		if (crtc_state->enable) {
2023			rects[i].x1 = du->gui_x;
2024			rects[i].y1 = du->gui_y;
2025			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
2026			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
2027		} else {
2028			rects[i].x1 = 0;
2029			rects[i].y1 = 0;
2030			rects[i].x2 = 0;
2031			rects[i].y2 = 0;
2032		}
2033	}
2034
2035	/* Determine change to topology due to new atomic state */
2036	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
2037				      new_crtc_state, i) {
2038		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
2039		struct drm_connector *connector;
2040		struct drm_connector_state *conn_state;
2041		struct vmw_connector_state *vmw_conn_state;
2042
2043		if (!du->pref_active && new_crtc_state->enable) {
2044			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
2045			ret = -EINVAL;
2046			goto clean;
2047		}
2048
2049		/*
2050		 * For vmwgfx each crtc has only one connector attached and it
 2051		 * is not changed, so we don't really need to check the
2052		 * crtc->connector_mask and iterate over it.
2053		 */
2054		connector = &du->connector;
2055		conn_state = drm_atomic_get_connector_state(state, connector);
2056		if (IS_ERR(conn_state)) {
2057			ret = PTR_ERR(conn_state);
2058			goto clean;
2059		}
2060
2061		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
2062		vmw_conn_state->gui_x = du->gui_x;
2063		vmw_conn_state->gui_y = du->gui_y;
2064	}
2065
2066	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
2067					   rects);
2068
2069clean:
2070	kfree(rects);
2071	return ret;
2072}
2073
2074/**
 2075 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
2076 *
2077 * @dev: DRM device
2078 * @state: the driver state object
2079 *
2080 * This is a simple wrapper around drm_atomic_helper_check_modeset() for
2081 * us to assign a value to mode->crtc_clock so that
2082 * drm_calc_timestamping_constants() won't throw an error message
2083 *
2084 * Returns:
2085 * Zero for success or -errno
2086 */
2087static int
2088vmw_kms_atomic_check_modeset(struct drm_device *dev,
2089			     struct drm_atomic_state *state)
2090{
2091	struct drm_crtc *crtc;
2092	struct drm_crtc_state *crtc_state;
2093	bool need_modeset = false;
2094	int i, ret;
2095
2096	ret = drm_atomic_helper_check(dev, state);
2097	if (ret)
2098		return ret;
2099
2100	ret = vmw_kms_check_implicit(dev, state);
2101	if (ret) {
2102		VMW_DEBUG_KMS("Invalid implicit state\n");
2103		return ret;
2104	}
2105
2106	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
2107		if (drm_atomic_crtc_needs_modeset(crtc_state))
2108			need_modeset = true;
2109	}
2110
2111	if (need_modeset)
2112		return vmw_kms_check_topology(dev, state);
2113
2114	return ret;
2115}
2116
2117static const struct drm_mode_config_funcs vmw_kms_funcs = {
2118	.fb_create = vmw_kms_fb_create,
2119	.atomic_check = vmw_kms_atomic_check_modeset,
2120	.atomic_commit = drm_atomic_helper_commit,
2121};
2122
2123static int vmw_kms_generic_present(struct vmw_private *dev_priv,
2124				   struct drm_file *file_priv,
2125				   struct vmw_framebuffer *vfb,
2126				   struct vmw_surface *surface,
2127				   uint32_t sid,
2128				   int32_t destX, int32_t destY,
2129				   struct drm_vmw_rect *clips,
2130				   uint32_t num_clips)
2131{
2132	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
2133					    &surface->res, destX, destY,
2134					    num_clips, 1, NULL, NULL);
2135}
2136
2137
2138int vmw_kms_present(struct vmw_private *dev_priv,
2139		    struct drm_file *file_priv,
2140		    struct vmw_framebuffer *vfb,
2141		    struct vmw_surface *surface,
2142		    uint32_t sid,
2143		    int32_t destX, int32_t destY,
2144		    struct drm_vmw_rect *clips,
2145		    uint32_t num_clips)
2146{
2147	int ret;
2148
2149	switch (dev_priv->active_display_unit) {
2150	case vmw_du_screen_target:
2151		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
2152						 &surface->res, destX, destY,
2153						 num_clips, 1, NULL, NULL);
2154		break;
2155	case vmw_du_screen_object:
2156		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
2157					      sid, destX, destY, clips,
2158					      num_clips);
2159		break;
2160	default:
2161		WARN_ONCE(true,
2162			  "Present called with invalid display system.\n");
2163		ret = -ENOSYS;
2164		break;
2165	}
2166	if (ret)
2167		return ret;
2168
2169	vmw_cmd_flush(dev_priv, false);
2170
2171	return 0;
2172}
2173
2174static void
2175vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
2176{
2177	if (dev_priv->hotplug_mode_update_property)
2178		return;
2179
2180	dev_priv->hotplug_mode_update_property =
2181		drm_property_create_range(&dev_priv->drm,
2182					  DRM_MODE_PROP_IMMUTABLE,
2183					  "hotplug_mode_update", 0, 1);
2184}
2185
2186int vmw_kms_init(struct vmw_private *dev_priv)
2187{
2188	struct drm_device *dev = &dev_priv->drm;
2189	int ret;
2190	static const char *display_unit_names[] = {
2191		"Invalid",
2192		"Legacy",
2193		"Screen Object",
2194		"Screen Target",
2195		"Invalid (max)"
2196	};
2197
2198	drm_mode_config_init(dev);
2199	dev->mode_config.funcs = &vmw_kms_funcs;
2200	dev->mode_config.min_width = 1;
2201	dev->mode_config.min_height = 1;
2202	dev->mode_config.max_width = dev_priv->texture_max_width;
2203	dev->mode_config.max_height = dev_priv->texture_max_height;
2204	dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
2205	dev->mode_config.prefer_shadow_fbdev = !dev_priv->has_mob;
2206
2207	drm_mode_create_suggested_offset_properties(dev);
2208	vmw_kms_create_hotplug_mode_update_property(dev_priv);
2209
2210	ret = vmw_kms_stdu_init_display(dev_priv);
2211	if (ret) {
2212		ret = vmw_kms_sou_init_display(dev_priv);
2213		if (ret) /* Fallback */
2214			ret = vmw_kms_ldu_init_display(dev_priv);
2215	}
2216	BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
2217	drm_info(&dev_priv->drm, "%s display unit initialized\n",
2218		 display_unit_names[dev_priv->active_display_unit]);
2219
2220	return ret;
2221}
2222
2223int vmw_kms_close(struct vmw_private *dev_priv)
2224{
2225	int ret = 0;
2226
2227	/*
 2228	 * The docs say we should take the lock before calling this function,
 2229	 * but since it destroys encoders and our destructor calls
 2230	 * drm_encoder_cleanup(), which takes the lock, we would deadlock.
2231	 */
2232	drm_mode_config_cleanup(&dev_priv->drm);
2233	if (dev_priv->active_display_unit == vmw_du_legacy)
2234		ret = vmw_kms_ldu_close_display(dev_priv);
2235
2236	return ret;
2237}
2238
2239int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
2240				struct drm_file *file_priv)
2241{
2242	struct drm_vmw_cursor_bypass_arg *arg = data;
2243	struct vmw_display_unit *du;
2244	struct drm_crtc *crtc;
2245	int ret = 0;
2246
2247	mutex_lock(&dev->mode_config.mutex);
2248	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
2249
2250		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2251			du = vmw_crtc_to_du(crtc);
2252			du->hotspot_x = arg->xhot;
2253			du->hotspot_y = arg->yhot;
2254		}
2255
2256		mutex_unlock(&dev->mode_config.mutex);
2257		return 0;
2258	}
2259
2260	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
2261	if (!crtc) {
2262		ret = -ENOENT;
2263		goto out;
2264	}
2265
2266	du = vmw_crtc_to_du(crtc);
2267
2268	du->hotspot_x = arg->xhot;
2269	du->hotspot_y = arg->yhot;
2270
2271out:
2272	mutex_unlock(&dev->mode_config.mutex);
2273
2274	return ret;
2275}
2276
2277int vmw_kms_write_svga(struct vmw_private *vmw_priv,
2278			unsigned width, unsigned height, unsigned pitch,
2279			unsigned bpp, unsigned depth)
2280{
2281	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
2282		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
2283	else if (vmw_fifo_have_pitchlock(vmw_priv))
2284		vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
2285	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
2286	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
2287	if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
2288		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
2289
2290	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
2291		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
2292			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
2293		return -EINVAL;
2294	}
2295
2296	return 0;
2297}
2298
2299bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
2300				uint32_t pitch,
2301				uint32_t height)
2302{
2303	return ((u64) pitch * (u64) height) < (u64)
2304		((dev_priv->active_display_unit == vmw_du_screen_target) ?
2305		 dev_priv->max_primary_mem : dev_priv->vram_size);
2306}
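/*
 * Worked example (editor's illustration): vmw_du_connector_fill_modes()
 * below calls this with pitch = hdisplay * assumed_bpp.  For 2560x1600 at
 * 4 bytes per pixel that is
 *
 *	pitch * height = (2560 * 4) * 1600 = 16384000 bytes (~15.6 MiB)
 *
 * so the mode survives only if that figure is below vram_size (or below
 * max_primary_mem on screen targets).
 */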
2307
2308/**
2309 * vmw_du_update_layout - Update the display unit with topology from resolution
2310 * plugin and generate DRM uevent
2311 * @dev_priv: device private
2312 * @num_rects: number of drm_rect in rects
 2313 * @rects: topology to update
2314 */
2315static int vmw_du_update_layout(struct vmw_private *dev_priv,
2316				unsigned int num_rects, struct drm_rect *rects)
2317{
2318	struct drm_device *dev = &dev_priv->drm;
2319	struct vmw_display_unit *du;
2320	struct drm_connector *con;
2321	struct drm_connector_list_iter conn_iter;
2322	struct drm_modeset_acquire_ctx ctx;
2323	struct drm_crtc *crtc;
2324	int ret;
2325
2326	/* Currently gui_x/y is protected with the crtc mutex */
2327	mutex_lock(&dev->mode_config.mutex);
2328	drm_modeset_acquire_init(&ctx, 0);
2329retry:
2330	drm_for_each_crtc(crtc, dev) {
2331		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2332		if (ret < 0) {
2333			if (ret == -EDEADLK) {
2334				drm_modeset_backoff(&ctx);
2335				goto retry;
 2336			}
2337			goto out_fini;
2338		}
2339	}
2340
2341	drm_connector_list_iter_begin(dev, &conn_iter);
2342	drm_for_each_connector_iter(con, &conn_iter) {
2343		du = vmw_connector_to_du(con);
2344		if (num_rects > du->unit) {
2345			du->pref_width = drm_rect_width(&rects[du->unit]);
2346			du->pref_height = drm_rect_height(&rects[du->unit]);
2347			du->pref_active = true;
2348			du->gui_x = rects[du->unit].x1;
2349			du->gui_y = rects[du->unit].y1;
2350		} else {
2351			du->pref_width  = VMWGFX_MIN_INITIAL_WIDTH;
2352			du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
2353			du->pref_active = false;
2354			du->gui_x = 0;
2355			du->gui_y = 0;
2356		}
2357	}
2358	drm_connector_list_iter_end(&conn_iter);
2359
2360	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2361		du = vmw_connector_to_du(con);
2362		if (num_rects > du->unit) {
2363			drm_object_property_set_value
2364			  (&con->base, dev->mode_config.suggested_x_property,
2365			   du->gui_x);
2366			drm_object_property_set_value
2367			  (&con->base, dev->mode_config.suggested_y_property,
2368			   du->gui_y);
2369		} else {
2370			drm_object_property_set_value
2371			  (&con->base, dev->mode_config.suggested_x_property,
2372			   0);
2373			drm_object_property_set_value
2374			  (&con->base, dev->mode_config.suggested_y_property,
2375			   0);
2376		}
2377		con->status = vmw_du_connector_detect(con, true);
2378	}
2379out_fini:
2380	drm_modeset_drop_locks(&ctx);
2381	drm_modeset_acquire_fini(&ctx);
2382	mutex_unlock(&dev->mode_config.mutex);
2383
2384	drm_sysfs_hotplug_event(dev);
2385
2386	return 0;
2387}
2388
2389int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2390			  u16 *r, u16 *g, u16 *b,
2391			  uint32_t size,
2392			  struct drm_modeset_acquire_ctx *ctx)
2393{
2394	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2395	int i;
2396
2397	for (i = 0; i < size; i++) {
2398		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2399			  r[i], g[i], b[i]);
2400		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2401		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2402		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2403	}
2404
2405	return 0;
2406}
2407
2408int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2409{
2410	return 0;
2411}
2412
2413enum drm_connector_status
2414vmw_du_connector_detect(struct drm_connector *connector, bool force)
2415{
2416	uint32_t num_displays;
2417	struct drm_device *dev = connector->dev;
2418	struct vmw_private *dev_priv = vmw_priv(dev);
2419	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2420
2421	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2422
2423	return ((vmw_connector_to_du(connector)->unit < num_displays &&
2424		 du->pref_active) ?
2425		connector_status_connected : connector_status_disconnected);
2426}
2427
2428static struct drm_display_mode vmw_kms_connector_builtin[] = {
2429	/* 640x480@60Hz */
2430	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
2431		   752, 800, 0, 480, 489, 492, 525, 0,
2432		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2433	/* 800x600@60Hz */
2434	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
2435		   968, 1056, 0, 600, 601, 605, 628, 0,
2436		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2437	/* 1024x768@60Hz */
2438	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
2439		   1184, 1344, 0, 768, 771, 777, 806, 0,
2440		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2441	/* 1152x864@75Hz */
2442	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
2443		   1344, 1600, 0, 864, 865, 868, 900, 0,
2444		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2445	/* 1280x720@60Hz */
2446	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
2447		   1472, 1664, 0, 720, 723, 728, 748, 0,
2448		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2449	/* 1280x768@60Hz */
2450	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
2451		   1472, 1664, 0, 768, 771, 778, 798, 0,
2452		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2453	/* 1280x800@60Hz */
2454	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
2455		   1480, 1680, 0, 800, 803, 809, 831, 0,
2456		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2457	/* 1280x960@60Hz */
2458	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
2459		   1488, 1800, 0, 960, 961, 964, 1000, 0,
2460		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2461	/* 1280x1024@60Hz */
2462	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
2463		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
2464		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2465	/* 1360x768@60Hz */
2466	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
2467		   1536, 1792, 0, 768, 771, 777, 795, 0,
2468		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 2469	/* 1400x1050@60Hz */
2470	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
2471		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
2472		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2473	/* 1440x900@60Hz */
2474	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
2475		   1672, 1904, 0, 900, 903, 909, 934, 0,
2476		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2477	/* 1600x1200@60Hz */
2478	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
2479		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
2480		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2481	/* 1680x1050@60Hz */
2482	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
2483		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
2484		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2485	/* 1792x1344@60Hz */
2486	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2487		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
2488		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
 2489	/* 1856x1392@60Hz */
2490	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2491		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
2492		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2493	/* 1920x1080@60Hz */
2494	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
2495		   2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
2496		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2497	/* 1920x1200@60Hz */
2498	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2499		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
2500		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2501	/* 1920x1440@60Hz */
2502	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2503		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
2504		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2505	/* 2560x1440@60Hz */
2506	{ DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
2507		   2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
2508		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2509	/* 2560x1600@60Hz */
2510	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
2511		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
2512		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2513	/* 2880x1800@60Hz */
2514	{ DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
2515		   2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
2516		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2517	/* 3840x2160@60Hz */
2518	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
2519		   3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
2520		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2521	/* 3840x2400@60Hz */
2522	{ DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
2523		   3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
2524		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2525	/* Terminate */
2526	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
2527};
2528
2529/**
2530 * vmw_guess_mode_timing - Provide fake timings for a
2531 * 60Hz vrefresh mode.
2532 *
2533 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2534 * members filled in.
2535 */
2536void vmw_guess_mode_timing(struct drm_display_mode *mode)
2537{
2538	mode->hsync_start = mode->hdisplay + 50;
2539	mode->hsync_end = mode->hsync_start + 50;
2540	mode->htotal = mode->hsync_end + 50;
2541
2542	mode->vsync_start = mode->vdisplay + 50;
2543	mode->vsync_end = mode->vsync_start + 50;
2544	mode->vtotal = mode->vsync_end + 50;
2545
2546	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2547}
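/*
 * Worked example (editor's illustration): for a 1024x768 preferred mode
 * the fake timings come out as
 *
 *	htotal = 1024 + 150 = 1174
 *	vtotal =  768 + 150 =  918
 *	clock  = 1174 * 918 / 100 * 6 = 64662 kHz
 *
 * giving a refresh of 64662000 / (1174 * 918) ~= 60.0 Hz, matching the
 * 60Hz vrefresh promised above.
 */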
2548
2549
2550int vmw_du_connector_fill_modes(struct drm_connector *connector,
2551				uint32_t max_width, uint32_t max_height)
2552{
2553	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2554	struct drm_device *dev = connector->dev;
2555	struct vmw_private *dev_priv = vmw_priv(dev);
2556	struct drm_display_mode *mode = NULL;
2557	struct drm_display_mode *bmode;
2558	struct drm_display_mode prefmode = { DRM_MODE("preferred",
2559		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2560		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2561		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2562	};
2563	int i;
2564	u32 assumed_bpp = 4;
2565
2566	if (dev_priv->assume_16bpp)
2567		assumed_bpp = 2;
2568
2569	max_width  = min(max_width,  dev_priv->texture_max_width);
2570	max_height = min(max_height, dev_priv->texture_max_height);
2571
2572	/*
 2573	 * For STDU there is an extra per-mode limit, from the
 2574	 * SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
2575	 */
2576	if (dev_priv->active_display_unit == vmw_du_screen_target) {
2577		max_width  = min(max_width,  dev_priv->stdu_max_width);
2578		max_height = min(max_height, dev_priv->stdu_max_height);
2579	}
2580
2581	/* Add preferred mode */
2582	mode = drm_mode_duplicate(dev, &prefmode);
2583	if (!mode)
2584		return 0;
2585	mode->hdisplay = du->pref_width;
2586	mode->vdisplay = du->pref_height;
2587	vmw_guess_mode_timing(mode);
2588	drm_mode_set_name(mode);
2589
2590	if (vmw_kms_validate_mode_vram(dev_priv,
2591					mode->hdisplay * assumed_bpp,
2592					mode->vdisplay)) {
2593		drm_mode_probed_add(connector, mode);
2594	} else {
2595		drm_mode_destroy(dev, mode);
2596		mode = NULL;
2597	}
2598
2599	if (du->pref_mode) {
2600		list_del_init(&du->pref_mode->head);
2601		drm_mode_destroy(dev, du->pref_mode);
2602	}
2603
2604	/* mode might be null here, this is intended */
2605	du->pref_mode = mode;
2606
2607	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
2608		bmode = &vmw_kms_connector_builtin[i];
2609		if (bmode->hdisplay > max_width ||
2610		    bmode->vdisplay > max_height)
2611			continue;
2612
2613		if (!vmw_kms_validate_mode_vram(dev_priv,
2614						bmode->hdisplay * assumed_bpp,
2615						bmode->vdisplay))
2616			continue;
2617
2618		mode = drm_mode_duplicate(dev, bmode);
2619		if (!mode)
2620			return 0;
2621
2622		drm_mode_probed_add(connector, mode);
2623	}
2624
2625	drm_connector_list_update(connector);
 2626	/* Move the preferred mode first to help apps pick the right mode. */
2627	drm_mode_sort(&connector->modes);
2628
2629	return 1;
2630}
2631
2632/**
2633 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2634 * @dev: drm device for the ioctl
2635 * @data: data pointer for the ioctl
2636 * @file_priv: drm file for the ioctl call
2637 *
2638 * Update preferred topology of display unit as per ioctl request. The topology
2639 * is expressed as array of drm_vmw_rect.
2640 * e.g.
2641 * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2642 *
2643 * NOTE:
 2644 * The x and y offset (upper left) in drm_vmw_rect cannot be less than 0. Besides
 2645 * the device limit on topology, x + w and y + h (lower right) cannot be greater
 2646 * than INT_MAX. So a topology beyond these limits will return with an error.
2647 *
2648 * Returns:
2649 * Zero on success, negative errno on failure.
2650 */
2651int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2652				struct drm_file *file_priv)
2653{
2654	struct vmw_private *dev_priv = vmw_priv(dev);
2655	struct drm_mode_config *mode_config = &dev->mode_config;
2656	struct drm_vmw_update_layout_arg *arg =
2657		(struct drm_vmw_update_layout_arg *)data;
2658	void __user *user_rects;
2659	struct drm_vmw_rect *rects;
2660	struct drm_rect *drm_rects;
2661	unsigned rects_size;
2662	int ret, i;
2663
2664	if (!arg->num_outputs) {
2665		struct drm_rect def_rect = {0, 0,
2666					    VMWGFX_MIN_INITIAL_WIDTH,
2667					    VMWGFX_MIN_INITIAL_HEIGHT};
2668		vmw_du_update_layout(dev_priv, 1, &def_rect);
2669		return 0;
2670	}
2671
2672	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2673	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2674			GFP_KERNEL);
2675	if (unlikely(!rects))
2676		return -ENOMEM;
2677
2678	user_rects = (void __user *)(unsigned long)arg->rects;
2679	ret = copy_from_user(rects, user_rects, rects_size);
2680	if (unlikely(ret != 0)) {
2681		DRM_ERROR("Failed to get rects.\n");
2682		ret = -EFAULT;
2683		goto out_free;
2684	}
2685
2686	drm_rects = (struct drm_rect *)rects;
2687
2688	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2689	for (i = 0; i < arg->num_outputs; i++) {
2690		struct drm_vmw_rect curr_rect;
2691
2692		/* Verify user-space rects for overflow, as the kernel uses drm_rect */
2693		if ((rects[i].x + rects[i].w > INT_MAX) ||
2694		    (rects[i].y + rects[i].h > INT_MAX)) {
2695			ret = -ERANGE;
2696			goto out_free;
2697		}
2698
2699		curr_rect = rects[i];
2700		drm_rects[i].x1 = curr_rect.x;
2701		drm_rects[i].y1 = curr_rect.y;
2702		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2703		drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2704
2705		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
2706			      drm_rects[i].x1, drm_rects[i].y1,
2707			      drm_rects[i].x2, drm_rects[i].y2);
2708
2709		/*
2710		 * Currently this check limits the topology to within
2711		 * mode_config->max (which actually is the maximum texture
2712		 * size supported by the virtual device). This limit is here
2713		 * to address window managers that create a single big
2714		 * framebuffer for the whole topology.
2715		 */
2716		if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
2717		    drm_rects[i].x2 > mode_config->max_width ||
2718		    drm_rects[i].y2 > mode_config->max_height) {
2719			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2720				      drm_rects[i].x1, drm_rects[i].y1,
2721				      drm_rects[i].x2, drm_rects[i].y2);
2722			ret = -EINVAL;
2723			goto out_free;
2724		}
2725	}
2726
2727	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2728
2729	if (ret == 0)
2730		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2731
2732out_free:
2733	kfree(rects);
2734	return ret;
2735}
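/*
 * Example: a hedged user-space sketch of the side-by-side topology from the
 * kernel-doc above, using libdrm's drmCommandWrite() (the helper name
 * set_layout() is illustrative; error handling elided):
 *
 *	#include <stdint.h>
 *	#include <xf86drm.h>
 *	#include <drm/vmwgfx_drm.h>
 *
 *	int set_layout(int fd)
 *	{
 *		struct drm_vmw_rect rects[2] = {
 *			{ .x = 0,   .y = 0, .w = 640, .h = 480 },
 *			{ .x = 640, .y = 0, .w = 800, .h = 600 },
 *		};
 *		struct drm_vmw_update_layout_arg arg = {
 *			.num_outputs = 2,
 *			.rects = (uintptr_t)rects,
 *		};
 *
 *		return drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT,
 *				       &arg, sizeof(arg));
 *	}
 */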
2736
2737/**
2738 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2739 * on a set of cliprects and a set of display units.
2740 *
2741 * @dev_priv: Pointer to a device private structure.
2742 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2743 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
2744 * Cliprects are given in framebuffer coordinates.
2745 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2746 * be NULL. Cliprects are given in source coordinates.
2747 * @dest_x: X coordinate offset for the crtc / destination clip rects.
2748 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2749 * @num_clips: Number of cliprects in the @clips or @vclips array.
2750 * @increment: Integer with which to increment the clip counter when looping.
2751 * Used to skip a predetermined number of clip rects.
2752 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2753 */
2754int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2755			 struct vmw_framebuffer *framebuffer,
2756			 const struct drm_clip_rect *clips,
2757			 const struct drm_vmw_rect *vclips,
2758			 s32 dest_x, s32 dest_y,
2759			 int num_clips,
2760			 int increment,
2761			 struct vmw_kms_dirty *dirty)
2762{
2763	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2764	struct drm_crtc *crtc;
2765	u32 num_units = 0;
2766	u32 i, k;
2767
2768	dirty->dev_priv = dev_priv;
2769
2770	/* If crtc is passed, no need to iterate over other display units */
2771	if (dirty->crtc) {
2772		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2773	} else {
2774		list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
2775				    head) {
2776			struct drm_plane *plane = crtc->primary;
2777
2778			if (plane->state->fb == &framebuffer->base)
2779				units[num_units++] = vmw_crtc_to_du(crtc);
2780		}
2781	}
2782
2783	for (k = 0; k < num_units; k++) {
2784		struct vmw_display_unit *unit = units[k];
2785		s32 crtc_x = unit->crtc.x;
2786		s32 crtc_y = unit->crtc.y;
2787		s32 crtc_width = unit->crtc.mode.hdisplay;
2788		s32 crtc_height = unit->crtc.mode.vdisplay;
2789		const struct drm_clip_rect *clips_ptr = clips;
2790		const struct drm_vmw_rect *vclips_ptr = vclips;
2791
2792		dirty->unit = unit;
2793		if (dirty->fifo_reserve_size > 0) {
2794			dirty->cmd = VMW_CMD_RESERVE(dev_priv,
2795						      dirty->fifo_reserve_size);
2796			if (!dirty->cmd)
2797				return -ENOMEM;
2798
2799			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2800		}
2801		dirty->num_hits = 0;
2802		for (i = 0; i < num_clips; i++, clips_ptr += increment,
2803		       vclips_ptr += increment) {
2804			s32 clip_left;
2805			s32 clip_top;
2806
2807			/*
2808			 * Select the clip array type. Note that the integer
2809			 * type in @clips is unsigned short, whereas in
2810			 * @vclips it's 32-bit.
2811			 */
2812			if (clips) {
2813				dirty->fb_x = (s32) clips_ptr->x1;
2814				dirty->fb_y = (s32) clips_ptr->y1;
2815				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2816					crtc_x;
2817				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2818					crtc_y;
2819			} else {
2820				dirty->fb_x = vclips_ptr->x;
2821				dirty->fb_y = vclips_ptr->y;
2822				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2823					dest_x - crtc_x;
2824				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2825					dest_y - crtc_y;
2826			}
2827
2828			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2829			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2830
2831			/* Skip this clip if it's outside the crtc region */
2832			if (dirty->unit_x1 >= crtc_width ||
2833			    dirty->unit_y1 >= crtc_height ||
2834			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2835				continue;
2836
2837			/* Clip right and bottom to crtc limits */
2838			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2839					       crtc_width);
2840			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2841					       crtc_height);
2842
2843			/* Clip left and top to crtc limits */
2844			clip_left = min_t(s32, dirty->unit_x1, 0);
2845			clip_top = min_t(s32, dirty->unit_y1, 0);
2846			dirty->unit_x1 -= clip_left;
2847			dirty->unit_y1 -= clip_top;
2848			dirty->fb_x -= clip_left;
2849			dirty->fb_y -= clip_top;
2850
2851			dirty->clip(dirty);
2852		}
2853
2854		dirty->fifo_commit(dirty);
2855	}
2856
2857	return 0;
2858}
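/*
 * Example: callers typically embed struct vmw_kms_dirty in a larger per-call
 * closure and set the two callbacks used above. A minimal sketch, assuming a
 * hypothetical fixed-size command struct my_cmd (the real closures live in
 * the display-unit backends, e.g. the screen-object code):
 *
 *	static void my_clip(struct vmw_kms_dirty *dirty)
 *	{
 *		struct my_cmd *cmd = dirty->cmd;
 *
 *		// Fill slot dirty->num_hits from the already clipped
 *		// dirty->unit_x1/y1/x2/y2 and dirty->fb_x/fb_y.
 *		cmd[dirty->num_hits].left = dirty->unit_x1;
 *		cmd[dirty->num_hits].top = dirty->unit_y1;
 *		dirty->num_hits++;
 *	}
 *
 *	static void my_commit(struct vmw_kms_dirty *dirty)
 *	{
 *		vmw_cmd_commit(dirty->dev_priv,
 *			       dirty->num_hits * sizeof(struct my_cmd));
 *	}
 */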
2859
2860/**
2861 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
2862 * cleanup and fencing
2863 * @dev_priv: Pointer to the device-private struct
2864 * @file_priv: Pointer identifying the client when user-space fencing is used
2865 * @ctx: Pointer to the validation context
2866 * @out_fence: If non-NULL, returned refcounted fence-pointer
2867 * @user_fence_rep: If non-NULL, pointer to user-space address area
2868 * in which to copy user-space fence info
2869 */
2870void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2871				      struct drm_file *file_priv,
2872				      struct vmw_validation_context *ctx,
2873				      struct vmw_fence_obj **out_fence,
2874				      struct drm_vmw_fence_rep __user *
2875				      user_fence_rep)
2876{
2877	struct vmw_fence_obj *fence = NULL;
2878	uint32_t handle = 0;
2879	int ret = 0;
2880
2881	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2882	    out_fence)
2883		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2884						 file_priv ? &handle : NULL);
2885	vmw_validation_done(ctx, fence);
2886	if (file_priv)
2887		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2888					    ret, user_fence_rep, fence,
2889					    handle, -1);
2890	if (out_fence)
2891		*out_fence = fence;
2892	else
2893		vmw_fence_obj_unreference(&fence);
2894}
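/*
 * Example: a kernel-internal caller that wants the fence back passes NULL
 * for @file_priv and @user_fence_rep, mirroring the call in
 * vmw_du_helper_plane_update() below (a minimal sketch):
 *
 *	struct vmw_fence_obj *fence = NULL;
 *
 *	vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx,
 *					 &fence, NULL);
 *	if (fence)
 *		vmw_fence_obj_unreference(&fence);	// done with it
 */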
2895
2896/**
2897 * vmw_kms_update_proxy - Helper function to update a proxy surface from
2898 * its backing MOB.
2899 *
2900 * @res: Pointer to the surface resource
2901 * @clips: Clip rects in framebuffer (surface) space.
2902 * @num_clips: Number of clips in @clips.
2903 * @increment: Integer with which to increment the clip counter when looping.
2904 * Used to skip a predetermined number of clip rects.
2905 *
2906 * This function makes sure the proxy surface is updated from its backing MOB
2907 * using the region given by @clips. The surface resource @res and its backing
2908 * MOB need to be reserved and validated on call.
2909 */
2910int vmw_kms_update_proxy(struct vmw_resource *res,
2911			 const struct drm_clip_rect *clips,
2912			 unsigned int num_clips,
2913			 int increment)
2914{
2915	struct vmw_private *dev_priv = res->dev_priv;
2916	struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
2917	struct {
2918		SVGA3dCmdHeader header;
2919		SVGA3dCmdUpdateGBImage body;
2920	} *cmd;
2921	SVGA3dBox *box;
2922	size_t copy_size = 0;
2923	int i;
2924
2925	if (!clips)
2926		return 0;
2927
2928	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
2929	if (!cmd)
2930		return -ENOMEM;
2931
2932	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
2933		box = &cmd->body.box;
2934
2935		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2936		cmd->header.size = sizeof(cmd->body);
2937		cmd->body.image.sid = res->id;
2938		cmd->body.image.face = 0;
2939		cmd->body.image.mipmap = 0;
2940
2941		if (clips->x1 > size->width || clips->x2 > size->width ||
2942		    clips->y1 > size->height || clips->y2 > size->height) {
2943			DRM_ERROR("Invalid clips outside of framebuffer.\n");
2944			return -EINVAL;
2945		}
2946
2947		box->x = clips->x1;
2948		box->y = clips->y1;
2949		box->z = 0;
2950		box->w = clips->x2 - clips->x1;
2951		box->h = clips->y2 - clips->y1;
2952		box->d = 1;
2953
2954		copy_size += sizeof(*cmd);
2955	}
2956
2957	vmw_cmd_commit(dev_priv, copy_size);
2958
2959	return 0;
2960}
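/*
 * Example: a single full-surface update, assuming @res is already reserved
 * and validated as required above (a minimal sketch):
 *
 *	struct drm_vmw_size *size =
 *		&vmw_res_to_srf(res)->metadata.base_size;
 *	struct drm_clip_rect clip = {
 *		.x1 = 0, .y1 = 0,
 *		.x2 = size->width, .y2 = size->height,
 *	};
 *
 *	ret = vmw_kms_update_proxy(res, &clip, 1, 1);
 */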
2961
2962/**
2963 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2964 * property.
2965 *
2966 * @dev_priv: Pointer to a device private struct.
2967 *
2968 * Sets up the implicit placement property unless it's already set up.
2969 */
2970void
2971vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
2972{
2973	if (dev_priv->implicit_placement_property)
2974		return;
2975
2976	dev_priv->implicit_placement_property =
2977		drm_property_create_range(&dev_priv->drm,
2978					  DRM_MODE_PROP_IMMUTABLE,
2979					  "implicit_placement", 0, 1);
2980}
2981
2982/**
2983 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2984 *
2985 * @dev: Pointer to the drm device
2986 * Return: 0 on success. Negative error code on failure.
2987 */
2988int vmw_kms_suspend(struct drm_device *dev)
2989{
2990	struct vmw_private *dev_priv = vmw_priv(dev);
2991
2992	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2993	if (IS_ERR(dev_priv->suspend_state)) {
2994		int ret = PTR_ERR(dev_priv->suspend_state);
2995
2996		DRM_ERROR("Failed kms suspend: %d\n", ret);
2997		dev_priv->suspend_state = NULL;
2998
2999		return ret;
3000	}
3001
3002	return 0;
3003}
3004
3005
3006/**
3007 * vmw_kms_resume - Re-enable modesetting and restore state
3008 *
3009 * @dev: Pointer to the drm device
3010 * Return: 0 on success. Negative error code on failure.
3011 *
3012 * State is resumed from a previous vmw_kms_suspend(). It's illegal
3013 * to call this function without a previous vmw_kms_suspend().
3014 */
3015int vmw_kms_resume(struct drm_device *dev)
3016{
3017	struct vmw_private *dev_priv = vmw_priv(dev);
3018	int ret;
3019
3020	if (WARN_ON(!dev_priv->suspend_state))
3021		return 0;
3022
3023	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
3024	dev_priv->suspend_state = NULL;
3025
3026	return ret;
3027}
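/*
 * Example: vmw_kms_suspend() and vmw_kms_resume() pair up in the driver's
 * PM callbacks, roughly as below (a sketch of the calling convention only;
 * my_pm_freeze()/my_pm_restore() are illustrative names and assume the
 * drm_device is stored as drvdata):
 *
 *	static int my_pm_freeze(struct device *kdev)
 *	{
 *		struct drm_device *dev = dev_get_drvdata(kdev);
 *
 *		return vmw_kms_suspend(dev);
 *	}
 *
 *	static int my_pm_restore(struct device *kdev)
 *	{
 *		struct drm_device *dev = dev_get_drvdata(kdev);
 *
 *		return vmw_kms_resume(dev);
 *	}
 */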
3028
3029/**
3030 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
3031 *
3032 * @dev: Pointer to the drm device
3033 */
3034void vmw_kms_lost_device(struct drm_device *dev)
3035{
3036	drm_atomic_helper_shutdown(dev);
3037}
3038
3039/**
3040 * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
3041 * @update: The closure structure.
3042 *
3043 * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
3044 * update on display unit.
3045 *
3046 * Return: 0 on success or a negative error code on failure.
3047 */
3048int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
3049{
3050	struct drm_plane_state *state = update->plane->state;
3051	struct drm_plane_state *old_state = update->old_state;
3052	struct drm_atomic_helper_damage_iter iter;
3053	struct drm_rect clip;
3054	struct drm_rect bb;
3055	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
3056	uint32_t reserved_size = 0;
3057	uint32_t submit_size = 0;
3058	uint32_t curr_size = 0;
3059	uint32_t num_hits = 0;
3060	void *cmd_start;
3061	char *cmd_next;
3062	int ret;
3063
3064	/*
3065	 * Iterate in advance to check whether a plane update is really needed,
3066	 * and to count the clips actually within the plane src for fifo allocation.
3067	 */
3068	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
3069	drm_atomic_for_each_plane_damage(&iter, &clip)
3070		num_hits++;
3071
3072	if (num_hits == 0)
3073		return 0;
3074
3075	if (update->vfb->bo) {
3076		struct vmw_framebuffer_bo *vfbbo =
3077			container_of(update->vfb, typeof(*vfbbo), base);
3078
3079		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
3080					    update->cpu_blit);
3081	} else {
3082		struct vmw_framebuffer_surface *vfbs =
3083			container_of(update->vfb, typeof(*vfbs), base);
3084
3085		ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
3086						  0, VMW_RES_DIRTY_NONE, NULL,
3087						  NULL);
3088	}
3089
3090	if (ret)
3091		return ret;
3092
3093	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
3094	if (ret)
3095		goto out_unref;
3096
3097	reserved_size = update->calc_fifo_size(update, num_hits);
3098	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
3099	if (!cmd_start) {
3100		ret = -ENOMEM;
3101		goto out_revert;
3102	}
3103
3104	cmd_next = cmd_start;
3105
3106	if (update->post_prepare) {
3107		curr_size = update->post_prepare(update, cmd_next);
3108		cmd_next += curr_size;
3109		submit_size += curr_size;
3110	}
3111
3112	if (update->pre_clip) {
3113		curr_size = update->pre_clip(update, cmd_next, num_hits);
3114		cmd_next += curr_size;
3115		submit_size += curr_size;
3116	}
3117
3118	bb.x1 = INT_MAX;
3119	bb.y1 = INT_MAX;
3120	bb.x2 = INT_MIN;
3121	bb.y2 = INT_MIN;
3122
3123	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
3124	drm_atomic_for_each_plane_damage(&iter, &clip) {
3125		uint32_t fb_x = clip.x1;
3126		uint32_t fb_y = clip.y1;
3127
3128		vmw_du_translate_to_crtc(state, &clip);
3129		if (update->clip) {
3130			curr_size = update->clip(update, cmd_next, &clip, fb_x,
3131						 fb_y);
3132			cmd_next += curr_size;
3133			submit_size += curr_size;
3134		}
3135		bb.x1 = min_t(int, bb.x1, clip.x1);
3136		bb.y1 = min_t(int, bb.y1, clip.y1);
3137		bb.x2 = max_t(int, bb.x2, clip.x2);
3138		bb.y2 = max_t(int, bb.y2, clip.y2);
3139	}
3140
3141	curr_size = update->post_clip(update, cmd_next, &bb);
3142	submit_size += curr_size;
3143
3144	if (reserved_size < submit_size)
3145		submit_size = 0;
3146
3147	vmw_cmd_commit(update->dev_priv, submit_size);
3148
3149	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
3150					 update->out_fence, NULL);
3151	return ret;
3152
3153out_revert:
3154	vmw_validation_revert(&val_ctx);
3155
3156out_unref:
3157	vmw_validation_unref_lists(&val_ctx);
3158	return ret;
3159}
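/*
 * Example: a display-unit backend fills in the closure before calling the
 * helper. A hedged sketch; only fields and callbacks used above are shown,
 * the my_* callbacks are illustrative, and vmw_framebuffer_to_vfb() is
 * assumed from the driver's KMS header. Note that calc_fifo_size() and
 * post_clip() are called unconditionally and are therefore mandatory:
 *
 *	struct vmw_du_update_plane update = {
 *		.plane = plane,
 *		.old_state = old_state,
 *		.dev_priv = dev_priv,
 *		.vfb = vmw_framebuffer_to_vfb(plane->state->fb),
 *		.mutex = NULL,
 *		.intr = true,
 *		.calc_fifo_size = my_calc_fifo_size,
 *		.pre_clip = my_pre_clip,
 *		.clip = my_clip,
 *		.post_clip = my_post_clip,
 *	};
 *
 *	ret = vmw_du_helper_plane_update(&update);
 */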