   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27#include "vmwgfx_kms.h"
  28
  29#include "vmwgfx_bo.h"
  30#include "vmw_surface_cache.h"
  31
  32#include <drm/drm_atomic.h>
  33#include <drm/drm_atomic_helper.h>
  34#include <drm/drm_damage_helper.h>
  35#include <drm/drm_fourcc.h>
  36#include <drm/drm_rect.h>
  37#include <drm/drm_sysfs.h>
  38
  39void vmw_du_cleanup(struct vmw_display_unit *du)
  40{
  41	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
  42	drm_plane_cleanup(&du->primary);
  43	if (vmw_cmd_supported(dev_priv))
  44		drm_plane_cleanup(&du->cursor.base);
  45
  46	drm_connector_unregister(&du->connector);
  47	drm_crtc_cleanup(&du->crtc);
  48	drm_encoder_cleanup(&du->encoder);
  49	drm_connector_cleanup(&du->connector);
  50}
  51
  52/*
  53 * Display Unit Cursor functions
  54 */
  55
  56static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
  57static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
  58				  struct vmw_plane_state *vps,
  59				  u32 *image, u32 width, u32 height,
  60				  u32 hotspotX, u32 hotspotY);
  61
  62struct vmw_svga_fifo_cmd_define_cursor {
  63	u32 cmd;
  64	SVGAFifoCmdDefineAlphaCursor cursor;
  65};
  66
  67/**
  68 * vmw_send_define_cursor_cmd - queue a define cursor command
  69 * @dev_priv: the private driver struct
  70 * @image: buffer which holds the cursor image
  71 * @width: width of the mouse cursor image
  72 * @height: height of the mouse cursor image
  73 * @hotspotX: the horizontal position of mouse hotspot
  74 * @hotspotY: the vertical position of mouse hotspot
  75 */
  76static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
  77				       u32 *image, u32 width, u32 height,
  78				       u32 hotspotX, u32 hotspotY)
  79{
  80	struct vmw_svga_fifo_cmd_define_cursor *cmd;
  81	const u32 image_size = width * height * sizeof(*image);
  82	const u32 cmd_size = sizeof(*cmd) + image_size;
  83
   84	/* Try to reserve fifocmd space and swallow any failures;
   85	   such reservations cannot be left unconsumed for long
   86	   without risking clogging other fifocmd users, so we
   87	   treat reservations separately from the way we treat
   88	   other fallible KMS-atomic resources at prepare_fb */
  89	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
  90
  91	if (unlikely(!cmd))
  92		return;
  93
  94	memset(cmd, 0, sizeof(*cmd));
  95
  96	memcpy(&cmd[1], image, image_size);
  97
  98	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
  99	cmd->cursor.id = 0;
 100	cmd->cursor.width = width;
 101	cmd->cursor.height = height;
 102	cmd->cursor.hotspotX = hotspotX;
 103	cmd->cursor.hotspotY = hotspotY;
 104
 105	vmw_cmd_commit_flush(dev_priv, cmd_size);
 106}
 107
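/*
 * Illustrative note (not driver code): VMW_CMD_RESERVE above hands back
 * cmd_size bytes, so the variable-length cursor image is written
 * immediately after the fixed-size command via memcpy(&cmd[1], ...).
 * The reserved space is laid out as:
 *
 *	u32 cmd;				// SVGA_CMD_DEFINE_ALPHA_CURSOR
 *	SVGAFifoCmdDefineAlphaCursor cursor;	// id/width/height/hotspots
 *	u32 image[width * height];		// trailing pixel payload
 *
 * which is exactly why cmd_size = sizeof(*cmd) + width * height * 4.
 */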
 108/**
 109 * vmw_cursor_update_image - update the cursor image on the provided plane
 110 * @dev_priv: the private driver struct
 111 * @vps: the plane state of the cursor plane
 112 * @image: buffer which holds the cursor image
 113 * @width: width of the mouse cursor image
 114 * @height: height of the mouse cursor image
 115 * @hotspotX: the horizontal position of mouse hotspot
 116 * @hotspotY: the vertical position of mouse hotspot
 117 */
 118static void vmw_cursor_update_image(struct vmw_private *dev_priv,
 119				    struct vmw_plane_state *vps,
 120				    u32 *image, u32 width, u32 height,
 121				    u32 hotspotX, u32 hotspotY)
 122{
 123	if (vps->cursor.bo)
 124		vmw_cursor_update_mob(dev_priv, vps, image,
 125				      vps->base.crtc_w, vps->base.crtc_h,
 126				      hotspotX, hotspotY);
 127
 128	else
 129		vmw_send_define_cursor_cmd(dev_priv, image, width, height,
 130					   hotspotX, hotspotY);
 131}
 132
 133
 134/**
  135 * vmw_cursor_update_mob - Update cursor via the CursorMob mechanism
 136 *
 137 * Called from inside vmw_du_cursor_plane_atomic_update to actually
 138 * make the cursor-image live.
 139 *
 140 * @dev_priv: device to work with
 141 * @vps: the plane state of the cursor plane
 142 * @image: cursor source data to fill the MOB with
 143 * @width: source data width
 144 * @height: source data height
 145 * @hotspotX: cursor hotspot x
 146 * @hotspotY: cursor hotspot Y
 147 */
 148static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
 149				  struct vmw_plane_state *vps,
 150				  u32 *image, u32 width, u32 height,
 151				  u32 hotspotX, u32 hotspotY)
 152{
 153	SVGAGBCursorHeader *header;
 154	SVGAGBAlphaCursorHeader *alpha_header;
 155	const u32 image_size = width * height * sizeof(*image);
 156
 157	header = vmw_bo_map_and_cache(vps->cursor.bo);
 158	alpha_header = &header->header.alphaHeader;
 159
 160	memset(header, 0, sizeof(*header));
 161
 162	header->type = SVGA_ALPHA_CURSOR;
 163	header->sizeInBytes = image_size;
 164
 165	alpha_header->hotspotX = hotspotX;
 166	alpha_header->hotspotY = hotspotY;
 167	alpha_header->width = width;
 168	alpha_header->height = height;
 169
 170	memcpy(header + 1, image, image_size);
 171	vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
 172		  vps->cursor.bo->tbo.resource->start);
 173}
 174
 175
 176static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
 177{
 178	return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
 179}
 180
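/*
 * Worked example (illustrative only): for the common 64x64 ARGB cursor,
 *
 *	vmw_du_cursor_mob_size(64, 64)
 *		= 64 * 64 * sizeof(u32) + sizeof(SVGAGBCursorHeader)
 *		= 16384 bytes of pixels plus the header,
 *
 * i.e. the MOB holds the SVGAGBCursorHeader followed by packed 32-bit
 * pixel data, matching the layout vmw_cursor_update_mob() writes.
 */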
 181/**
  182 * vmw_du_cursor_plane_acquire_image - Acquire the image data
 183 * @vps: cursor plane state
 184 */
 185static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
 186{
 187	bool is_iomem;
 188	if (vps->surf) {
 189		if (vps->surf_mapped)
 190			return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
 191		return vps->surf->snooper.image;
 192	} else if (vps->bo)
 193		return ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem);
 194	return NULL;
 195}
 196
 197static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
 198					    struct vmw_plane_state *new_vps)
 199{
 200	void *old_image;
 201	void *new_image;
 202	u32 size;
 203	bool changed;
 204
 205	if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
 206	    old_vps->base.crtc_h != new_vps->base.crtc_h)
 207	    return true;
 208
 209	if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
 210	    old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
 211	    return true;
 212
 213	size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
 214
 215	old_image = vmw_du_cursor_plane_acquire_image(old_vps);
 216	new_image = vmw_du_cursor_plane_acquire_image(new_vps);
 217
 218	changed = false;
 219	if (old_image && new_image)
 220		changed = memcmp(old_image, new_image, size) != 0;
 221
 222	return changed;
 223}
 224
 225static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
 226{
 227	if (!(*vbo))
 228		return;
 229
 230	ttm_bo_unpin(&(*vbo)->tbo);
 231	vmw_bo_unreference(vbo);
 232}
 233
 234static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
 235				  struct vmw_plane_state *vps)
 236{
 237	u32 i;
 238
 239	if (!vps->cursor.bo)
 240		return;
 241
 242	vmw_du_cursor_plane_unmap_cm(vps);
 243
 244	/* Look for a free slot to return this mob to the cache. */
 245	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
 246		if (!vcp->cursor_mobs[i]) {
 247			vcp->cursor_mobs[i] = vps->cursor.bo;
 248			vps->cursor.bo = NULL;
 249			return;
 250		}
 251	}
 252
 253	/* Cache is full: See if this mob is bigger than an existing mob. */
 254	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
 255		if (vcp->cursor_mobs[i]->tbo.base.size <
 256		    vps->cursor.bo->tbo.base.size) {
 257			vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
 258			vcp->cursor_mobs[i] = vps->cursor.bo;
 259			vps->cursor.bo = NULL;
 260			return;
 261		}
 262	}
 263
 264	/* Destroy it if it's not worth caching. */
 265	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
 266}
 267
 268static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
 269				 struct vmw_plane_state *vps)
 270{
 271	struct vmw_private *dev_priv = vcp->base.dev->dev_private;
 272	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
 273	u32 i;
 274	u32 cursor_max_dim, mob_max_size;
 275	int ret;
 276
 277	if (!dev_priv->has_mob ||
 278	    (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
 279		return -EINVAL;
 280
 281	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
 282	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
 283
 284	if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
 285	    vps->base.crtc_h > cursor_max_dim)
 286		return -EINVAL;
 287
 288	if (vps->cursor.bo) {
 289		if (vps->cursor.bo->tbo.base.size >= size)
 290			return 0;
 291		vmw_du_put_cursor_mob(vcp, vps);
 292	}
 293
 294	/* Look for an unused mob in the cache. */
 295	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
 296		if (vcp->cursor_mobs[i] &&
 297		    vcp->cursor_mobs[i]->tbo.base.size >= size) {
 298			vps->cursor.bo = vcp->cursor_mobs[i];
 299			vcp->cursor_mobs[i] = NULL;
 300			return 0;
 301		}
 302	}
 303	/* Create a new mob if we can't find an existing one. */
 304	ret = vmw_bo_create_and_populate(dev_priv, size,
 305					 VMW_BO_DOMAIN_MOB,
 306					 &vps->cursor.bo);
 307
 308	if (ret != 0)
 309		return ret;
 310
  311	/* Fence the mob creation so we are guaranteed to have the mob */
 312	ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
 313	if (ret != 0)
 314		goto teardown;
 315
 316	vmw_bo_fence_single(&vps->cursor.bo->tbo, NULL);
 317	ttm_bo_unreserve(&vps->cursor.bo->tbo);
 318	return 0;
 319
 320teardown:
 321	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
 322	return ret;
 323}
 324
 325
 326static void vmw_cursor_update_position(struct vmw_private *dev_priv,
 327				       bool show, int x, int y)
 328{
 329	const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
 330					     : SVGA_CURSOR_ON_HIDE;
 331	uint32_t count;
 332
 333	spin_lock(&dev_priv->cursor_lock);
 334	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
 335		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
 336		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
 337		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
 338		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
 339		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
 340	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
 341		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
 342		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
 343		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
 344		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
 345		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
 346	} else {
 347		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
 348		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
 349		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
 350	}
 351	spin_unlock(&dev_priv->cursor_lock);
 352}
 353
 354void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 355			  struct ttm_object_file *tfile,
 356			  struct ttm_buffer_object *bo,
 357			  SVGA3dCmdHeader *header)
 358{
 359	struct ttm_bo_kmap_obj map;
 360	unsigned long kmap_offset;
 361	unsigned long kmap_num;
 362	SVGA3dCopyBox *box;
 363	unsigned box_count;
 364	void *virtual;
 365	bool is_iomem;
 366	struct vmw_dma_cmd {
 367		SVGA3dCmdHeader header;
 368		SVGA3dCmdSurfaceDMA dma;
 369	} *cmd;
 370	int i, ret;
 371	const struct SVGA3dSurfaceDesc *desc =
 372		vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
 373	const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
 374
 375	cmd = container_of(header, struct vmw_dma_cmd, header);
 376
 377	/* No snooper installed, nothing to copy */
 378	if (!srf->snooper.image)
 379		return;
 380
 381	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
  382		DRM_ERROR("face and mipmap for cursors must be 0\n");
 383		return;
 384	}
 385
 386	if (cmd->header.size < 64) {
 387		DRM_ERROR("at least one full copy box must be given\n");
 388		return;
 389	}
 390
 391	box = (SVGA3dCopyBox *)&cmd[1];
 392	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
 393			sizeof(SVGA3dCopyBox);
 394
 395	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
 396	    box->x != 0    || box->y != 0    || box->z != 0    ||
 397	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
 398	    box->d != 1    || box_count != 1 ||
 399	    box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
  400		/* TODO handle non-page-aligned offsets */
  401		/* TODO handle non-zero dst & src offsets */
  402		/* TODO handle more than one copy box */
 403		DRM_ERROR("Can't snoop dma request for cursor!\n");
 404		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
 405			  box->srcx, box->srcy, box->srcz,
 406			  box->x, box->y, box->z,
 407			  box->w, box->h, box->d, box_count,
 408			  cmd->dma.guest.ptr.offset);
 409		return;
 410	}
 411
 412	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
 413	kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
 414
 415	ret = ttm_bo_reserve(bo, true, false, NULL);
 416	if (unlikely(ret != 0)) {
 417		DRM_ERROR("reserve failed\n");
 418		return;
 419	}
 420
 421	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
 422	if (unlikely(ret != 0))
 423		goto err_unreserve;
 424
 425	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 426
 427	if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
 428		memcpy(srf->snooper.image, virtual,
 429		       VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
 430	} else {
  431		/* Pitches differ, so copy the cursor image row by row. */
 432		for (i = 0; i < box->h; i++)
 433			memcpy(srf->snooper.image + i * image_pitch,
 434			       virtual + i * cmd->dma.guest.pitch,
 435			       box->w * desc->pitchBytesPerBlock);
 436	}
 437
 438	srf->snooper.age++;
 439
 440	ttm_bo_kunmap(&map);
 441err_unreserve:
 442	ttm_bo_unreserve(bo);
 443}
 444
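/*
 * Illustrative sketch of the row-by-row path above, with hypothetical
 * names (dst = srf->snooper.image, src = the mapped guest memory): when
 * the guest pitch differs from the snooper pitch, each of the box->h rows
 * is copied separately,
 *
 *	for (i = 0; i < h; i++)
 *		memcpy(dst + i * image_pitch,	// fixed snooper pitch
 *		       src + i * guest_pitch,	// pitch from the DMA cmd
 *		       w * bytes_per_block);	// only the visible bytes
 *
 * while matching pitches allow the single memcpy of the first branch.
 */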
 445/**
 446 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 447 *
 448 * @dev_priv: Pointer to the device private struct.
 449 *
 450 * Clears all legacy hotspots.
 451 */
 452void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
 453{
 454	struct drm_device *dev = &dev_priv->drm;
 455	struct vmw_display_unit *du;
 456	struct drm_crtc *crtc;
 457
 458	drm_modeset_lock_all(dev);
 459	drm_for_each_crtc(crtc, dev) {
 460		du = vmw_crtc_to_du(crtc);
 461
 462		du->hotspot_x = 0;
 463		du->hotspot_y = 0;
 464	}
 465	drm_modeset_unlock_all(dev);
 466}
 467
 468void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
 469{
 470	struct drm_device *dev = &dev_priv->drm;
 471	struct vmw_display_unit *du;
 472	struct drm_crtc *crtc;
 473
 474	mutex_lock(&dev->mode_config.mutex);
 475
 476	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 477		du = vmw_crtc_to_du(crtc);
 478		if (!du->cursor_surface ||
 479		    du->cursor_age == du->cursor_surface->snooper.age ||
 480		    !du->cursor_surface->snooper.image)
 481			continue;
 482
 483		du->cursor_age = du->cursor_surface->snooper.age;
 484		vmw_send_define_cursor_cmd(dev_priv,
 485					   du->cursor_surface->snooper.image,
 486					   VMW_CURSOR_SNOOP_WIDTH,
 487					   VMW_CURSOR_SNOOP_HEIGHT,
 488					   du->hotspot_x + du->core_hotspot_x,
 489					   du->hotspot_y + du->core_hotspot_y);
 490	}
 491
 492	mutex_unlock(&dev->mode_config.mutex);
 493}
 494
 495
 496void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
 497{
 498	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
 499	u32 i;
 500
 501	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
 502
 503	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
 504		vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
 505
 506	drm_plane_cleanup(plane);
 507}
 508
 509
 510void vmw_du_primary_plane_destroy(struct drm_plane *plane)
 511{
 512	drm_plane_cleanup(plane);
 513
  514	/* Planes are static in our case so we don't free them */
 515}
 516
 517
 518/**
 519 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
 520 *
 521 * @vps: plane state associated with the display surface
  522 * @unreference: true if we also want to unreference the display surface.
 523 */
 524void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
 525			     bool unreference)
 526{
 527	if (vps->surf) {
 528		if (vps->pinned) {
 529			vmw_resource_unpin(&vps->surf->res);
 530			vps->pinned--;
 531		}
 532
 533		if (unreference) {
 534			if (vps->pinned)
 535				DRM_ERROR("Surface still pinned\n");
 536			vmw_surface_unreference(&vps->surf);
 537		}
 538	}
 539}
 540
 541
 542/**
 543 * vmw_du_plane_cleanup_fb - Unpins the plane surface
 544 *
 545 * @plane:  display plane
 546 * @old_state: Contains the FB to clean up
 547 *
 548 * Unpins the framebuffer surface
 549 *
  550 * The function cannot fail.
 551 */
 552void
 553vmw_du_plane_cleanup_fb(struct drm_plane *plane,
 554			struct drm_plane_state *old_state)
 555{
 556	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
 557
 558	vmw_du_plane_unpin_surf(vps, false);
 559}
 560
 561
 562/**
  563 * vmw_du_cursor_plane_map_cm - Maps the cursor mob.
 564 *
 565 * @vps: plane_state
 566 *
 567 * Returns 0 on success
 568 */
 569
 570static int
 571vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
 572{
 573	int ret;
 574	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
 575	struct ttm_buffer_object *bo;
 576
 577	if (!vps->cursor.bo)
 578		return -EINVAL;
 579
 580	bo = &vps->cursor.bo->tbo;
 581
 582	if (bo->base.size < size)
 583		return -EINVAL;
 584
 585	if (vps->cursor.bo->map.virtual)
 586		return 0;
 587
 588	ret = ttm_bo_reserve(bo, false, false, NULL);
 589	if (unlikely(ret != 0))
 590		return -ENOMEM;
 591
  592	if (!vmw_bo_map_and_cache(vps->cursor.bo))
  593		ret = -ENOMEM;
  594
  595	ttm_bo_unreserve(bo);
  596	if (unlikely(ret != 0))
  597		return -ENOMEM;
 598
 599	return 0;
 600}
 601
 602
 603/**
  604 * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mob.
 605 *
 606 * @vps: state of the cursor plane
 607 *
 608 * Returns 0 on success
 609 */
 610
 611static int
 612vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
 613{
 614	int ret = 0;
 615	struct vmw_bo *vbo = vps->cursor.bo;
 616
 617	if (!vbo || !vbo->map.virtual)
 618		return 0;
 619
 620	ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
 621	if (likely(ret == 0)) {
 622		vmw_bo_unmap(vbo);
 623		ttm_bo_unreserve(&vbo->tbo);
 624	}
 625
 626	return ret;
 627}
 628
 629
 630/**
 631 * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
 632 *
 633 * @plane: cursor plane
 634 * @old_state: contains the state to clean up
 635 *
 636 * Unmaps all cursor bo mappings and unpins the cursor surface
 637 *
  638 * The function cannot fail.
 639 */
 640void
 641vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
 642			       struct drm_plane_state *old_state)
 643{
 644	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
 645	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
 646	bool is_iomem;
 647
 648	if (vps->surf_mapped) {
 649		vmw_bo_unmap(vps->surf->res.guest_memory_bo);
 650		vps->surf_mapped = false;
 651	}
 652
 653	if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem)) {
 654		const int ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
 655
 656		if (likely(ret == 0)) {
 657			ttm_bo_kunmap(&vps->bo->map);
 658			ttm_bo_unreserve(&vps->bo->tbo);
 659		}
 660	}
 661
 662	vmw_du_cursor_plane_unmap_cm(vps);
 663	vmw_du_put_cursor_mob(vcp, vps);
 664
 665	vmw_du_plane_unpin_surf(vps, false);
 666
 667	if (vps->surf) {
 668		vmw_surface_unreference(&vps->surf);
 669		vps->surf = NULL;
 670	}
 671
 672	if (vps->bo) {
 673		vmw_bo_unreference(&vps->bo);
 674		vps->bo = NULL;
 675	}
 676}
 677
 678
 679/**
 680 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 681 *
 682 * @plane:  display plane
 683 * @new_state: info on the new plane state, including the FB
 684 *
 685 * Returns 0 on success
 686 */
 687int
 688vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
 689			       struct drm_plane_state *new_state)
 690{
 691	struct drm_framebuffer *fb = new_state->fb;
 692	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
 693	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
 694	int ret = 0;
 695
 696	if (vps->surf) {
 697		vmw_surface_unreference(&vps->surf);
 698		vps->surf = NULL;
 699	}
 700
 701	if (vps->bo) {
 702		vmw_bo_unreference(&vps->bo);
 703		vps->bo = NULL;
 704	}
 705
 706	if (fb) {
 707		if (vmw_framebuffer_to_vfb(fb)->bo) {
 708			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
 709			vmw_bo_reference(vps->bo);
 710		} else {
 711			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
 712			vmw_surface_reference(vps->surf);
 713		}
 714	}
 715
 716	if (!vps->surf && vps->bo) {
 717		const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
 718
 719		/*
 720		 * Not using vmw_bo_map_and_cache() helper here as we need to
 721		 * reserve the ttm_buffer_object first which
 722		 * vmw_bo_map_and_cache() omits.
 723		 */
 724		ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
 725
 726		if (unlikely(ret != 0))
 727			return -ENOMEM;
 728
 729		ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);
 730
 731		ttm_bo_unreserve(&vps->bo->tbo);
 732
 733		if (unlikely(ret != 0))
 734			return -ENOMEM;
 735	} else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {
 736
 737		WARN_ON(vps->surf->snooper.image);
 738		ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
 739				     NULL);
 740		if (unlikely(ret != 0))
 741			return -ENOMEM;
 742		vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
 743		ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
 744		vps->surf_mapped = true;
 745	}
 746
 747	if (vps->surf || vps->bo) {
 748		vmw_du_get_cursor_mob(vcp, vps);
 749		vmw_du_cursor_plane_map_cm(vps);
 750	}
 751
 752	return 0;
 753}
 754
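/*
 * Illustrative note on the reserve/kmap pattern used in prepare_fb above
 * (these are the same TTM calls the function itself makes):
 *
 *	ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);	// lock the BO
 *	if (ret)
 *		return -ENOMEM;
 *	ret = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(size), &bo->map);	// CPU map
 *	ttm_bo_unreserve(&bo->tbo);				// unlock
 *
 * The kmap survives the unreserve; it is torn down again in
 * vmw_du_cursor_plane_cleanup_fb().
 */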
 755
 756void
 757vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
 758				  struct drm_atomic_state *state)
 759{
 760	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 761									   plane);
 762	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
 763									   plane);
 764	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
 765	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
 766	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 767	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
 768	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
 769	s32 hotspot_x, hotspot_y;
 770
 771	hotspot_x = du->hotspot_x + new_state->hotspot_x;
 772	hotspot_y = du->hotspot_y + new_state->hotspot_y;
 773
 774	du->cursor_surface = vps->surf;
 775	du->cursor_bo = vps->bo;
 776
 777	if (!vps->surf && !vps->bo) {
 778		vmw_cursor_update_position(dev_priv, false, 0, 0);
 779		return;
 780	}
 781
 782	vps->cursor.hotspot_x = hotspot_x;
 783	vps->cursor.hotspot_y = hotspot_y;
 784
 785	if (vps->surf) {
 786		du->cursor_age = du->cursor_surface->snooper.age;
 787	}
 788
 789	if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
 790		/*
 791		 * If it hasn't changed, avoid making the device do extra
 792		 * work by keeping the old cursor active.
 793		 */
 794		struct vmw_cursor_plane_state tmp = old_vps->cursor;
 795		old_vps->cursor = vps->cursor;
 796		vps->cursor = tmp;
 797	} else {
 798		void *image = vmw_du_cursor_plane_acquire_image(vps);
 799		if (image)
 800			vmw_cursor_update_image(dev_priv, vps, image,
 801						new_state->crtc_w,
 802						new_state->crtc_h,
 803						hotspot_x, hotspot_y);
 804	}
 805
 806	du->cursor_x = new_state->crtc_x + du->set_gui_x;
 807	du->cursor_y = new_state->crtc_y + du->set_gui_y;
 808
 809	vmw_cursor_update_position(dev_priv, true,
 810				   du->cursor_x + hotspot_x,
 811				   du->cursor_y + hotspot_y);
 812
 813	du->core_hotspot_x = hotspot_x - du->hotspot_x;
 814	du->core_hotspot_y = hotspot_y - du->hotspot_y;
 815}
 816
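/*
 * Illustrative note on the hotspot arithmetic above: the device receives
 * the sum of the legacy (du->hotspot_*) and atomic (new_state->hotspot_*)
 * offsets. With, say, du->hotspot_x = 2 and new_state->hotspot_x = 5,
 * the cursor lands at crtc_x + set_gui_x + 7. Storing the difference
 * hotspot_x - du->hotspot_x back into core_hotspot_x lets a later
 * vmw_kms_legacy_hotspot_clear() drop only the legacy contribution.
 */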
 817
 818/**
 819 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 820 *
 821 * @plane: display plane
 822 * @state: info on the new plane state, including the FB
 823 *
 824 * Check if the new state is settable given the current state.  Other
 825 * than what the atomic helper checks, we care about crtc fitting
 826 * the FB and maintaining one active framebuffer.
 827 *
 828 * Returns 0 on success
 829 */
 830int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
 831				      struct drm_atomic_state *state)
 832{
 833	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 834									   plane);
 835	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
 836									   plane);
 837	struct drm_crtc_state *crtc_state = NULL;
 838	struct drm_framebuffer *new_fb = new_state->fb;
 839	struct drm_framebuffer *old_fb = old_state->fb;
 840	int ret;
 841
 842	/*
 843	 * Ignore damage clips if the framebuffer attached to the plane's state
 844	 * has changed since the last plane update (page-flip). In this case, a
 845	 * full plane update should happen because uploads are done per-buffer.
 846	 */
 847	if (old_fb != new_fb)
 848		new_state->ignore_damage_clips = true;
 849
 850	if (new_state->crtc)
 851		crtc_state = drm_atomic_get_new_crtc_state(state,
 852							   new_state->crtc);
 853
 854	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
 855						  DRM_PLANE_NO_SCALING,
 856						  DRM_PLANE_NO_SCALING,
 857						  false, true);
 858
 859	if (!ret && new_fb) {
 860		struct drm_crtc *crtc = new_state->crtc;
 861		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 862
 863		vmw_connector_state_to_vcs(du->connector.state);
 864	}
 865
 866
 867	return ret;
 868}
 869
 870
 871/**
 872 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 873 *
 874 * @plane: cursor plane
 875 * @state: info on the new plane state
 876 *
 877 * This is a chance to fail if the new cursor state does not fit
 878 * our requirements.
 879 *
 880 * Returns 0 on success
 881 */
 882int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
 883				     struct drm_atomic_state *state)
 884{
 885	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 886									   plane);
 887	int ret = 0;
 888	struct drm_crtc_state *crtc_state = NULL;
 889	struct vmw_surface *surface = NULL;
 890	struct drm_framebuffer *fb = new_state->fb;
 891
 892	if (new_state->crtc)
 893		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
 894							   new_state->crtc);
 895
 896	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
 897						  DRM_PLANE_NO_SCALING,
 898						  DRM_PLANE_NO_SCALING,
 899						  true, true);
 900	if (ret)
 901		return ret;
 902
 903	/* Turning off */
 904	if (!fb)
 905		return 0;
 906
 907	/* A lot of the code assumes this */
 908	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
 909		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
 910			  new_state->crtc_w, new_state->crtc_h);
 911		return -EINVAL;
 912	}
 913
 914	if (!vmw_framebuffer_to_vfb(fb)->bo) {
 915		surface = vmw_framebuffer_to_vfbs(fb)->surface;
 916
 917		WARN_ON(!surface);
 918
 919		if (!surface ||
 920		    (!surface->snooper.image && !surface->res.guest_memory_bo)) {
 921			DRM_ERROR("surface not suitable for cursor\n");
 922			return -EINVAL;
 923		}
 924	}
 925
 926	return 0;
 927}
 928
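/*
 * Illustrative note: the hard-coded 64x64 requirement above matches what
 * the legacy cursor paths in this file assume, presumably including the
 * VMW_CURSOR_SNOOP_WIDTH/HEIGHT snooper buffer consumed by
 * vmw_kms_cursor_snoop().
 */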
 929
 930int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
 931			     struct drm_atomic_state *state)
 932{
 933	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
 934									 crtc);
 935	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
 936	int connector_mask = drm_connector_mask(&du->connector);
 937	bool has_primary = new_state->plane_mask &
 938			   drm_plane_mask(crtc->primary);
 939
 940	/* We always want to have an active plane with an active CRTC */
 941	if (has_primary != new_state->enable)
 942		return -EINVAL;
 943
 944
 945	if (new_state->connector_mask != connector_mask &&
 946	    new_state->connector_mask != 0) {
 947		DRM_ERROR("Invalid connectors configuration\n");
 948		return -EINVAL;
 949	}
 950
 951	/*
 952	 * Our virtual device does not have a dot clock, so use the logical
 953	 * clock value as the dot clock.
 954	 */
 955	if (new_state->mode.crtc_clock == 0)
 956		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;
 957
 958	return 0;
 959}
 960
 961
 962void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
 963			      struct drm_atomic_state *state)
 964{
 965}
 966
 967
 968void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
 969			      struct drm_atomic_state *state)
 970{
 971}
 972
 973
 974/**
 975 * vmw_du_crtc_duplicate_state - duplicate crtc state
 976 * @crtc: DRM crtc
 977 *
 978 * Allocates and returns a copy of the crtc state (both common and
 979 * vmw-specific) for the specified crtc.
 980 *
 981 * Returns: The newly allocated crtc state, or NULL on failure.
 982 */
 983struct drm_crtc_state *
 984vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
 985{
 986	struct drm_crtc_state *state;
 987	struct vmw_crtc_state *vcs;
 988
 989	if (WARN_ON(!crtc->state))
 990		return NULL;
 991
 992	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
 993
 994	if (!vcs)
 995		return NULL;
 996
 997	state = &vcs->base;
 998
 999	__drm_atomic_helper_crtc_duplicate_state(crtc, state);
1000
1001	return state;
1002}
1003
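/*
 * Illustrative sketch of the subclassed-state idiom used by this function
 * and by the plane/connector duplicate_state variants below: copy the
 * whole wrapper, then let the helper fix up the embedded base state.
 *
 *	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL); // full wrapper
 *	if (!vcs)
 *		return NULL;
 *	__drm_atomic_helper_crtc_duplicate_state(crtc, &vcs->base);
 *
 * This is safe because crtc->state always points at a vmw_crtc_state
 * (ensured by vmw_du_crtc_reset()), with the DRM base state embedded in
 * the wrapper.
 */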
1004
1005/**
1006 * vmw_du_crtc_reset - creates a blank vmw crtc state
1007 * @crtc: DRM crtc
1008 *
1009 * Resets the atomic state for @crtc by freeing the state pointer (which
1010 * might be NULL, e.g. at driver load time) and allocating a new empty state
1011 * object.
1012 */
1013void vmw_du_crtc_reset(struct drm_crtc *crtc)
1014{
1015	struct vmw_crtc_state *vcs;
1016
1017
1018	if (crtc->state) {
1019		__drm_atomic_helper_crtc_destroy_state(crtc->state);
1020
1021		kfree(vmw_crtc_state_to_vcs(crtc->state));
1022	}
1023
1024	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1025
1026	if (!vcs) {
1027		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
1028		return;
1029	}
1030
1031	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
1032}
1033
1034
1035/**
1036 * vmw_du_crtc_destroy_state - destroy crtc state
1037 * @crtc: DRM crtc
1038 * @state: state object to destroy
1039 *
1040 * Destroys the crtc state (both common and vmw-specific) for the
 1041 * specified crtc.
1042 */
1043void
1044vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
1045			  struct drm_crtc_state *state)
1046{
1047	drm_atomic_helper_crtc_destroy_state(crtc, state);
1048}
1049
1050
1051/**
1052 * vmw_du_plane_duplicate_state - duplicate plane state
1053 * @plane: drm plane
1054 *
1055 * Allocates and returns a copy of the plane state (both common and
1056 * vmw-specific) for the specified plane.
1057 *
1058 * Returns: The newly allocated plane state, or NULL on failure.
1059 */
1060struct drm_plane_state *
1061vmw_du_plane_duplicate_state(struct drm_plane *plane)
1062{
1063	struct drm_plane_state *state;
1064	struct vmw_plane_state *vps;
1065
1066	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
1067
1068	if (!vps)
1069		return NULL;
1070
1071	vps->pinned = 0;
1072	vps->cpp = 0;
1073
1074	memset(&vps->cursor, 0, sizeof(vps->cursor));
1075
1076	/* Each ref counted resource needs to be acquired again */
1077	if (vps->surf)
1078		(void) vmw_surface_reference(vps->surf);
1079
1080	if (vps->bo)
1081		(void) vmw_bo_reference(vps->bo);
1082
1083	state = &vps->base;
1084
1085	__drm_atomic_helper_plane_duplicate_state(plane, state);
1086
1087	return state;
1088}
1089
1090
1091/**
1092 * vmw_du_plane_reset - creates a blank vmw plane state
1093 * @plane: drm plane
1094 *
1095 * Resets the atomic state for @plane by freeing the state pointer (which might
1096 * be NULL, e.g. at driver load time) and allocating a new empty state object.
1097 */
1098void vmw_du_plane_reset(struct drm_plane *plane)
1099{
1100	struct vmw_plane_state *vps;
1101
1102	if (plane->state)
1103		vmw_du_plane_destroy_state(plane, plane->state);
1104
1105	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
1106
1107	if (!vps) {
1108		DRM_ERROR("Cannot allocate vmw_plane_state\n");
1109		return;
1110	}
1111
1112	__drm_atomic_helper_plane_reset(plane, &vps->base);
1113}
1114
1115
1116/**
1117 * vmw_du_plane_destroy_state - destroy plane state
1118 * @plane: DRM plane
1119 * @state: state object to destroy
1120 *
1121 * Destroys the plane state (both common and vmw-specific) for the
1122 * specified plane.
1123 */
1124void
1125vmw_du_plane_destroy_state(struct drm_plane *plane,
1126			   struct drm_plane_state *state)
1127{
1128	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
1129
1130	/* Should have been freed by cleanup_fb */
1131	if (vps->surf)
1132		vmw_surface_unreference(&vps->surf);
1133
1134	if (vps->bo)
1135		vmw_bo_unreference(&vps->bo);
1136
1137	drm_atomic_helper_plane_destroy_state(plane, state);
1138}
1139
1140
1141/**
1142 * vmw_du_connector_duplicate_state - duplicate connector state
1143 * @connector: DRM connector
1144 *
1145 * Allocates and returns a copy of the connector state (both common and
1146 * vmw-specific) for the specified connector.
1147 *
1148 * Returns: The newly allocated connector state, or NULL on failure.
1149 */
1150struct drm_connector_state *
1151vmw_du_connector_duplicate_state(struct drm_connector *connector)
1152{
1153	struct drm_connector_state *state;
1154	struct vmw_connector_state *vcs;
1155
1156	if (WARN_ON(!connector->state))
1157		return NULL;
1158
1159	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
1160
1161	if (!vcs)
1162		return NULL;
1163
1164	state = &vcs->base;
1165
1166	__drm_atomic_helper_connector_duplicate_state(connector, state);
1167
1168	return state;
1169}
1170
1171
1172/**
1173 * vmw_du_connector_reset - creates a blank vmw connector state
1174 * @connector: DRM connector
1175 *
1176 * Resets the atomic state for @connector by freeing the state pointer (which
1177 * might be NULL, e.g. at driver load time) and allocating a new empty state
1178 * object.
1179 */
1180void vmw_du_connector_reset(struct drm_connector *connector)
1181{
1182	struct vmw_connector_state *vcs;
1183
1184
1185	if (connector->state) {
1186		__drm_atomic_helper_connector_destroy_state(connector->state);
1187
1188		kfree(vmw_connector_state_to_vcs(connector->state));
1189	}
1190
1191	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1192
1193	if (!vcs) {
1194		DRM_ERROR("Cannot allocate vmw_connector_state\n");
1195		return;
1196	}
1197
1198	__drm_atomic_helper_connector_reset(connector, &vcs->base);
1199}
1200
1201
1202/**
1203 * vmw_du_connector_destroy_state - destroy connector state
1204 * @connector: DRM connector
1205 * @state: state object to destroy
1206 *
1207 * Destroys the connector state (both common and vmw-specific) for the
 1208 * specified connector.
1209 */
1210void
1211vmw_du_connector_destroy_state(struct drm_connector *connector,
1212			  struct drm_connector_state *state)
1213{
1214	drm_atomic_helper_connector_destroy_state(connector, state);
1215}
1216/*
1217 * Generic framebuffer code
1218 */
1219
1220/*
1221 * Surface framebuffer code
1222 */
1223
1224static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
1225{
1226	struct vmw_framebuffer_surface *vfbs =
1227		vmw_framebuffer_to_vfbs(framebuffer);
1228
1229	drm_framebuffer_cleanup(framebuffer);
1230	vmw_surface_unreference(&vfbs->surface);
1231
1232	kfree(vfbs);
1233}
1234
1235/**
1236 * vmw_kms_readback - Perform a readback from the screen system to
1237 * a buffer-object backed framebuffer.
1238 *
1239 * @dev_priv: Pointer to the device private structure.
1240 * @file_priv: Pointer to a struct drm_file identifying the caller.
1241 * Must be set to NULL if @user_fence_rep is NULL.
1242 * @vfb: Pointer to the buffer-object backed framebuffer.
1243 * @user_fence_rep: User-space provided structure for fence information.
1244 * Must be set to non-NULL if @file_priv is non-NULL.
1245 * @vclips: Array of clip rects.
1246 * @num_clips: Number of clip rects in @vclips.
1247 *
1248 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
1249 * interrupted.
1250 */
1251int vmw_kms_readback(struct vmw_private *dev_priv,
1252		     struct drm_file *file_priv,
1253		     struct vmw_framebuffer *vfb,
1254		     struct drm_vmw_fence_rep __user *user_fence_rep,
1255		     struct drm_vmw_rect *vclips,
1256		     uint32_t num_clips)
1257{
1258	switch (dev_priv->active_display_unit) {
1259	case vmw_du_screen_object:
1260		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
1261					    user_fence_rep, vclips, num_clips,
1262					    NULL);
1263	case vmw_du_screen_target:
1264		return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
1265					     user_fence_rep, NULL, vclips, num_clips,
1266					     1, NULL);
1267	default:
1268		WARN_ONCE(true,
1269			  "Readback called with invalid display system.\n");
 1270	}
1271
1272	return -ENOSYS;
1273}
1274
1275
1276static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
1277	.destroy = vmw_framebuffer_surface_destroy,
1278	.dirty = drm_atomic_helper_dirtyfb,
1279};
1280
1281static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
1282					   struct vmw_surface *surface,
1283					   struct vmw_framebuffer **out,
1284					   const struct drm_mode_fb_cmd2
1285					   *mode_cmd,
1286					   bool is_bo_proxy)
1287
1288{
1289	struct drm_device *dev = &dev_priv->drm;
1290	struct vmw_framebuffer_surface *vfbs;
1291	enum SVGA3dSurfaceFormat format;
1292	int ret;
1293
1294	/* 3D is only supported on HWv8 and newer hosts */
1295	if (dev_priv->active_display_unit == vmw_du_legacy)
1296		return -ENOSYS;
1297
1298	/*
1299	 * Sanity checks.
1300	 */
1301
1302	if (!drm_any_plane_has_format(&dev_priv->drm,
1303				      mode_cmd->pixel_format,
1304				      mode_cmd->modifier[0])) {
1305		drm_dbg(&dev_priv->drm,
1306			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1307			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1308		return -EINVAL;
1309	}
1310
1311	/* Surface must be marked as a scanout. */
1312	if (unlikely(!surface->metadata.scanout))
1313		return -EINVAL;
1314
1315	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
1316		     surface->metadata.num_sizes != 1 ||
1317		     surface->metadata.base_size.width < mode_cmd->width ||
1318		     surface->metadata.base_size.height < mode_cmd->height ||
1319		     surface->metadata.base_size.depth != 1)) {
1320		DRM_ERROR("Incompatible surface dimensions "
1321			  "for requested mode.\n");
1322		return -EINVAL;
1323	}
1324
1325	switch (mode_cmd->pixel_format) {
1326	case DRM_FORMAT_ARGB8888:
1327		format = SVGA3D_A8R8G8B8;
1328		break;
1329	case DRM_FORMAT_XRGB8888:
1330		format = SVGA3D_X8R8G8B8;
1331		break;
1332	case DRM_FORMAT_RGB565:
1333		format = SVGA3D_R5G6B5;
1334		break;
1335	case DRM_FORMAT_XRGB1555:
1336		format = SVGA3D_A1R5G5B5;
1337		break;
1338	default:
1339		DRM_ERROR("Invalid pixel format: %p4cc\n",
1340			  &mode_cmd->pixel_format);
1341		return -EINVAL;
1342	}
1343
1344	/*
1345	 * For DX, surface format validation is done when surface->scanout
1346	 * is set.
1347	 */
1348	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
1349		DRM_ERROR("Invalid surface format for requested mode.\n");
1350		return -EINVAL;
1351	}
1352
1353	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
1354	if (!vfbs) {
1355		ret = -ENOMEM;
1356		goto out_err1;
1357	}
1358
1359	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
1360	vfbs->surface = vmw_surface_reference(surface);
1361	vfbs->base.user_handle = mode_cmd->handles[0];
1362	vfbs->is_bo_proxy = is_bo_proxy;
1363
1364	*out = &vfbs->base;
1365
1366	ret = drm_framebuffer_init(dev, &vfbs->base.base,
1367				   &vmw_framebuffer_surface_funcs);
1368	if (ret)
1369		goto out_err2;
1370
1371	return 0;
1372
1373out_err2:
1374	vmw_surface_unreference(&surface);
1375	kfree(vfbs);
1376out_err1:
1377	return ret;
1378}
1379
1380/*
1381 * Buffer-object framebuffer code
1382 */
1383
1384static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
1385					    struct drm_file *file_priv,
1386					    unsigned int *handle)
1387{
1388	struct vmw_framebuffer_bo *vfbd =
1389			vmw_framebuffer_to_vfbd(fb);
1390
1391	return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
1392}
1393
1394static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
1395{
1396	struct vmw_framebuffer_bo *vfbd =
1397		vmw_framebuffer_to_vfbd(framebuffer);
1398
1399	drm_framebuffer_cleanup(framebuffer);
1400	vmw_bo_unreference(&vfbd->buffer);
1401
1402	kfree(vfbd);
1403}
1404
1405static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
1406	.create_handle = vmw_framebuffer_bo_create_handle,
1407	.destroy = vmw_framebuffer_bo_destroy,
1408	.dirty = drm_atomic_helper_dirtyfb,
1409};
1410
1411/**
1412 * vmw_create_bo_proxy - create a proxy surface for the buffer object
1413 *
1414 * @dev: DRM device
1415 * @mode_cmd: parameters for the new surface
1416 * @bo_mob: MOB backing the buffer object
1417 * @srf_out: newly created surface
1418 *
1419 * When the content FB is a buffer object, we create a surface as a proxy to the
1420 * same buffer.  This way we can do a surface copy rather than a surface DMA.
 1421 * This is a more efficient approach.
1422 *
1423 * RETURNS:
1424 * 0 on success, error code otherwise
1425 */
1426static int vmw_create_bo_proxy(struct drm_device *dev,
1427			       const struct drm_mode_fb_cmd2 *mode_cmd,
1428			       struct vmw_bo *bo_mob,
1429			       struct vmw_surface **srf_out)
1430{
1431	struct vmw_surface_metadata metadata = {0};
1432	uint32_t format;
1433	struct vmw_resource *res;
1434	unsigned int bytes_pp;
1435	int ret;
1436
1437	switch (mode_cmd->pixel_format) {
1438	case DRM_FORMAT_ARGB8888:
1439	case DRM_FORMAT_XRGB8888:
1440		format = SVGA3D_X8R8G8B8;
1441		bytes_pp = 4;
1442		break;
1443
1444	case DRM_FORMAT_RGB565:
1445	case DRM_FORMAT_XRGB1555:
1446		format = SVGA3D_R5G6B5;
1447		bytes_pp = 2;
1448		break;
1449
 1450	case DRM_FORMAT_C8:
1451		format = SVGA3D_P8;
1452		bytes_pp = 1;
1453		break;
1454
1455	default:
1456		DRM_ERROR("Invalid framebuffer format %p4cc\n",
1457			  &mode_cmd->pixel_format);
1458		return -EINVAL;
1459	}
1460
1461	metadata.format = format;
1462	metadata.mip_levels[0] = 1;
1463	metadata.num_sizes = 1;
1464	metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
 1465	metadata.base_size.height = mode_cmd->height;
1466	metadata.base_size.depth = 1;
1467	metadata.scanout = true;
1468
1469	ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
1470	if (ret) {
1471		DRM_ERROR("Failed to allocate proxy content buffer\n");
1472		return ret;
1473	}
1474
1475	res = &(*srf_out)->res;
1476
1477	/* Reserve and switch the backing mob. */
1478	mutex_lock(&res->dev_priv->cmdbuf_mutex);
1479	(void) vmw_resource_reserve(res, false, true);
1480	vmw_user_bo_unref(&res->guest_memory_bo);
1481	res->guest_memory_bo = vmw_user_bo_ref(bo_mob);
1482	res->guest_memory_offset = 0;
1483	vmw_resource_unreserve(res, false, false, false, NULL, 0);
1484	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1485
1486	return 0;
1487}
1488
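/*
 * Worked example for the sizing above (illustrative only): the proxy
 * width comes from the pitch rather than from mode_cmd->width, so for a
 * hypothetical XRGB8888 framebuffer with pitches[0] = 5120,
 *
 *	bytes_pp = 4  =>  base_size.width = 5120 / 4 = 1280 pixels,
 *
 * which covers the full padded scanline even when the visible width is
 * smaller than the pitch allows for.
 */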
1489
1490
1491static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
1492				      struct vmw_bo *bo,
1493				      struct vmw_framebuffer **out,
1494				      const struct drm_mode_fb_cmd2
1495				      *mode_cmd)
1496
1497{
1498	struct drm_device *dev = &dev_priv->drm;
1499	struct vmw_framebuffer_bo *vfbd;
1500	unsigned int requested_size;
1501	int ret;
1502
1503	requested_size = mode_cmd->height * mode_cmd->pitches[0];
1504	if (unlikely(requested_size > bo->tbo.base.size)) {
1505		DRM_ERROR("Screen buffer object size is too small "
1506			  "for requested mode.\n");
1507		return -EINVAL;
1508	}
1509
1510	if (!drm_any_plane_has_format(&dev_priv->drm,
1511				      mode_cmd->pixel_format,
1512				      mode_cmd->modifier[0])) {
1513		drm_dbg(&dev_priv->drm,
1514			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1515			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1516		return -EINVAL;
1517	}
1518
1519	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
1520	if (!vfbd) {
1521		ret = -ENOMEM;
1522		goto out_err1;
1523	}
1524
1525	vfbd->base.base.obj[0] = &bo->tbo.base;
1526	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
1527	vfbd->base.bo = true;
1528	vfbd->buffer = vmw_bo_reference(bo);
1529	vfbd->base.user_handle = mode_cmd->handles[0];
1530	*out = &vfbd->base;
1531
1532	ret = drm_framebuffer_init(dev, &vfbd->base.base,
1533				   &vmw_framebuffer_bo_funcs);
1534	if (ret)
1535		goto out_err2;
1536
1537	return 0;
1538
1539out_err2:
1540	vmw_bo_unreference(&bo);
1541	kfree(vfbd);
1542out_err1:
1543	return ret;
1544}
1545
1546
1547/**
1548 * vmw_kms_srf_ok - check if a surface can be created
1549 *
1550 * @dev_priv: Pointer to device private struct.
1551 * @width: requested width
1552 * @height: requested height
1553 *
 1554 * Surfaces must not exceed the maximum texture size.
1555 */
1556static bool
1557vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
1558{
1559	if (width  > dev_priv->texture_max_width ||
1560	    height > dev_priv->texture_max_height)
1561		return false;
1562
1563	return true;
1564}
1565
1566/**
1567 * vmw_kms_new_framebuffer - Create a new framebuffer.
1568 *
1569 * @dev_priv: Pointer to device private struct.
1570 * @bo: Pointer to buffer object to wrap the kms framebuffer around.
1571 * Either @bo or @surface must be NULL.
1572 * @surface: Pointer to a surface to wrap the kms framebuffer around.
1573 * Either @bo or @surface must be NULL.
1574 * @only_2d: No presents will occur to this buffer object based framebuffer.
1575 * This helps the code to do some important optimizations.
1576 * @mode_cmd: Frame-buffer metadata.
1577 */
1578struct vmw_framebuffer *
1579vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1580			struct vmw_bo *bo,
1581			struct vmw_surface *surface,
1582			bool only_2d,
1583			const struct drm_mode_fb_cmd2 *mode_cmd)
1584{
1585	struct vmw_framebuffer *vfb = NULL;
1586	bool is_bo_proxy = false;
1587	int ret;
1588
1589	/*
 1590	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
1591	 * therefore, wrap the buffer object in a surface so we can use the
1592	 * SurfaceCopy command.
1593	 */
1594	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
1595	    bo && only_2d &&
1596	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
1597	    dev_priv->active_display_unit == vmw_du_screen_target) {
1598		ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
1599					  bo, &surface);
1600		if (ret)
1601			return ERR_PTR(ret);
1602
1603		is_bo_proxy = true;
1604	}
1605
 1606	/* Create the new framebuffer depending on what we have */
1607	if (surface) {
1608		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
1609						      mode_cmd,
1610						      is_bo_proxy);
1611		/*
1612		 * vmw_create_bo_proxy() adds a reference that is no longer
1613		 * needed
1614		 */
1615		if (is_bo_proxy)
1616			vmw_surface_unreference(&surface);
1617	} else if (bo) {
1618		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
1619						 mode_cmd);
1620	} else {
1621		BUG();
1622	}
1623
1624	if (ret)
1625		return ERR_PTR(ret);
1626
1627	return vfb;
1628}
1629
1630/*
1631 * Generic Kernel modesetting functions
1632 */
1633
1634static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1635						 struct drm_file *file_priv,
1636						 const struct drm_mode_fb_cmd2 *mode_cmd)
1637{
1638	struct vmw_private *dev_priv = vmw_priv(dev);
1639	struct vmw_framebuffer *vfb = NULL;
1640	struct vmw_surface *surface = NULL;
1641	struct vmw_bo *bo = NULL;
1642	int ret;
1643
1644	/* returns either a bo or surface */
1645	ret = vmw_user_lookup_handle(dev_priv, file_priv,
1646				     mode_cmd->handles[0],
1647				     &surface, &bo);
1648	if (ret) {
1649		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
1650			  mode_cmd->handles[0], mode_cmd->handles[0]);
1651		goto err_out;
1652	}
1653
1654
1655	if (!bo &&
1656	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
 1657		DRM_ERROR("Surface size cannot exceed %dx%d\n",
 1658			  dev_priv->texture_max_width, dev_priv->texture_max_height);
 1659		ret = -EINVAL;
 1660		goto err_out;
 1661	}
1662
1663
1664	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
1665				      !(dev_priv->capabilities & SVGA_CAP_3D),
1666				      mode_cmd);
1667	if (IS_ERR(vfb)) {
1668		ret = PTR_ERR(vfb);
1669		goto err_out;
1670	}
1671
1672err_out:
 1673	/* vmw_user_lookup_handle takes one ref, and so does new_fb */
1674	if (bo)
1675		vmw_user_bo_unref(&bo);
1676	if (surface)
1677		vmw_surface_unreference(&surface);
1678
1679	if (ret) {
1680		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
1681		return ERR_PTR(ret);
1682	}
1683
1684	return &vfb->base;
1685}
1686
1687/**
1688 * vmw_kms_check_display_memory - Validates display memory required for a
1689 * topology
1690 * @dev: DRM device
1691 * @num_rects: number of drm_rect in rects
1692 * @rects: array of drm_rect representing the topology to validate indexed by
1693 * crtc index.
1694 *
1695 * Returns:
1696 * 0 on success otherwise negative error code
1697 */
1698static int vmw_kms_check_display_memory(struct drm_device *dev,
1699					uint32_t num_rects,
1700					struct drm_rect *rects)
1701{
1702	struct vmw_private *dev_priv = vmw_priv(dev);
1703	struct drm_rect bounding_box = {0};
1704	u64 total_pixels = 0, pixel_mem, bb_mem;
1705	int i;
1706
1707	for (i = 0; i < num_rects; i++) {
1708		/*
1709		 * For STDU only individual screen (screen target) is limited by
1710		 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
1711		 */
1712		if (dev_priv->active_display_unit == vmw_du_screen_target &&
1713		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
1714		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
1715			VMW_DEBUG_KMS("Screen size not supported.\n");
1716			return -EINVAL;
1717		}
1718
1719		/* Bounding box upper left is at (0,0). */
1720		if (rects[i].x2 > bounding_box.x2)
1721			bounding_box.x2 = rects[i].x2;
1722
1723		if (rects[i].y2 > bounding_box.y2)
1724			bounding_box.y2 = rects[i].y2;
1725
1726		total_pixels += (u64) drm_rect_width(&rects[i]) *
1727			(u64) drm_rect_height(&rects[i]);
1728	}
1729
1730	/* Virtual svga device primary limits are always in 32-bpp. */
1731	pixel_mem = total_pixels * 4;
1732
1733	/*
 1734	 * For HV10 and below, prim_bb_mem is the vram size. When
 1735	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the vram
 1736	 * size is the limit on the primary bounding box.
1737	 */
1738	if (pixel_mem > dev_priv->max_primary_mem) {
1739		VMW_DEBUG_KMS("Combined output size too large.\n");
1740		return -EINVAL;
1741	}
1742
1743	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
1744	if (dev_priv->active_display_unit != vmw_du_screen_target ||
1745	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
1746		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
1747
1748		if (bb_mem > dev_priv->max_primary_mem) {
1749			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
1750			return -EINVAL;
1751		}
1752	}
1753
1754	return 0;
1755}
1756
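/*
 * Worked example for the memory checks above (illustrative only): two
 * side-by-side 1920x1080 outputs give
 *
 *	pixel_mem = 2 * 1920 * 1080 * 4	(~15.8 MiB at 32bpp)
 *	bb_mem    = 3840 * 1080 * 4	(bounding box (0,0)-(3840,1080))
 *
 * and both values must fit in max_primary_mem unless the device is an
 * STDU with SVGA_CAP_NO_BB_RESTRICTION, which waives the bb_mem check.
 */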
1757/**
1758 * vmw_crtc_state_and_lock - Return new or current crtc state with locked
1759 * crtc mutex
1760 * @state: The atomic state pointer containing the new atomic state
1761 * @crtc: The crtc
1762 *
1763 * This function returns the new crtc state if it's part of the state update.
1764 * Otherwise returns the current crtc state. It also makes sure that the
1765 * crtc mutex is locked.
1766 *
1767 * Returns: A valid crtc state pointer or NULL. It may also return a
1768 * pointer error, in particular -EDEADLK if locking needs to be rerun.
1769 */
1770static struct drm_crtc_state *
1771vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
1772{
1773	struct drm_crtc_state *crtc_state;
1774
1775	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1776	if (crtc_state) {
1777		lockdep_assert_held(&crtc->mutex.mutex.base);
1778	} else {
1779		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
1780
1781		if (ret != 0 && ret != -EALREADY)
1782			return ERR_PTR(ret);
1783
1784		crtc_state = crtc->state;
1785	}
1786
1787	return crtc_state;
1788}
1789
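/*
 * Illustrative note on using vmw_crtc_state_and_lock(): callers in this
 * file treat an ERR_PTR return as fatal for the current check pass,
 *
 *	crtc_state = vmw_crtc_state_and_lock(state, crtc);
 *	if (IS_ERR(crtc_state))
 *		return PTR_ERR(crtc_state);	// -EDEADLK reruns the check
 *
 * because returning -EDEADLK makes the atomic core drop every modeset
 * lock and restart, after which the state is fetched again.
 */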
1790/**
1791 * vmw_kms_check_implicit - Verify that all implicit display units scan out
1792 * from the same fb after the new state is committed.
1793 * @dev: The drm_device.
1794 * @state: The new state to be checked.
1795 *
1796 * Returns:
1797 *   Zero on success,
1798 *   -EINVAL on invalid state,
1799 *   -EDEADLK if modeset locking needs to be rerun.
1800 */
1801static int vmw_kms_check_implicit(struct drm_device *dev,
1802				  struct drm_atomic_state *state)
1803{
1804	struct drm_framebuffer *implicit_fb = NULL;
1805	struct drm_crtc *crtc;
1806	struct drm_crtc_state *crtc_state;
1807	struct drm_plane_state *plane_state;
1808
1809	drm_for_each_crtc(crtc, dev) {
1810		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1811
1812		if (!du->is_implicit)
1813			continue;
1814
1815		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1816		if (IS_ERR(crtc_state))
1817			return PTR_ERR(crtc_state);
1818
1819		if (!crtc_state || !crtc_state->enable)
1820			continue;
1821
1822		/*
1823		 * Can't move primary planes across crtcs, so this is OK.
1824		 * It also means we don't need to take the plane mutex.
1825		 */
1826		plane_state = du->primary.state;
1827		if (plane_state->crtc != crtc)
1828			continue;
1829
1830		if (!implicit_fb)
1831			implicit_fb = plane_state->fb;
1832		else if (implicit_fb != plane_state->fb)
1833			return -EINVAL;
1834	}
1835
1836	return 0;
1837}
1838
1839/**
1840 * vmw_kms_check_topology - Validates topology in drm_atomic_state
1841 * @dev: DRM device
1842 * @state: the driver state object
1843 *
1844 * Returns:
1845 * 0 on success otherwise negative error code
1846 */
1847static int vmw_kms_check_topology(struct drm_device *dev,
1848				  struct drm_atomic_state *state)
1849{
1850	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1851	struct drm_rect *rects;
1852	struct drm_crtc *crtc;
1853	uint32_t i;
1854	int ret = 0;
1855
1856	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
1857			GFP_KERNEL);
1858	if (!rects)
1859		return -ENOMEM;
1860
1861	drm_for_each_crtc(crtc, dev) {
1862		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1863		struct drm_crtc_state *crtc_state;
1864
1865		i = drm_crtc_index(crtc);
1866
1867		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1868		if (IS_ERR(crtc_state)) {
1869			ret = PTR_ERR(crtc_state);
1870			goto clean;
1871		}
1872
1873		if (!crtc_state)
1874			continue;
1875
1876		if (crtc_state->enable) {
1877			rects[i].x1 = du->gui_x;
1878			rects[i].y1 = du->gui_y;
1879			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
1880			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
1881		} else {
1882			rects[i].x1 = 0;
1883			rects[i].y1 = 0;
1884			rects[i].x2 = 0;
1885			rects[i].y2 = 0;
1886		}
1887	}
1888
1889	/* Determine change to topology due to new atomic state */
1890	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
1891				      new_crtc_state, i) {
1892		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1893		struct drm_connector *connector;
1894		struct drm_connector_state *conn_state;
1895		struct vmw_connector_state *vmw_conn_state;
1896
1897		if (!du->pref_active && new_crtc_state->enable) {
1898			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
1899			ret = -EINVAL;
1900			goto clean;
1901		}
1902
1903		/*
1904		 * For vmwgfx each crtc has only one connector attached and it
1905		 * never changes, so we don't really need to check
1906		 * crtc->connector_mask and iterate over it.
1907		 */
1908		connector = &du->connector;
1909		conn_state = drm_atomic_get_connector_state(state, connector);
1910		if (IS_ERR(conn_state)) {
1911			ret = PTR_ERR(conn_state);
1912			goto clean;
1913		}
1914
1915		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
1916		vmw_conn_state->gui_x = du->gui_x;
1917		vmw_conn_state->gui_y = du->gui_y;
1918	}
1919
1920	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
1921					   rects);
1922
1923clean:
1924	kfree(rects);
1925	return ret;
1926}
1927
1928/**
1929 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
1930 *
1931 * @dev: DRM device
1932 * @state: the driver state object
1933 *
1934 * This is a simple wrapper around drm_atomic_helper_check() that lets
1935 * us assign a value to mode->crtc_clock so that
1936 * drm_calc_timestamping_constants() won't throw an error message.
1937 *
1938 * Returns:
1939 * Zero for success or -errno
1940 */
1941static int
1942vmw_kms_atomic_check_modeset(struct drm_device *dev,
1943			     struct drm_atomic_state *state)
1944{
1945	struct drm_crtc *crtc;
1946	struct drm_crtc_state *crtc_state;
1947	bool need_modeset = false;
1948	int i, ret;
1949
1950	ret = drm_atomic_helper_check(dev, state);
1951	if (ret)
1952		return ret;
1953
1954	ret = vmw_kms_check_implicit(dev, state);
1955	if (ret) {
1956		VMW_DEBUG_KMS("Invalid implicit state\n");
1957		return ret;
1958	}
1959
1960	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1961		if (drm_atomic_crtc_needs_modeset(crtc_state))
1962			need_modeset = true;
1963	}
1964
1965	if (need_modeset)
1966		return vmw_kms_check_topology(dev, state);
1967
1968	return ret;
1969}
1970
1971static const struct drm_mode_config_funcs vmw_kms_funcs = {
1972	.fb_create = vmw_kms_fb_create,
1973	.atomic_check = vmw_kms_atomic_check_modeset,
1974	.atomic_commit = drm_atomic_helper_commit,
1975};
1976
1977static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1978				   struct drm_file *file_priv,
1979				   struct vmw_framebuffer *vfb,
1980				   struct vmw_surface *surface,
1981				   uint32_t sid,
1982				   int32_t destX, int32_t destY,
1983				   struct drm_vmw_rect *clips,
1984				   uint32_t num_clips)
1985{
1986	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1987					    &surface->res, destX, destY,
1988					    num_clips, 1, NULL, NULL);
1989}
1990
1991
1992int vmw_kms_present(struct vmw_private *dev_priv,
1993		    struct drm_file *file_priv,
1994		    struct vmw_framebuffer *vfb,
1995		    struct vmw_surface *surface,
1996		    uint32_t sid,
1997		    int32_t destX, int32_t destY,
1998		    struct drm_vmw_rect *clips,
1999		    uint32_t num_clips)
2000{
2001	int ret;
2002
2003	switch (dev_priv->active_display_unit) {
2004	case vmw_du_screen_target:
2005		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
2006						 &surface->res, destX, destY,
2007						 num_clips, 1, NULL, NULL);
2008		break;
2009	case vmw_du_screen_object:
2010		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
2011					      sid, destX, destY, clips,
2012					      num_clips);
2013		break;
2014	default:
2015		WARN_ONCE(true,
2016			  "Present called with invalid display system.\n");
2017		ret = -ENOSYS;
2018		break;
2019	}
2020	if (ret)
2021		return ret;
2022
2023	vmw_cmd_flush(dev_priv, false);
2024
2025	return 0;
2026}
2027
2028static void
2029vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
2030{
2031	if (dev_priv->hotplug_mode_update_property)
2032		return;
2033
2034	dev_priv->hotplug_mode_update_property =
2035		drm_property_create_range(&dev_priv->drm,
2036					  DRM_MODE_PROP_IMMUTABLE,
2037					  "hotplug_mode_update", 0, 1);
2038}
2039
2040int vmw_kms_init(struct vmw_private *dev_priv)
2041{
2042	struct drm_device *dev = &dev_priv->drm;
2043	int ret;
2044	static const char *display_unit_names[] = {
2045		"Invalid",
2046		"Legacy",
2047		"Screen Object",
2048		"Screen Target",
2049		"Invalid (max)"
2050	};
2051
2052	drm_mode_config_init(dev);
2053	dev->mode_config.funcs = &vmw_kms_funcs;
2054	dev->mode_config.min_width = 1;
2055	dev->mode_config.min_height = 1;
2056	dev->mode_config.max_width = dev_priv->texture_max_width;
2057	dev->mode_config.max_height = dev_priv->texture_max_height;
2058	dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
2059
2060	drm_mode_create_suggested_offset_properties(dev);
2061	vmw_kms_create_hotplug_mode_update_property(dev_priv);
2062
2063	ret = vmw_kms_stdu_init_display(dev_priv);
2064	if (ret) {
2065		ret = vmw_kms_sou_init_display(dev_priv);
2066		if (ret) /* Fallback */
2067			ret = vmw_kms_ldu_init_display(dev_priv);
2068	}
2069	BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
2070	drm_info(&dev_priv->drm, "%s display unit initialized\n",
2071		 display_unit_names[dev_priv->active_display_unit]);
2072
2073	return ret;
2074}
2075
2076int vmw_kms_close(struct vmw_private *dev_priv)
2077{
2078	int ret = 0;
2079
2080	/*
2081	 * Docs say we should take the lock before calling this function,
2082	 * but since it destroys encoders and our destructor calls
2083	 * drm_encoder_cleanup, which takes the lock, we would deadlock.
2084	 */
2085	drm_mode_config_cleanup(&dev_priv->drm);
2086	if (dev_priv->active_display_unit == vmw_du_legacy)
2087		ret = vmw_kms_ldu_close_display(dev_priv);
2088
2089	return ret;
2090}
2091
2092int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
2093				struct drm_file *file_priv)
2094{
2095	struct drm_vmw_cursor_bypass_arg *arg = data;
2096	struct vmw_display_unit *du;
2097	struct drm_crtc *crtc;
2098	int ret = 0;
2099
2100	mutex_lock(&dev->mode_config.mutex);
2101	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
2102
2103		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2104			du = vmw_crtc_to_du(crtc);
2105			du->hotspot_x = arg->xhot;
2106			du->hotspot_y = arg->yhot;
2107		}
2108
2109		mutex_unlock(&dev->mode_config.mutex);
2110		return 0;
2111	}
2112
2113	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
2114	if (!crtc) {
2115		ret = -ENOENT;
2116		goto out;
2117	}
2118
2119	du = vmw_crtc_to_du(crtc);
2120
2121	du->hotspot_x = arg->xhot;
2122	du->hotspot_y = arg->yhot;
2123
2124out:
2125	mutex_unlock(&dev->mode_config.mutex);
2126
2127	return ret;
2128}
2129
2130int vmw_kms_write_svga(struct vmw_private *vmw_priv,
2131			unsigned width, unsigned height, unsigned pitch,
2132			unsigned bpp, unsigned depth)
2133{
2134	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
2135		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
2136	else if (vmw_fifo_have_pitchlock(vmw_priv))
2137		vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
2138	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
2139	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
2140	if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
2141		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
2142
2143	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
2144		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
2145			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
2146		return -EINVAL;
2147	}
2148
2149	return 0;
2150}
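
/*
 * Illustrative sketch, not part of the original file: a typical call to
 * vmw_kms_write_svga(). For a 1024x768 mode at 32 bpp the pitch is
 * 1024 * 4 = 4096 bytes; depth 24 is assumed here as the usual pairing
 * for 32 bpp, and the function itself verifies it against the host.
 */
static int example_write_svga_mode(struct vmw_private *vmw_priv)
{
	const unsigned width = 1024, height = 768, bpp = 32, depth = 24;

	return vmw_kms_write_svga(vmw_priv, width, height,
				  width * (bpp / 8), bpp, depth);
}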
2151
2152bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
2153				uint32_t pitch,
2154				uint32_t height)
2155{
2156	return ((u64) pitch * (u64) height) < (u64)
2157		((dev_priv->active_display_unit == vmw_du_screen_target) ?
2158		 dev_priv->max_primary_mem : dev_priv->vram_size);
2159}
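
/*
 * Worked example, not part of the original file: for a 1920x1080 mode
 * at an assumed 4 bytes per pixel the pitch is 1920 * 4 = 7680 bytes,
 * so the product checked above is 7680 * 1080 = 8294400 bytes
 * (roughly 7.9 MiB), which must stay below vram_size, or below
 * max_primary_mem when screen targets are in use.
 */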
2160
2161/**
2162 * vmw_du_update_layout - Update the display units with the topology from the
2163 * resolution plugin and generate a DRM uevent
2164 * @dev_priv: device private
2165 * @num_rects: number of drm_rect in rects
2166 * @rects: topology to update
2167 */
2168static int vmw_du_update_layout(struct vmw_private *dev_priv,
2169				unsigned int num_rects, struct drm_rect *rects)
2170{
2171	struct drm_device *dev = &dev_priv->drm;
2172	struct vmw_display_unit *du;
2173	struct drm_connector *con;
2174	struct drm_connector_list_iter conn_iter;
2175	struct drm_modeset_acquire_ctx ctx;
2176	struct drm_crtc *crtc;
2177	int ret;
2178
2179	/* Currently gui_x/y is protected with the crtc mutex */
2180	mutex_lock(&dev->mode_config.mutex);
2181	drm_modeset_acquire_init(&ctx, 0);
2182retry:
2183	drm_for_each_crtc(crtc, dev) {
2184		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2185		if (ret < 0) {
2186			if (ret == -EDEADLK) {
2187				drm_modeset_backoff(&ctx);
2188				goto retry;
2189			}
2190			goto out_fini;
2191		}
2192	}
2193
2194	drm_connector_list_iter_begin(dev, &conn_iter);
2195	drm_for_each_connector_iter(con, &conn_iter) {
2196		du = vmw_connector_to_du(con);
2197		if (num_rects > du->unit) {
2198			du->pref_width = drm_rect_width(&rects[du->unit]);
2199			du->pref_height = drm_rect_height(&rects[du->unit]);
2200			du->pref_active = true;
2201			du->gui_x = rects[du->unit].x1;
2202			du->gui_y = rects[du->unit].y1;
2203		} else {
2204			du->pref_width  = VMWGFX_MIN_INITIAL_WIDTH;
2205			du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
2206			du->pref_active = false;
2207			du->gui_x = 0;
2208			du->gui_y = 0;
2209		}
2210	}
2211	drm_connector_list_iter_end(&conn_iter);
2212
2213	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2214		du = vmw_connector_to_du(con);
2215		if (num_rects > du->unit) {
2216			drm_object_property_set_value
2217			  (&con->base, dev->mode_config.suggested_x_property,
2218			   du->gui_x);
2219			drm_object_property_set_value
2220			  (&con->base, dev->mode_config.suggested_y_property,
2221			   du->gui_y);
2222		} else {
2223			drm_object_property_set_value
2224			  (&con->base, dev->mode_config.suggested_x_property,
2225			   0);
2226			drm_object_property_set_value
2227			  (&con->base, dev->mode_config.suggested_y_property,
2228			   0);
2229		}
2230		con->status = vmw_du_connector_detect(con, true);
2231	}
2232out_fini:
2233	drm_modeset_drop_locks(&ctx);
2234	drm_modeset_acquire_fini(&ctx);
2235	mutex_unlock(&dev->mode_config.mutex);
2236
2237	drm_sysfs_hotplug_event(dev);
2238
2239	return 0;
2240}
2241
2242int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2243			  u16 *r, u16 *g, u16 *b,
2244			  uint32_t size,
2245			  struct drm_modeset_acquire_ctx *ctx)
2246{
2247	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2248	int i;
2249
2250	for (i = 0; i < size; i++) {
2251		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2252			  r[i], g[i], b[i]);
2253		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2254		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2255		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2256	}
2257
2258	return 0;
2259}
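
/*
 * Worked example, not part of the original file: the loop above packs
 * the palette as three consecutive registers per entry, so entry i
 * starts at SVGA_PALETTE_BASE + 3 * i. Only the high byte of each
 * 16-bit component survives, e.g. r[i] == 0xabcd is written as 0xab.
 */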
2260
2261int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2262{
2263	return 0;
2264}
2265
2266enum drm_connector_status
2267vmw_du_connector_detect(struct drm_connector *connector, bool force)
2268{
2269	uint32_t num_displays;
2270	struct drm_device *dev = connector->dev;
2271	struct vmw_private *dev_priv = vmw_priv(dev);
2272	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2273
2274	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2275
2276	return ((vmw_connector_to_du(connector)->unit < num_displays &&
2277		 du->pref_active) ?
2278		connector_status_connected : connector_status_disconnected);
2279}
2280
2281static struct drm_display_mode vmw_kms_connector_builtin[] = {
2282	/* 640x480@60Hz */
2283	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
2284		   752, 800, 0, 480, 489, 492, 525, 0,
2285		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2286	/* 800x600@60Hz */
2287	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
2288		   968, 1056, 0, 600, 601, 605, 628, 0,
2289		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2290	/* 1024x768@60Hz */
2291	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
2292		   1184, 1344, 0, 768, 771, 777, 806, 0,
2293		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2294	/* 1152x864@75Hz */
2295	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
2296		   1344, 1600, 0, 864, 865, 868, 900, 0,
2297		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2298	/* 1280x720@60Hz */
2299	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
2300		   1472, 1664, 0, 720, 723, 728, 748, 0,
2301		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2302	/* 1280x768@60Hz */
2303	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
2304		   1472, 1664, 0, 768, 771, 778, 798, 0,
2305		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2306	/* 1280x800@60Hz */
2307	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
2308		   1480, 1680, 0, 800, 803, 809, 831, 0,
2309		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2310	/* 1280x960@60Hz */
2311	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
2312		   1488, 1800, 0, 960, 961, 964, 1000, 0,
2313		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2314	/* 1280x1024@60Hz */
2315	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
2316		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
2317		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2318	/* 1360x768@60Hz */
2319	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
2320		   1536, 1792, 0, 768, 771, 777, 795, 0,
2321		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2322	/* 1400x1050@60Hz */
2323	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
2324		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
2325		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2326	/* 1440x900@60Hz */
2327	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
2328		   1672, 1904, 0, 900, 903, 909, 934, 0,
2329		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2330	/* 1600x1200@60Hz */
2331	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
2332		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
2333		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2334	/* 1680x1050@60Hz */
2335	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
2336		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
2337		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2338	/* 1792x1344@60Hz */
2339	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2340		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
2341		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2342	/* 1856x1392@60Hz */
2343	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2344		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
2345		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2346	/* 1920x1080@60Hz */
2347	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
2348		   2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
2349		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2350	/* 1920x1200@60Hz */
2351	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2352		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
2353		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2354	/* 1920x1440@60Hz */
2355	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2356		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
2357		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2358	/* 2560x1440@60Hz */
2359	{ DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
2360		   2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
2361		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2362	/* 2560x1600@60Hz */
2363	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
2364		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
2365		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2366	/* 2880x1800@60Hz */
2367	{ DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
2368		   2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
2369		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2370	/* 3840x2160@60Hz */
2371	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
2372		   3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
2373		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2374	/* 3840x2400@60Hz */
2375	{ DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
2376		   3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
2377		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2378	/* Terminate */
2379	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
2380};
2381
2382/**
2383 * vmw_guess_mode_timing - Provide fake timings for a
2384 * 60Hz vrefresh mode.
2385 *
2386 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2387 * members filled in.
2388 */
2389void vmw_guess_mode_timing(struct drm_display_mode *mode)
2390{
2391	mode->hsync_start = mode->hdisplay + 50;
2392	mode->hsync_end = mode->hsync_start + 50;
2393	mode->htotal = mode->hsync_end + 50;
2394
2395	mode->vsync_start = mode->vdisplay + 50;
2396	mode->vsync_end = mode->vsync_start + 50;
2397	mode->vtotal = mode->vsync_end + 50;
2398
2399	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2400}
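
/*
 * Worked example, not part of the original file: for a 1024x768 mode
 * the code above yields htotal = 1024 + 150 = 1174 and
 * vtotal = 768 + 150 = 918, so clock = 1174 * 918 / 100 * 6 = 64662 kHz,
 * which corresponds to 64662000 / (1174 * 918) ~= 60 Hz vrefresh.
 */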
2401
2402
2403int vmw_du_connector_fill_modes(struct drm_connector *connector,
2404				uint32_t max_width, uint32_t max_height)
2405{
2406	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2407	struct drm_device *dev = connector->dev;
2408	struct vmw_private *dev_priv = vmw_priv(dev);
2409	struct drm_display_mode *mode = NULL;
2410	struct drm_display_mode *bmode;
2411	struct drm_display_mode prefmode = { DRM_MODE("preferred",
2412		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2413		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2414		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2415	};
2416	int i;
2417	u32 assumed_bpp = 4;
2418
2419	if (dev_priv->assume_16bpp)
2420		assumed_bpp = 2;
2421
2422	max_width  = min(max_width,  dev_priv->texture_max_width);
2423	max_height = min(max_height, dev_priv->texture_max_height);
2424
2425	/*
2426	 * For STDU there is an extra limit on the mode size, imposed by
2427	 * the SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
2428	 */
2429	if (dev_priv->active_display_unit == vmw_du_screen_target) {
2430		max_width  = min(max_width,  dev_priv->stdu_max_width);
2431		max_height = min(max_height, dev_priv->stdu_max_height);
2432	}
2433
2434	/* Add preferred mode */
2435	mode = drm_mode_duplicate(dev, &prefmode);
2436	if (!mode)
2437		return 0;
2438	mode->hdisplay = du->pref_width;
2439	mode->vdisplay = du->pref_height;
2440	vmw_guess_mode_timing(mode);
2441	drm_mode_set_name(mode);
2442
2443	if (vmw_kms_validate_mode_vram(dev_priv,
2444					mode->hdisplay * assumed_bpp,
2445					mode->vdisplay)) {
2446		drm_mode_probed_add(connector, mode);
2447	} else {
2448		drm_mode_destroy(dev, mode);
2449		mode = NULL;
2450	}
2451
2452	if (du->pref_mode) {
2453		list_del_init(&du->pref_mode->head);
2454		drm_mode_destroy(dev, du->pref_mode);
2455	}
2456
2457	/* mode might be NULL here; this is intended */
2458	du->pref_mode = mode;
2459
2460	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
2461		bmode = &vmw_kms_connector_builtin[i];
2462		if (bmode->hdisplay > max_width ||
2463		    bmode->vdisplay > max_height)
2464			continue;
2465
2466		if (!vmw_kms_validate_mode_vram(dev_priv,
2467						bmode->hdisplay * assumed_bpp,
2468						bmode->vdisplay))
2469			continue;
2470
2471		mode = drm_mode_duplicate(dev, bmode);
2472		if (!mode)
2473			return 0;
2474
2475		drm_mode_probed_add(connector, mode);
2476	}
2477
2478	drm_connector_list_update(connector);
2479	/* Move the preferred mode first, to help apps pick the right mode. */
2480	drm_mode_sort(&connector->modes);
2481
2482	return 1;
2483}
2484
2485/**
2486 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2487 * @dev: drm device for the ioctl
2488 * @data: data pointer for the ioctl
2489 * @file_priv: drm file for the ioctl call
2490 *
2491 * Update preferred topology of display unit as per ioctl request. The topology
2492 * is expressed as array of drm_vmw_rect.
2493 * e.g.
2494 * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2495 *
2496 * NOTE:
2497 * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
2498 * Besides the device limit on topology, x + w and y + h (lower right) cannot
2499 * be greater than INT_MAX. Topology beyond these limits returns an error.
2500 *
2501 * Returns:
2502 * Zero on success, negative errno on failure.
2503 */
2504int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2505				struct drm_file *file_priv)
2506{
2507	struct vmw_private *dev_priv = vmw_priv(dev);
2508	struct drm_mode_config *mode_config = &dev->mode_config;
2509	struct drm_vmw_update_layout_arg *arg =
2510		(struct drm_vmw_update_layout_arg *)data;
2511	void __user *user_rects;
2512	struct drm_vmw_rect *rects;
2513	struct drm_rect *drm_rects;
2514	unsigned rects_size;
2515	int ret, i;
2516
2517	if (!arg->num_outputs) {
2518		struct drm_rect def_rect = {0, 0,
2519					    VMWGFX_MIN_INITIAL_WIDTH,
2520					    VMWGFX_MIN_INITIAL_HEIGHT};
2521		vmw_du_update_layout(dev_priv, 1, &def_rect);
2522		return 0;
2523	}
2524
2525	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2526	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2527			GFP_KERNEL);
2528	if (unlikely(!rects))
2529		return -ENOMEM;
2530
2531	user_rects = (void __user *)(unsigned long)arg->rects;
2532	ret = copy_from_user(rects, user_rects, rects_size);
2533	if (unlikely(ret != 0)) {
2534		DRM_ERROR("Failed to get rects.\n");
2535		ret = -EFAULT;
2536		goto out_free;
2537	}
2538
2539	drm_rects = (struct drm_rect *)rects;
2540
2541	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2542	for (i = 0; i < arg->num_outputs; i++) {
2543		struct drm_vmw_rect curr_rect;
2544
2545		/* Verify user-space rects for overflow as the kernel uses drm_rect */
2546		if ((rects[i].x + rects[i].w > INT_MAX) ||
2547		    (rects[i].y + rects[i].h > INT_MAX)) {
2548			ret = -ERANGE;
2549			goto out_free;
2550		}
2551
2552		curr_rect = rects[i];
2553		drm_rects[i].x1 = curr_rect.x;
2554		drm_rects[i].y1 = curr_rect.y;
2555		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2556		drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2557
2558		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
2559			      drm_rects[i].x1, drm_rects[i].y1,
2560			      drm_rects[i].x2, drm_rects[i].y2);
2561
2562		/*
2563		 * Currently this check limits the topology to within
2564		 * mode_config->max (which actually is the max texture size
2565		 * supported by the virtual device). This limit is here to
2566		 * address window managers that create a big framebuffer for
2567		 * the whole topology.
2568		 */
2569		if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
2570		    drm_rects[i].x2 > mode_config->max_width ||
2571		    drm_rects[i].y2 > mode_config->max_height) {
2572			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2573				      drm_rects[i].x1, drm_rects[i].y1,
2574				      drm_rects[i].x2, drm_rects[i].y2);
2575			ret = -EINVAL;
2576			goto out_free;
2577		}
2578	}
2579
2580	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2581
2582	if (ret == 0)
2583		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2584
2585out_free:
2586	kfree(rects);
2587	return ret;
2588}
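
/*
 * Illustrative sketch, not part of the original file: how user space
 * would typically describe the first two outputs of the topology from
 * the kernel-doc example above. Field names follow struct
 * drm_vmw_update_layout_arg and struct drm_vmw_rect from the vmwgfx
 * uapi header; the ioctl macro name and surrounding code are assumed.
 *
 *	struct drm_vmw_rect rects[2] = {
 *		{ .x = 0,   .y = 0, .w = 640, .h = 480 },
 *		{ .x = 640, .y = 0, .w = 800, .h = 600 },
 *	};
 *	struct drm_vmw_update_layout_arg arg = {
 *		.num_outputs = 2,
 *		.rects = (unsigned long)rects,
 *	};
 *	ioctl(fd, DRM_IOCTL_VMW_UPDATE_LAYOUT, &arg);
 */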
2589
2590/**
2591 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2592 * on a set of cliprects and a set of display units.
2593 *
2594 * @dev_priv: Pointer to a device private structure.
2595 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2596 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
2597 * Cliprects are given in framebuffer coordinates.
2598 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2599 * be NULL. Cliprects are given in source coordinates.
2600 * @dest_x: X coordinate offset for the crtc / destination clip rects.
2601 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2602 * @num_clips: Number of cliprects in the @clips or @vclips array.
2603 * @increment: Integer with which to increment the clip counter when looping.
2604 * Used to skip a predetermined number of clip rects.
2605 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2606 */
2607int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2608			 struct vmw_framebuffer *framebuffer,
2609			 const struct drm_clip_rect *clips,
2610			 const struct drm_vmw_rect *vclips,
2611			 s32 dest_x, s32 dest_y,
2612			 int num_clips,
2613			 int increment,
2614			 struct vmw_kms_dirty *dirty)
2615{
2616	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2617	struct drm_crtc *crtc;
2618	u32 num_units = 0;
2619	u32 i, k;
2620
2621	dirty->dev_priv = dev_priv;
2622
2623	/* If crtc is passed, no need to iterate over other display units */
2624	if (dirty->crtc) {
2625		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2626	} else {
2627		list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
2628				    head) {
2629			struct drm_plane *plane = crtc->primary;
2630
2631			if (plane->state->fb == &framebuffer->base)
2632				units[num_units++] = vmw_crtc_to_du(crtc);
2633		}
2634	}
2635
2636	for (k = 0; k < num_units; k++) {
2637		struct vmw_display_unit *unit = units[k];
2638		s32 crtc_x = unit->crtc.x;
2639		s32 crtc_y = unit->crtc.y;
2640		s32 crtc_width = unit->crtc.mode.hdisplay;
2641		s32 crtc_height = unit->crtc.mode.vdisplay;
2642		const struct drm_clip_rect *clips_ptr = clips;
2643		const struct drm_vmw_rect *vclips_ptr = vclips;
2644
2645		dirty->unit = unit;
2646		if (dirty->fifo_reserve_size > 0) {
2647			dirty->cmd = VMW_CMD_RESERVE(dev_priv,
2648						      dirty->fifo_reserve_size);
2649			if (!dirty->cmd)
2650				return -ENOMEM;
2651
2652			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2653		}
2654		dirty->num_hits = 0;
2655		for (i = 0; i < num_clips; i++, clips_ptr += increment,
2656		       vclips_ptr += increment) {
2657			s32 clip_left;
2658			s32 clip_top;
2659
2660			/*
2661			 * Select clip array type. Note that integer type
2662			 * in @clips is unsigned short, whereas in @vclips
2663			 * it's 32-bit.
2664			 */
2665			if (clips) {
2666				dirty->fb_x = (s32) clips_ptr->x1;
2667				dirty->fb_y = (s32) clips_ptr->y1;
2668				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2669					crtc_x;
2670				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2671					crtc_y;
2672			} else {
2673				dirty->fb_x = vclips_ptr->x;
2674				dirty->fb_y = vclips_ptr->y;
2675				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2676					dest_x - crtc_x;
2677				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2678					dest_y - crtc_y;
2679			}
2680
2681			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2682			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2683
2684			/* Skip this clip if it's outside the crtc region */
2685			if (dirty->unit_x1 >= crtc_width ||
2686			    dirty->unit_y1 >= crtc_height ||
2687			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2688				continue;
2689
2690			/* Clip right and bottom to crtc limits */
2691			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2692					       crtc_width);
2693			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2694					       crtc_height);
2695
2696			/* Clip left and top to crtc limits */
2697			clip_left = min_t(s32, dirty->unit_x1, 0);
2698			clip_top = min_t(s32, dirty->unit_y1, 0);
2699			dirty->unit_x1 -= clip_left;
2700			dirty->unit_y1 -= clip_top;
2701			dirty->fb_x -= clip_left;
2702			dirty->fb_y -= clip_top;
2703
2704			dirty->clip(dirty);
2705		}
2706
2707		dirty->fifo_commit(dirty);
2708	}
2709
2710	return 0;
2711}
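
/*
 * Illustrative sketch, not part of the original file: the minimal shape
 * of a vmw_kms_dirty closure as consumed by the helper above. A caller
 * would set dirty->fifo_reserve_size to num_clips times its per-clip
 * command size and point ->clip / ->fifo_commit at callbacks like
 * these. The example_* names and the command size are hypothetical.
 */
#define EXAMPLE_CLIP_CMD_SIZE sizeof(u32)	/* hypothetical per-clip size */

static void example_clip(struct vmw_kms_dirty *dirty)
{
	/*
	 * Encode one device command at dirty->cmd for the already-clipped
	 * rectangle (unit_x1, unit_y1)-(unit_x2, unit_y2), sourced from
	 * (fb_x, fb_y) in the framebuffer, then count it.
	 */
	dirty->num_hits++;
}

static void example_fifo_commit(struct vmw_kms_dirty *dirty)
{
	/* Submit only the commands that were actually encoded. */
	vmw_cmd_commit(dirty->dev_priv,
		       dirty->num_hits * EXAMPLE_CLIP_CMD_SIZE);
}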
2712
2713/**
2714 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
2715 * cleanup and fencing
2716 * @dev_priv: Pointer to the device-private struct
2717 * @file_priv: Pointer identifying the client when user-space fencing is used
2718 * @ctx: Pointer to the validation context
2719 * @out_fence: If non-NULL, returned refcounted fence-pointer
2720 * @user_fence_rep: If non-NULL, pointer to user-space address area
2721 * in which to copy user-space fence info
2722 */
2723void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2724				      struct drm_file *file_priv,
2725				      struct vmw_validation_context *ctx,
2726				      struct vmw_fence_obj **out_fence,
2727				      struct drm_vmw_fence_rep __user *
2728				      user_fence_rep)
2729{
2730	struct vmw_fence_obj *fence = NULL;
2731	uint32_t handle = 0;
2732	int ret = 0;
2733
2734	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2735	    out_fence)
2736		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2737						 file_priv ? &handle : NULL);
2738	vmw_validation_done(ctx, fence);
2739	if (file_priv)
2740		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2741					    ret, user_fence_rep, fence,
2742					    handle, -1);
2743	if (out_fence)
2744		*out_fence = fence;
2745	else
2746		vmw_fence_obj_unreference(&fence);
2747}
2748
2749/**
2750 * vmw_kms_update_proxy - Helper function to update a proxy surface from
2751 * its backing MOB.
2752 *
2753 * @res: Pointer to the surface resource
2754 * @clips: Clip rects in framebuffer (surface) space.
2755 * @num_clips: Number of clips in @clips.
2756 * @increment: Integer with which to increment the clip counter when looping.
2757 * Used to skip a predetermined number of clip rects.
2758 *
2759 * This function makes sure the proxy surface is updated from its backing MOB
2760 * using the region given by @clips. The surface resource @res and its backing
2761 * MOB need to be reserved and validated on call.
2762 */
2763int vmw_kms_update_proxy(struct vmw_resource *res,
2764			 const struct drm_clip_rect *clips,
2765			 unsigned num_clips,
2766			 int increment)
2767{
2768	struct vmw_private *dev_priv = res->dev_priv;
2769	struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
2770	struct {
2771		SVGA3dCmdHeader header;
2772		SVGA3dCmdUpdateGBImage body;
2773	} *cmd;
2774	SVGA3dBox *box;
2775	size_t copy_size = 0;
2776	int i;
2777
2778	if (!clips)
2779		return 0;
2780
2781	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
2782	if (!cmd)
2783		return -ENOMEM;
2784
2785	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
2786		box = &cmd->body.box;
2787
2788		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2789		cmd->header.size = sizeof(cmd->body);
2790		cmd->body.image.sid = res->id;
2791		cmd->body.image.face = 0;
2792		cmd->body.image.mipmap = 0;
2793
2794		if (clips->x1 > size->width || clips->x2 > size->width ||
2795		    clips->y1 > size->height || clips->y2 > size->height) {
2796			DRM_ERROR("Invalid clips outside of framebuffer.\n");
2797			return -EINVAL;
2798		}
2799
2800		box->x = clips->x1;
2801		box->y = clips->y1;
2802		box->z = 0;
2803		box->w = clips->x2 - clips->x1;
2804		box->h = clips->y2 - clips->y1;
2805		box->d = 1;
2806
2807		copy_size += sizeof(*cmd);
2808	}
2809
2810	vmw_cmd_commit(dev_priv, copy_size);
2811
2812	return 0;
2813}
2814
2815/**
2816 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2817 * property.
2818 *
2819 * @dev_priv: Pointer to a device private struct.
2820 *
2821 * Sets up the implicit placement property unless it's already set up.
2822 */
2823void
2824vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
2825{
2826	if (dev_priv->implicit_placement_property)
2827		return;
2828
2829	dev_priv->implicit_placement_property =
2830		drm_property_create_range(&dev_priv->drm,
2831					  DRM_MODE_PROP_IMMUTABLE,
2832					  "implicit_placement", 0, 1);
2833}
2834
2835/**
2836 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2837 *
2838 * @dev: Pointer to the drm device
2839 * Return: 0 on success. Negative error code on failure.
2840 */
2841int vmw_kms_suspend(struct drm_device *dev)
2842{
2843	struct vmw_private *dev_priv = vmw_priv(dev);
2844
2845	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2846	if (IS_ERR(dev_priv->suspend_state)) {
2847		int ret = PTR_ERR(dev_priv->suspend_state);
2848
2849		DRM_ERROR("Failed kms suspend: %d\n", ret);
2850		dev_priv->suspend_state = NULL;
2851
2852		return ret;
2853	}
2854
2855	return 0;
2856}
2857
2858
2859/**
2860 * vmw_kms_resume - Re-enable modesetting and restore state
2861 *
2862 * @dev: Pointer to the drm device
2863 * Return: 0 on success. Negative error code on failure.
2864 *
2865 * State is resumed from a previous vmw_kms_suspend(). It's illegal
2866 * to call this function without a previous vmw_kms_suspend().
2867 */
2868int vmw_kms_resume(struct drm_device *dev)
2869{
2870	struct vmw_private *dev_priv = vmw_priv(dev);
2871	int ret;
2872
2873	if (WARN_ON(!dev_priv->suspend_state))
2874		return 0;
2875
2876	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
2877	dev_priv->suspend_state = NULL;
2878
2879	return ret;
2880}
2881
2882/**
2883 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
2884 *
2885 * @dev: Pointer to the drm device
2886 */
2887void vmw_kms_lost_device(struct drm_device *dev)
2888{
2889	drm_atomic_helper_shutdown(dev);
2890}
2891
2892/**
2893 * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
2894 * @update: The closure structure.
2895 *
2896 * Call this helper after setting callbacks in &vmw_du_update_plane to do a
2897 * plane update on a display unit.
2898 *
2899 * Return: 0 on success or a negative error code on failure.
2900 */
2901int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
2902{
2903	struct drm_plane_state *state = update->plane->state;
2904	struct drm_plane_state *old_state = update->old_state;
2905	struct drm_atomic_helper_damage_iter iter;
2906	struct drm_rect clip;
2907	struct drm_rect bb;
2908	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
2909	uint32_t reserved_size = 0;
2910	uint32_t submit_size = 0;
2911	uint32_t curr_size = 0;
2912	uint32_t num_hits = 0;
2913	void *cmd_start;
2914	char *cmd_next;
2915	int ret;
2916
2917	/*
2918	 * Iterate in advance to check if we really need a plane update and to
2919	 * find the number of clips actually inside the plane src, for fifo allocation.
2920	 */
2921	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2922	drm_atomic_for_each_plane_damage(&iter, &clip)
2923		num_hits++;
2924
2925	if (num_hits == 0)
2926		return 0;
2927
2928	if (update->vfb->bo) {
2929		struct vmw_framebuffer_bo *vfbbo =
2930			container_of(update->vfb, typeof(*vfbbo), base);
2931
2932		/*
2933		 * For screen targets we want a mappable bo, for everything else we want
2934		 * accelerated i.e. host backed (vram or gmr) bo. If the display unit
2935		 * is not a screen target then mobs shouldn't be available.
2936		 */
2937		if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
2938			vmw_bo_placement_set(vfbbo->buffer,
2939					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
2940					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
2941		} else {
2942			WARN_ON(update->dev_priv->has_mob);
2943			vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
2944		}
2945		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
2946	} else {
2947		struct vmw_framebuffer_surface *vfbs =
2948			container_of(update->vfb, typeof(*vfbs), base);
2949
2950		ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
2951						  0, VMW_RES_DIRTY_NONE, NULL,
2952						  NULL);
2953	}
2954
2955	if (ret)
2956		return ret;
2957
2958	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
2959	if (ret)
2960		goto out_unref;
2961
2962	reserved_size = update->calc_fifo_size(update, num_hits);
2963	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
2964	if (!cmd_start) {
2965		ret = -ENOMEM;
2966		goto out_revert;
2967	}
2968
2969	cmd_next = cmd_start;
2970
2971	if (update->post_prepare) {
2972		curr_size = update->post_prepare(update, cmd_next);
2973		cmd_next += curr_size;
2974		submit_size += curr_size;
2975	}
2976
2977	if (update->pre_clip) {
2978		curr_size = update->pre_clip(update, cmd_next, num_hits);
2979		cmd_next += curr_size;
2980		submit_size += curr_size;
2981	}
2982
2983	bb.x1 = INT_MAX;
2984	bb.y1 = INT_MAX;
2985	bb.x2 = INT_MIN;
2986	bb.y2 = INT_MIN;
2987
2988	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2989	drm_atomic_for_each_plane_damage(&iter, &clip) {
2990		uint32_t fb_x = clip.x1;
2991		uint32_t fb_y = clip.y1;
2992
2993		vmw_du_translate_to_crtc(state, &clip);
2994		if (update->clip) {
2995			curr_size = update->clip(update, cmd_next, &clip, fb_x,
2996						 fb_y);
2997			cmd_next += curr_size;
2998			submit_size += curr_size;
2999		}
3000		bb.x1 = min_t(int, bb.x1, clip.x1);
3001		bb.y1 = min_t(int, bb.y1, clip.y1);
3002		bb.x2 = max_t(int, bb.x2, clip.x2);
3003		bb.y2 = max_t(int, bb.y2, clip.y2);
3004	}
3005
3006	curr_size = update->post_clip(update, cmd_next, &bb);
3007	submit_size += curr_size;
3008
3009	if (reserved_size < submit_size)
3010		submit_size = 0;
3011
3012	vmw_cmd_commit(update->dev_priv, submit_size);
3013
3014	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
3015					 update->out_fence, NULL);
3016	return ret;
3017
3018out_revert:
3019	vmw_validation_revert(&val_ctx);
3020
3021out_unref:
3022	vmw_validation_unref_lists(&val_ctx);
3023	return ret;
3024}
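
/*
 * Illustrative sketch, not part of the original file: the callback
 * contract vmw_du_helper_plane_update() appears to expect. Judging from
 * the helper body above, ->calc_fifo_size() and ->post_clip() are
 * called unconditionally and must be set, while ->post_prepare(),
 * ->pre_clip() and ->clip() are optional. Each callback returns the
 * number of command bytes it wrote. The example_* names and the
 * per-clip size are hypothetical.
 */
static uint32_t example_calc_fifo_size(struct vmw_du_update_plane *update,
				       uint32_t num_hits)
{
	/* Reserve room for one fixed-size command per damage clip. */
	return num_hits * sizeof(u32);
}

static uint32_t example_post_clip(struct vmw_du_update_plane *update,
				  void *cmd, struct drm_rect *bb)
{
	/*
	 * Optionally emit a final command covering the bounding box @bb;
	 * returning 0 means no extra bytes were written.
	 */
	return 0;
}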
v4.17
 
   1/**************************************************************************
   2 *
   3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
 
 
 
 
  27
  28#include "vmwgfx_kms.h"
  29#include <drm/drm_plane_helper.h>
  30#include <drm/drm_atomic.h>
  31#include <drm/drm_atomic_helper.h>
 
 
  32#include <drm/drm_rect.h>
  33
  34/* Might need a hrtimer here? */
  35#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
  36
  37void vmw_du_cleanup(struct vmw_display_unit *du)
  38{
 
  39	drm_plane_cleanup(&du->primary);
  40	drm_plane_cleanup(&du->cursor);
 
  41
  42	drm_connector_unregister(&du->connector);
  43	drm_crtc_cleanup(&du->crtc);
  44	drm_encoder_cleanup(&du->encoder);
  45	drm_connector_cleanup(&du->connector);
  46}
  47
  48/*
  49 * Display Unit Cursor functions
  50 */
  51
  52static int vmw_cursor_update_image(struct vmw_private *dev_priv,
  53				   u32 *image, u32 width, u32 height,
  54				   u32 hotspotX, u32 hotspotY)
  55{
  56	struct {
  57		u32 cmd;
  58		SVGAFifoCmdDefineAlphaCursor cursor;
  59	} *cmd;
  60	u32 image_size = width * height * 4;
  61	u32 cmd_size = sizeof(*cmd) + image_size;
  62
  63	if (!image)
  64		return -EINVAL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  65
  66	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
  67	if (unlikely(cmd == NULL)) {
  68		DRM_ERROR("Fifo reserve failed.\n");
  69		return -ENOMEM;
  70	}
  71
  72	memset(cmd, 0, sizeof(*cmd));
  73
  74	memcpy(&cmd[1], image, image_size);
  75
  76	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
  77	cmd->cursor.id = 0;
  78	cmd->cursor.width = width;
  79	cmd->cursor.height = height;
  80	cmd->cursor.hotspotX = hotspotX;
  81	cmd->cursor.hotspotY = hotspotY;
  82
  83	vmw_fifo_commit_flush(dev_priv, cmd_size);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  84
  85	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  86}
  87
  88static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
  89				    struct vmw_dma_buffer *dmabuf,
  90				    u32 width, u32 height,
  91				    u32 hotspotX, u32 hotspotY)
  92{
  93	struct ttm_bo_kmap_obj map;
  94	unsigned long kmap_offset;
  95	unsigned long kmap_num;
  96	void *virtual;
  97	bool dummy;
  98	int ret;
  99
 100	kmap_offset = 0;
 101	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 
 
 
 102
 103	ret = ttm_bo_reserve(&dmabuf->base, true, false, NULL);
 104	if (unlikely(ret != 0)) {
 105		DRM_ERROR("reserve failed\n");
 106		return -EINVAL;
 
 
 
 
 
 107	}
 108
 109	ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
 110	if (unlikely(ret != 0))
 111		goto err_unreserve;
 
 
 
 
 
 
 
 
 
 
 
 
 
 112
 113	virtual = ttm_kmap_obj_virtual(&map, &dummy);
 114	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
 115				      hotspotX, hotspotY);
 
 116
 117	ttm_bo_kunmap(&map);
 118err_unreserve:
 119	ttm_bo_unreserve(&dmabuf->base);
 120
 
 
 121	return ret;
 122}
 123
 124
 125static void vmw_cursor_update_position(struct vmw_private *dev_priv,
 126				       bool show, int x, int y)
 127{
 128	u32 *fifo_mem = dev_priv->mmio_virt;
 
 129	uint32_t count;
 130
 131	spin_lock(&dev_priv->cursor_lock);
 132	vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
 133	vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
 134	vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
 135	count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
 136	vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
 
 
 
 
 
 
 
 
 
 
 
 
 137	spin_unlock(&dev_priv->cursor_lock);
 138}
 139
 140
 141void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 142			  struct ttm_object_file *tfile,
 143			  struct ttm_buffer_object *bo,
 144			  SVGA3dCmdHeader *header)
 145{
 146	struct ttm_bo_kmap_obj map;
 147	unsigned long kmap_offset;
 148	unsigned long kmap_num;
 149	SVGA3dCopyBox *box;
 150	unsigned box_count;
 151	void *virtual;
 152	bool dummy;
 153	struct vmw_dma_cmd {
 154		SVGA3dCmdHeader header;
 155		SVGA3dCmdSurfaceDMA dma;
 156	} *cmd;
 157	int i, ret;
 
 
 
 158
 159	cmd = container_of(header, struct vmw_dma_cmd, header);
 160
 161	/* No snooper installed */
 162	if (!srf->snooper.image)
 163		return;
 164
 165	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
 166		DRM_ERROR("face and mipmap for cursors should never != 0\n");
 167		return;
 168	}
 169
 170	if (cmd->header.size < 64) {
 171		DRM_ERROR("at least one full copy box must be given\n");
 172		return;
 173	}
 174
 175	box = (SVGA3dCopyBox *)&cmd[1];
 176	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
 177			sizeof(SVGA3dCopyBox);
 178
 179	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
 180	    box->x != 0    || box->y != 0    || box->z != 0    ||
 181	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
 182	    box->d != 1    || box_count != 1) {
 
 183		/* TODO handle none page aligned offsets */
 184		/* TODO handle more dst & src != 0 */
 185		/* TODO handle more then one copy */
 186		DRM_ERROR("Cant snoop dma request for cursor!\n");
 187		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
 188			  box->srcx, box->srcy, box->srcz,
 189			  box->x, box->y, box->z,
 190			  box->w, box->h, box->d, box_count,
 191			  cmd->dma.guest.ptr.offset);
 192		return;
 193	}
 194
 195	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
 196	kmap_num = (64*64*4) >> PAGE_SHIFT;
 197
 198	ret = ttm_bo_reserve(bo, true, false, NULL);
 199	if (unlikely(ret != 0)) {
 200		DRM_ERROR("reserve failed\n");
 201		return;
 202	}
 203
 204	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
 205	if (unlikely(ret != 0))
 206		goto err_unreserve;
 207
 208	virtual = ttm_kmap_obj_virtual(&map, &dummy);
 209
 210	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
 211		memcpy(srf->snooper.image, virtual, 64*64*4);
 
 212	} else {
 213		/* Image is unsigned pointer. */
 214		for (i = 0; i < box->h; i++)
 215			memcpy(srf->snooper.image + i * 64,
 216			       virtual + i * cmd->dma.guest.pitch,
 217			       box->w * 4);
 218	}
 219
 220	srf->snooper.age++;
 221
 222	ttm_bo_kunmap(&map);
 223err_unreserve:
 224	ttm_bo_unreserve(bo);
 225}
 226
 227/**
 228 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 229 *
 230 * @dev_priv: Pointer to the device private struct.
 231 *
 232 * Clears all legacy hotspots.
 233 */
 234void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
 235{
 236	struct drm_device *dev = dev_priv->dev;
 237	struct vmw_display_unit *du;
 238	struct drm_crtc *crtc;
 239
 240	drm_modeset_lock_all(dev);
 241	drm_for_each_crtc(crtc, dev) {
 242		du = vmw_crtc_to_du(crtc);
 243
 244		du->hotspot_x = 0;
 245		du->hotspot_y = 0;
 246	}
 247	drm_modeset_unlock_all(dev);
 248}
 249
 250void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
 251{
 252	struct drm_device *dev = dev_priv->dev;
 253	struct vmw_display_unit *du;
 254	struct drm_crtc *crtc;
 255
 256	mutex_lock(&dev->mode_config.mutex);
 257
 258	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 259		du = vmw_crtc_to_du(crtc);
 260		if (!du->cursor_surface ||
 261		    du->cursor_age == du->cursor_surface->snooper.age)
 
 262			continue;
 263
 264		du->cursor_age = du->cursor_surface->snooper.age;
 265		vmw_cursor_update_image(dev_priv,
 266					du->cursor_surface->snooper.image,
 267					64, 64,
 268					du->hotspot_x + du->core_hotspot_x,
 269					du->hotspot_y + du->core_hotspot_y);
 
 270	}
 271
 272	mutex_unlock(&dev->mode_config.mutex);
 273}
 274
 275
 276void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
 277{
 
 
 
 278	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
 279
 
 
 
 280	drm_plane_cleanup(plane);
 281}
 282
 283
 284void vmw_du_primary_plane_destroy(struct drm_plane *plane)
 285{
 286	drm_plane_cleanup(plane);
 287
 288	/* Planes are static in our case so we don't free it */
 289}
 290
 291
 292/**
 293 * vmw_du_vps_unpin_surf - unpins resource associated with a framebuffer surface
 294 *
 295 * @vps: plane state associated with the display surface
 296 * @unreference: true if we also want to unreference the display.
 297 */
 298void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
 299			     bool unreference)
 300{
 301	if (vps->surf) {
 302		if (vps->pinned) {
 303			vmw_resource_unpin(&vps->surf->res);
 304			vps->pinned--;
 305		}
 306
 307		if (unreference) {
 308			if (vps->pinned)
 309				DRM_ERROR("Surface still pinned\n");
 310			vmw_surface_unreference(&vps->surf);
 311		}
 312	}
 313}
 314
 315
 316/**
 317 * vmw_du_plane_cleanup_fb - Unpins the cursor
 318 *
 319 * @plane:  display plane
 320 * @old_state: Contains the FB to clean up
 321 *
 322 * Unpins the framebuffer surface
 323 *
 324 * Returns 0 on success
 325 */
 326void
 327vmw_du_plane_cleanup_fb(struct drm_plane *plane,
 328			struct drm_plane_state *old_state)
 329{
 330	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
 331
 332	vmw_du_plane_unpin_surf(vps, false);
 333}
 334
 335
 336/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 337 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 338 *
 339 * @plane:  display plane
 340 * @new_state: info on the new plane state, including the FB
 341 *
 342 * Returns 0 on success
 343 */
 344int
 345vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
 346			       struct drm_plane_state *new_state)
 347{
 348	struct drm_framebuffer *fb = new_state->fb;
 
 349	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
 
 350
 351
 352	if (vps->surf)
 353		vmw_surface_unreference(&vps->surf);
 
 
 354
 355	if (vps->dmabuf)
 356		vmw_dmabuf_unreference(&vps->dmabuf);
 
 
 357
 358	if (fb) {
 359		if (vmw_framebuffer_to_vfb(fb)->dmabuf) {
 360			vps->dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer;
 361			vmw_dmabuf_reference(vps->dmabuf);
 362		} else {
 363			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
 364			vmw_surface_reference(vps->surf);
 365		}
 366	}
 367
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 368	return 0;
 369}
 370
 371
 372void
 373vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
 374				  struct drm_plane_state *old_state)
 375{
 376	struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
 
 
 
 
 377	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
 378	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 379	struct vmw_plane_state *vps = vmw_plane_state_to_vps(plane->state);
 
 380	s32 hotspot_x, hotspot_y;
 381	int ret = 0;
 382
 
 
 383
 384	hotspot_x = du->hotspot_x;
 385	hotspot_y = du->hotspot_y;
 386
 387	if (plane->fb) {
 388		hotspot_x += plane->fb->hot_x;
 389		hotspot_y += plane->fb->hot_y;
 390	}
 391
 392	du->cursor_surface = vps->surf;
 393	du->cursor_dmabuf = vps->dmabuf;
 394
 395	if (vps->surf) {
 396		du->cursor_age = du->cursor_surface->snooper.age;
 
 397
 398		ret = vmw_cursor_update_image(dev_priv,
 399					      vps->surf->snooper.image,
 400					      64, 64, hotspot_x,
 401					      hotspot_y);
 402	} else if (vps->dmabuf) {
 403		ret = vmw_cursor_update_dmabuf(dev_priv, vps->dmabuf,
 404					       plane->state->crtc_w,
 405					       plane->state->crtc_h,
 406					       hotspot_x, hotspot_y);
 407	} else {
 408		vmw_cursor_update_position(dev_priv, false, 0, 0);
 409		return;
 410	}
 411
 412	if (!ret) {
 413		du->cursor_x = plane->state->crtc_x + du->set_gui_x;
 414		du->cursor_y = plane->state->crtc_y + du->set_gui_y;
 415
 416		vmw_cursor_update_position(dev_priv, true,
 417					   du->cursor_x + hotspot_x,
 418					   du->cursor_y + hotspot_y);
 
 
 
 419
 420		du->core_hotspot_x = hotspot_x - du->hotspot_x;
 421		du->core_hotspot_y = hotspot_y - du->hotspot_y;
 422	} else {
 423		DRM_ERROR("Failed to update cursor image\n");
 424	}
 425}
 426
 427
 428/**
 429 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 430 *
 431 * @plane: display plane
 432 * @state: info on the new plane state, including the FB
 433 *
 434 * Check if the new state is settable given the current state.  Other
 435 * than what the atomic helper checks, we care about crtc fitting
 436 * the FB and maintaining one active framebuffer.
 437 *
 438 * Returns 0 on success
 439 */
 440int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
 441				      struct drm_plane_state *state)
 442{
 
 
 
 
 443	struct drm_crtc_state *crtc_state = NULL;
 444	struct drm_framebuffer *new_fb = state->fb;
 
 445	int ret;
 446
 447	if (state->crtc)
 448		crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
 
 
 
 
 
 449
 450	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
 451						  DRM_PLANE_HELPER_NO_SCALING,
 452						  DRM_PLANE_HELPER_NO_SCALING,
 
 
 
 
 453						  false, true);
 454
 455	if (!ret && new_fb) {
 456		struct drm_crtc *crtc = state->crtc;
 457		struct vmw_connector_state *vcs;
 458		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 459		struct vmw_private *dev_priv = vmw_priv(crtc->dev);
 460		struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
 461
 462		vcs = vmw_connector_state_to_vcs(du->connector.state);
 463
 464		/* Only one active implicit framebuffer at a time. */
 465		mutex_lock(&dev_priv->global_kms_state_mutex);
 466		if (vcs->is_implicit && dev_priv->implicit_fb &&
 467		    !(dev_priv->num_implicit == 1 && du->active_implicit)
 468		    && dev_priv->implicit_fb != vfb) {
 469			DRM_ERROR("Multiple implicit framebuffers "
 470				  "not supported.\n");
 471			ret = -EINVAL;
 472		}
 473		mutex_unlock(&dev_priv->global_kms_state_mutex);
 474	}
 475
 476
 477	return ret;
 478}
 479
 480
 481/**
 482 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 483 *
 484 * @plane: cursor plane
 485 * @state: info on the new plane state
 486 *
 487 * This is a chance to fail if the new cursor state does not fit
 488 * our requirements.
 489 *
 490 * Returns 0 on success
 491 */
 492int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
 493				     struct drm_plane_state *new_state)
 494{
 
 
 495	int ret = 0;
 
 496	struct vmw_surface *surface = NULL;
 497	struct drm_framebuffer *fb = new_state->fb;
 498
 499	struct drm_rect src = drm_plane_state_src(new_state);
 500	struct drm_rect dest = drm_plane_state_dest(new_state);
 
 
 
 
 
 
 
 
 501
 502	/* Turning off */
 503	if (!fb)
 504		return ret;
 505
 506	ret = drm_plane_helper_check_update(plane, new_state->crtc, fb,
 507					    &src, &dest,
 508					    DRM_MODE_ROTATE_0,
 509					    DRM_PLANE_HELPER_NO_SCALING,
 510					    DRM_PLANE_HELPER_NO_SCALING,
 511					    true, true, &new_state->visible);
 512	if (!ret)
 513		return ret;
 514
 515	/* A lot of the code assumes this */
 516	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
 517		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
 518			  new_state->crtc_w, new_state->crtc_h);
 519		ret = -EINVAL;
 520	}
 521
 522	if (!vmw_framebuffer_to_vfb(fb)->dmabuf)
 523		surface = vmw_framebuffer_to_vfbs(fb)->surface;
 524
 525	if (surface && !surface->snooper.image) {
 526		DRM_ERROR("surface not suitable for cursor\n");
 527		ret = -EINVAL;
 
 
 
 
 528	}
 529
 530	return ret;
 531}
 532
 533
 534int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
 535			     struct drm_crtc_state *new_state)
 536{
 
 
 537	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
 538	int connector_mask = 1 << drm_connector_index(&du->connector);
 539	bool has_primary = new_state->plane_mask &
 540			   BIT(drm_plane_index(crtc->primary));
 541
 542	/* We always want to have an active plane with an active CRTC */
 543	if (has_primary != new_state->enable)
 544		return -EINVAL;
 545
 546
 547	if (new_state->connector_mask != connector_mask &&
 548	    new_state->connector_mask != 0) {
 549		DRM_ERROR("Invalid connectors configuration\n");
 550		return -EINVAL;
 551	}
 552
 553	/*
 554	 * Our virtual device does not have a dot clock, so use the logical
 555	 * clock value as the dot clock.
 556	 */
 557	if (new_state->mode.crtc_clock == 0)
 558		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;
 559
 560	return 0;
 561}
 562
 563
 564void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
 565			      struct drm_crtc_state *old_crtc_state)
 566{
 567}
 568
 569
 570void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
 571			      struct drm_crtc_state *old_crtc_state)
 572{
 573	struct drm_pending_vblank_event *event = crtc->state->event;
 574
 575	if (event) {
 576		crtc->state->event = NULL;
 577
 578		spin_lock_irq(&crtc->dev->event_lock);
 579		drm_crtc_send_vblank_event(crtc, event);
 580		spin_unlock_irq(&crtc->dev->event_lock);
 581	}
 582}
 583
 584
 585/**
 586 * vmw_du_crtc_duplicate_state - duplicate crtc state
 587 * @crtc: DRM crtc
 588 *
 589 * Allocates and returns a copy of the crtc state (both common and
 590 * vmw-specific) for the specified crtc.
 591 *
 592 * Returns: The newly allocated crtc state, or NULL on failure.
 593 */
 594struct drm_crtc_state *
 595vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
 596{
 597	struct drm_crtc_state *state;
 598	struct vmw_crtc_state *vcs;
 599
 600	if (WARN_ON(!crtc->state))
 601		return NULL;
 602
 603	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
 604
 605	if (!vcs)
 606		return NULL;
 607
 608	state = &vcs->base;
 609
 610	__drm_atomic_helper_crtc_duplicate_state(crtc, state);
 611
 612	return state;
 613}
 614
 615
 616/**
 617 * vmw_du_crtc_reset - creates a blank vmw crtc state
 618 * @crtc: DRM crtc
 619 *
 620 * Resets the atomic state for @crtc by freeing the state pointer (which
 621 * might be NULL, e.g. at driver load time) and allocating a new empty state
 622 * object.
 623 */
 624void vmw_du_crtc_reset(struct drm_crtc *crtc)
 625{
 626	struct vmw_crtc_state *vcs;
 627
 628
 629	if (crtc->state) {
 630		__drm_atomic_helper_crtc_destroy_state(crtc->state);
 631
 632		kfree(vmw_crtc_state_to_vcs(crtc->state));
 633	}
 634
 635	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
 636
 637	if (!vcs) {
 638		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
 639		return;
 640	}
 641
 642	crtc->state = &vcs->base;
 643	crtc->state->crtc = crtc;
 644}
 645
 646
 647/**
 648 * vmw_du_crtc_destroy_state - destroy crtc state
 649 * @crtc: DRM crtc
 650 * @state: state object to destroy
 651 *
 652 * Destroys the crtc state (both common and vmw-specific) for the
 653 * specified plane.
 654 */
 655void
 656vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
 657			  struct drm_crtc_state *state)
 658{
 659	drm_atomic_helper_crtc_destroy_state(crtc, state);
 660}
 661
 662
 663/**
 664 * vmw_du_plane_duplicate_state - duplicate plane state
 665 * @plane: drm plane
 666 *
 667 * Allocates and returns a copy of the plane state (both common and
 668 * vmw-specific) for the specified plane.
 669 *
 670 * Returns: The newly allocated plane state, or NULL on failure.
 671 */
 672struct drm_plane_state *
 673vmw_du_plane_duplicate_state(struct drm_plane *plane)
 674{
 675	struct drm_plane_state *state;
 676	struct vmw_plane_state *vps;
 677
 678	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
 679
 680	if (!vps)
 681		return NULL;
 682
 683	vps->pinned = 0;
 684	vps->cpp = 0;
 685
 686	/* Each ref counted resource needs to be acquired again */
 687	if (vps->surf)
 688		(void) vmw_surface_reference(vps->surf);
 689
 690	if (vps->dmabuf)
 691		(void) vmw_dmabuf_reference(vps->dmabuf);
 692
 693	state = &vps->base;
 694
 695	__drm_atomic_helper_plane_duplicate_state(plane, state);
 696
 697	return state;
 698}
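/*
 * Illustrative sketch of the duplicate/destroy pairing (hypothetical
 * caller): every pointer copied by the kmemdup() above must gain a
 * reference here and drop it again in vmw_du_plane_destroy_state(),
 * otherwise the surface/buffer refcounts go stale once either copy of
 * the state is freed.
 *
 *	struct drm_plane_state *dup;
 *
 *	dup = vmw_du_plane_duplicate_state(plane);	// surf/dmabuf ref++
 *	// ... inspect or modify dup ...
 *	vmw_du_plane_destroy_state(plane, dup);		// surf/dmabuf ref--
 */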
 699
 700
 701/**
 702 * vmw_du_plane_reset - creates a blank vmw plane state
 703 * @plane: drm plane
 704 *
 705 * Resets the atomic state for @plane by freeing the state pointer (which might
 706 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 707 */
 708void vmw_du_plane_reset(struct drm_plane *plane)
 709{
 710	struct vmw_plane_state *vps;
 711
 712
 713	if (plane->state)
 714		vmw_du_plane_destroy_state(plane, plane->state);
 715
 716	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
 717
 718	if (!vps) {
 719		DRM_ERROR("Cannot allocate vmw_plane_state\n");
 720		return;
 721	}
 722
 723	plane->state = &vps->base;
 724	plane->state->plane = plane;
 725	plane->state->rotation = DRM_MODE_ROTATE_0;
 726}
 727
 728
 729/**
 730 * vmw_du_plane_destroy_state - destroy plane state
 731 * @plane: DRM plane
 732 * @state: state object to destroy
 733 *
 734 * Destroys the plane state (both common and vmw-specific) for the
 735 * specified plane.
 736 */
 737void
 738vmw_du_plane_destroy_state(struct drm_plane *plane,
 739			   struct drm_plane_state *state)
 740{
 741	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
 742
 743
 744	/* Should have been freed by cleanup_fb */
 745	if (vps->surf)
 746		vmw_surface_unreference(&vps->surf);
 747
 748	if (vps->dmabuf)
 749		vmw_dmabuf_unreference(&vps->dmabuf);
 750
 751	drm_atomic_helper_plane_destroy_state(plane, state);
 752}
 753
 754
 755/**
 756 * vmw_du_connector_duplicate_state - duplicate connector state
 757 * @connector: DRM connector
 758 *
 759 * Allocates and returns a copy of the connector state (both common and
 760 * vmw-specific) for the specified connector.
 761 *
 762 * Returns: The newly allocated connector state, or NULL on failure.
 763 */
 764struct drm_connector_state *
 765vmw_du_connector_duplicate_state(struct drm_connector *connector)
 766{
 767	struct drm_connector_state *state;
 768	struct vmw_connector_state *vcs;
 769
 770	if (WARN_ON(!connector->state))
 771		return NULL;
 772
 773	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
 774
 775	if (!vcs)
 776		return NULL;
 777
 778	state = &vcs->base;
 779
 780	__drm_atomic_helper_connector_duplicate_state(connector, state);
 781
 782	return state;
 783}
 784
 785
 786/**
 787 * vmw_du_connector_reset - creates a blank vmw connector state
 788 * @connector: DRM connector
 789 *
 790 * Resets the atomic state for @connector by freeing the state pointer (which
 791 * might be NULL, e.g. at driver load time) and allocating a new empty state
 792 * object.
 793 */
 794void vmw_du_connector_reset(struct drm_connector *connector)
 795{
 796	struct vmw_connector_state *vcs;
 797
 798
 799	if (connector->state) {
 800		__drm_atomic_helper_connector_destroy_state(connector->state);
 801
 802		kfree(vmw_connector_state_to_vcs(connector->state));
 803	}
 804
 805	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
 806
 807	if (!vcs) {
 808		DRM_ERROR("Cannot allocate vmw_connector_state\n");
 809		return;
 810	}
 811
 812	__drm_atomic_helper_connector_reset(connector, &vcs->base);
 813}
 814
 815
 816/**
 817 * vmw_du_connector_destroy_state - destroy connector state
 818 * @connector: DRM connector
 819 * @state: state object to destroy
 820 *
 821 * Destroys the connector state (both common and vmw-specific) for the
 822 * specified plane.
 823 */
 824void
 825vmw_du_connector_destroy_state(struct drm_connector *connector,
 826			  struct drm_connector_state *state)
 827{
 828	drm_atomic_helper_connector_destroy_state(connector, state);
 829}
 830/*
 831 * Generic framebuffer code
 832 */
 833
 834/*
 835 * Surface framebuffer code
 836 */
 837
 838static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
 839{
 840	struct vmw_framebuffer_surface *vfbs =
 841		vmw_framebuffer_to_vfbs(framebuffer);
 842
 843	drm_framebuffer_cleanup(framebuffer);
 844	vmw_surface_unreference(&vfbs->surface);
 845	if (vfbs->base.user_obj)
 846		ttm_base_object_unref(&vfbs->base.user_obj);
 847
 848	kfree(vfbs);
 849}
 850
 851static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 852				  struct drm_file *file_priv,
 853				  unsigned flags, unsigned color,
 854				  struct drm_clip_rect *clips,
 855				  unsigned num_clips)
 856{
 857	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
 858	struct vmw_framebuffer_surface *vfbs =
 859		vmw_framebuffer_to_vfbs(framebuffer);
 860	struct drm_clip_rect norect;
 861	int ret, inc = 1;
 862
 863	/* Legacy Display Unit does not support 3D */
 864	if (dev_priv->active_display_unit == vmw_du_legacy)
 865		return -EINVAL;
 866
 867	drm_modeset_lock_all(dev_priv->dev);
 868
 869	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 870	if (unlikely(ret != 0)) {
 871		drm_modeset_unlock_all(dev_priv->dev);
 872		return ret;
 873	}
 874
 875	if (!num_clips) {
 876		num_clips = 1;
 877		clips = &norect;
 878		norect.x1 = norect.y1 = 0;
 879		norect.x2 = framebuffer->width;
 880		norect.y2 = framebuffer->height;
 881	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
 882		num_clips /= 2;
 883		inc = 2; /* skip source rects */
 884	}
 885
 886	if (dev_priv->active_display_unit == vmw_du_screen_object)
 887		ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
 888						   clips, NULL, NULL, 0, 0,
 889						   num_clips, inc, NULL, NULL);
 890	else
 891		ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
 892						 clips, NULL, NULL, 0, 0,
 893						 num_clips, inc, NULL, NULL);
 894
 895	vmw_fifo_flush(dev_priv, false);
 896	ttm_read_unlock(&dev_priv->reservation_sem);
 897
 898	drm_modeset_unlock_all(dev_priv->dev);
 899
 900	return 0;
 901}
 902
 903/**
 904 * vmw_kms_readback - Perform a readback from the screen system to
 905 * a dma-buffer backed framebuffer.
 906 *
 907 * @dev_priv: Pointer to the device private structure.
 908 * @file_priv: Pointer to a struct drm_file identifying the caller.
 909 * Must be set to NULL if @user_fence_rep is NULL.
 910 * @vfb: Pointer to the dma-buffer backed framebuffer.
 911 * @user_fence_rep: User-space provided structure for fence information.
 912 * Must be set to non-NULL if @file_priv is non-NULL.
 913 * @vclips: Array of clip rects.
 914 * @num_clips: Number of clip rects in @vclips.
 915 *
 916 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 917 * interrupted.
 918 */
 919int vmw_kms_readback(struct vmw_private *dev_priv,
 920		     struct drm_file *file_priv,
 921		     struct vmw_framebuffer *vfb,
 922		     struct drm_vmw_fence_rep __user *user_fence_rep,
 923		     struct drm_vmw_rect *vclips,
 924		     uint32_t num_clips)
 925{
 926	switch (dev_priv->active_display_unit) {
 927	case vmw_du_screen_object:
 928		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
 929					    user_fence_rep, vclips, num_clips,
 930					    NULL);
 931	case vmw_du_screen_target:
 932		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
 933					user_fence_rep, NULL, vclips, num_clips,
 934					1, false, true, NULL);
 935	default:
 936		WARN_ONCE(true,
 937			  "Readback called with invalid display system.\n");
  938	}
 939
 940	return -ENOSYS;
 941}
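/*
 * Illustrative call (hypothetical caller, values made up): reading an
 * 800x600 region at the origin back into a dma-buffer backed
 * framebuffer.
 *
 *	struct drm_vmw_rect clip = { .x = 0, .y = 0, .w = 800, .h = 600 };
 *	int ret;
 *
 *	ret = vmw_kms_readback(dev_priv, file_priv, vfb,
 *			       user_fence_rep, &clip, 1);
 */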
 942
 943
 944static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
 945	.destroy = vmw_framebuffer_surface_destroy,
 946	.dirty = vmw_framebuffer_surface_dirty,
 947};
 948
 949static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 950					   struct vmw_surface *surface,
 951					   struct vmw_framebuffer **out,
 952					   const struct drm_mode_fb_cmd2
 953					   *mode_cmd,
 954					   bool is_dmabuf_proxy)
  956{
 957	struct drm_device *dev = dev_priv->dev;
 958	struct vmw_framebuffer_surface *vfbs;
 959	enum SVGA3dSurfaceFormat format;
 960	int ret;
 961	struct drm_format_name_buf format_name;
 962
 963	/* 3D is only supported on HWv8 and newer hosts */
 964	if (dev_priv->active_display_unit == vmw_du_legacy)
 965		return -ENOSYS;
 966
 967	/*
 968	 * Sanity checks.
 969	 */
 970
 971	/* Surface must be marked as a scanout. */
 972	if (unlikely(!surface->scanout))
 973		return -EINVAL;
 974
 975	if (unlikely(surface->mip_levels[0] != 1 ||
 976		     surface->num_sizes != 1 ||
 977		     surface->base_size.width < mode_cmd->width ||
 978		     surface->base_size.height < mode_cmd->height ||
 979		     surface->base_size.depth != 1)) {
 980		DRM_ERROR("Incompatible surface dimensions "
 981			  "for requested mode.\n");
 982		return -EINVAL;
 983	}
 984
 985	switch (mode_cmd->pixel_format) {
 986	case DRM_FORMAT_ARGB8888:
 987		format = SVGA3D_A8R8G8B8;
 988		break;
 989	case DRM_FORMAT_XRGB8888:
 990		format = SVGA3D_X8R8G8B8;
 991		break;
 992	case DRM_FORMAT_RGB565:
 993		format = SVGA3D_R5G6B5;
 994		break;
 995	case DRM_FORMAT_XRGB1555:
 996		format = SVGA3D_A1R5G5B5;
 997		break;
 998	default:
 999		DRM_ERROR("Invalid pixel format: %s\n",
1000			  drm_get_format_name(mode_cmd->pixel_format, &format_name));
1001		return -EINVAL;
1002	}
1003
1004	/*
1005	 * For DX, surface format validation is done when surface->scanout
1006	 * is set.
1007	 */
1008	if (!dev_priv->has_dx && format != surface->format) {
1009		DRM_ERROR("Invalid surface format for requested mode.\n");
1010		return -EINVAL;
1011	}
1012
1013	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
1014	if (!vfbs) {
1015		ret = -ENOMEM;
1016		goto out_err1;
1017	}
1018
1019	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
1020	vfbs->surface = vmw_surface_reference(surface);
1021	vfbs->base.user_handle = mode_cmd->handles[0];
1022	vfbs->is_dmabuf_proxy = is_dmabuf_proxy;
1023
1024	*out = &vfbs->base;
1025
1026	ret = drm_framebuffer_init(dev, &vfbs->base.base,
1027				   &vmw_framebuffer_surface_funcs);
1028	if (ret)
1029		goto out_err2;
1030
1031	return 0;
1032
1033out_err2:
1034	vmw_surface_unreference(&surface);
1035	kfree(vfbs);
1036out_err1:
1037	return ret;
1038}
1039
1040/*
1041 * Dmabuf framebuffer code
1042 */
1043
1044static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
1045{
1046	struct vmw_framebuffer_dmabuf *vfbd =
1047		vmw_framebuffer_to_vfbd(framebuffer);
1048
1049	drm_framebuffer_cleanup(framebuffer);
1050	vmw_dmabuf_unreference(&vfbd->buffer);
1051	if (vfbd->base.user_obj)
1052		ttm_base_object_unref(&vfbd->base.user_obj);
1053
1054	kfree(vfbd);
1055}
1056
1057static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
1058				 struct drm_file *file_priv,
1059				 unsigned flags, unsigned color,
1060				 struct drm_clip_rect *clips,
1061				 unsigned num_clips)
1062{
1063	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
1064	struct vmw_framebuffer_dmabuf *vfbd =
1065		vmw_framebuffer_to_vfbd(framebuffer);
1066	struct drm_clip_rect norect;
1067	int ret, increment = 1;
1068
1069	drm_modeset_lock_all(dev_priv->dev);
1070
1071	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
1072	if (unlikely(ret != 0)) {
1073		drm_modeset_unlock_all(dev_priv->dev);
1074		return ret;
1075	}
1076
1077	if (!num_clips) {
1078		num_clips = 1;
1079		clips = &norect;
1080		norect.x1 = norect.y1 = 0;
1081		norect.x2 = framebuffer->width;
1082		norect.y2 = framebuffer->height;
1083	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
1084		num_clips /= 2;
1085		increment = 2;
1086	}
1087
1088	switch (dev_priv->active_display_unit) {
1089	case vmw_du_screen_target:
1090		ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
1091				       clips, NULL, num_clips, increment,
1092				       true, true, NULL);
1093		break;
1094	case vmw_du_screen_object:
1095		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
1096						  clips, NULL, num_clips,
1097						  increment, true, NULL, NULL);
1098		break;
1099	case vmw_du_legacy:
1100		ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
1101						  clips, num_clips, increment);
1102		break;
1103	default:
1104		ret = -EINVAL;
1105		WARN_ONCE(true, "Dirty called with invalid display system.\n");
1106		break;
1107	}
1108
1109	vmw_fifo_flush(dev_priv, false);
1110	ttm_read_unlock(&dev_priv->reservation_sem);
1111
1112	drm_modeset_unlock_all(dev_priv->dev);
1113
1114	return ret;
1115}
1116
1117static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
1118	.destroy = vmw_framebuffer_dmabuf_destroy,
1119	.dirty = vmw_framebuffer_dmabuf_dirty,
1120};
1121
1122/**
 1123 * vmw_framebuffer_pin - Pin the DMA buffer in a location suitable for
 1124 * access by the display system.
1125 */
1126static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
1127{
1128	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
1129	struct vmw_dma_buffer *buf;
1130	struct ttm_placement *placement;
1131	int ret;
1132
1133	buf = vfb->dmabuf ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
1134		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
1135
1136	if (!buf)
1137		return 0;
1138
1139	switch (dev_priv->active_display_unit) {
1140	case vmw_du_legacy:
1141		vmw_overlay_pause_all(dev_priv);
1142		ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
1143		vmw_overlay_resume_all(dev_priv);
1144		break;
1145	case vmw_du_screen_object:
1146	case vmw_du_screen_target:
1147		if (vfb->dmabuf) {
1148			if (dev_priv->capabilities & SVGA_CAP_3D) {
1149				/*
1150				 * Use surface DMA to get content to
 1151				 * screen target surface.
1152				 */
1153				placement = &vmw_vram_gmr_placement;
1154			} else {
1155				/* Use CPU blit. */
1156				placement = &vmw_sys_placement;
1157			}
1158		} else {
1159			/* Use surface / image update */
1160			placement = &vmw_mob_placement;
1161		}
1162
1163		return vmw_dmabuf_pin_in_placement(dev_priv, buf, placement,
1164						   false);
1165	default:
1166		return -EINVAL;
1167	}
1168
1169	return ret;
1170}
1171
1172static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
1173{
1174	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
1175	struct vmw_dma_buffer *buf;
1176
1177	buf = vfb->dmabuf ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
1178		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
1179
1180	if (WARN_ON(!buf))
1181		return 0;
1182
1183	return vmw_dmabuf_unpin(dev_priv, buf, false);
1184}
1185
1186/**
1187 * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
1188 *
1189 * @dev: DRM device
1190 * @mode_cmd: parameters for the new surface
1191 * @dmabuf_mob: MOB backing the DMA buf
1192 * @srf_out: newly created surface
1193 *
1194 * When the content FB is a DMA buf, we create a surface as a proxy to the
1195 * same buffer.  This way we can do a surface copy rather than a surface DMA.
 1196 * This is a more efficient approach.
1197 *
1198 * RETURNS:
1199 * 0 on success, error code otherwise
1200 */
1201static int vmw_create_dmabuf_proxy(struct drm_device *dev,
1202				   const struct drm_mode_fb_cmd2 *mode_cmd,
1203				   struct vmw_dma_buffer *dmabuf_mob,
1204				   struct vmw_surface **srf_out)
1205{
1206	uint32_t format;
1207	struct drm_vmw_size content_base_size = {0};
1208	struct vmw_resource *res;
1209	unsigned int bytes_pp;
1210	struct drm_format_name_buf format_name;
1211	int ret;
1212
1213	switch (mode_cmd->pixel_format) {
1214	case DRM_FORMAT_ARGB8888:
1215	case DRM_FORMAT_XRGB8888:
1216		format = SVGA3D_X8R8G8B8;
1217		bytes_pp = 4;
1218		break;
1219
1220	case DRM_FORMAT_RGB565:
1221	case DRM_FORMAT_XRGB1555:
1222		format = SVGA3D_R5G6B5;
1223		bytes_pp = 2;
1224		break;
1225
 1226	case DRM_FORMAT_C8:
1227		format = SVGA3D_P8;
1228		bytes_pp = 1;
1229		break;
1230
1231	default:
1232		DRM_ERROR("Invalid framebuffer format %s\n",
1233			  drm_get_format_name(mode_cmd->pixel_format, &format_name));
1234		return -EINVAL;
1235	}
1236
1237	content_base_size.width  = mode_cmd->pitches[0] / bytes_pp;
1238	content_base_size.height = mode_cmd->height;
1239	content_base_size.depth  = 1;
1240
1241	ret = vmw_surface_gb_priv_define(dev,
1242			0, /* kernel visible only */
1243			0, /* flags */
1244			format,
1245			true, /* can be a scanout buffer */
1246			1, /* num of mip levels */
1247			0,
1248			0,
1249			content_base_size,
1250			srf_out);
1251	if (ret) {
1252		DRM_ERROR("Failed to allocate proxy content buffer\n");
1253		return ret;
1254	}
1255
1256	res = &(*srf_out)->res;
1257
1258	/* Reserve and switch the backing mob. */
1259	mutex_lock(&res->dev_priv->cmdbuf_mutex);
1260	(void) vmw_resource_reserve(res, false, true);
1261	vmw_dmabuf_unreference(&res->backup);
1262	res->backup = vmw_dmabuf_reference(dmabuf_mob);
1263	res->backup_offset = 0;
1264	vmw_resource_unreserve(res, false, NULL, 0);
1265	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1266
1267	return 0;
1268}
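/*
 * The reserve/switch/unreserve sequence above is the generic pattern
 * for swapping a resource's backing store; sketched on its own
 * (illustrative only), with cmdbuf_mutex making the switch atomic with
 * respect to command submission:
 *
 *	mutex_lock(&res->dev_priv->cmdbuf_mutex);
 *	(void) vmw_resource_reserve(res, false, true);
 *	// ... swap res->backup and res->backup_offset ...
 *	vmw_resource_unreserve(res, false, NULL, 0);
 *	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 */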
1269
1270
1271
1272static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
1273					  struct vmw_dma_buffer *dmabuf,
1274					  struct vmw_framebuffer **out,
1275					  const struct drm_mode_fb_cmd2
1276					  *mode_cmd)
 1278{
1279	struct drm_device *dev = dev_priv->dev;
1280	struct vmw_framebuffer_dmabuf *vfbd;
1281	unsigned int requested_size;
1282	struct drm_format_name_buf format_name;
1283	int ret;
1284
1285	requested_size = mode_cmd->height * mode_cmd->pitches[0];
1286	if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
1287		DRM_ERROR("Screen buffer object size is too small "
1288			  "for requested mode.\n");
1289		return -EINVAL;
1290	}
1291
1292	/* Limited framebuffer color depth support for screen objects */
1293	if (dev_priv->active_display_unit == vmw_du_screen_object) {
1294		switch (mode_cmd->pixel_format) {
1295		case DRM_FORMAT_XRGB8888:
1296		case DRM_FORMAT_ARGB8888:
1297			break;
1298		case DRM_FORMAT_XRGB1555:
1299		case DRM_FORMAT_RGB565:
1300			break;
1301		default:
1302			DRM_ERROR("Invalid pixel format: %s\n",
1303				  drm_get_format_name(mode_cmd->pixel_format, &format_name));
1304			return -EINVAL;
1305		}
1306	}
1307
1308	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
1309	if (!vfbd) {
1310		ret = -ENOMEM;
1311		goto out_err1;
1312	}
1313
1314	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
1315	vfbd->base.dmabuf = true;
1316	vfbd->buffer = vmw_dmabuf_reference(dmabuf);
1317	vfbd->base.user_handle = mode_cmd->handles[0];
1318	*out = &vfbd->base;
1319
1320	ret = drm_framebuffer_init(dev, &vfbd->base.base,
1321				   &vmw_framebuffer_dmabuf_funcs);
1322	if (ret)
1323		goto out_err2;
1324
1325	return 0;
1326
1327out_err2:
1328	vmw_dmabuf_unreference(&dmabuf);
1329	kfree(vfbd);
1330out_err1:
1331	return ret;
1332}
1333
1334
1335/**
1336 * vmw_kms_srf_ok - check if a surface can be created
1337 *
1338 * @width: requested width
1339 * @height: requested height
1340 *
 1341 * Surfaces must not exceed the maximum texture size.
1342 */
1343static bool
1344vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
1345{
1346	if (width  > dev_priv->texture_max_width ||
1347	    height > dev_priv->texture_max_height)
1348		return false;
1349
1350	return true;
1351}
1352
1353/**
1354 * vmw_kms_new_framebuffer - Create a new framebuffer.
1355 *
1356 * @dev_priv: Pointer to device private struct.
1357 * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
1358 * Either @dmabuf or @surface must be NULL.
1359 * @surface: Pointer to a surface to wrap the kms framebuffer around.
1360 * Either @dmabuf or @surface must be NULL.
1361 * @only_2d: No presents will occur to this dma buffer based framebuffer. This
 1362 * helps the code to do some important optimizations.
1363 * @mode_cmd: Frame-buffer metadata.
1364 */
1365struct vmw_framebuffer *
1366vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1367			struct vmw_dma_buffer *dmabuf,
1368			struct vmw_surface *surface,
1369			bool only_2d,
1370			const struct drm_mode_fb_cmd2 *mode_cmd)
1371{
1372	struct vmw_framebuffer *vfb = NULL;
1373	bool is_dmabuf_proxy = false;
1374	int ret;
1375
1376	/*
 1377	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
1378	 * therefore, wrap the DMA buf in a surface so we can use the
1379	 * SurfaceCopy command.
1380	 */
1381	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
1382	    dmabuf && only_2d &&
1383	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
1384	    dev_priv->active_display_unit == vmw_du_screen_target) {
1385		ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
1386					      dmabuf, &surface);
1387		if (ret)
1388			return ERR_PTR(ret);
1389
1390		is_dmabuf_proxy = true;
1391	}
1392
 1393	/* Create the new framebuffer depending on what we have */
1394	if (surface) {
1395		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
1396						      mode_cmd,
1397						      is_dmabuf_proxy);
1398
1399		/*
1400		 * vmw_create_dmabuf_proxy() adds a reference that is no longer
1401		 * needed
1402		 */
1403		if (is_dmabuf_proxy)
1404			vmw_surface_unreference(&surface);
1405	} else if (dmabuf) {
1406		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
1407						     mode_cmd);
1408	} else {
1409		BUG();
1410	}
1411
1412	if (ret)
1413		return ERR_PTR(ret);
1414
1415	vfb->pin = vmw_framebuffer_pin;
1416	vfb->unpin = vmw_framebuffer_unpin;
1417
1418	return vfb;
1419}
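/*
 * Illustrative usage (hypothetical caller; mirrors vmw_kms_fb_create()
 * below): exactly one of the @dmabuf / @surface arguments is non-NULL,
 * and the result must be checked with IS_ERR() rather than against
 * NULL.
 *
 *	vfb = vmw_kms_new_framebuffer(dev_priv, bo, NULL, true, mode_cmd);
 *	if (IS_ERR(vfb))
 *		return PTR_ERR(vfb);
 */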
1420
1421/*
1422 * Generic Kernel modesetting functions
1423 */
1424
1425static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1426						 struct drm_file *file_priv,
1427						 const struct drm_mode_fb_cmd2 *mode_cmd)
1428{
1429	struct vmw_private *dev_priv = vmw_priv(dev);
1430	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1431	struct vmw_framebuffer *vfb = NULL;
1432	struct vmw_surface *surface = NULL;
1433	struct vmw_dma_buffer *bo = NULL;
1434	struct ttm_base_object *user_obj;
1435	int ret;
1436
 1437	/*
1438	 * This code should be conditioned on Screen Objects not being used.
1439	 * If screen objects are used, we can allocate a GMR to hold the
1440	 * requested framebuffer.
1441	 */
1442
1443	if (!vmw_kms_validate_mode_vram(dev_priv,
1444					mode_cmd->pitches[0],
1445					mode_cmd->height)) {
 1446		DRM_ERROR("Requested mode exceeds bounding box limit.\n");
1447		return ERR_PTR(-ENOMEM);
1448	}
1449
1450	/*
1451	 * Take a reference on the user object of the resource
1452	 * backing the kms fb. This ensures that user-space handle
1453	 * lookups on that resource will always work as long as
1454	 * it's registered with a kms framebuffer. This is important,
1455	 * since vmw_execbuf_process identifies resources in the
1456	 * command stream using user-space handles.
1457	 */
1458
1459	user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]);
1460	if (unlikely(user_obj == NULL)) {
1461		DRM_ERROR("Could not locate requested kms frame buffer.\n");
1462		return ERR_PTR(-ENOENT);
1463	}
1464
 1465	/*
1466	 * End conditioned code.
1467	 */
1468
1469	/* returns either a dmabuf or surface */
1470	ret = vmw_user_lookup_handle(dev_priv, tfile,
1471				     mode_cmd->handles[0],
1472				     &surface, &bo);
1473	if (ret)
1474		goto err_out;
1475
1476
1477	if (!bo &&
1478	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
 1479		DRM_ERROR("Surface size cannot exceed %dx%d\n",
 1480			dev_priv->texture_max_width,
 1481			dev_priv->texture_max_height);
		ret = -EINVAL;
 1482		goto err_out;
1483	}
1484
1485
1486	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
1487				      !(dev_priv->capabilities & SVGA_CAP_3D),
1488				      mode_cmd);
1489	if (IS_ERR(vfb)) {
1490		ret = PTR_ERR(vfb);
1491		goto err_out;
 1492	}
1493
1494err_out:
1495	/* vmw_user_lookup_handle takes one ref so does new_fb */
1496	if (bo)
1497		vmw_dmabuf_unreference(&bo);
1498	if (surface)
1499		vmw_surface_unreference(&surface);
1500
1501	if (ret) {
1502		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
1503		ttm_base_object_unref(&user_obj);
1504		return ERR_PTR(ret);
1505	} else
1506		vfb->user_obj = user_obj;
1507
1508	return &vfb->base;
1509}
1510
1511
1512
1513/**
 1514 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
1515 *
1516 * @dev: DRM device
1517 * @state: the driver state object
1518 *
 1519 * This is a simple wrapper around drm_atomic_helper_check() that lets
 1520 * us assign a value to mode->crtc_clock so that
 1521 * drm_calc_timestamping_constants() won't emit an error message.
1522 *
1523 * RETURNS
1524 * Zero for success or -errno
1525 */
1526static int
1527vmw_kms_atomic_check_modeset(struct drm_device *dev,
1528			     struct drm_atomic_state *state)
1529{
1530	struct drm_crtc_state *crtc_state;
1531	struct drm_crtc *crtc;
1532	struct vmw_private *dev_priv = vmw_priv(dev);
1533	int i;
1534
1535	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1536		unsigned long requested_bb_mem = 0;
1537
1538		if (dev_priv->active_display_unit == vmw_du_screen_target) {
1539			if (crtc->primary->fb) {
1540				int cpp = crtc->primary->fb->pitches[0] /
1541					  crtc->primary->fb->width;
1542
1543				requested_bb_mem += crtc->mode.hdisplay * cpp *
1544						    crtc->mode.vdisplay;
1545			}
1546
1547			if (requested_bb_mem > dev_priv->prim_bb_mem)
1548				return -EINVAL;
1549		}
1550	}
1551
1552	return drm_atomic_helper_check(dev, state);
1553}
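/*
 * Worked example of the bounding-box check above (illustrative
 * numbers): a 1920x1080 mode scanning out a framebuffer with 4 bytes
 * per pixel requests 1920 * 1080 * 4 = 8294400 bytes, which must not
 * exceed the host-advertised prim_bb_mem limit for screen targets.
 */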
1554
1555static const struct drm_mode_config_funcs vmw_kms_funcs = {
1556	.fb_create = vmw_kms_fb_create,
1557	.atomic_check = vmw_kms_atomic_check_modeset,
1558	.atomic_commit = drm_atomic_helper_commit,
1559};
1560
1561static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1562				   struct drm_file *file_priv,
1563				   struct vmw_framebuffer *vfb,
1564				   struct vmw_surface *surface,
1565				   uint32_t sid,
1566				   int32_t destX, int32_t destY,
1567				   struct drm_vmw_rect *clips,
1568				   uint32_t num_clips)
1569{
1570	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1571					    &surface->res, destX, destY,
1572					    num_clips, 1, NULL, NULL);
1573}
1574
1575
1576int vmw_kms_present(struct vmw_private *dev_priv,
1577		    struct drm_file *file_priv,
1578		    struct vmw_framebuffer *vfb,
1579		    struct vmw_surface *surface,
1580		    uint32_t sid,
1581		    int32_t destX, int32_t destY,
1582		    struct drm_vmw_rect *clips,
1583		    uint32_t num_clips)
1584{
1585	int ret;
1586
1587	switch (dev_priv->active_display_unit) {
1588	case vmw_du_screen_target:
1589		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
1590						 &surface->res, destX, destY,
1591						 num_clips, 1, NULL, NULL);
1592		break;
1593	case vmw_du_screen_object:
1594		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
1595					      sid, destX, destY, clips,
1596					      num_clips);
1597		break;
1598	default:
1599		WARN_ONCE(true,
1600			  "Present called with invalid display system.\n");
1601		ret = -ENOSYS;
1602		break;
1603	}
1604	if (ret)
1605		return ret;
1606
1607	vmw_fifo_flush(dev_priv, false);
1608
1609	return 0;
1610}
1611
1612static void
1613vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
1614{
1615	if (dev_priv->hotplug_mode_update_property)
1616		return;
1617
1618	dev_priv->hotplug_mode_update_property =
1619		drm_property_create_range(dev_priv->dev,
1620					  DRM_MODE_PROP_IMMUTABLE,
1621					  "hotplug_mode_update", 0, 1);
 1626}
1627
1628int vmw_kms_init(struct vmw_private *dev_priv)
1629{
1630	struct drm_device *dev = dev_priv->dev;
1631	int ret;
1632
1633	drm_mode_config_init(dev);
1634	dev->mode_config.funcs = &vmw_kms_funcs;
1635	dev->mode_config.min_width = 1;
1636	dev->mode_config.min_height = 1;
1637	dev->mode_config.max_width = dev_priv->texture_max_width;
1638	dev->mode_config.max_height = dev_priv->texture_max_height;
1639
1640	drm_mode_create_suggested_offset_properties(dev);
1641	vmw_kms_create_hotplug_mode_update_property(dev_priv);
1642
1643	ret = vmw_kms_stdu_init_display(dev_priv);
1644	if (ret) {
1645		ret = vmw_kms_sou_init_display(dev_priv);
1646		if (ret) /* Fallback */
1647			ret = vmw_kms_ldu_init_display(dev_priv);
1648	}
1649
1650	return ret;
1651}
1652
1653int vmw_kms_close(struct vmw_private *dev_priv)
1654{
1655	int ret = 0;
1656
1657	/*
 1658	 * Docs say we should take the lock before calling this function,
 1659	 * but since it destroys encoders and our destructor calls
 1660	 * drm_encoder_cleanup(), which takes the lock, we would deadlock.
1661	 */
1662	drm_mode_config_cleanup(dev_priv->dev);
1663	if (dev_priv->active_display_unit == vmw_du_legacy)
1664		ret = vmw_kms_ldu_close_display(dev_priv);
1665
1666	return ret;
1667}
1668
1669int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
1670				struct drm_file *file_priv)
1671{
1672	struct drm_vmw_cursor_bypass_arg *arg = data;
1673	struct vmw_display_unit *du;
1674	struct drm_crtc *crtc;
1675	int ret = 0;
1676
1677
1678	mutex_lock(&dev->mode_config.mutex);
1679	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
1680
1681		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1682			du = vmw_crtc_to_du(crtc);
1683			du->hotspot_x = arg->xhot;
1684			du->hotspot_y = arg->yhot;
1685		}
1686
1687		mutex_unlock(&dev->mode_config.mutex);
1688		return 0;
1689	}
1690
1691	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
1692	if (!crtc) {
1693		ret = -ENOENT;
1694		goto out;
1695	}
1696
1697	du = vmw_crtc_to_du(crtc);
1698
1699	du->hotspot_x = arg->xhot;
1700	du->hotspot_y = arg->yhot;
1701
1702out:
1703	mutex_unlock(&dev->mode_config.mutex);
1704
1705	return ret;
1706}
1707
1708int vmw_kms_write_svga(struct vmw_private *vmw_priv,
1709			unsigned width, unsigned height, unsigned pitch,
1710			unsigned bpp, unsigned depth)
1711{
1712	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
1713		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
1714	else if (vmw_fifo_have_pitchlock(vmw_priv))
1715		vmw_mmio_write(pitch, vmw_priv->mmio_virt +
1716			       SVGA_FIFO_PITCHLOCK);
1717	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
1718	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
1719	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
1720
1721	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
1722		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
1723			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
1724		return -EINVAL;
1725	}
1726
1727	return 0;
1728}
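/*
 * Illustrative call (hypothetical values): programming an 800x600 mode
 * at 32 bpp with a 24-bit depth, the pitch being width times bytes per
 * pixel.
 *
 *	ret = vmw_kms_write_svga(vmw_priv, 800, 600, 800 * 4, 32, 24);
 */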
1729
1730int vmw_kms_save_vga(struct vmw_private *vmw_priv)
1731{
1732	struct vmw_vga_topology_state *save;
1733	uint32_t i;
1734
1735	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
1736	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
1737	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
1738	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
1739		vmw_priv->vga_pitchlock =
1740		  vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
1741	else if (vmw_fifo_have_pitchlock(vmw_priv))
1742		vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
1743							SVGA_FIFO_PITCHLOCK);
1744
1745	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
1746		return 0;
1747
1748	vmw_priv->num_displays = vmw_read(vmw_priv,
1749					  SVGA_REG_NUM_GUEST_DISPLAYS);
1750
1751	if (vmw_priv->num_displays == 0)
1752		vmw_priv->num_displays = 1;
1753
1754	for (i = 0; i < vmw_priv->num_displays; ++i) {
1755		save = &vmw_priv->vga_save[i];
1756		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
1757		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
1758		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
1759		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
1760		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
1761		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
1762		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
1763		if (i == 0 && vmw_priv->num_displays == 1 &&
1764		    save->width == 0 && save->height == 0) {
1765
1766			/*
1767			 * It should be fairly safe to assume that these
1768			 * values are uninitialized.
1769			 */
1770
1771			save->width = vmw_priv->vga_width - save->pos_x;
1772			save->height = vmw_priv->vga_height - save->pos_y;
1773		}
1774	}
1775
1776	return 0;
1777}
1778
1779int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
1780{
1781	struct vmw_vga_topology_state *save;
1782	uint32_t i;
1783
1784	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
1785	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
1786	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
1787	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
1788		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
1789			  vmw_priv->vga_pitchlock);
1790	else if (vmw_fifo_have_pitchlock(vmw_priv))
1791		vmw_mmio_write(vmw_priv->vga_pitchlock,
1792			       vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
1793
1794	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
1795		return 0;
1796
1797	for (i = 0; i < vmw_priv->num_displays; ++i) {
1798		save = &vmw_priv->vga_save[i];
1799		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
1800		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
1801		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
1802		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
1803		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
1804		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
1805		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
1806	}
1807
1808	return 0;
1809}
1810
1811bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
1812				uint32_t pitch,
1813				uint32_t height)
1814{
1815	return ((u64) pitch * (u64) height) < (u64)
1816		((dev_priv->active_display_unit == vmw_du_screen_target) ?
1817		 dev_priv->prim_bb_mem : dev_priv->vram_size);
1818}
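/*
 * Worked example (illustrative numbers): a 1024x768 mode at 4 bytes
 * per pixel has pitch 1024 * 4 = 4096, so the product checked above is
 * 4096 * 768 = 3145728 bytes, which must stay below prim_bb_mem for
 * screen targets or vram_size for the other display units.
 */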
1819
1820
1821/**
 1822 * Function called by DRM code with vbl_lock held.
1823 */
1824u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
1825{
1826	return 0;
1827}
1828
1829/**
 1830 * Function called by DRM code with vbl_lock held.
1831 */
1832int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
1833{
1834	return -EINVAL;
1835}
1836
1837/**
 1838 * Function called by DRM code with vbl_lock held.
1839 */
1840void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
1841{
1842}
1843
1844
1845/*
1846 * Small shared kms functions.
1847 */
1848
1849static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
1850			 struct drm_vmw_rect *rects)
1851{
1852	struct drm_device *dev = dev_priv->dev;
1853	struct vmw_display_unit *du;
1854	struct drm_connector *con;
1855
1856	mutex_lock(&dev->mode_config.mutex);
1857
1858#if 0
1859	{
1860		unsigned int i;
1861
1862		DRM_INFO("%s: new layout ", __func__);
1863		for (i = 0; i < num; i++)
1864			DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
1865				 rects[i].w, rects[i].h);
1866		DRM_INFO("\n");
1867	}
1868#endif
1869
1870	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
1871		du = vmw_connector_to_du(con);
1872		if (num > du->unit) {
1873			du->pref_width = rects[du->unit].w;
1874			du->pref_height = rects[du->unit].h;
1875			du->pref_active = true;
1876			du->gui_x = rects[du->unit].x;
1877			du->gui_y = rects[du->unit].y;
1878			drm_object_property_set_value
1879			  (&con->base, dev->mode_config.suggested_x_property,
1880			   du->gui_x);
1881			drm_object_property_set_value
1882			  (&con->base, dev->mode_config.suggested_y_property,
1883			   du->gui_y);
1884		} else {
1885			du->pref_width = 800;
1886			du->pref_height = 600;
1887			du->pref_active = false;
1888			drm_object_property_set_value
1889			  (&con->base, dev->mode_config.suggested_x_property,
1890			   0);
1891			drm_object_property_set_value
1892			  (&con->base, dev->mode_config.suggested_y_property,
1893			   0);
1894		}
1895		con->status = vmw_du_connector_detect(con, true);
1896	}
1897
1898	mutex_unlock(&dev->mode_config.mutex);
1899	drm_sysfs_hotplug_event(dev);
1900
1901	return 0;
1902}
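/*
 * Illustrative layout (hypothetical values): two side-by-side 800x600
 * outputs would be passed to the function above as
 *
 *	struct drm_vmw_rect rects[] = {
 *		{ .x = 0,   .y = 0, .w = 800, .h = 600 },
 *		{ .x = 800, .y = 0, .w = 800, .h = 600 },
 *	};
 *
 *	vmw_du_update_layout(dev_priv, 2, rects);
 */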
1903
1904int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
1905			  u16 *r, u16 *g, u16 *b,
1906			  uint32_t size,
1907			  struct drm_modeset_acquire_ctx *ctx)
1908{
1909	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
1910	int i;
1911
1912	for (i = 0; i < size; i++) {
1913		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
1914			  r[i], g[i], b[i]);
1915		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
1916		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
1917		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
1918	}
1919
1920	return 0;
1921}
1922
1923int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
1924{
1925	return 0;
1926}
1927
1928enum drm_connector_status
1929vmw_du_connector_detect(struct drm_connector *connector, bool force)
1930{
1931	uint32_t num_displays;
1932	struct drm_device *dev = connector->dev;
1933	struct vmw_private *dev_priv = vmw_priv(dev);
1934	struct vmw_display_unit *du = vmw_connector_to_du(connector);
1935
1936	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
1937
1938	return ((vmw_connector_to_du(connector)->unit < num_displays &&
1939		 du->pref_active) ?
1940		connector_status_connected : connector_status_disconnected);
1941}
1942
1943static struct drm_display_mode vmw_kms_connector_builtin[] = {
1944	/* 640x480@60Hz */
1945	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
1946		   752, 800, 0, 480, 489, 492, 525, 0,
1947		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
1948	/* 800x600@60Hz */
1949	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
1950		   968, 1056, 0, 600, 601, 605, 628, 0,
1951		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1952	/* 1024x768@60Hz */
1953	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1954		   1184, 1344, 0, 768, 771, 777, 806, 0,
1955		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
1956	/* 1152x864@75Hz */
1957	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1958		   1344, 1600, 0, 864, 865, 868, 900, 0,
1959		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1960	/* 1280x768@60Hz */
1961	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
1962		   1472, 1664, 0, 768, 771, 778, 798, 0,
1963		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1964	/* 1280x800@60Hz */
1965	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
1966		   1480, 1680, 0, 800, 803, 809, 831, 0,
1967		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
1968	/* 1280x960@60Hz */
1969	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
1970		   1488, 1800, 0, 960, 961, 964, 1000, 0,
1971		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1972	/* 1280x1024@60Hz */
1973	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
1974		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
1975		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1976	/* 1360x768@60Hz */
1977	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
1978		   1536, 1792, 0, 768, 771, 777, 795, 0,
1979		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 1980	/* 1400x1050@60Hz */
1981	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
1982		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
1983		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1984	/* 1440x900@60Hz */
1985	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
1986		   1672, 1904, 0, 900, 903, 909, 934, 0,
1987		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1988	/* 1600x1200@60Hz */
1989	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
1990		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
1991		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1992	/* 1680x1050@60Hz */
1993	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
1994		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
1995		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1996	/* 1792x1344@60Hz */
1997	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
1998		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
1999		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
 2000	/* 1856x1392@60Hz */
2001	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2002		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
2003		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2004	/* 1920x1200@60Hz */
2005	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2006		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
2007		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2008	/* 1920x1440@60Hz */
2009	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2010		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
2011		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2012	/* 2560x1600@60Hz */
2013	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
2014		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
2015		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2016	/* Terminate */
2017	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
2018};
2019
2020/**
2021 * vmw_guess_mode_timing - Provide fake timings for a
2022 * 60Hz vrefresh mode.
2023 *
 2024 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2025 * members filled in.
2026 */
2027void vmw_guess_mode_timing(struct drm_display_mode *mode)
2028{
2029	mode->hsync_start = mode->hdisplay + 50;
2030	mode->hsync_end = mode->hsync_start + 50;
2031	mode->htotal = mode->hsync_end + 50;
2032
2033	mode->vsync_start = mode->vdisplay + 50;
2034	mode->vsync_end = mode->vsync_start + 50;
2035	mode->vtotal = mode->vsync_end + 50;
2036
2037	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2038	mode->vrefresh = drm_mode_vrefresh(mode);
2039}
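/*
 * Worked example (illustrative): a 1024x768 request yields
 * htotal = 1024 + 150 = 1174 and vtotal = 768 + 150 = 918, so
 * clock = 1174 * 918 / 100 * 6 = 64662 kHz, and the derived refresh
 * rate is 64662000 / (1174 * 918) ~= 60 Hz, as intended.
 */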
2040
2041
2042int vmw_du_connector_fill_modes(struct drm_connector *connector,
2043				uint32_t max_width, uint32_t max_height)
2044{
2045	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2046	struct drm_device *dev = connector->dev;
2047	struct vmw_private *dev_priv = vmw_priv(dev);
2048	struct drm_display_mode *mode = NULL;
2049	struct drm_display_mode *bmode;
2050	struct drm_display_mode prefmode = { DRM_MODE("preferred",
2051		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2052		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2053		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2054	};
2055	int i;
2056	u32 assumed_bpp = 4;
2057
2058	if (dev_priv->assume_16bpp)
2059		assumed_bpp = 2;
2060
2061	if (dev_priv->active_display_unit == vmw_du_screen_target) {
2062		max_width  = min(max_width,  dev_priv->stdu_max_width);
2063		max_width  = min(max_width,  dev_priv->texture_max_width);
2064
2065		max_height = min(max_height, dev_priv->stdu_max_height);
2066		max_height = min(max_height, dev_priv->texture_max_height);
2067	}
2068
2069	/* Add preferred mode */
2070	mode = drm_mode_duplicate(dev, &prefmode);
2071	if (!mode)
2072		return 0;
2073	mode->hdisplay = du->pref_width;
2074	mode->vdisplay = du->pref_height;
2075	vmw_guess_mode_timing(mode);
2076
2077	if (vmw_kms_validate_mode_vram(dev_priv,
2078					mode->hdisplay * assumed_bpp,
2079					mode->vdisplay)) {
2080		drm_mode_probed_add(connector, mode);
2081	} else {
2082		drm_mode_destroy(dev, mode);
2083		mode = NULL;
2084	}
2085
2086	if (du->pref_mode) {
2087		list_del_init(&du->pref_mode->head);
2088		drm_mode_destroy(dev, du->pref_mode);
2089	}
2090
2091	/* mode might be null here, this is intended */
2092	du->pref_mode = mode;
2093
2094	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
2095		bmode = &vmw_kms_connector_builtin[i];
2096		if (bmode->hdisplay > max_width ||
2097		    bmode->vdisplay > max_height)
2098			continue;
2099
2100		if (!vmw_kms_validate_mode_vram(dev_priv,
2101						bmode->hdisplay * assumed_bpp,
2102						bmode->vdisplay))
2103			continue;
2104
2105		mode = drm_mode_duplicate(dev, bmode);
2106		if (!mode)
2107			return 0;
2108		mode->vrefresh = drm_mode_vrefresh(mode);
2109
2110		drm_mode_probed_add(connector, mode);
2111	}
2112
2113	drm_mode_connector_list_update(connector);
 2114	/* Move the preferred mode first, help apps pick the right mode. */
2115	drm_mode_sort(&connector->modes);
2116
2117	return 1;
2118}
2119
2120int vmw_du_connector_set_property(struct drm_connector *connector,
2121				  struct drm_property *property,
2122				  uint64_t val)
2123{
2124	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2125	struct vmw_private *dev_priv = vmw_priv(connector->dev);
2126
2127	if (property == dev_priv->implicit_placement_property)
2128		du->is_implicit = val;
2129
2130	return 0;
2131}
2132
2133
2134
2135/**
 2136 * vmw_du_connector_atomic_set_property - Atomic version of set property
 2137 *
 2138 * @connector: connector the property is associated with
2139 *
2140 * Returns:
2141 * Zero on success, negative errno on failure.
2142 */
2143int
2144vmw_du_connector_atomic_set_property(struct drm_connector *connector,
2145				     struct drm_connector_state *state,
2146				     struct drm_property *property,
2147				     uint64_t val)
2148{
2149	struct vmw_private *dev_priv = vmw_priv(connector->dev);
2150	struct vmw_connector_state *vcs = vmw_connector_state_to_vcs(state);
2151	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2152
2153
2154	if (property == dev_priv->implicit_placement_property) {
2155		vcs->is_implicit = val;
2156
2157		/*
2158		 * We should really be doing a drm_atomic_commit() to
2159		 * commit the new state, but since this doesn't cause
 2160		 * an immediate state change, this is probably ok
2161		 */
2162		du->is_implicit = vcs->is_implicit;
2163	} else {
2164		return -EINVAL;
2165	}
2166
2167	return 0;
2168}
2169
2170
2171/**
2172 * vmw_du_connector_atomic_get_property - Atomic version of get property
2173 *
 2174 * @connector: connector the property is associated with
2175 *
2176 * Returns:
2177 * Zero on success, negative errno on failure.
2178 */
2179int
2180vmw_du_connector_atomic_get_property(struct drm_connector *connector,
2181				     const struct drm_connector_state *state,
2182				     struct drm_property *property,
2183				     uint64_t *val)
2184{
2185	struct vmw_private *dev_priv = vmw_priv(connector->dev);
2186	struct vmw_connector_state *vcs = vmw_connector_state_to_vcs(state);
2187
2188	if (property == dev_priv->implicit_placement_property)
2189		*val = vcs->is_implicit;
2190	else {
 2191		DRM_ERROR("Invalid property %s\n", property->name);
2192		return -EINVAL;
2193	}
2194
2195	return 0;
2196}
2197
2198
2199int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2200				struct drm_file *file_priv)
2201{
2202	struct vmw_private *dev_priv = vmw_priv(dev);
2203	struct drm_vmw_update_layout_arg *arg =
2204		(struct drm_vmw_update_layout_arg *)data;
2205	void __user *user_rects;
2206	struct drm_vmw_rect *rects;
2207	unsigned rects_size;
2208	int ret;
2209	int i;
2210	u64 total_pixels = 0;
2211	struct drm_mode_config *mode_config = &dev->mode_config;
2212	struct drm_vmw_rect bounding_box = {0};
2213
2214	if (!arg->num_outputs) {
2215		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
2216		vmw_du_update_layout(dev_priv, 1, &def_rect);
2217		return 0;
2218	}
2219
2220	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2221	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2222			GFP_KERNEL);
2223	if (unlikely(!rects))
2224		return -ENOMEM;
2225
2226	user_rects = (void __user *)(unsigned long)arg->rects;
2227	ret = copy_from_user(rects, user_rects, rects_size);
2228	if (unlikely(ret != 0)) {
2229		DRM_ERROR("Failed to get rects.\n");
2230		ret = -EFAULT;
2231		goto out_free;
2232	}
2233
2234	for (i = 0; i < arg->num_outputs; ++i) {
2235		if (rects[i].x < 0 ||
2236		    rects[i].y < 0 ||
2237		    rects[i].x + rects[i].w > mode_config->max_width ||
2238		    rects[i].y + rects[i].h > mode_config->max_height) {
2239			DRM_ERROR("Invalid GUI layout.\n");
2240			ret = -EINVAL;
2241			goto out_free;
2242		}
2243
2244		/*
 2245		 * bounding_box.w and bounding_box.h are used as
2246		 * lower-right coordinates
2247		 */
2248		if (rects[i].x + rects[i].w > bounding_box.w)
2249			bounding_box.w = rects[i].x + rects[i].w;
2250
2251		if (rects[i].y + rects[i].h > bounding_box.h)
2252			bounding_box.h = rects[i].y + rects[i].h;
2253
2254		total_pixels += (u64) rects[i].w * (u64) rects[i].h;
2255	}
2256
2257	if (dev_priv->active_display_unit == vmw_du_screen_target) {
2258		/*
 2259		 * For Screen Targets, the limits for a topology are:
 2260		 *	1. Bounding box (assuming 32bpp) must be < prim_bb_mem
 2261		 *	2. Total pixels (assuming 32bpp) must be < prim_bb_mem
2262		 */
2263		u64 bb_mem    = (u64) bounding_box.w * bounding_box.h * 4;
2264		u64 pixel_mem = total_pixels * 4;
2265
2266		if (bb_mem > dev_priv->prim_bb_mem) {
2267			DRM_ERROR("Topology is beyond supported limits.\n");
2268			ret = -EINVAL;
2269			goto out_free;
2270		}
2271
2272		if (pixel_mem > dev_priv->prim_bb_mem) {
2273			DRM_ERROR("Combined output size too large\n");
2274			ret = -EINVAL;
2275			goto out_free;
2276		}
2277	}
2278
2279	vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
2280
2281out_free:
2282	kfree(rects);
2283	return ret;
2284}
2285
2286/**
2287 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2288 * on a set of cliprects and a set of display units.
2289 *
2290 * @dev_priv: Pointer to a device private structure.
2291 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
 2292 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
2293 * Cliprects are given in framebuffer coordinates.
2294 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2295 * be NULL. Cliprects are given in source coordinates.
2296 * @dest_x: X coordinate offset for the crtc / destination clip rects.
2297 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2298 * @num_clips: Number of cliprects in the @clips or @vclips array.
2299 * @increment: Integer with which to increment the clip counter when looping.
2300 * Used to skip a predetermined number of clip rects.
2301 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2302 */
2303int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2304			 struct vmw_framebuffer *framebuffer,
2305			 const struct drm_clip_rect *clips,
2306			 const struct drm_vmw_rect *vclips,
2307			 s32 dest_x, s32 dest_y,
2308			 int num_clips,
2309			 int increment,
2310			 struct vmw_kms_dirty *dirty)
2311{
2312	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2313	struct drm_crtc *crtc;
2314	u32 num_units = 0;
2315	u32 i, k;
2316
2317	dirty->dev_priv = dev_priv;
2318
2319	/* If crtc is passed, no need to iterate over other display units */
2320	if (dirty->crtc) {
2321		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2322	} else {
2323		list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
2324				    head) {
2325			if (crtc->primary->fb != &framebuffer->base)
2326				continue;
2327			units[num_units++] = vmw_crtc_to_du(crtc);
2328		}
2329	}
2330
2331	for (k = 0; k < num_units; k++) {
2332		struct vmw_display_unit *unit = units[k];
2333		s32 crtc_x = unit->crtc.x;
2334		s32 crtc_y = unit->crtc.y;
2335		s32 crtc_width = unit->crtc.mode.hdisplay;
2336		s32 crtc_height = unit->crtc.mode.vdisplay;
2337		const struct drm_clip_rect *clips_ptr = clips;
2338		const struct drm_vmw_rect *vclips_ptr = vclips;
2339
2340		dirty->unit = unit;
2341		if (dirty->fifo_reserve_size > 0) {
2342			dirty->cmd = vmw_fifo_reserve(dev_priv,
2343						      dirty->fifo_reserve_size);
2344			if (!dirty->cmd) {
2345				DRM_ERROR("Couldn't reserve fifo space "
2346					  "for dirty blits.\n");
2347				return -ENOMEM;
2348			}
2349			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2350		}
2351		dirty->num_hits = 0;
2352		for (i = 0; i < num_clips; i++, clips_ptr += increment,
2353		       vclips_ptr += increment) {
2354			s32 clip_left;
2355			s32 clip_top;
2356
2357			/*
2358			 * Select clip array type. Note that integer type
2359			 * in @clips is unsigned short, whereas in @vclips
2360			 * it's 32-bit.
2361			 */
2362			if (clips) {
2363				dirty->fb_x = (s32) clips_ptr->x1;
2364				dirty->fb_y = (s32) clips_ptr->y1;
2365				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2366					crtc_x;
2367				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2368					crtc_y;
2369			} else {
2370				dirty->fb_x = vclips_ptr->x;
2371				dirty->fb_y = vclips_ptr->y;
2372				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2373					dest_x - crtc_x;
2374				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2375					dest_y - crtc_y;
2376			}
2377
2378			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2379			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2380
2381			/* Skip this clip if it's outside the crtc region */
2382			if (dirty->unit_x1 >= crtc_width ||
2383			    dirty->unit_y1 >= crtc_height ||
2384			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2385				continue;
2386
2387			/* Clip right and bottom to crtc limits */
2388			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2389					       crtc_width);
2390			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2391					       crtc_height);
2392
2393			/* Clip left and top to crtc limits */
2394			clip_left = min_t(s32, dirty->unit_x1, 0);
2395			clip_top = min_t(s32, dirty->unit_y1, 0);
2396			dirty->unit_x1 -= clip_left;
2397			dirty->unit_y1 -= clip_top;
2398			dirty->fb_x -= clip_left;
2399			dirty->fb_y -= clip_top;
2400
2401			dirty->clip(dirty);
2402		}
2403
2404		dirty->fifo_commit(dirty);
2405	}
2406
2407	return 0;
2408}
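
/*
 * Illustrative sketch (names are hypothetical): a minimal closure for
 * vmw_kms_helper_dirty().  Real callers embed struct vmw_kms_dirty in a
 * larger closure struct, fill in .fifo_reserve_size, .clip and
 * .fifo_commit, and emit device commands from the clip callback.  The
 * commit below simply assumes the whole reservation was consumed.
 */
static void vmw_example_clip(struct vmw_kms_dirty *dirty)
{
	/* unit_x1/y1/x2/y2 have already been clipped to the crtc here. */
	dirty->num_hits++;
}

static void vmw_example_commit(struct vmw_kms_dirty *dirty)
{
	/* Commit zero bytes if no clip rect intersected this unit. */
	vmw_fifo_commit(dirty->dev_priv,
			dirty->num_hits ? dirty->fifo_reserve_size : 0);
}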
2409
2410/**
2411 * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
2412 * command submission.
2413 *
2414 * @dev_priv: Pointer to a device private structure.
2415 * @buf: The buffer object.
2416 * @interruptible: Whether to perform waits as interruptible.
2417 * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
2418 * the buffer will be validated as a GMR. Already pinned buffers will not be
2419 * validated.
 * @for_cpu_blit: Whether the buffer will be used for a CPU blit. If true, the
 * buffer is validated with a placement that allows CPU access rather than
 * through the GMR / MOB path.
2420 *
2421 * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
2422 * interrupted by a signal.
2423 */
2424int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
2425				  struct vmw_dma_buffer *buf,
2426				  bool interruptible,
2427				  bool validate_as_mob,
2428				  bool for_cpu_blit)
2429{
2430	struct ttm_operation_ctx ctx = {
2431		.interruptible = interruptible,
2432		.no_wait_gpu = false};
2433	struct ttm_buffer_object *bo = &buf->base;
2434	int ret;
2435
2436	ttm_bo_reserve(bo, false, false, NULL);
2437	if (for_cpu_blit)
2438		ret = ttm_bo_validate(bo, &vmw_nonfixed_placement, &ctx);
2439	else
2440		ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
2441						 validate_as_mob);
2442	if (ret)
2443		ttm_bo_unreserve(bo);
2444
2445	return ret;
2446}
2447
2448/**
2449 * vmw_kms_helper_buffer_revert - Undo the actions of
2450 * vmw_kms_helper_buffer_prepare.
2451 *
2452 * @buf: Pointer to the buffer object.
2453 *
2454 * Helper to be used if an error forces the caller to undo the actions of
2455 * vmw_kms_helper_buffer_prepare.
2456 */
2457void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
2458{
2459	if (buf)
2460		ttm_bo_unreserve(&buf->base);
2461}
2462
2463/**
2464 * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
2465 * kms command submission.
2466 *
2467 * @dev_priv: Pointer to a device private structure.
2468 * @file_priv: Pointer to a struct drm_file representing the caller's
2469 * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
2470 * if non-NULL, @user_fence_rep must be non-NULL.
2471 * @buf: The buffer object.
2472 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
2473 * ref-counted fence pointer is returned here.
2474 * @user_fence_rep: Optional pointer to a user-space provided struct
2475 * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
2476 * function copies fence data to user-space in a fail-safe manner.
2477 */
2478void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
2479				  struct drm_file *file_priv,
2480				  struct vmw_dma_buffer *buf,
2481				  struct vmw_fence_obj **out_fence,
2482				  struct drm_vmw_fence_rep __user *
2483				  user_fence_rep)
2484{
2485	struct vmw_fence_obj *fence;
2486	uint32_t handle;
2487	int ret;
2488
2489	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2490					 file_priv ? &handle : NULL);
2491	if (buf)
2492		vmw_fence_single_bo(&buf->base, fence);
2493	if (file_priv)
2494		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2495					    ret, user_fence_rep, fence,
2496					    handle, -1, NULL);
2497	if (out_fence)
2498		*out_fence = fence;
2499	else
2500		vmw_fence_obj_unreference(&fence);
2501
2502	vmw_kms_helper_buffer_revert(buf);
2503}
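
/*
 * Illustrative sketch of the prepare/submit/finish pattern around a kms
 * command submission.  vmw_example_emit_blit() is a hypothetical stand-in
 * for the code that actually queues fifo commands.
 */
static int vmw_example_buffer_submit(struct vmw_private *dev_priv,
				     struct vmw_dma_buffer *buf)
{
	int ret;

	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false, false);
	if (ret)
		return ret;

	ret = vmw_example_emit_blit(dev_priv, buf);	/* hypothetical */
	if (ret) {
		/* Undo the reservation if nothing was submitted. */
		vmw_kms_helper_buffer_revert(buf);
		return ret;
	}

	/* Unreserve and fence; no fence pointer or user-space copy needed. */
	vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, NULL, NULL);
	return 0;
}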
2504
2505
2506/**
2507 * vmw_kms_helper_resource_revert - Undo the actions of
2508 * vmw_kms_helper_resource_prepare.
2509 *
2510 * @ctx: The validation context set up by vmw_kms_helper_resource_prepare().
2511 *
2512 * Helper to be used if an error forces the caller to undo the actions of
2513 * vmw_kms_helper_resource_prepare.
2514 */
2515void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
2516{
2517	struct vmw_resource *res = ctx->res;
2518
2519	vmw_kms_helper_buffer_revert(ctx->buf);
2520	vmw_dmabuf_unreference(&ctx->buf);
2521	vmw_resource_unreserve(res, false, NULL, 0);
2522	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
2523}
2524
2525/**
2526 * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
2527 * command submission.
2528 *
2529 * @res: Pointer to the resource. Typically a surface.
2530 * @interruptible: Whether to perform waits as interruptible.
 * @ctx: Pointer to a validation context, filled in on success. It must be
 * handed to a matching vmw_kms_helper_resource_finish() or -_revert() call.
2531 *
2532 * Also reserves and validates the backup buffer if the resource is
2533 * guest-backed. Returns 0 on success or a negative error code on failure;
2534 * -ERESTARTSYS if interrupted by a signal.
2535 */
2536int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
2537				    bool interruptible,
2538				    struct vmw_validation_ctx *ctx)
2539{
2540	int ret = 0;
2541
2542	ctx->buf = NULL;
2543	ctx->res = res;
2544
2545	if (interruptible)
2546		ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
2547	else
2548		mutex_lock(&res->dev_priv->cmdbuf_mutex);
2549
2550	if (unlikely(ret != 0))
2551		return -ERESTARTSYS;
2552
2553	ret = vmw_resource_reserve(res, interruptible, false);
2554	if (ret)
2555		goto out_unlock;
2556
2557	if (res->backup) {
2558		ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
2559						    interruptible,
2560						    res->dev_priv->has_mob,
2561						    false);
2562		if (ret)
2563			goto out_unreserve;
2564
2565		ctx->buf = vmw_dmabuf_reference(res->backup);
2566	}
2567	ret = vmw_resource_validate(res);
2568	if (ret)
2569		goto out_revert;
2570	return 0;
2571
2572out_revert:
2573	vmw_kms_helper_buffer_revert(ctx->buf);
2574out_unreserve:
2575	vmw_resource_unreserve(res, false, NULL, 0);
2576out_unlock:
2577	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
2578	return ret;
2579}
2580
2581/**
2582 * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
2583 * kms command submission.
2584 *
2585 * @ctx: The validation context set up by vmw_kms_helper_resource_prepare().
2586 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
2587 * ref-counted fence pointer is returned here.
2588 */
2589void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
2590				    struct vmw_fence_obj **out_fence)
2591{
2592	struct vmw_resource *res = ctx->res;
2593
2594	if (ctx->buf || out_fence)
2595		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
2596					     out_fence, NULL);
2597
2598	vmw_dmabuf_unreference(&ctx->buf);
2599	vmw_resource_unreserve(res, false, NULL, 0);
2600	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
2601}
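
/*
 * Illustrative sketch: the equivalent lifecycle for a resource such as a
 * surface.  vmw_example_emit_surface_cmds() is a hypothetical stand-in for
 * the code that builds the actual fifo commands.
 */
static int vmw_example_resource_submit(struct vmw_resource *res)
{
	struct vmw_validation_ctx ctx;
	int ret;

	ret = vmw_kms_helper_resource_prepare(res, true, &ctx);
	if (ret)
		return ret;

	ret = vmw_example_emit_surface_cmds(res);	/* hypothetical */
	if (ret) {
		/* Error path: undo reservation, validation and locking. */
		vmw_kms_helper_resource_revert(&ctx);
		return ret;
	}

	/* Fences the backup buffer (if any), unreserves and unlocks. */
	vmw_kms_helper_resource_finish(&ctx, NULL);
	return 0;
}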
2602
2603/**
2604 * vmw_kms_update_proxy - Helper function to update a proxy surface from
2605 * its backing MOB.
2606 *
2607 * @res: Pointer to the surface resource
2608 * @clips: Clip rects in framebuffer (surface) space.
2609 * @num_clips: Number of clips in @clips.
2610 * @increment: Integer with which to increment the clip counter when looping.
2611 * Used to skip a predetermined number of clip rects.
2612 *
2613 * This function makes sure the proxy surface is updated from its backing MOB
2614 * using the region given by @clips. The surface resource @res and its backing
2615 * MOB need to be reserved and validated on call.
 *
 * Return: 0 on success, negative error code on failure.
2616 */
2617int vmw_kms_update_proxy(struct vmw_resource *res,
2618			 const struct drm_clip_rect *clips,
2619			 unsigned num_clips,
2620			 int increment)
2621{
2622	struct vmw_private *dev_priv = res->dev_priv;
2623	struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
2624	struct {
2625		SVGA3dCmdHeader header;
2626		SVGA3dCmdUpdateGBImage body;
2627	} *cmd;
2628	SVGA3dBox *box;
2629	size_t copy_size = 0;
2630	int i;
2631
2632	if (!clips)
2633		return 0;
2634
2635	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
2636	if (!cmd) {
2637		DRM_ERROR("Couldn't reserve fifo space for proxy surface "
2638			  "update.\n");
2639		return -ENOMEM;
2640	}
2641
2642	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
2643		box = &cmd->body.box;
2644
2645		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2646		cmd->header.size = sizeof(cmd->body);
2647		cmd->body.image.sid = res->id;
2648		cmd->body.image.face = 0;
2649		cmd->body.image.mipmap = 0;
2650
2651		if (clips->x1 > size->width || clips->x2 > size->width ||
2652		    clips->y1 > size->height || clips->y2 > size->height) {
2653			DRM_ERROR("Invalid clips outsize of framebuffer.\n");
2654			return -EINVAL;
2655		}
2656
2657		box->x = clips->x1;
2658		box->y = clips->y1;
2659		box->z = 0;
2660		box->w = clips->x2 - clips->x1;
2661		box->h = clips->y2 - clips->y1;
2662		box->d = 1;
2663
2664		copy_size += sizeof(*cmd);
2665	}
2666
2667	vmw_fifo_commit(dev_priv, copy_size);
2668
2669	return 0;
2670}
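
/*
 * Illustrative sketch: updating a proxy surface over its whole area with a
 * single clip rect.  As the kerneldoc above requires, the caller must
 * already have reserved and validated @res and its backing MOB.  The
 * function name is hypothetical.
 */
static int vmw_example_update_whole_proxy(struct vmw_resource *res)
{
	struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
	struct drm_clip_rect clip = {
		.x1 = 0, .y1 = 0,
		.x2 = size->width, .y2 = size->height,
	};

	/* One clip rect, increment of 1 (skip nothing). */
	return vmw_kms_update_proxy(res, &clip, 1, 1);
}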
2671
2672int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
2673			    unsigned unit,
2674			    u32 max_width,
2675			    u32 max_height,
2676			    struct drm_connector **p_con,
2677			    struct drm_crtc **p_crtc,
2678			    struct drm_display_mode **p_mode)
2679{
2680	struct drm_connector *con;
2681	struct vmw_display_unit *du;
2682	struct drm_display_mode *mode;
2683	int i = 0;
2684	int ret = 0;
2685
2686	mutex_lock(&dev_priv->dev->mode_config.mutex);
2687	list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
2688			    head) {
2689		if (i++ == unit)
2690			goto found;
2691	}
2692
2693	/*
2694	 * The loop cursor is invalid after a completed list walk, so don't
2695	 * touch @con here; report the missing unit and bail out instead.
2696	 */
2697	DRM_ERROR("Could not find initial display unit.\n");
2698	ret = -EINVAL;
2699	goto out_unlock;
 found:
2700
2701	if (list_empty(&con->modes))
2702		(void) vmw_du_connector_fill_modes(con, max_width, max_height);
2703
2704	if (list_empty(&con->modes)) {
2705		DRM_ERROR("Could not find initial display mode.\n");
2706		ret = -EINVAL;
2707		goto out_unlock;
2708	}
2709
2710	du = vmw_connector_to_du(con);
2711	*p_con = con;
2712	*p_crtc = &du->crtc;
2713
2714	*p_mode = NULL;
2715	list_for_each_entry(mode, &con->modes, head) {
2716		if (mode->type & DRM_MODE_TYPE_PREFERRED) {
2717			*p_mode = mode;
2718			break;
2719		}
2720	}
2721
2722	if (!*p_mode) {
2723		WARN_ONCE(true, "Could not find initial preferred mode.\n");
2724		*p_mode = list_first_entry(&con->modes,
2725					   struct drm_display_mode, head);
2726	}
2727
2728 out_unlock:
2729	mutex_unlock(&dev_priv->dev->mode_config.mutex);
2730
2731	return ret;
2732}
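
/*
 * Illustrative sketch: how fbdev setup code might consume the helper to
 * pick the connector, crtc and preferred mode of display unit 0.  The
 * function name and the size limits are hypothetical.
 */
static int vmw_example_fbdev_probe(struct vmw_private *dev_priv)
{
	struct drm_connector *con;
	struct drm_crtc *crtc;
	struct drm_display_mode *mode;
	int ret;

	ret = vmw_kms_fbdev_init_data(dev_priv, 0, 1920, 1080,
				      &con, &crtc, &mode);
	if (ret)
		return ret;

	/* con, crtc and mode now describe unit 0's initial configuration. */
	return 0;
}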
2733
2734/**
2735 * vmw_kms_del_active - unregister a crtc binding to the implicit framebuffer
2736 *
2737 * @dev_priv: Pointer to a device private struct.
2738 * @du: The display unit of the crtc.
2739 */
2740void vmw_kms_del_active(struct vmw_private *dev_priv,
2741			struct vmw_display_unit *du)
2742{
2743	mutex_lock(&dev_priv->global_kms_state_mutex);
2744	if (du->active_implicit) {
2745		if (--(dev_priv->num_implicit) == 0)
2746			dev_priv->implicit_fb = NULL;
2747		du->active_implicit = false;
2748	}
2749	mutex_unlock(&dev_priv->global_kms_state_mutex);
2750}
2751
2752/**
2753 * vmw_kms_add_active - register a crtc binding to an implicit framebuffer
2754 *
2755 * @dev_priv: Pointer to a device private struct.
2756 * @du: The display unit of the crtc.
2757 * @vfb: The implicit framebuffer
2758 *
2759 * Registers a binding to an implicit framebuffer.
2760 */
2761void vmw_kms_add_active(struct vmw_private *dev_priv,
2762			struct vmw_display_unit *du,
2763			struct vmw_framebuffer *vfb)
2764{
2765	mutex_lock(&dev_priv->global_kms_state_mutex);
2766	WARN_ON_ONCE(!dev_priv->num_implicit && dev_priv->implicit_fb);
2767
2768	if (!du->active_implicit && du->is_implicit) {
2769		dev_priv->implicit_fb = vfb;
2770		du->active_implicit = true;
2771		dev_priv->num_implicit++;
2772	}
2773	mutex_unlock(&dev_priv->global_kms_state_mutex);
2774}
2775
2776/**
2777 * vmw_kms_crtc_flippable - Check whether we can page-flip a crtc.
2778 *
2779 * @dev_priv: Pointer to device-private struct.
2780 * @crtc: The crtc we want to flip.
2781 *
2782 * Returns true or false depending on whether it's OK to flip this crtc,
2783 * based on the criterion that we must not have more than one implicit
2784 * frame-buffer at any one time.
2785 */
2786bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
2787			    struct drm_crtc *crtc)
2788{
2789	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
2790	bool ret;
2791
2792	mutex_lock(&dev_priv->global_kms_state_mutex);
2793	ret = !du->is_implicit || dev_priv->num_implicit == 1;
2794	mutex_unlock(&dev_priv->global_kms_state_mutex);
2795
2796	return ret;
2797}
2798
2799/**
2800 * vmw_kms_update_implicit_fb - Update the implicit fb.
2801 *
2802 * @dev_priv: Pointer to device-private struct.
2803 * @crtc: The crtc the new implicit frame-buffer is bound to.
2804 */
2805void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
2806				struct drm_crtc *crtc)
2807{
2808	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
2809	struct vmw_framebuffer *vfb;
2810
2811	mutex_lock(&dev_priv->global_kms_state_mutex);
2812
2813	if (!du->is_implicit)
2814		goto out_unlock;
2815
2816	vfb = vmw_framebuffer_to_vfb(crtc->primary->fb);
2817	WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
2818		     dev_priv->implicit_fb != vfb);
2819
2820	dev_priv->implicit_fb = vfb;
2821out_unlock:
2822	mutex_unlock(&dev_priv->global_kms_state_mutex);
2823}
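
/*
 * Illustrative sketch of a page-flip path using the implicit-fb helpers:
 * refuse the flip when it would violate the one-implicit-framebuffer
 * criterion, then record the new implicit fb after a successful flip.
 * vmw_example_do_flip() is a hypothetical stand-in for the actual flip.
 */
static int vmw_example_page_flip(struct vmw_private *dev_priv,
				 struct drm_crtc *crtc)
{
	int ret;

	if (!vmw_kms_crtc_flippable(dev_priv, crtc))
		return -EINVAL;

	ret = vmw_example_do_flip(crtc);	/* hypothetical */
	if (ret)
		return ret;

	vmw_kms_update_implicit_fb(dev_priv, crtc);
	return 0;
}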
2824
2825/**
2826 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2827 * property.
2828 *
2829 * @dev_priv: Pointer to a device private struct.
2830 * @immutable: Whether the property is immutable.
2831 *
2832 * Sets up the implicit placement property unless it's already set up.
2833 */
2834void
2835vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
2836					   bool immutable)
2837{
2838	if (dev_priv->implicit_placement_property)
2839		return;
2840
2841	dev_priv->implicit_placement_property =
2842		drm_property_create_range(dev_priv->dev,
2843					  immutable ?
2844					  DRM_MODE_PROP_IMMUTABLE : 0,
2845					  "implicit_placement", 0, 1);
2846
2847}
2848
2849
2850/**
2851 * vmw_kms_set_config - Wrapper around drm_atomic_helper_set_config
2852 *
2853 * @set: The configuration to set.
2854 *
2855 * The vmwgfx Xorg driver doesn't assign the mode::type member, which,
2856 * when drm_mode_set_crtcinfo() is called as part of the configuration
2857 * setting, causes it to return incorrect crtc dimensions and thus severe
2858 * problems in vmwgfx modesetting. So explicitly clear that member before
2859 * calling into drm_atomic_helper_set_config().
2860 */
2861int vmw_kms_set_config(struct drm_mode_set *set,
2862		       struct drm_modeset_acquire_ctx *ctx)
2863{
2864	if (set && set->mode)
2865		set->mode->type = 0;
2866
2867	return drm_atomic_helper_set_config(set, ctx);
2868}
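
/*
 * Illustrative excerpt: the wrapper above is meant to be installed as the
 * crtc .set_config hook in place of drm_atomic_helper_set_config.  The
 * funcs table below is hypothetical; each display-unit type provides its
 * own table with the remaining hooks filled in.
 */
static const struct drm_crtc_funcs vmw_example_crtc_funcs = {
	.set_config = vmw_kms_set_config,
	/* remaining hooks elided */
};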
2869
2870
2871/**
2872 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2873 *
2874 * @dev: Pointer to the drm device
2875 * Return: 0 on success. Negative error code on failure.
2876 */
2877int vmw_kms_suspend(struct drm_device *dev)
2878{
2879	struct vmw_private *dev_priv = vmw_priv(dev);
2880
2881	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2882	if (IS_ERR(dev_priv->suspend_state)) {
2883		int ret = PTR_ERR(dev_priv->suspend_state);
2884
2885		DRM_ERROR("Failed kms suspend: %d\n", ret);
2886		dev_priv->suspend_state = NULL;
2887
2888		return ret;
2889	}
2890
2891	return 0;
2892}
2893
2894
2895/**
2896 * vmw_kms_resume - Re-enable modesetting and restore state
2897 *
2898 * @dev: Pointer to the drm device
2899 * Return: 0 on success. Negative error code on failure.
2900 *
2901 * State is resumed from a previous vmw_kms_suspend(). It's illegal
2902 * to call this function without a previous vmw_kms_suspend().
2903 */
2904int vmw_kms_resume(struct drm_device *dev)
2905{
2906	struct vmw_private *dev_priv = vmw_priv(dev);
2907	int ret;
2908
2909	if (WARN_ON(!dev_priv->suspend_state))
2910		return 0;
2911
2912	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
2913	dev_priv->suspend_state = NULL;
2914
2915	return ret;
2916}
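
/*
 * Illustrative pairing (hook names are hypothetical): vmw_kms_resume()
 * may only follow a successful vmw_kms_suspend(), as the kerneldoc above
 * states.
 */
static int vmw_example_freeze(struct vmw_private *dev_priv)
{
	return vmw_kms_suspend(dev_priv->dev);
}

static int vmw_example_restore(struct vmw_private *dev_priv)
{
	/* Safe only because vmw_example_freeze() ran first. */
	return vmw_kms_resume(dev_priv->dev);
}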
2917
2918/**
2919 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
2920 *
2921 * @dev: Pointer to the drm device
2922 */
2923void vmw_kms_lost_device(struct drm_device *dev)
2924{
2925	drm_atomic_helper_shutdown(dev);
2926}