   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include <drm/drm_atomic.h>
  29#include <drm/drm_atomic_helper.h>
  30#include <drm/drm_damage_helper.h>
  31#include <drm/drm_fourcc.h>
  32#include <drm/drm_plane_helper.h>
  33#include <drm/drm_rect.h>
  34#include <drm/drm_sysfs.h>
  35#include <drm/drm_vblank.h>
  36
  37#include "vmwgfx_kms.h"
  38
   39/* Might need an hrtimer here? */
  40#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
  41
  42void vmw_du_cleanup(struct vmw_display_unit *du)
  43{
  44	drm_plane_cleanup(&du->primary);
  45	drm_plane_cleanup(&du->cursor);
  46
  47	drm_connector_unregister(&du->connector);
  48	drm_crtc_cleanup(&du->crtc);
  49	drm_encoder_cleanup(&du->encoder);
  50	drm_connector_cleanup(&du->connector);
  51}
  52
  53/*
  54 * Display Unit Cursor functions
  55 */
  56
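/**
 * vmw_cursor_update_image - Define a new alpha cursor image in the device FIFO
 *
 * @dev_priv: Pointer to the device private struct.
 * @image: Cursor image data, 32-bit ARGB, 4 bytes per pixel.
 * @width: Width of the cursor image in pixels.
 * @height: Height of the cursor image in pixels.
 * @hotspotX: Hotspot x coordinate within the cursor image.
 * @hotspotY: Hotspot y coordinate within the cursor image.
 *
 * Reserves FIFO space for an SVGA_CMD_DEFINE_ALPHA_CURSOR command followed
 * by the image payload, fills both in and commits them with a flush.
 *
 * Returns 0 on success, negative error code otherwise.
 */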
  57static int vmw_cursor_update_image(struct vmw_private *dev_priv,
  58				   u32 *image, u32 width, u32 height,
  59				   u32 hotspotX, u32 hotspotY)
  60{
  61	struct {
  62		u32 cmd;
  63		SVGAFifoCmdDefineAlphaCursor cursor;
  64	} *cmd;
  65	u32 image_size = width * height * 4;
  66	u32 cmd_size = sizeof(*cmd) + image_size;
  67
  68	if (!image)
  69		return -EINVAL;
  70
  71	cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
  72	if (unlikely(cmd == NULL))
  73		return -ENOMEM;
  74
  75	memset(cmd, 0, sizeof(*cmd));
  76
  77	memcpy(&cmd[1], image, image_size);
  78
  79	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
  80	cmd->cursor.id = 0;
  81	cmd->cursor.width = width;
  82	cmd->cursor.height = height;
  83	cmd->cursor.hotspotX = hotspotX;
  84	cmd->cursor.hotspotY = hotspotY;
  85
  86	vmw_fifo_commit_flush(dev_priv, cmd_size);
  87
  88	return 0;
  89}
  90
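/**
 * vmw_cursor_update_bo - Define the cursor image from a buffer object
 *
 * @dev_priv: Pointer to the device private struct.
 * @bo: Buffer object holding the cursor image data.
 * @width: Width of the cursor image in pixels.
 * @height: Height of the cursor image in pixels.
 * @hotspotX: Hotspot x coordinate within the cursor image.
 * @hotspotY: Hotspot y coordinate within the cursor image.
 *
 * Reserves and kmaps the buffer object, then hands the mapped image off
 * to vmw_cursor_update_image().
 *
 * Returns 0 on success, negative error code otherwise.
 */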
  91static int vmw_cursor_update_bo(struct vmw_private *dev_priv,
  92				struct vmw_buffer_object *bo,
  93				u32 width, u32 height,
  94				u32 hotspotX, u32 hotspotY)
  95{
  96	struct ttm_bo_kmap_obj map;
  97	unsigned long kmap_offset;
  98	unsigned long kmap_num;
  99	void *virtual;
 100	bool dummy;
 101	int ret;
 102
 103	kmap_offset = 0;
 104	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
 105
 106	ret = ttm_bo_reserve(&bo->base, true, false, NULL);
 107	if (unlikely(ret != 0)) {
 108		DRM_ERROR("reserve failed\n");
 109		return -EINVAL;
 110	}
 111
 112	ret = ttm_bo_kmap(&bo->base, kmap_offset, kmap_num, &map);
 113	if (unlikely(ret != 0))
 114		goto err_unreserve;
 115
 116	virtual = ttm_kmap_obj_virtual(&map, &dummy);
 117	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
 118				      hotspotX, hotspotY);
 119
 120	ttm_bo_kunmap(&map);
 121err_unreserve:
 122	ttm_bo_unreserve(&bo->base);
 123
 124	return ret;
 125}
 126
 127
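/**
 * vmw_cursor_update_position - Set cursor visibility and position
 *
 * @dev_priv: Pointer to the device private struct.
 * @show: Whether the cursor should be shown.
 * @x: New cursor x position.
 * @y: New cursor y position.
 *
 * Writes the new state to the FIFO cursor registers and bumps the cursor
 * update count, all under the cursor spinlock.
 */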
 128static void vmw_cursor_update_position(struct vmw_private *dev_priv,
 129				       bool show, int x, int y)
 130{
 131	u32 *fifo_mem = dev_priv->mmio_virt;
 132	uint32_t count;
 133
 134	spin_lock(&dev_priv->cursor_lock);
 135	vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
 136	vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
 137	vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
 138	count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
 139	vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
 140	spin_unlock(&dev_priv->cursor_lock);
 141}
 142
 143
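/**
 * vmw_kms_cursor_snoop - Copy cursor image data out of a surface DMA command
 *
 * @srf: The surface to snoop.
 * @tfile: Object file to which the buffer object belongs.
 * @bo: The buffer object that is the source of the DMA.
 * @header: Header of the SVGA3D surface DMA command.
 *
 * If the DMA command is a simple page-aligned, single-box copy to face 0 /
 * mipmap 0, copies the 64x64 cursor image into the surface snooper and
 * bumps its age; otherwise logs an error and bails.
 */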
 144void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 145			  struct ttm_object_file *tfile,
 146			  struct ttm_buffer_object *bo,
 147			  SVGA3dCmdHeader *header)
 148{
 149	struct ttm_bo_kmap_obj map;
 150	unsigned long kmap_offset;
 151	unsigned long kmap_num;
 152	SVGA3dCopyBox *box;
 153	unsigned box_count;
 154	void *virtual;
 155	bool dummy;
 156	struct vmw_dma_cmd {
 157		SVGA3dCmdHeader header;
 158		SVGA3dCmdSurfaceDMA dma;
 159	} *cmd;
 160	int i, ret;
 161
 162	cmd = container_of(header, struct vmw_dma_cmd, header);
 163
 164	/* No snooper installed */
 165	if (!srf->snooper.image)
 166		return;
 167
 168	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
 169		DRM_ERROR("face and mipmap for cursors should never != 0\n");
 170		return;
 171	}
 172
 173	if (cmd->header.size < 64) {
 174		DRM_ERROR("at least one full copy box must be given\n");
 175		return;
 176	}
 177
 178	box = (SVGA3dCopyBox *)&cmd[1];
 179	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
 180			sizeof(SVGA3dCopyBox);
 181
 182	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
 183	    box->x != 0    || box->y != 0    || box->z != 0    ||
 184	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
 185	    box->d != 1    || box_count != 1) {
  186		/* TODO handle non-page-aligned offsets */
  187		/* TODO handle more cases of dst & src != 0 */
  188		/* TODO handle more than one copy */
  189		DRM_ERROR("Can't snoop dma request for cursor!\n");
 190		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
 191			  box->srcx, box->srcy, box->srcz,
 192			  box->x, box->y, box->z,
 193			  box->w, box->h, box->d, box_count,
 194			  cmd->dma.guest.ptr.offset);
 195		return;
 196	}
 197
 198	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
 199	kmap_num = (64*64*4) >> PAGE_SHIFT;
 200
 201	ret = ttm_bo_reserve(bo, true, false, NULL);
 202	if (unlikely(ret != 0)) {
 203		DRM_ERROR("reserve failed\n");
 204		return;
 205	}
 206
 207	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
 208	if (unlikely(ret != 0))
 209		goto err_unreserve;
 210
 211	virtual = ttm_kmap_obj_virtual(&map, &dummy);
 212
 213	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
 214		memcpy(srf->snooper.image, virtual, 64*64*4);
 215	} else {
 216		/* Image is unsigned pointer. */
 217		for (i = 0; i < box->h; i++)
 218			memcpy(srf->snooper.image + i * 64,
 219			       virtual + i * cmd->dma.guest.pitch,
 220			       box->w * 4);
 221	}
 222
 223	srf->snooper.age++;
 224
 225	ttm_bo_kunmap(&map);
 226err_unreserve:
 227	ttm_bo_unreserve(bo);
 228}
 229
 230/**
 231 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 232 *
 233 * @dev_priv: Pointer to the device private struct.
 234 *
 235 * Clears all legacy hotspots.
 236 */
 237void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
 238{
 239	struct drm_device *dev = dev_priv->dev;
 240	struct vmw_display_unit *du;
 241	struct drm_crtc *crtc;
 242
 243	drm_modeset_lock_all(dev);
 244	drm_for_each_crtc(crtc, dev) {
 245		du = vmw_crtc_to_du(crtc);
 246
 247		du->hotspot_x = 0;
 248		du->hotspot_y = 0;
 249	}
 250	drm_modeset_unlock_all(dev);
 251}
 252
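/**
 * vmw_kms_cursor_post_execbuf - Re-emit snooped cursor images
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * For each crtc whose cursor surface was updated by the snooper since the
 * last update, sends the new 64x64 cursor image to the device.
 */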
 253void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
 254{
 255	struct drm_device *dev = dev_priv->dev;
 256	struct vmw_display_unit *du;
 257	struct drm_crtc *crtc;
 258
 259	mutex_lock(&dev->mode_config.mutex);
 260
 261	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 262		du = vmw_crtc_to_du(crtc);
 263		if (!du->cursor_surface ||
 264		    du->cursor_age == du->cursor_surface->snooper.age)
 265			continue;
 266
 267		du->cursor_age = du->cursor_surface->snooper.age;
 268		vmw_cursor_update_image(dev_priv,
 269					du->cursor_surface->snooper.image,
 270					64, 64,
 271					du->hotspot_x + du->core_hotspot_x,
 272					du->hotspot_y + du->core_hotspot_y);
 273	}
 274
 275	mutex_unlock(&dev->mode_config.mutex);
 276}
 277
 278
 279void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
 280{
 281	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
 282
 283	drm_plane_cleanup(plane);
 284}
 285
 286
 287void vmw_du_primary_plane_destroy(struct drm_plane *plane)
 288{
 289	drm_plane_cleanup(plane);
 290
  291	/* Planes are static in our case so we don't free them */
 292}
 293
 294
 295/**
  296 * vmw_du_plane_unpin_surf - unpins the resource associated with a display surface
 297 *
 298 * @vps: plane state associated with the display surface
  299 * @unreference: true if we also want to unreference the surface.
 300 */
 301void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
 302			     bool unreference)
 303{
 304	if (vps->surf) {
 305		if (vps->pinned) {
 306			vmw_resource_unpin(&vps->surf->res);
 307			vps->pinned--;
 308		}
 309
 310		if (unreference) {
 311			if (vps->pinned)
 312				DRM_ERROR("Surface still pinned\n");
 313			vmw_surface_unreference(&vps->surf);
 314		}
 315	}
 316}
 317
 318
 319/**
  320 * vmw_du_plane_cleanup_fb - Unpins the surface backing the old fb
  321 *
  322 * @plane:  display plane
  323 * @old_state: Contains the FB to clean up
  324 *
  325 * Unpins the framebuffer surface associated with the old plane state.
 328 */
 329void
 330vmw_du_plane_cleanup_fb(struct drm_plane *plane,
 331			struct drm_plane_state *old_state)
 332{
 333	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
 334
 335	vmw_du_plane_unpin_surf(vps, false);
 336}
 337
 338
 339/**
  340 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing its backing resource
 341 *
 342 * @plane:  display plane
 343 * @new_state: info on the new plane state, including the FB
 344 *
 345 * Returns 0 on success
 346 */
 347int
 348vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
 349			       struct drm_plane_state *new_state)
 350{
 351	struct drm_framebuffer *fb = new_state->fb;
 352	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
 353
 354
 355	if (vps->surf)
 356		vmw_surface_unreference(&vps->surf);
 357
 358	if (vps->bo)
 359		vmw_bo_unreference(&vps->bo);
 360
 361	if (fb) {
 362		if (vmw_framebuffer_to_vfb(fb)->bo) {
 363			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
 364			vmw_bo_reference(vps->bo);
 365		} else {
 366			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
 367			vmw_surface_reference(vps->surf);
 368		}
 369	}
 370
 371	return 0;
 372}
 373
 374
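/**
 * vmw_du_cursor_plane_atomic_update - Update the device cursor
 *
 * @plane: cursor plane
 * @old_state: the previous plane state
 *
 * Sends the new cursor image, taken from either the snooped surface or
 * the backing buffer object, to the device and updates the cursor
 * position. If the plane has neither, the cursor is hidden instead.
 */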
 375void
 376vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
 377				  struct drm_plane_state *old_state)
 378{
 379	struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
 380	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
 381	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 382	struct vmw_plane_state *vps = vmw_plane_state_to_vps(plane->state);
 383	s32 hotspot_x, hotspot_y;
 384	int ret = 0;
 385
 386
 387	hotspot_x = du->hotspot_x;
 388	hotspot_y = du->hotspot_y;
 389
 390	if (plane->state->fb) {
 391		hotspot_x += plane->state->fb->hot_x;
 392		hotspot_y += plane->state->fb->hot_y;
 393	}
 394
 395	du->cursor_surface = vps->surf;
 396	du->cursor_bo = vps->bo;
 397
 398	if (vps->surf) {
 399		du->cursor_age = du->cursor_surface->snooper.age;
 400
 401		ret = vmw_cursor_update_image(dev_priv,
 402					      vps->surf->snooper.image,
 403					      64, 64, hotspot_x,
 404					      hotspot_y);
 405	} else if (vps->bo) {
 406		ret = vmw_cursor_update_bo(dev_priv, vps->bo,
 407					   plane->state->crtc_w,
 408					   plane->state->crtc_h,
 409					   hotspot_x, hotspot_y);
 410	} else {
 411		vmw_cursor_update_position(dev_priv, false, 0, 0);
 412		return;
 413	}
 414
 415	if (!ret) {
 416		du->cursor_x = plane->state->crtc_x + du->set_gui_x;
 417		du->cursor_y = plane->state->crtc_y + du->set_gui_y;
 418
 419		vmw_cursor_update_position(dev_priv, true,
 420					   du->cursor_x + hotspot_x,
 421					   du->cursor_y + hotspot_y);
 422
 423		du->core_hotspot_x = hotspot_x - du->hotspot_x;
 424		du->core_hotspot_y = hotspot_y - du->hotspot_y;
 425	} else {
 426		DRM_ERROR("Failed to update cursor image\n");
 427	}
 428}
 429
 430
 431/**
 432 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 433 *
 434 * @plane: display plane
 435 * @state: info on the new plane state, including the FB
 436 *
 437 * Check if the new state is settable given the current state.  Other
 438 * than what the atomic helper checks, we care about crtc fitting
 439 * the FB and maintaining one active framebuffer.
 440 *
 441 * Returns 0 on success
 442 */
 443int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
 444				      struct drm_plane_state *state)
 445{
 446	struct drm_crtc_state *crtc_state = NULL;
 447	struct drm_framebuffer *new_fb = state->fb;
 448	int ret;
 449
 450	if (state->crtc)
 451		crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
 452
 453	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
 454						  DRM_PLANE_HELPER_NO_SCALING,
 455						  DRM_PLANE_HELPER_NO_SCALING,
 456						  false, true);
 457
 458	if (!ret && new_fb) {
 459		struct drm_crtc *crtc = state->crtc;
 460		struct vmw_connector_state *vcs;
 461		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 462
 463		vcs = vmw_connector_state_to_vcs(du->connector.state);
 464	}
 465
 466
 467	return ret;
 468}
 469
 470
 471/**
 472 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 473 *
 474 * @plane: cursor plane
  475 * @new_state: info on the new plane state
 476 *
 477 * This is a chance to fail if the new cursor state does not fit
 478 * our requirements.
 479 *
 480 * Returns 0 on success
 481 */
 482int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
 483				     struct drm_plane_state *new_state)
 484{
 485	int ret = 0;
 486	struct drm_crtc_state *crtc_state = NULL;
 487	struct vmw_surface *surface = NULL;
 488	struct drm_framebuffer *fb = new_state->fb;
 489
 490	if (new_state->crtc)
 491		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
 492							   new_state->crtc);
 493
 494	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
 495						  DRM_PLANE_HELPER_NO_SCALING,
 496						  DRM_PLANE_HELPER_NO_SCALING,
 497						  true, true);
 498	if (ret)
 499		return ret;
 500
 501	/* Turning off */
 502	if (!fb)
 503		return 0;
 504
 505	/* A lot of the code assumes this */
 506	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
 507		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
 508			  new_state->crtc_w, new_state->crtc_h);
 509		ret = -EINVAL;
 510	}
 511
 512	if (!vmw_framebuffer_to_vfb(fb)->bo)
 513		surface = vmw_framebuffer_to_vfbs(fb)->surface;
 514
 515	if (surface && !surface->snooper.image) {
 516		DRM_ERROR("surface not suitable for cursor\n");
 517		ret = -EINVAL;
 518	}
 519
 520	return ret;
 521}
 522
 523
 524int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
 525			     struct drm_crtc_state *new_state)
 526{
 527	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
 528	int connector_mask = drm_connector_mask(&du->connector);
 529	bool has_primary = new_state->plane_mask &
 530			   drm_plane_mask(crtc->primary);
 531
 532	/* We always want to have an active plane with an active CRTC */
 533	if (has_primary != new_state->enable)
 534		return -EINVAL;
 535
 536
 537	if (new_state->connector_mask != connector_mask &&
 538	    new_state->connector_mask != 0) {
 539		DRM_ERROR("Invalid connectors configuration\n");
 540		return -EINVAL;
 541	}
 542
 543	/*
 544	 * Our virtual device does not have a dot clock, so use the logical
 545	 * clock value as the dot clock.
 546	 */
 547	if (new_state->mode.crtc_clock == 0)
 548		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;
 549
 550	return 0;
 551}
 552
 553
 554void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
 555			      struct drm_crtc_state *old_crtc_state)
 556{
 557}
 558
 559
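/**
 * vmw_du_crtc_atomic_flush - Finish a crtc state update
 *
 * @crtc: DRM crtc
 * @old_crtc_state: the previous crtc state
 *
 * There is no real vblank to wait for on the virtual device, so any
 * pending vblank event is sent off immediately at flush time.
 */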
 560void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
 561			      struct drm_crtc_state *old_crtc_state)
 562{
 563	struct drm_pending_vblank_event *event = crtc->state->event;
 564
 565	if (event) {
 566		crtc->state->event = NULL;
 567
 568		spin_lock_irq(&crtc->dev->event_lock);
 569		drm_crtc_send_vblank_event(crtc, event);
 570		spin_unlock_irq(&crtc->dev->event_lock);
 571	}
 572}
 573
 574
 575/**
 576 * vmw_du_crtc_duplicate_state - duplicate crtc state
 577 * @crtc: DRM crtc
 578 *
 579 * Allocates and returns a copy of the crtc state (both common and
 580 * vmw-specific) for the specified crtc.
 581 *
 582 * Returns: The newly allocated crtc state, or NULL on failure.
 583 */
 584struct drm_crtc_state *
 585vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
 586{
 587	struct drm_crtc_state *state;
 588	struct vmw_crtc_state *vcs;
 589
 590	if (WARN_ON(!crtc->state))
 591		return NULL;
 592
 593	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
 594
 595	if (!vcs)
 596		return NULL;
 597
 598	state = &vcs->base;
 599
 600	__drm_atomic_helper_crtc_duplicate_state(crtc, state);
 601
 602	return state;
 603}
 604
 605
 606/**
 607 * vmw_du_crtc_reset - creates a blank vmw crtc state
 608 * @crtc: DRM crtc
 609 *
 610 * Resets the atomic state for @crtc by freeing the state pointer (which
 611 * might be NULL, e.g. at driver load time) and allocating a new empty state
 612 * object.
 613 */
 614void vmw_du_crtc_reset(struct drm_crtc *crtc)
 615{
 616	struct vmw_crtc_state *vcs;
 617
 618
 619	if (crtc->state) {
 620		__drm_atomic_helper_crtc_destroy_state(crtc->state);
 621
 622		kfree(vmw_crtc_state_to_vcs(crtc->state));
 623	}
 624
 625	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
 626
 627	if (!vcs) {
 628		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
 629		return;
 630	}
 631
 632	crtc->state = &vcs->base;
 633	crtc->state->crtc = crtc;
 634}
 635
 636
 637/**
 638 * vmw_du_crtc_destroy_state - destroy crtc state
 639 * @crtc: DRM crtc
 640 * @state: state object to destroy
 641 *
 642 * Destroys the crtc state (both common and vmw-specific) for the
  643 * specified crtc.
 644 */
 645void
 646vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
 647			  struct drm_crtc_state *state)
 648{
 649	drm_atomic_helper_crtc_destroy_state(crtc, state);
 650}
 651
 652
 653/**
 654 * vmw_du_plane_duplicate_state - duplicate plane state
 655 * @plane: drm plane
 656 *
 657 * Allocates and returns a copy of the plane state (both common and
 658 * vmw-specific) for the specified plane.
 659 *
 660 * Returns: The newly allocated plane state, or NULL on failure.
 661 */
 662struct drm_plane_state *
 663vmw_du_plane_duplicate_state(struct drm_plane *plane)
 664{
 665	struct drm_plane_state *state;
 666	struct vmw_plane_state *vps;
 667
 668	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
 669
 670	if (!vps)
 671		return NULL;
 672
 673	vps->pinned = 0;
 674	vps->cpp = 0;
 675
  675
 676	/* Each ref counted resource needs to be acquired again */
 677	if (vps->surf)
 678		(void) vmw_surface_reference(vps->surf);
 679
 680	if (vps->bo)
 681		(void) vmw_bo_reference(vps->bo);
 682
 683	state = &vps->base;
 684
 685	__drm_atomic_helper_plane_duplicate_state(plane, state);
 686
 687	return state;
 688}
 689
 690
 691/**
 692 * vmw_du_plane_reset - creates a blank vmw plane state
 693 * @plane: drm plane
 694 *
 695 * Resets the atomic state for @plane by freeing the state pointer (which might
 696 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 697 */
 698void vmw_du_plane_reset(struct drm_plane *plane)
 699{
 700	struct vmw_plane_state *vps;
 701
 702
 703	if (plane->state)
 704		vmw_du_plane_destroy_state(plane, plane->state);
 705
 706	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
 707
 708	if (!vps) {
 709		DRM_ERROR("Cannot allocate vmw_plane_state\n");
 710		return;
 711	}
 712
 713	__drm_atomic_helper_plane_reset(plane, &vps->base);
 714}
 715
 716
 717/**
 718 * vmw_du_plane_destroy_state - destroy plane state
 719 * @plane: DRM plane
 720 * @state: state object to destroy
 721 *
 722 * Destroys the plane state (both common and vmw-specific) for the
 723 * specified plane.
 724 */
 725void
 726vmw_du_plane_destroy_state(struct drm_plane *plane,
 727			   struct drm_plane_state *state)
 728{
 729	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
 730
 731
 732	/* Should have been freed by cleanup_fb */
 733	if (vps->surf)
 734		vmw_surface_unreference(&vps->surf);
 735
 736	if (vps->bo)
 737		vmw_bo_unreference(&vps->bo);
 738
 739	drm_atomic_helper_plane_destroy_state(plane, state);
 740}
 741
 742
 743/**
 744 * vmw_du_connector_duplicate_state - duplicate connector state
 745 * @connector: DRM connector
 746 *
 747 * Allocates and returns a copy of the connector state (both common and
 748 * vmw-specific) for the specified connector.
 749 *
 750 * Returns: The newly allocated connector state, or NULL on failure.
 751 */
 752struct drm_connector_state *
 753vmw_du_connector_duplicate_state(struct drm_connector *connector)
 754{
 755	struct drm_connector_state *state;
 756	struct vmw_connector_state *vcs;
 757
 758	if (WARN_ON(!connector->state))
 759		return NULL;
 760
 761	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
 762
 763	if (!vcs)
 764		return NULL;
 765
 766	state = &vcs->base;
 767
 768	__drm_atomic_helper_connector_duplicate_state(connector, state);
 769
 770	return state;
 771}
 772
 773
 774/**
 775 * vmw_du_connector_reset - creates a blank vmw connector state
 776 * @connector: DRM connector
 777 *
 778 * Resets the atomic state for @connector by freeing the state pointer (which
 779 * might be NULL, e.g. at driver load time) and allocating a new empty state
 780 * object.
 781 */
 782void vmw_du_connector_reset(struct drm_connector *connector)
 783{
 784	struct vmw_connector_state *vcs;
 785
 786
 787	if (connector->state) {
 788		__drm_atomic_helper_connector_destroy_state(connector->state);
 789
 790		kfree(vmw_connector_state_to_vcs(connector->state));
 791	}
 792
 793	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
 794
 795	if (!vcs) {
 796		DRM_ERROR("Cannot allocate vmw_connector_state\n");
 797		return;
 798	}
 799
 800	__drm_atomic_helper_connector_reset(connector, &vcs->base);
 801}
 802
 803
 804/**
 805 * vmw_du_connector_destroy_state - destroy connector state
 806 * @connector: DRM connector
 807 * @state: state object to destroy
 808 *
 809 * Destroys the connector state (both common and vmw-specific) for the
  810 * specified connector.
 811 */
 812void
 813vmw_du_connector_destroy_state(struct drm_connector *connector,
 814			  struct drm_connector_state *state)
 815{
 816	drm_atomic_helper_connector_destroy_state(connector, state);
 817}
 818/*
 819 * Generic framebuffer code
 820 */
 821
 822/*
 823 * Surface framebuffer code
 824 */
 825
 826static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
 827{
 828	struct vmw_framebuffer_surface *vfbs =
 829		vmw_framebuffer_to_vfbs(framebuffer);
 830
 831	drm_framebuffer_cleanup(framebuffer);
 832	vmw_surface_unreference(&vfbs->surface);
 833	if (vfbs->base.user_obj)
 834		ttm_base_object_unref(&vfbs->base.user_obj);
 835
 836	kfree(vfbs);
 837}
 838
 839/**
 840 * vmw_kms_readback - Perform a readback from the screen system to
 841 * a buffer-object backed framebuffer.
 842 *
 843 * @dev_priv: Pointer to the device private structure.
 844 * @file_priv: Pointer to a struct drm_file identifying the caller.
 845 * Must be set to NULL if @user_fence_rep is NULL.
 846 * @vfb: Pointer to the buffer-object backed framebuffer.
 847 * @user_fence_rep: User-space provided structure for fence information.
 848 * Must be set to non-NULL if @file_priv is non-NULL.
 849 * @vclips: Array of clip rects.
 850 * @num_clips: Number of clip rects in @vclips.
 851 *
 852 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 853 * interrupted.
 854 */
 855int vmw_kms_readback(struct vmw_private *dev_priv,
 856		     struct drm_file *file_priv,
 857		     struct vmw_framebuffer *vfb,
 858		     struct drm_vmw_fence_rep __user *user_fence_rep,
 859		     struct drm_vmw_rect *vclips,
 860		     uint32_t num_clips)
 861{
 862	switch (dev_priv->active_display_unit) {
 863	case vmw_du_screen_object:
 864		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
 865					    user_fence_rep, vclips, num_clips,
 866					    NULL);
 867	case vmw_du_screen_target:
 868		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
 869					user_fence_rep, NULL, vclips, num_clips,
 870					1, false, true, NULL);
 871	default:
 872		WARN_ONCE(true,
 873			  "Readback called with invalid display system.\n");
  874	}
 875
 876	return -ENOSYS;
 877}
 878
 879
 880static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
 881	.destroy = vmw_framebuffer_surface_destroy,
 882	.dirty = drm_atomic_helper_dirtyfb,
 883};
 884
 885static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 886					   struct vmw_surface *surface,
 887					   struct vmw_framebuffer **out,
 888					   const struct drm_mode_fb_cmd2
 889					   *mode_cmd,
 890					   bool is_bo_proxy)
 891
 892{
 893	struct drm_device *dev = dev_priv->dev;
 894	struct vmw_framebuffer_surface *vfbs;
 895	enum SVGA3dSurfaceFormat format;
 896	int ret;
 897	struct drm_format_name_buf format_name;
 898
 899	/* 3D is only supported on HWv8 and newer hosts */
 900	if (dev_priv->active_display_unit == vmw_du_legacy)
 901		return -ENOSYS;
 902
 903	/*
 904	 * Sanity checks.
 905	 */
 906
 907	/* Surface must be marked as a scanout. */
 908	if (unlikely(!surface->scanout))
 909		return -EINVAL;
 910
 911	if (unlikely(surface->mip_levels[0] != 1 ||
 912		     surface->num_sizes != 1 ||
 913		     surface->base_size.width < mode_cmd->width ||
 914		     surface->base_size.height < mode_cmd->height ||
 915		     surface->base_size.depth != 1)) {
 916		DRM_ERROR("Incompatible surface dimensions "
 917			  "for requested mode.\n");
 918		return -EINVAL;
 919	}
 920
 921	switch (mode_cmd->pixel_format) {
 922	case DRM_FORMAT_ARGB8888:
 923		format = SVGA3D_A8R8G8B8;
 924		break;
 925	case DRM_FORMAT_XRGB8888:
 926		format = SVGA3D_X8R8G8B8;
 927		break;
 928	case DRM_FORMAT_RGB565:
 929		format = SVGA3D_R5G6B5;
 930		break;
 931	case DRM_FORMAT_XRGB1555:
 932		format = SVGA3D_A1R5G5B5;
 933		break;
 934	default:
 935		DRM_ERROR("Invalid pixel format: %s\n",
 936			  drm_get_format_name(mode_cmd->pixel_format, &format_name));
 937		return -EINVAL;
 938	}
 939
 940	/*
 941	 * For DX, surface format validation is done when surface->scanout
 942	 * is set.
 943	 */
 944	if (!dev_priv->has_dx && format != surface->format) {
 945		DRM_ERROR("Invalid surface format for requested mode.\n");
 946		return -EINVAL;
 947	}
 948
 949	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
 950	if (!vfbs) {
 951		ret = -ENOMEM;
 952		goto out_err1;
 953	}
 954
 955	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
 956	vfbs->surface = vmw_surface_reference(surface);
 957	vfbs->base.user_handle = mode_cmd->handles[0];
 958	vfbs->is_bo_proxy = is_bo_proxy;
 959
 960	*out = &vfbs->base;
 961
 962	ret = drm_framebuffer_init(dev, &vfbs->base.base,
 963				   &vmw_framebuffer_surface_funcs);
 964	if (ret)
 965		goto out_err2;
 966
 967	return 0;
 968
 969out_err2:
 970	vmw_surface_unreference(&surface);
 971	kfree(vfbs);
 972out_err1:
 973	return ret;
 974}
 975
 976/*
 977 * Buffer-object framebuffer code
 978 */
 979
 980static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
 981{
 982	struct vmw_framebuffer_bo *vfbd =
 983		vmw_framebuffer_to_vfbd(framebuffer);
 984
 985	drm_framebuffer_cleanup(framebuffer);
 986	vmw_bo_unreference(&vfbd->buffer);
 987	if (vfbd->base.user_obj)
 988		ttm_base_object_unref(&vfbd->base.user_obj);
 989
 990	kfree(vfbd);
 991}
 992
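/**
 * vmw_framebuffer_bo_dirty - Handle dirty rects for a buffer-object fb
 *
 * @framebuffer: The framebuffer that was flushed.
 * @file_priv: Identifies the caller.
 * @flags: Dirty fb ioctl flags.
 * @color: Color used by the ANNOTATE_FILL flag; unused here.
 * @clips: Array of dirty clip rects, or NULL for the whole framebuffer.
 * @num_clips: Number of clip rects in @clips.
 *
 * Normalizes the clip list under the modeset and ttm read locks, hands it
 * to the legacy display unit dirty handler, then flushes the FIFO.
 */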
 993static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
 994				    struct drm_file *file_priv,
 995				    unsigned int flags, unsigned int color,
 996				    struct drm_clip_rect *clips,
 997				    unsigned int num_clips)
 998{
 999	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
1000	struct vmw_framebuffer_bo *vfbd =
1001		vmw_framebuffer_to_vfbd(framebuffer);
1002	struct drm_clip_rect norect;
1003	int ret, increment = 1;
1004
1005	drm_modeset_lock_all(dev_priv->dev);
1006
1007	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
1008	if (unlikely(ret != 0)) {
1009		drm_modeset_unlock_all(dev_priv->dev);
1010		return ret;
1011	}
1012
1013	if (!num_clips) {
1014		num_clips = 1;
1015		clips = &norect;
1016		norect.x1 = norect.y1 = 0;
1017		norect.x2 = framebuffer->width;
1018		norect.y2 = framebuffer->height;
1019	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
1020		num_clips /= 2;
1021		increment = 2;
1022	}
1023
1024	switch (dev_priv->active_display_unit) {
1025	case vmw_du_legacy:
1026		ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
1027					      clips, num_clips, increment);
1028		break;
1029	default:
1030		ret = -EINVAL;
1031		WARN_ONCE(true, "Dirty called with invalid display system.\n");
1032		break;
1033	}
1034
1035	vmw_fifo_flush(dev_priv, false);
1036	ttm_read_unlock(&dev_priv->reservation_sem);
1037
1038	drm_modeset_unlock_all(dev_priv->dev);
1039
1040	return ret;
1041}
1042
1043static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
1044					struct drm_file *file_priv,
1045					unsigned int flags, unsigned int color,
1046					struct drm_clip_rect *clips,
1047					unsigned int num_clips)
1048{
1049	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
1050
1051	if (dev_priv->active_display_unit == vmw_du_legacy)
1052		return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
1053						color, clips, num_clips);
1054
1055	return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color,
1056					 clips, num_clips);
1057}
1058
1059static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
1060	.destroy = vmw_framebuffer_bo_destroy,
1061	.dirty = vmw_framebuffer_bo_dirty_ext,
1062};
1063
1064/**
 1065 * Pin the buffer in a location suitable for access by the
 1066 * display system.
1067 */
1068static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
1069{
1070	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
1071	struct vmw_buffer_object *buf;
1072	struct ttm_placement *placement;
1073	int ret;
1074
1075	buf = vfb->bo ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
1076		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
1077
1078	if (!buf)
1079		return 0;
1080
1081	switch (dev_priv->active_display_unit) {
1082	case vmw_du_legacy:
1083		vmw_overlay_pause_all(dev_priv);
1084		ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
1085		vmw_overlay_resume_all(dev_priv);
1086		break;
1087	case vmw_du_screen_object:
1088	case vmw_du_screen_target:
1089		if (vfb->bo) {
1090			if (dev_priv->capabilities & SVGA_CAP_3D) {
1091				/*
1092				 * Use surface DMA to get content to
 1093				 * screen target surface.
1094				 */
1095				placement = &vmw_vram_gmr_placement;
1096			} else {
1097				/* Use CPU blit. */
1098				placement = &vmw_sys_placement;
1099			}
1100		} else {
1101			/* Use surface / image update */
1102			placement = &vmw_mob_placement;
1103		}
1104
1105		return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
1106	default:
1107		return -EINVAL;
1108	}
1109
1110	return ret;
1111}
1112
1113static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
1114{
1115	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
1116	struct vmw_buffer_object *buf;
1117
1118	buf = vfb->bo ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
1119		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
1120
1121	if (WARN_ON(!buf))
1122		return 0;
1123
1124	return vmw_bo_unpin(dev_priv, buf, false);
1125}
1126
1127/**
1128 * vmw_create_bo_proxy - create a proxy surface for the buffer object
1129 *
1130 * @dev: DRM device
1131 * @mode_cmd: parameters for the new surface
1132 * @bo_mob: MOB backing the buffer object
1133 * @srf_out: newly created surface
1134 *
1135 * When the content FB is a buffer object, we create a surface as a proxy to the
1136 * same buffer.  This way we can do a surface copy rather than a surface DMA.
 1137 * This is a more efficient approach.
1138 *
1139 * RETURNS:
1140 * 0 on success, error code otherwise
1141 */
1142static int vmw_create_bo_proxy(struct drm_device *dev,
1143			       const struct drm_mode_fb_cmd2 *mode_cmd,
1144			       struct vmw_buffer_object *bo_mob,
1145			       struct vmw_surface **srf_out)
1146{
1147	uint32_t format;
1148	struct drm_vmw_size content_base_size = {0};
1149	struct vmw_resource *res;
1150	unsigned int bytes_pp;
1151	struct drm_format_name_buf format_name;
1152	int ret;
1153
1154	switch (mode_cmd->pixel_format) {
1155	case DRM_FORMAT_ARGB8888:
1156	case DRM_FORMAT_XRGB8888:
1157		format = SVGA3D_X8R8G8B8;
1158		bytes_pp = 4;
1159		break;
1160
1161	case DRM_FORMAT_RGB565:
1162	case DRM_FORMAT_XRGB1555:
1163		format = SVGA3D_R5G6B5;
1164		bytes_pp = 2;
1165		break;
1166
 1167	case DRM_FORMAT_C8:
1168		format = SVGA3D_P8;
1169		bytes_pp = 1;
1170		break;
1171
1172	default:
1173		DRM_ERROR("Invalid framebuffer format %s\n",
1174			  drm_get_format_name(mode_cmd->pixel_format, &format_name));
1175		return -EINVAL;
1176	}
1177
1178	content_base_size.width  = mode_cmd->pitches[0] / bytes_pp;
1179	content_base_size.height = mode_cmd->height;
1180	content_base_size.depth  = 1;
1181
1182	ret = vmw_surface_gb_priv_define(dev,
1183					 0, /* kernel visible only */
1184					 0, /* flags */
1185					 format,
1186					 true, /* can be a scanout buffer */
1187					 1, /* num of mip levels */
1188					 0,
1189					 0,
1190					 content_base_size,
1191					 SVGA3D_MS_PATTERN_NONE,
1192					 SVGA3D_MS_QUALITY_NONE,
1193					 srf_out);
1194	if (ret) {
1195		DRM_ERROR("Failed to allocate proxy content buffer\n");
1196		return ret;
1197	}
1198
1199	res = &(*srf_out)->res;
1200
1201	/* Reserve and switch the backing mob. */
1202	mutex_lock(&res->dev_priv->cmdbuf_mutex);
1203	(void) vmw_resource_reserve(res, false, true);
1204	vmw_bo_unreference(&res->backup);
1205	res->backup = vmw_bo_reference(bo_mob);
1206	res->backup_offset = 0;
1207	vmw_resource_unreserve(res, false, false, false, NULL, 0);
1208	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1209
1210	return 0;
1211}
1212
1213
1214
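/**
 * vmw_kms_new_framebuffer_bo - Create a framebuffer wrapping a buffer object
 *
 * @dev_priv: Pointer to device private struct.
 * @bo: Buffer object to wrap the kms framebuffer around.
 * @out: On success points to the newly created framebuffer.
 * @mode_cmd: Frame-buffer metadata.
 *
 * Checks the requested size and pixel format against the buffer object
 * and the active display unit, then allocates, fills in and registers
 * the new framebuffer.
 *
 * Returns 0 on success, negative error code otherwise.
 */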
1215static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
1216				      struct vmw_buffer_object *bo,
1217				      struct vmw_framebuffer **out,
1218				      const struct drm_mode_fb_cmd2
1219				      *mode_cmd)
1220
1221{
1222	struct drm_device *dev = dev_priv->dev;
1223	struct vmw_framebuffer_bo *vfbd;
1224	unsigned int requested_size;
1225	struct drm_format_name_buf format_name;
1226	int ret;
1227
1228	requested_size = mode_cmd->height * mode_cmd->pitches[0];
1229	if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
1230		DRM_ERROR("Screen buffer object size is too small "
1231			  "for requested mode.\n");
1232		return -EINVAL;
1233	}
1234
1235	/* Limited framebuffer color depth support for screen objects */
1236	if (dev_priv->active_display_unit == vmw_du_screen_object) {
1237		switch (mode_cmd->pixel_format) {
1238		case DRM_FORMAT_XRGB8888:
1239		case DRM_FORMAT_ARGB8888:
1240			break;
1241		case DRM_FORMAT_XRGB1555:
1242		case DRM_FORMAT_RGB565:
1243			break;
1244		default:
1245			DRM_ERROR("Invalid pixel format: %s\n",
1246				  drm_get_format_name(mode_cmd->pixel_format, &format_name));
1247			return -EINVAL;
1248		}
1249	}
1250
1251	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
1252	if (!vfbd) {
1253		ret = -ENOMEM;
1254		goto out_err1;
1255	}
1256
1257	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
1258	vfbd->base.bo = true;
1259	vfbd->buffer = vmw_bo_reference(bo);
1260	vfbd->base.user_handle = mode_cmd->handles[0];
1261	*out = &vfbd->base;
1262
1263	ret = drm_framebuffer_init(dev, &vfbd->base.base,
1264				   &vmw_framebuffer_bo_funcs);
1265	if (ret)
1266		goto out_err2;
1267
1268	return 0;
1269
1270out_err2:
1271	vmw_bo_unreference(&bo);
1272	kfree(vfbd);
1273out_err1:
1274	return ret;
1275}
1276
1277
1278/**
1279 * vmw_kms_srf_ok - check if a surface can be created
1280 *
1281 * @width: requested width
1282 * @height: requested height
1283 *
1284 * Surfaces need to be less than texture size
1285 */
1286static bool
1287vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
1288{
1289	if (width  > dev_priv->texture_max_width ||
1290	    height > dev_priv->texture_max_height)
1291		return false;
1292
1293	return true;
1294}
1295
1296/**
1297 * vmw_kms_new_framebuffer - Create a new framebuffer.
1298 *
1299 * @dev_priv: Pointer to device private struct.
1300 * @bo: Pointer to buffer object to wrap the kms framebuffer around.
1301 * Either @bo or @surface must be NULL.
1302 * @surface: Pointer to a surface to wrap the kms framebuffer around.
1303 * Either @bo or @surface must be NULL.
1304 * @only_2d: No presents will occur to this buffer object based framebuffer.
1305 * This helps the code to do some important optimizations.
1306 * @mode_cmd: Frame-buffer metadata.
1307 */
1308struct vmw_framebuffer *
1309vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1310			struct vmw_buffer_object *bo,
1311			struct vmw_surface *surface,
1312			bool only_2d,
1313			const struct drm_mode_fb_cmd2 *mode_cmd)
1314{
1315	struct vmw_framebuffer *vfb = NULL;
1316	bool is_bo_proxy = false;
1317	int ret;
1318
1319	/*
 1320	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
1321	 * therefore, wrap the buffer object in a surface so we can use the
1322	 * SurfaceCopy command.
1323	 */
1324	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
1325	    bo && only_2d &&
1326	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
1327	    dev_priv->active_display_unit == vmw_du_screen_target) {
1328		ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd,
1329					  bo, &surface);
1330		if (ret)
1331			return ERR_PTR(ret);
1332
1333		is_bo_proxy = true;
1334	}
1335
 1336	/* Create the new framebuffer depending on what we have */
1337	if (surface) {
1338		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
1339						      mode_cmd,
1340						      is_bo_proxy);
1341
1342		/*
1343		 * vmw_create_bo_proxy() adds a reference that is no longer
1344		 * needed
1345		 */
1346		if (is_bo_proxy)
1347			vmw_surface_unreference(&surface);
1348	} else if (bo) {
1349		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
1350						 mode_cmd);
1351	} else {
1352		BUG();
1353	}
1354
1355	if (ret)
1356		return ERR_PTR(ret);
1357
1358	vfb->pin = vmw_framebuffer_pin;
1359	vfb->unpin = vmw_framebuffer_unpin;
1360
1361	return vfb;
1362}
1363
1364/*
1365 * Generic Kernel modesetting functions
1366 */
1367
1368static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1369						 struct drm_file *file_priv,
1370						 const struct drm_mode_fb_cmd2 *mode_cmd)
1371{
1372	struct vmw_private *dev_priv = vmw_priv(dev);
1373	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1374	struct vmw_framebuffer *vfb = NULL;
1375	struct vmw_surface *surface = NULL;
1376	struct vmw_buffer_object *bo = NULL;
1377	struct ttm_base_object *user_obj;
1378	int ret;
1379
1380	/*
1381	 * Take a reference on the user object of the resource
1382	 * backing the kms fb. This ensures that user-space handle
1383	 * lookups on that resource will always work as long as
1384	 * it's registered with a kms framebuffer. This is important,
1385	 * since vmw_execbuf_process identifies resources in the
1386	 * command stream using user-space handles.
1387	 */
1388
1389	user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]);
1390	if (unlikely(user_obj == NULL)) {
1391		DRM_ERROR("Could not locate requested kms frame buffer.\n");
1392		return ERR_PTR(-ENOENT);
1393	}
1394
1395	/**
1396	 * End conditioned code.
1397	 */
1398
1399	/* returns either a bo or surface */
1400	ret = vmw_user_lookup_handle(dev_priv, tfile,
1401				     mode_cmd->handles[0],
1402				     &surface, &bo);
1403	if (ret)
1404		goto err_out;
1405
1406
1407	if (!bo &&
1408	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
1409		DRM_ERROR("Surface size cannot exceed %dx%d",
1410			dev_priv->texture_max_width,
1411			dev_priv->texture_max_height);
1412		goto err_out;
1413	}
1414
1415
1416	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
1417				      !(dev_priv->capabilities & SVGA_CAP_3D),
1418				      mode_cmd);
1419	if (IS_ERR(vfb)) {
1420		ret = PTR_ERR(vfb);
1421		goto err_out;
 1422	}
1423
1424err_out:
 1425	/* vmw_user_lookup_handle takes one ref, and so does new_fb */
1426	if (bo)
1427		vmw_bo_unreference(&bo);
1428	if (surface)
1429		vmw_surface_unreference(&surface);
1430
1431	if (ret) {
1432		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
1433		ttm_base_object_unref(&user_obj);
1434		return ERR_PTR(ret);
1435	} else
1436		vfb->user_obj = user_obj;
1437
1438	return &vfb->base;
1439}
1440
1441/**
1442 * vmw_kms_check_display_memory - Validates display memory required for a
1443 * topology
1444 * @dev: DRM device
1445 * @num_rects: number of drm_rect in rects
1446 * @rects: array of drm_rect representing the topology to validate indexed by
1447 * crtc index.
1448 *
1449 * Returns:
1450 * 0 on success otherwise negative error code
1451 */
1452static int vmw_kms_check_display_memory(struct drm_device *dev,
1453					uint32_t num_rects,
1454					struct drm_rect *rects)
1455{
1456	struct vmw_private *dev_priv = vmw_priv(dev);
1457	struct drm_rect bounding_box = {0};
1458	u64 total_pixels = 0, pixel_mem, bb_mem;
1459	int i;
1460
1461	for (i = 0; i < num_rects; i++) {
1462		/*
1463		 * For STDU only individual screen (screen target) is limited by
1464		 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
1465		 */
1466		if (dev_priv->active_display_unit == vmw_du_screen_target &&
1467		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
1468		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
1469			VMW_DEBUG_KMS("Screen size not supported.\n");
1470			return -EINVAL;
1471		}
1472
1473		/* Bounding box upper left is at (0,0). */
1474		if (rects[i].x2 > bounding_box.x2)
1475			bounding_box.x2 = rects[i].x2;
1476
1477		if (rects[i].y2 > bounding_box.y2)
1478			bounding_box.y2 = rects[i].y2;
1479
1480		total_pixels += (u64) drm_rect_width(&rects[i]) *
1481			(u64) drm_rect_height(&rects[i]);
1482	}
1483
1484	/* Virtual svga device primary limits are always in 32-bpp. */
1485	pixel_mem = total_pixels * 4;
1486
1487	/*
 1488	 * For HV10 and below, prim_bb_mem is the vram size. When
 1489	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the vram
 1490	 * size is the limit on the primary bounding box.
1491	 */
1492	if (pixel_mem > dev_priv->prim_bb_mem) {
1493		VMW_DEBUG_KMS("Combined output size too large.\n");
1494		return -EINVAL;
1495	}
1496
1497	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
1498	if (dev_priv->active_display_unit != vmw_du_screen_target ||
1499	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
1500		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
1501
1502		if (bb_mem > dev_priv->prim_bb_mem) {
1503			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
1504			return -EINVAL;
1505		}
1506	}
1507
1508	return 0;
1509}
1510
1511/**
1512 * vmw_crtc_state_and_lock - Return new or current crtc state with locked
1513 * crtc mutex
1514 * @state: The atomic state pointer containing the new atomic state
1515 * @crtc: The crtc
1516 *
1517 * This function returns the new crtc state if it's part of the state update.
1518 * Otherwise returns the current crtc state. It also makes sure that the
1519 * crtc mutex is locked.
1520 *
1521 * Returns: A valid crtc state pointer or NULL. It may also return a
1522 * pointer error, in particular -EDEADLK if locking needs to be rerun.
1523 */
1524static struct drm_crtc_state *
1525vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
1526{
1527	struct drm_crtc_state *crtc_state;
1528
1529	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1530	if (crtc_state) {
1531		lockdep_assert_held(&crtc->mutex.mutex.base);
1532	} else {
1533		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
1534
1535		if (ret != 0 && ret != -EALREADY)
1536			return ERR_PTR(ret);
1537
1538		crtc_state = crtc->state;
1539	}
1540
1541	return crtc_state;
1542}
1543
1544/**
1545 * vmw_kms_check_implicit - Verify that all implicit display units scan out
1546 * from the same fb after the new state is committed.
1547 * @dev: The drm_device.
1548 * @state: The new state to be checked.
1549 *
1550 * Returns:
1551 *   Zero on success,
1552 *   -EINVAL on invalid state,
1553 *   -EDEADLK if modeset locking needs to be rerun.
1554 */
1555static int vmw_kms_check_implicit(struct drm_device *dev,
1556				  struct drm_atomic_state *state)
1557{
1558	struct drm_framebuffer *implicit_fb = NULL;
1559	struct drm_crtc *crtc;
1560	struct drm_crtc_state *crtc_state;
1561	struct drm_plane_state *plane_state;
1562
1563	drm_for_each_crtc(crtc, dev) {
1564		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1565
1566		if (!du->is_implicit)
1567			continue;
1568
1569		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1570		if (IS_ERR(crtc_state))
1571			return PTR_ERR(crtc_state);
1572
1573		if (!crtc_state || !crtc_state->enable)
1574			continue;
1575
1576		/*
1577		 * Can't move primary planes across crtcs, so this is OK.
1578		 * It also means we don't need to take the plane mutex.
1579		 */
1580		plane_state = du->primary.state;
1581		if (plane_state->crtc != crtc)
1582			continue;
1583
1584		if (!implicit_fb)
1585			implicit_fb = plane_state->fb;
1586		else if (implicit_fb != plane_state->fb)
1587			return -EINVAL;
1588	}
1589
1590	return 0;
1591}
1592
1593/**
1594 * vmw_kms_check_topology - Validates topology in drm_atomic_state
1595 * @dev: DRM device
1596 * @state: the driver state object
1597 *
1598 * Returns:
1599 * 0 on success otherwise negative error code
1600 */
1601static int vmw_kms_check_topology(struct drm_device *dev,
1602				  struct drm_atomic_state *state)
1603{
1604	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1605	struct drm_rect *rects;
1606	struct drm_crtc *crtc;
1607	uint32_t i;
1608	int ret = 0;
1609
1610	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
1611			GFP_KERNEL);
1612	if (!rects)
1613		return -ENOMEM;
1614
1615	drm_for_each_crtc(crtc, dev) {
1616		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1617		struct drm_crtc_state *crtc_state;
1618
1619		i = drm_crtc_index(crtc);
1620
1621		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1622		if (IS_ERR(crtc_state)) {
1623			ret = PTR_ERR(crtc_state);
1624			goto clean;
1625		}
1626
1627		if (!crtc_state)
1628			continue;
1629
1630		if (crtc_state->enable) {
1631			rects[i].x1 = du->gui_x;
1632			rects[i].y1 = du->gui_y;
1633			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
1634			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
1635		} else {
1636			rects[i].x1 = 0;
1637			rects[i].y1 = 0;
1638			rects[i].x2 = 0;
1639			rects[i].y2 = 0;
1640		}
1641	}
1642
1643	/* Determine change to topology due to new atomic state */
1644	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
1645				      new_crtc_state, i) {
1646		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1647		struct drm_connector *connector;
1648		struct drm_connector_state *conn_state;
1649		struct vmw_connector_state *vmw_conn_state;
1650
1651		if (!du->pref_active && new_crtc_state->enable) {
1652			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
1653			ret = -EINVAL;
1654			goto clean;
1655		}
1656
1657		/*
1658		 * For vmwgfx each crtc has only one connector attached and it
1659		 * is not changed so don't really need to check the
1660		 * crtc->connector_mask and iterate over it.
1661		 */
1662		connector = &du->connector;
1663		conn_state = drm_atomic_get_connector_state(state, connector);
1664		if (IS_ERR(conn_state)) {
1665			ret = PTR_ERR(conn_state);
1666			goto clean;
1667		}
1668
1669		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
1670		vmw_conn_state->gui_x = du->gui_x;
1671		vmw_conn_state->gui_y = du->gui_y;
1672	}
1673
1674	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
1675					   rects);
1676
1677clean:
1678	kfree(rects);
1679	return ret;
1680}
1681
1682/**
 1683 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
1684 *
1685 * @dev: DRM device
1686 * @state: the driver state object
1687 *
1688 * This is a simple wrapper around drm_atomic_helper_check_modeset() for
1689 * us to assign a value to mode->crtc_clock so that
1690 * drm_calc_timestamping_constants() won't throw an error message
1691 *
1692 * Returns:
1693 * Zero for success or -errno
1694 */
1695static int
1696vmw_kms_atomic_check_modeset(struct drm_device *dev,
1697			     struct drm_atomic_state *state)
1698{
1699	struct drm_crtc *crtc;
1700	struct drm_crtc_state *crtc_state;
1701	bool need_modeset = false;
1702	int i, ret;
1703
1704	ret = drm_atomic_helper_check(dev, state);
1705	if (ret)
1706		return ret;
1707
1708	ret = vmw_kms_check_implicit(dev, state);
1709	if (ret) {
1710		VMW_DEBUG_KMS("Invalid implicit state\n");
1711		return ret;
1712	}
1713
1714	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1715		if (drm_atomic_crtc_needs_modeset(crtc_state))
1716			need_modeset = true;
1717	}
1718
1719	if (need_modeset)
1720		return vmw_kms_check_topology(dev, state);
1721
1722	return ret;
1723}
1724
1725static const struct drm_mode_config_funcs vmw_kms_funcs = {
1726	.fb_create = vmw_kms_fb_create,
1727	.atomic_check = vmw_kms_atomic_check_modeset,
1728	.atomic_commit = drm_atomic_helper_commit,
1729};
1730
1731static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1732				   struct drm_file *file_priv,
1733				   struct vmw_framebuffer *vfb,
1734				   struct vmw_surface *surface,
1735				   uint32_t sid,
1736				   int32_t destX, int32_t destY,
1737				   struct drm_vmw_rect *clips,
1738				   uint32_t num_clips)
1739{
1740	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1741					    &surface->res, destX, destY,
1742					    num_clips, 1, NULL, NULL);
1743}
1744
1745
1746int vmw_kms_present(struct vmw_private *dev_priv,
1747		    struct drm_file *file_priv,
1748		    struct vmw_framebuffer *vfb,
1749		    struct vmw_surface *surface,
1750		    uint32_t sid,
1751		    int32_t destX, int32_t destY,
1752		    struct drm_vmw_rect *clips,
1753		    uint32_t num_clips)
1754{
1755	int ret;
1756
1757	switch (dev_priv->active_display_unit) {
1758	case vmw_du_screen_target:
1759		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
1760						 &surface->res, destX, destY,
1761						 num_clips, 1, NULL, NULL);
1762		break;
1763	case vmw_du_screen_object:
1764		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
1765					      sid, destX, destY, clips,
1766					      num_clips);
1767		break;
1768	default:
1769		WARN_ONCE(true,
1770			  "Present called with invalid display system.\n");
1771		ret = -ENOSYS;
1772		break;
1773	}
1774	if (ret)
1775		return ret;
1776
1777	vmw_fifo_flush(dev_priv, false);
1778
1779	return 0;
1780}
1781
1782static void
1783vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
1784{
1785	if (dev_priv->hotplug_mode_update_property)
1786		return;
1787
1788	dev_priv->hotplug_mode_update_property =
1789		drm_property_create_range(dev_priv->dev,
1790					  DRM_MODE_PROP_IMMUTABLE,
1791					  "hotplug_mode_update", 0, 1);
1792
1793	if (!dev_priv->hotplug_mode_update_property)
1794		return;
1795
1796}
1797
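/**
 * vmw_kms_init - Initialize kernel modesetting
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Sets up the mode config limits and properties and then brings up a
 * display system, trying screen target first, then screen object, and
 * finally falling back to the legacy display unit.
 */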
1798int vmw_kms_init(struct vmw_private *dev_priv)
1799{
1800	struct drm_device *dev = dev_priv->dev;
1801	int ret;
1802
1803	drm_mode_config_init(dev);
1804	dev->mode_config.funcs = &vmw_kms_funcs;
1805	dev->mode_config.min_width = 1;
1806	dev->mode_config.min_height = 1;
1807	dev->mode_config.max_width = dev_priv->texture_max_width;
1808	dev->mode_config.max_height = dev_priv->texture_max_height;
1809
1810	drm_mode_create_suggested_offset_properties(dev);
1811	vmw_kms_create_hotplug_mode_update_property(dev_priv);
1812
1813	ret = vmw_kms_stdu_init_display(dev_priv);
1814	if (ret) {
1815		ret = vmw_kms_sou_init_display(dev_priv);
1816		if (ret) /* Fallback */
1817			ret = vmw_kms_ldu_init_display(dev_priv);
1818	}
1819
1820	return ret;
1821}
1822
1823int vmw_kms_close(struct vmw_private *dev_priv)
1824{
1825	int ret = 0;
1826
1827	/*
1828	 * Docs says we should take the lock before calling this function
1829	 * but since it destroys encoders and our destructor calls
1830	 * drm_encoder_cleanup which takes the lock we deadlock.
1831	 */
1832	drm_mode_config_cleanup(dev_priv->dev);
1833	if (dev_priv->active_display_unit == vmw_du_legacy)
1834		ret = vmw_kms_ldu_close_display(dev_priv);
1835
1836	return ret;
1837}
1838
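/**
 * vmw_kms_cursor_bypass_ioctl - Set cursor hotspots from user-space
 *
 * @dev: DRM device.
 * @data: The cursor bypass ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * Updates the hotspot of a single crtc, or of all crtcs if the
 * DRM_VMW_CURSOR_BYPASS_ALL flag is set.
 */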
1839int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
1840				struct drm_file *file_priv)
1841{
1842	struct drm_vmw_cursor_bypass_arg *arg = data;
1843	struct vmw_display_unit *du;
1844	struct drm_crtc *crtc;
1845	int ret = 0;
1846
1847
1848	mutex_lock(&dev->mode_config.mutex);
1849	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
1850
1851		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1852			du = vmw_crtc_to_du(crtc);
1853			du->hotspot_x = arg->xhot;
1854			du->hotspot_y = arg->yhot;
1855		}
1856
1857		mutex_unlock(&dev->mode_config.mutex);
1858		return 0;
1859	}
1860
1861	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
1862	if (!crtc) {
1863		ret = -ENOENT;
1864		goto out;
1865	}
1866
1867	du = vmw_crtc_to_du(crtc);
1868
1869	du->hotspot_x = arg->xhot;
1870	du->hotspot_y = arg->yhot;
1871
1872out:
1873	mutex_unlock(&dev->mode_config.mutex);
1874
1875	return ret;
1876}
1877
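/**
 * vmw_kms_write_svga - Program a display mode into the SVGA registers
 *
 * @vmw_priv: Pointer to the device private struct.
 * @width: Mode width in pixels.
 * @height: Mode height in pixels.
 * @pitch: Scanline pitch in bytes.
 * @bpp: Bits per pixel.
 * @depth: Color depth; must match what the host expects for @bpp.
 *
 * Returns 0 on success, -EINVAL if the host reports a different depth.
 */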
1878int vmw_kms_write_svga(struct vmw_private *vmw_priv,
1879			unsigned width, unsigned height, unsigned pitch,
1880			unsigned bpp, unsigned depth)
1881{
1882	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
1883		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
1884	else if (vmw_fifo_have_pitchlock(vmw_priv))
1885		vmw_mmio_write(pitch, vmw_priv->mmio_virt +
1886			       SVGA_FIFO_PITCHLOCK);
1887	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
1888	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
1889	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
1890
1891	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
1892		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
1893			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
1894		return -EINVAL;
1895	}
1896
1897	return 0;
1898}
1899
1900int vmw_kms_save_vga(struct vmw_private *vmw_priv)
1901{
1902	struct vmw_vga_topology_state *save;
1903	uint32_t i;
1904
1905	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
1906	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
1907	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
1908	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
1909		vmw_priv->vga_pitchlock =
1910		  vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
1911	else if (vmw_fifo_have_pitchlock(vmw_priv))
1912		vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
1913							SVGA_FIFO_PITCHLOCK);
1914
1915	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
1916		return 0;
1917
1918	vmw_priv->num_displays = vmw_read(vmw_priv,
1919					  SVGA_REG_NUM_GUEST_DISPLAYS);
1920
1921	if (vmw_priv->num_displays == 0)
1922		vmw_priv->num_displays = 1;
1923
1924	for (i = 0; i < vmw_priv->num_displays; ++i) {
1925		save = &vmw_priv->vga_save[i];
1926		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
1927		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
1928		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
1929		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
1930		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
1931		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
1932		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
1933		if (i == 0 && vmw_priv->num_displays == 1 &&
1934		    save->width == 0 && save->height == 0) {
1935
1936			/*
1937			 * It should be fairly safe to assume that these
1938			 * values are uninitialized.
1939			 */
1940
1941			save->width = vmw_priv->vga_width - save->pos_x;
1942			save->height = vmw_priv->vga_height - save->pos_y;
1943		}
1944	}
1945
1946	return 0;
1947}
1948
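/**
 * vmw_kms_restore_vga - Restore the VGA register state and guest
 * display topology previously saved by vmw_kms_save_vga().
 *
 * @vmw_priv: Pointer to the device private struct.
 */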
1949int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
1950{
1951	struct vmw_vga_topology_state *save;
1952	uint32_t i;
1953
1954	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
1955	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
1956	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
1957	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
1958		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
1959			  vmw_priv->vga_pitchlock);
1960	else if (vmw_fifo_have_pitchlock(vmw_priv))
1961		vmw_mmio_write(vmw_priv->vga_pitchlock,
1962			       vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
1963
1964	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
1965		return 0;
1966
1967	for (i = 0; i < vmw_priv->num_displays; ++i) {
1968		save = &vmw_priv->vga_save[i];
1969		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
1970		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
1971		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
1972		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
1973		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
1974		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
1975		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
1976	}
1977
1978	return 0;
1979}
1980
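/**
 * vmw_kms_validate_mode_vram - Check that a mode's framebuffer fits
 * in the available memory.
 *
 * @dev_priv: Pointer to the device private struct.
 * @pitch: Scanline pitch in bytes.
 * @height: Mode height in lines.
 *
 * The pitch * height product is checked against the primary
 * bounding-box memory for screen targets, and against the VRAM size
 * otherwise. As an example (our arithmetic): a 1920x1200 mode at an
 * assumed 4 bytes per pixel needs 1920 * 4 * 1200 = 9216000 bytes,
 * roughly 8.8 MiB.
 */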
1981bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
1982				uint32_t pitch,
1983				uint32_t height)
1984{
1985	return ((u64) pitch * (u64) height) < (u64)
1986		((dev_priv->active_display_unit == vmw_du_screen_target) ?
1987		 dev_priv->prim_bb_mem : dev_priv->vram_size);
1988}
1989
1990
1991/**
1992 * Function called by DRM code with vbl_lock held.
1993 */
1994u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
1995{
1996	return 0;
1997}
1998
1999/**
2000 * Function called by DRM code with vbl_lock held.
2001 */
2002int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
2003{
2004	return -EINVAL;
2005}
2006
2007/**
2008 * Function called by DRM code with vbl_lock held.
2009 */
2010void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
2011{
2012}
2013
2014/**
2015 * vmw_du_update_layout - Update the display unit with topology from resolution
2016 * plugin and generate DRM uevent
2017 * @dev_priv: device private
2018 * @num_rects: number of drm_rect in rects
2019 * @rects: topology to update
2020 */
2021static int vmw_du_update_layout(struct vmw_private *dev_priv,
2022				unsigned int num_rects, struct drm_rect *rects)
2023{
2024	struct drm_device *dev = dev_priv->dev;
2025	struct vmw_display_unit *du;
2026	struct drm_connector *con;
2027	struct drm_connector_list_iter conn_iter;
2028	struct drm_modeset_acquire_ctx ctx;
2029	struct drm_crtc *crtc;
2030	int ret;
2031
2032	/* Currently gui_x/y is protected with the crtc mutex */
2033	mutex_lock(&dev->mode_config.mutex);
2034	drm_modeset_acquire_init(&ctx, 0);
2035retry:
2036	drm_for_each_crtc(crtc, dev) {
2037		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2038		if (ret < 0) {
2039			if (ret == -EDEADLK) {
2040				drm_modeset_backoff(&ctx);
2041				goto retry;
2042			}
2043			goto out_fini;
2044		}
2045	}
2046
2047	drm_connector_list_iter_begin(dev, &conn_iter);
2048	drm_for_each_connector_iter(con, &conn_iter) {
2049		du = vmw_connector_to_du(con);
2050		if (num_rects > du->unit) {
2051			du->pref_width = drm_rect_width(&rects[du->unit]);
2052			du->pref_height = drm_rect_height(&rects[du->unit]);
2053			du->pref_active = true;
2054			du->gui_x = rects[du->unit].x1;
2055			du->gui_y = rects[du->unit].y1;
2056		} else {
2057			du->pref_width = 800;
2058			du->pref_height = 600;
2059			du->pref_active = false;
2060			du->gui_x = 0;
2061			du->gui_y = 0;
2062		}
2063	}
2064	drm_connector_list_iter_end(&conn_iter);
2065
2066	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2067		du = vmw_connector_to_du(con);
2068		if (num_rects > du->unit) {
2069			drm_object_property_set_value
2070			  (&con->base, dev->mode_config.suggested_x_property,
2071			   du->gui_x);
2072			drm_object_property_set_value
2073			  (&con->base, dev->mode_config.suggested_y_property,
2074			   du->gui_y);
2075		} else {
2076			drm_object_property_set_value
2077			  (&con->base, dev->mode_config.suggested_x_property,
2078			   0);
2079			drm_object_property_set_value
2080			  (&con->base, dev->mode_config.suggested_y_property,
2081			   0);
2082		}
2083		con->status = vmw_du_connector_detect(con, true);
2084	}
2085
2086	drm_sysfs_hotplug_event(dev);
2087out_fini:
2088	drm_modeset_drop_locks(&ctx);
2089	drm_modeset_acquire_fini(&ctx);
2090	mutex_unlock(&dev->mode_config.mutex);
2091
2092	return 0;
2093}
2094
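/**
 * vmw_du_crtc_gamma_set - Load a gamma ramp into the device palette
 *
 * @crtc: The crtc to set the gamma ramp for.
 * @r: Red ramp, 16 bits per entry.
 * @g: Green ramp, 16 bits per entry.
 * @b: Blue ramp, 16 bits per entry.
 * @size: Number of entries in each ramp.
 * @ctx: Modeset acquire context; unused here.
 *
 * Only the upper 8 bits of each 16-bit entry are written to the
 * SVGA_PALETTE_BASE registers.
 */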
2095int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2096			  u16 *r, u16 *g, u16 *b,
2097			  uint32_t size,
2098			  struct drm_modeset_acquire_ctx *ctx)
2099{
2100	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2101	int i;
2102
2103	for (i = 0; i < size; i++) {
2104		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2105			  r[i], g[i], b[i]);
2106		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2107		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2108		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2109	}
2110
2111	return 0;
2112}
2113
2114int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2115{
2116	return 0;
2117}
2118
2119enum drm_connector_status
2120vmw_du_connector_detect(struct drm_connector *connector, bool force)
2121{
2122	uint32_t num_displays;
2123	struct drm_device *dev = connector->dev;
2124	struct vmw_private *dev_priv = vmw_priv(dev);
2125	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2126
2127	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2128
2129	return ((vmw_connector_to_du(connector)->unit < num_displays &&
2130		 du->pref_active) ?
2131		connector_status_connected : connector_status_disconnected);
2132}
2133
2134static struct drm_display_mode vmw_kms_connector_builtin[] = {
2135	/* 640x480@60Hz */
2136	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
2137		   752, 800, 0, 480, 489, 492, 525, 0,
2138		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2139	/* 800x600@60Hz */
2140	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
2141		   968, 1056, 0, 600, 601, 605, 628, 0,
2142		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2143	/* 1024x768@60Hz */
2144	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
2145		   1184, 1344, 0, 768, 771, 777, 806, 0,
2146		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2147	/* 1152x864@75Hz */
2148	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
2149		   1344, 1600, 0, 864, 865, 868, 900, 0,
2150		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2151	/* 1280x768@60Hz */
2152	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
2153		   1472, 1664, 0, 768, 771, 778, 798, 0,
2154		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2155	/* 1280x800@60Hz */
2156	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
2157		   1480, 1680, 0, 800, 803, 809, 831, 0,
2158		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2159	/* 1280x960@60Hz */
2160	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
2161		   1488, 1800, 0, 960, 961, 964, 1000, 0,
2162		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2163	/* 1280x1024@60Hz */
2164	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
2165		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
2166		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2167	/* 1360x768@60Hz */
2168	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
2169		   1536, 1792, 0, 768, 771, 777, 795, 0,
2170		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2171	/* 1400x1050@60Hz */
2172	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
2173		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
2174		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2175	/* 1440x900@60Hz */
2176	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
2177		   1672, 1904, 0, 900, 903, 909, 934, 0,
2178		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2179	/* 1600x1200@60Hz */
2180	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
2181		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
2182		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2183	/* 1680x1050@60Hz */
2184	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
2185		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
2186		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2187	/* 1792x1344@60Hz */
2188	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2189		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
2190		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2191	/* 1856x1392@60Hz */
2192	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2193		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
2194		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2195	/* 1920x1200@60Hz */
2196	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2197		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
2198		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2199	/* 1920x1440@60Hz */
2200	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2201		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
2202		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2203	/* 2560x1600@60Hz */
2204	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
2205		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
2206		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2207	/* Terminate */
2208	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
2209};
2210
2211/**
2212 * vmw_guess_mode_timing - Provide fake timings for a
2213 * 60Hz vrefresh mode.
2214 *
2215 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2216 * members filled in.
2217 */
2218void vmw_guess_mode_timing(struct drm_display_mode *mode)
2219{
2220	mode->hsync_start = mode->hdisplay + 50;
2221	mode->hsync_end = mode->hsync_start + 50;
2222	mode->htotal = mode->hsync_end + 50;
2223
2224	mode->vsync_start = mode->vdisplay + 50;
2225	mode->vsync_end = mode->vsync_start + 50;
2226	mode->vtotal = mode->vsync_end + 50;
2227
2228	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2229	mode->vrefresh = drm_mode_vrefresh(mode);
2230}
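/*
 * A worked example of the formula above (our arithmetic, not from the
 * original sources): for a requested 1024x768 mode, htotal becomes
 * 1024 + 150 = 1174 and vtotal 768 + 150 = 918, so the clock is
 * 1174 * 918 / 100 * 6 = 64662 kHz, giving a refresh rate of
 * 64662000 / (1174 * 918), which is approximately 60 Hz.
 */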
2231
2232
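/**
 * vmw_du_connector_fill_modes - Build the probed mode list for a connector
 *
 * @connector: The connector to fill modes for.
 * @max_width: Maximum allowed mode width.
 * @max_height: Maximum allowed mode height.
 *
 * Adds the display unit's preferred mode plus all builtin modes that
 * fit within the size and memory limits, then sorts the list so that
 * the preferred mode comes first.
 */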
2233int vmw_du_connector_fill_modes(struct drm_connector *connector,
2234				uint32_t max_width, uint32_t max_height)
2235{
2236	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2237	struct drm_device *dev = connector->dev;
2238	struct vmw_private *dev_priv = vmw_priv(dev);
2239	struct drm_display_mode *mode = NULL;
2240	struct drm_display_mode *bmode;
2241	struct drm_display_mode prefmode = { DRM_MODE("preferred",
2242		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2243		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2244		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2245	};
2246	int i;
2247	u32 assumed_bpp = 4;
2248
2249	if (dev_priv->assume_16bpp)
2250		assumed_bpp = 2;
2251
2252	max_width  = min(max_width,  dev_priv->texture_max_width);
2253	max_height = min(max_height, dev_priv->texture_max_height);
2254
2255	/*
2256	 * For STDU, a mode is additionally limited by the
2257	 * SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
2258	 */
2259	if (dev_priv->active_display_unit == vmw_du_screen_target) {
2260		max_width  = min(max_width,  dev_priv->stdu_max_width);
2261		max_height = min(max_height, dev_priv->stdu_max_height);
2262	}
2263
2264	/* Add preferred mode */
2265	mode = drm_mode_duplicate(dev, &prefmode);
2266	if (!mode)
2267		return 0;
2268	mode->hdisplay = du->pref_width;
2269	mode->vdisplay = du->pref_height;
2270	vmw_guess_mode_timing(mode);
2271
2272	if (vmw_kms_validate_mode_vram(dev_priv,
2273					mode->hdisplay * assumed_bpp,
2274					mode->vdisplay)) {
2275		drm_mode_probed_add(connector, mode);
2276	} else {
2277		drm_mode_destroy(dev, mode);
2278		mode = NULL;
2279	}
2280
2281	if (du->pref_mode) {
2282		list_del_init(&du->pref_mode->head);
2283		drm_mode_destroy(dev, du->pref_mode);
2284	}
2285
2286	/* mode might be NULL here, this is intended */
2287	du->pref_mode = mode;
2288
2289	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
2290		bmode = &vmw_kms_connector_builtin[i];
2291		if (bmode->hdisplay > max_width ||
2292		    bmode->vdisplay > max_height)
2293			continue;
2294
2295		if (!vmw_kms_validate_mode_vram(dev_priv,
2296						bmode->hdisplay * assumed_bpp,
2297						bmode->vdisplay))
2298			continue;
2299
2300		mode = drm_mode_duplicate(dev, bmode);
2301		if (!mode)
2302			return 0;
2303		mode->vrefresh = drm_mode_vrefresh(mode);
2304
2305		drm_mode_probed_add(connector, mode);
2306	}
2307
2308	drm_connector_list_update(connector);
2309	/* Move the preferred mode first, to help apps pick the right mode. */
2310	drm_mode_sort(&connector->modes);
2311
2312	return 1;
2313}
2314
2315/**
2316 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2317 * @dev: drm device for the ioctl
2318 * @data: data pointer for the ioctl
2319 * @file_priv: drm file for the ioctl call
2320 *
2321 * Update preferred topology of display unit as per ioctl request. The topology
2322 * is expressed as array of drm_vmw_rect.
2323 * e.g.
2324 * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2325 *
2326 * NOTE:
2327 * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
2328 * Besides the device limit on topology, x + w and y + h (lower right) cannot
2329 * be greater than INT_MAX. A topology beyond these limits returns an error.
2330 *
2331 * Returns:
2332 * Zero on success, negative errno on failure.
2333 */
2334int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2335				struct drm_file *file_priv)
2336{
2337	struct vmw_private *dev_priv = vmw_priv(dev);
2338	struct drm_mode_config *mode_config = &dev->mode_config;
2339	struct drm_vmw_update_layout_arg *arg =
2340		(struct drm_vmw_update_layout_arg *)data;
2341	void __user *user_rects;
2342	struct drm_vmw_rect *rects;
2343	struct drm_rect *drm_rects;
2344	unsigned rects_size;
2345	int ret, i;
2346
2347	if (!arg->num_outputs) {
2348		struct drm_rect def_rect = {0, 0, 800, 600};
2349		VMW_DEBUG_KMS("Default layout x1 = %d y1 = %d x2 = %d y2 = %d\n",
2350			      def_rect.x1, def_rect.y1,
2351			      def_rect.x2, def_rect.y2);
2352		vmw_du_update_layout(dev_priv, 1, &def_rect);
2353		return 0;
2354	}
2355
2356	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2357	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2358			GFP_KERNEL);
2359	if (unlikely(!rects))
2360		return -ENOMEM;
2361
2362	user_rects = (void __user *)(unsigned long)arg->rects;
2363	ret = copy_from_user(rects, user_rects, rects_size);
2364	if (unlikely(ret != 0)) {
2365		DRM_ERROR("Failed to get rects.\n");
2366		ret = -EFAULT;
2367		goto out_free;
2368	}
2369
2370	drm_rects = (struct drm_rect *)rects;
2371
2372	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2373	for (i = 0; i < arg->num_outputs; i++) {
2374		struct drm_vmw_rect curr_rect;
2375
2376		/* Verify user-space rects for overflow, as the kernel uses drm_rect */
2377		if ((rects[i].x + rects[i].w > INT_MAX) ||
2378		    (rects[i].y + rects[i].h > INT_MAX)) {
2379			ret = -ERANGE;
2380			goto out_free;
2381		}
2382
2383		curr_rect = rects[i];
2384		drm_rects[i].x1 = curr_rect.x;
2385		drm_rects[i].y1 = curr_rect.y;
2386		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2387		drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2388
2389		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
2390			      drm_rects[i].x1, drm_rects[i].y1,
2391			      drm_rects[i].x2, drm_rects[i].y2);
2392
2393		/*
2394		 * Currently this check limits the topology to
2395		 * mode_config->max (which is the maximum texture size
2396		 * supported by the virtual device). The limit is here to
2397		 * accommodate window managers that create one big framebuffer
2398		 * for the whole topology.
2399		 */
2400		if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
2401		    drm_rects[i].x2 > mode_config->max_width ||
2402		    drm_rects[i].y2 > mode_config->max_height) {
2403			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2404				      drm_rects[i].x1, drm_rects[i].y1,
2405				      drm_rects[i].x2, drm_rects[i].y2);
2406			ret = -EINVAL;
2407			goto out_free;
2408		}
2409	}
2410
2411	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2412
2413	if (ret == 0)
2414		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2415
2416out_free:
2417	kfree(rects);
2418	return ret;
2419}
2420
2421/**
2422 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2423 * on a set of cliprects and a set of display units.
2424 *
2425 * @dev_priv: Pointer to a device private structure.
2426 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2427 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
2428 * Cliprects are given in framebuffer coordinates.
2429 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2430 * be NULL. Cliprects are given in source coordinates.
2431 * @dest_x: X coordinate offset for the crtc / destination clip rects.
2432 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2433 * @num_clips: Number of cliprects in the @clips or @vclips array.
2434 * @increment: Integer with which to increment the clip counter when looping.
2435 * Used to skip a predetermined number of clip rects.
2436 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2437 */
2438int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2439			 struct vmw_framebuffer *framebuffer,
2440			 const struct drm_clip_rect *clips,
2441			 const struct drm_vmw_rect *vclips,
2442			 s32 dest_x, s32 dest_y,
2443			 int num_clips,
2444			 int increment,
2445			 struct vmw_kms_dirty *dirty)
2446{
2447	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2448	struct drm_crtc *crtc;
2449	u32 num_units = 0;
2450	u32 i, k;
2451
2452	dirty->dev_priv = dev_priv;
2453
2454	/* If crtc is passed, no need to iterate over other display units */
2455	if (dirty->crtc) {
2456		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2457	} else {
2458		list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
2459				    head) {
2460			struct drm_plane *plane = crtc->primary;
2461
2462			if (plane->state->fb == &framebuffer->base)
2463				units[num_units++] = vmw_crtc_to_du(crtc);
2464		}
2465	}
2466
2467	for (k = 0; k < num_units; k++) {
2468		struct vmw_display_unit *unit = units[k];
2469		s32 crtc_x = unit->crtc.x;
2470		s32 crtc_y = unit->crtc.y;
2471		s32 crtc_width = unit->crtc.mode.hdisplay;
2472		s32 crtc_height = unit->crtc.mode.vdisplay;
2473		const struct drm_clip_rect *clips_ptr = clips;
2474		const struct drm_vmw_rect *vclips_ptr = vclips;
2475
2476		dirty->unit = unit;
2477		if (dirty->fifo_reserve_size > 0) {
2478			dirty->cmd = VMW_FIFO_RESERVE(dev_priv,
2479						      dirty->fifo_reserve_size);
2480			if (!dirty->cmd)
2481				return -ENOMEM;
2482
2483			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2484		}
2485		dirty->num_hits = 0;
2486		for (i = 0; i < num_clips; i++, clips_ptr += increment,
2487		       vclips_ptr += increment) {
2488			s32 clip_left;
2489			s32 clip_top;
2490
2491			/*
2492			 * Select clip array type. Note that integer type
2493			 * in @clips is unsigned short, whereas in @vclips
2494			 * it's 32-bit.
2495			 */
2496			if (clips) {
2497				dirty->fb_x = (s32) clips_ptr->x1;
2498				dirty->fb_y = (s32) clips_ptr->y1;
2499				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2500					crtc_x;
2501				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2502					crtc_y;
2503			} else {
2504				dirty->fb_x = vclips_ptr->x;
2505				dirty->fb_y = vclips_ptr->y;
2506				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2507					dest_x - crtc_x;
2508				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2509					dest_y - crtc_y;
2510			}
2511
2512			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2513			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2514
2515			/* Skip this clip if it's outside the crtc region */
2516			if (dirty->unit_x1 >= crtc_width ||
2517			    dirty->unit_y1 >= crtc_height ||
2518			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2519				continue;
2520
2521			/* Clip right and bottom to crtc limits */
2522			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2523					       crtc_width);
2524			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2525					       crtc_height);
2526
2527			/* Clip left and top to crtc limits */
2528			clip_left = min_t(s32, dirty->unit_x1, 0);
2529			clip_top = min_t(s32, dirty->unit_y1, 0);
2530			dirty->unit_x1 -= clip_left;
2531			dirty->unit_y1 -= clip_top;
2532			dirty->fb_x -= clip_left;
2533			dirty->fb_y -= clip_top;
2534
2535			dirty->clip(dirty);
2536		}
2537
2538		dirty->fifo_commit(dirty);
2539	}
2540
2541	return 0;
2542}
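/*
 * An illustrative use of the helper above (a sketch only; my_clip,
 * my_commit and struct my_cmd are hypothetical, not part of this
 * driver):
 *
 *	struct vmw_kms_dirty dirty = {
 *		.clip = my_clip,
 *		.fifo_commit = my_commit,
 *		.fifo_reserve_size = sizeof(struct my_cmd) * num_clips,
 *	};
 *
 *	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
 *				   0, 0, num_clips, 1, &dirty);
 */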
2543
2544/**
2545 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
2546 * cleanup and fencing
2547 * @dev_priv: Pointer to the device-private struct
2548 * @file_priv: Pointer identifying the client when user-space fencing is used
2549 * @ctx: Pointer to the validation context
2550 * @out_fence: If non-NULL, returned refcounted fence-pointer
2551 * @user_fence_rep: If non-NULL, pointer to user-space address area
2552 * in which to copy user-space fence info
2553 */
2554void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2555				      struct drm_file *file_priv,
2556				      struct vmw_validation_context *ctx,
2557				      struct vmw_fence_obj **out_fence,
2558				      struct drm_vmw_fence_rep __user *
2559				      user_fence_rep)
2560{
2561	struct vmw_fence_obj *fence = NULL;
2562	uint32_t handle = 0;
2563	int ret = 0;
2564
2565	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2566	    out_fence)
2567		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2568						 file_priv ? &handle : NULL);
2569	vmw_validation_done(ctx, fence);
2570	if (file_priv)
2571		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2572					    ret, user_fence_rep, fence,
2573					    handle, -1, NULL);
2574	if (out_fence)
2575		*out_fence = fence;
2576	else
2577		vmw_fence_obj_unreference(&fence);
2578}
2579
2580/**
2581 * vmw_kms_update_proxy - Helper function to update a proxy surface from
2582 * its backing MOB.
2583 *
2584 * @res: Pointer to the surface resource
2585 * @clips: Clip rects in framebuffer (surface) space.
2586 * @num_clips: Number of clips in @clips.
2587 * @increment: Integer with which to increment the clip counter when looping.
2588 * Used to skip a predetermined number of clip rects.
2589 *
2590 * This function makes sure the proxy surface is updated from its backing MOB
2591 * using the region given by @clips. The surface resource @res and its backing
2592 * MOB need to be reserved and validated on call.
2593 */
2594int vmw_kms_update_proxy(struct vmw_resource *res,
2595			 const struct drm_clip_rect *clips,
2596			 unsigned num_clips,
2597			 int increment)
2598{
2599	struct vmw_private *dev_priv = res->dev_priv;
2600	struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
2601	struct {
2602		SVGA3dCmdHeader header;
2603		SVGA3dCmdUpdateGBImage body;
2604	} *cmd;
2605	SVGA3dBox *box;
2606	size_t copy_size = 0;
2607	int i;
2608
2609	if (!clips)
2610		return 0;
2611
2612	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
2613	if (!cmd)
2614		return -ENOMEM;
2615
2616	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
2617		box = &cmd->body.box;
2618
2619		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2620		cmd->header.size = sizeof(cmd->body);
2621		cmd->body.image.sid = res->id;
2622		cmd->body.image.face = 0;
2623		cmd->body.image.mipmap = 0;
2624
2625		if (clips->x1 > size->width || clips->x2 > size->width ||
2626		    clips->y1 > size->height || clips->y2 > size->height) {
2627			DRM_ERROR("Invalid clips outside of framebuffer.\n");
2628			return -EINVAL;
2629		}
2630
2631		box->x = clips->x1;
2632		box->y = clips->y1;
2633		box->z = 0;
2634		box->w = clips->x2 - clips->x1;
2635		box->h = clips->y2 - clips->y1;
2636		box->d = 1;
2637
2638		copy_size += sizeof(*cmd);
2639	}
2640
2641	vmw_fifo_commit(dev_priv, copy_size);
2642
2643	return 0;
2644}
2645
2646int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
2647			    unsigned unit,
2648			    u32 max_width,
2649			    u32 max_height,
2650			    struct drm_connector **p_con,
2651			    struct drm_crtc **p_crtc,
2652			    struct drm_display_mode **p_mode)
2653{
2654	struct drm_connector *con;
2655	struct vmw_display_unit *du;
2656	struct drm_display_mode *mode;
2657	int i = 0;
2658	int ret = 0;
2659
2660	mutex_lock(&dev_priv->dev->mode_config.mutex);
2661	list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
2662			    head) {
2663		if (i == unit)
2664			break;
2665
2666		++i;
2667	}
2668
2669	if (i != unit) {
2670		DRM_ERROR("Could not find initial display unit.\n");
2671		ret = -EINVAL;
2672		goto out_unlock;
2673	}
2674
2675	if (list_empty(&con->modes))
2676		(void) vmw_du_connector_fill_modes(con, max_width, max_height);
2677
2678	if (list_empty(&con->modes)) {
2679		DRM_ERROR("Could not find initial display mode.\n");
2680		ret = -EINVAL;
2681		goto out_unlock;
2682	}
2683
2684	du = vmw_connector_to_du(con);
2685	*p_con = con;
2686	*p_crtc = &du->crtc;
2687
2688	list_for_each_entry(mode, &con->modes, head) {
2689		if (mode->type & DRM_MODE_TYPE_PREFERRED)
2690			break;
2691	}
2692
2693	if (mode->type & DRM_MODE_TYPE_PREFERRED) {
2694		*p_mode = mode;
2695	} else {
2696		WARN_ONCE(true, "Could not find initial preferred mode.\n");
2697		*p_mode = list_first_entry(&con->modes,
2698					   struct drm_display_mode,
2699					   head);
2700	}
2701
2702 out_unlock:
2703	mutex_unlock(&dev_priv->dev->mode_config.mutex);
2704
2705	return ret;
2706}
2707
2708/**
2709 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2710 * property.
2711 *
2712 * @dev_priv: Pointer to a device private struct.
2713 *
2714 * Sets up the implicit placement property unless it's already set up.
2715 */
2716void
2717vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
2718{
2719	if (dev_priv->implicit_placement_property)
2720		return;
2721
2722	dev_priv->implicit_placement_property =
2723		drm_property_create_range(dev_priv->dev,
2724					  DRM_MODE_PROP_IMMUTABLE,
2725					  "implicit_placement", 0, 1);
2726}
2727
2728/**
2729 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2730 *
2731 * @dev: Pointer to the drm device
2732 * Return: 0 on success. Negative error code on failure.
2733 */
2734int vmw_kms_suspend(struct drm_device *dev)
2735{
2736	struct vmw_private *dev_priv = vmw_priv(dev);
2737
2738	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2739	if (IS_ERR(dev_priv->suspend_state)) {
2740		int ret = PTR_ERR(dev_priv->suspend_state);
2741
2742		DRM_ERROR("Failed kms suspend: %d\n", ret);
2743		dev_priv->suspend_state = NULL;
2744
2745		return ret;
2746	}
2747
2748	return 0;
2749}
2750
2751
2752/**
2753 * vmw_kms_resume - Re-enable modesetting and restore state
2754 *
2755 * @dev: Pointer to the drm device
2756 * Return: 0 on success. Negative error code on failure.
2757 *
2758 * State is resumed from a previous vmw_kms_suspend(). It's illegal
2759 * to call this function without a previous vmw_kms_suspend().
2760 */
2761int vmw_kms_resume(struct drm_device *dev)
2762{
2763	struct vmw_private *dev_priv = vmw_priv(dev);
2764	int ret;
2765
2766	if (WARN_ON(!dev_priv->suspend_state))
2767		return 0;
2768
2769	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
2770	dev_priv->suspend_state = NULL;
2771
2772	return ret;
2773}
2774
2775/**
2776 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
2777 *
2778 * @dev: Pointer to the drm device
2779 */
2780void vmw_kms_lost_device(struct drm_device *dev)
2781{
2782	drm_atomic_helper_shutdown(dev);
2783}
2784
2785/**
2786 * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
2787 * @update: The closure structure.
2788 *
2789 * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
2790 * update on display unit.
2791 *
2792 * Return: 0 on success or a negative error code on failure.
2793 */
2794int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
2795{
2796	struct drm_plane_state *state = update->plane->state;
2797	struct drm_plane_state *old_state = update->old_state;
2798	struct drm_atomic_helper_damage_iter iter;
2799	struct drm_rect clip;
2800	struct drm_rect bb;
2801	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
2802	uint32_t reserved_size = 0;
2803	uint32_t submit_size = 0;
2804	uint32_t curr_size = 0;
2805	uint32_t num_hits = 0;
2806	void *cmd_start;
2807	char *cmd_next;
2808	int ret;
2809
2810	/*
2811	 * Iterate in advance to check whether we really need a plane update, and
2812	 * to count the clips that fall within the plane src for fifo allocation.
2813	 */
2814	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2815	drm_atomic_for_each_plane_damage(&iter, &clip)
2816		num_hits++;
2817
2818	if (num_hits == 0)
2819		return 0;
2820
2821	if (update->vfb->bo) {
2822		struct vmw_framebuffer_bo *vfbbo =
2823			container_of(update->vfb, typeof(*vfbbo), base);
2824
2825		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
2826					    update->cpu_blit);
2827	} else {
2828		struct vmw_framebuffer_surface *vfbs =
2829			container_of(update->vfb, typeof(*vfbs), base);
2830
2831		ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
2832						  0, VMW_RES_DIRTY_NONE, NULL,
2833						  NULL);
2834	}
2835
2836	if (ret)
2837		return ret;
2838
2839	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
2840	if (ret)
2841		goto out_unref;
2842
2843	reserved_size = update->calc_fifo_size(update, num_hits);
2844	cmd_start = VMW_FIFO_RESERVE(update->dev_priv, reserved_size);
2845	if (!cmd_start) {
2846		ret = -ENOMEM;
2847		goto out_revert;
2848	}
2849
2850	cmd_next = cmd_start;
2851
2852	if (update->post_prepare) {
2853		curr_size = update->post_prepare(update, cmd_next);
2854		cmd_next += curr_size;
2855		submit_size += curr_size;
2856	}
2857
2858	if (update->pre_clip) {
2859		curr_size = update->pre_clip(update, cmd_next, num_hits);
2860		cmd_next += curr_size;
2861		submit_size += curr_size;
2862	}
2863
2864	bb.x1 = INT_MAX;
2865	bb.y1 = INT_MAX;
2866	bb.x2 = INT_MIN;
2867	bb.y2 = INT_MIN;
2868
2869	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2870	drm_atomic_for_each_plane_damage(&iter, &clip) {
2871		uint32_t fb_x = clip.x1;
2872		uint32_t fb_y = clip.y1;
2873
2874		vmw_du_translate_to_crtc(state, &clip);
2875		if (update->clip) {
2876			curr_size = update->clip(update, cmd_next, &clip, fb_x,
2877						 fb_y);
2878			cmd_next += curr_size;
2879			submit_size += curr_size;
2880		}
2881		bb.x1 = min_t(int, bb.x1, clip.x1);
2882		bb.y1 = min_t(int, bb.y1, clip.y1);
2883		bb.x2 = max_t(int, bb.x2, clip.x2);
2884		bb.y2 = max_t(int, bb.y2, clip.y2);
2885	}
2886
2887	curr_size = update->post_clip(update, cmd_next, &bb);
2888	submit_size += curr_size;
2889
2890	if (reserved_size < submit_size)
2891		submit_size = 0;
2892
2893	vmw_fifo_commit(update->dev_priv, submit_size);
2894
2895	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
2896					 update->out_fence, NULL);
2897	return ret;
2898
2899out_revert:
2900	vmw_validation_revert(&val_ctx);
2901
2902out_unref:
2903	vmw_validation_unref_lists(&val_ctx);
2904	return ret;
2905}
v6.8
  27#include "vmwgfx_kms.h"
  28
  29#include "vmwgfx_bo.h"
  30#include "vmw_surface_cache.h"
  31
  32#include <drm/drm_atomic.h>
  33#include <drm/drm_atomic_helper.h>
  34#include <drm/drm_damage_helper.h>
  35#include <drm/drm_fourcc.h>
  36#include <drm/drm_rect.h>
  37#include <drm/drm_sysfs.h>
  38
  39void vmw_du_cleanup(struct vmw_display_unit *du)
  40{
  41	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
  42	drm_plane_cleanup(&du->primary);
  43	if (vmw_cmd_supported(dev_priv))
  44		drm_plane_cleanup(&du->cursor.base);
  45
  46	drm_connector_unregister(&du->connector);
  47	drm_crtc_cleanup(&du->crtc);
  48	drm_encoder_cleanup(&du->encoder);
  49	drm_connector_cleanup(&du->connector);
  50}
  51
  52/*
  53 * Display Unit Cursor functions
  54 */
  55
  56static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
  57static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
  58				  struct vmw_plane_state *vps,
  59				  u32 *image, u32 width, u32 height,
  60				  u32 hotspotX, u32 hotspotY);
  61
  62struct vmw_svga_fifo_cmd_define_cursor {
  63	u32 cmd;
  64	SVGAFifoCmdDefineAlphaCursor cursor;
  65};
  66
  67/**
  68 * vmw_send_define_cursor_cmd - queue a define cursor command
  69 * @dev_priv: the private driver struct
  70 * @image: buffer which holds the cursor image
  71 * @width: width of the mouse cursor image
  72 * @height: height of the mouse cursor image
  73 * @hotspotX: the horizontal position of mouse hotspot
  74 * @hotspotY: the vertical position of mouse hotspot
  75 */
  76static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
  77				       u32 *image, u32 width, u32 height,
  78				       u32 hotspotX, u32 hotspotY)
  79{
  80	struct vmw_svga_fifo_cmd_define_cursor *cmd;
  81	const u32 image_size = width * height * sizeof(*image);
  82	const u32 cmd_size = sizeof(*cmd) + image_size;
  83
  84	/* Try to reserve fifocmd space and swallow any failures;
  85	   such reservations cannot be left unconsumed for long at
  86	   the risk of clogging other fifocmd users, so we treat
  87	   reservations separately from the way we treat other
  88	   fallible KMS-atomic resources at prepare_fb */
  89	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
  90
  91	if (unlikely(!cmd))
  92		return;
  93
  94	memset(cmd, 0, sizeof(*cmd));
  95
  96	memcpy(&cmd[1], image, image_size);
  97
  98	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
  99	cmd->cursor.id = 0;
 100	cmd->cursor.width = width;
 101	cmd->cursor.height = height;
 102	cmd->cursor.hotspotX = hotspotX;
 103	cmd->cursor.hotspotY = hotspotY;
 104
 105	vmw_cmd_commit_flush(dev_priv, cmd_size);
 106}
 107
 108/**
 109 * vmw_cursor_update_image - update the cursor image on the provided plane
 110 * @dev_priv: the private driver struct
 111 * @vps: the plane state of the cursor plane
 112 * @image: buffer which holds the cursor image
 113 * @width: width of the mouse cursor image
 114 * @height: height of the mouse cursor image
 115 * @hotspotX: the horizontal position of mouse hotspot
 116 * @hotspotY: the vertical position of mouse hotspot
 117 */
 118static void vmw_cursor_update_image(struct vmw_private *dev_priv,
 119				    struct vmw_plane_state *vps,
 120				    u32 *image, u32 width, u32 height,
 121				    u32 hotspotX, u32 hotspotY)
 122{
 123	if (vps->cursor.bo)
 124		vmw_cursor_update_mob(dev_priv, vps, image,
 125				      vps->base.crtc_w, vps->base.crtc_h,
 126				      hotspotX, hotspotY);
 127
 128	else
 129		vmw_send_define_cursor_cmd(dev_priv, image, width, height,
 130					   hotspotX, hotspotY);
 131}
 132
 133
 134/**
 135 * vmw_cursor_update_mob - Update cursor via the CursorMob mechanism
 136 *
 137 * Called from inside vmw_du_cursor_plane_atomic_update to actually
 138 * make the cursor-image live.
 139 *
 140 * @dev_priv: device to work with
 141 * @vps: the plane state of the cursor plane
 142 * @image: cursor source data to fill the MOB with
 143 * @width: source data width
 144 * @height: source data height
 145 * @hotspotX: cursor hotspot X
 146 * @hotspotY: cursor hotspot Y
 147 */
 148static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
 149				  struct vmw_plane_state *vps,
 150				  u32 *image, u32 width, u32 height,
 151				  u32 hotspotX, u32 hotspotY)
 152{
 153	SVGAGBCursorHeader *header;
 154	SVGAGBAlphaCursorHeader *alpha_header;
 155	const u32 image_size = width * height * sizeof(*image);
 156
 157	header = vmw_bo_map_and_cache(vps->cursor.bo);
 158	alpha_header = &header->header.alphaHeader;
 159
 160	memset(header, 0, sizeof(*header));
 161
 162	header->type = SVGA_ALPHA_CURSOR;
 163	header->sizeInBytes = image_size;
 164
 165	alpha_header->hotspotX = hotspotX;
 166	alpha_header->hotspotY = hotspotY;
 167	alpha_header->width = width;
 168	alpha_header->height = height;
 169
 170	memcpy(header + 1, image, image_size);
 171	vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
 172		  vps->cursor.bo->tbo.resource->start);
 173}
 174
 175
 176static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
 177{
 178	return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
 179}
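/*
 * For example (our arithmetic): a 64x64 cursor needs
 * 64 * 64 * 4 = 16384 bytes of image data plus the
 * SVGAGBCursorHeader.
 */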
 180
 181/**
 182 * vmw_du_cursor_plane_acquire_image -- Acquire the image data
 183 * @vps: cursor plane state
 184 */
 185static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
 186{
 187	bool is_iomem;
 188	if (vps->surf) {
 189		if (vps->surf_mapped)
 190			return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
 191		return vps->surf->snooper.image;
 192	} else if (vps->bo)
 193		return ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem);
 194	return NULL;
 195}
 196
 197static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
 198					    struct vmw_plane_state *new_vps)
 199{
 200	void *old_image;
 201	void *new_image;
 202	u32 size;
 203	bool changed;
 204
 205	if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
 206	    old_vps->base.crtc_h != new_vps->base.crtc_h)
 207		return true;
 208
 209	if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
 210	    old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
 211		return true;
 212
 213	size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
 214
 215	old_image = vmw_du_cursor_plane_acquire_image(old_vps);
 216	new_image = vmw_du_cursor_plane_acquire_image(new_vps);
 217
 218	changed = false;
 219	if (old_image && new_image)
 220		changed = memcmp(old_image, new_image, size) != 0;
 221
 222	return changed;
 223}
 224
 225static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
 226{
 227	if (!(*vbo))
 228		return;
 229
 230	ttm_bo_unpin(&(*vbo)->tbo);
 231	vmw_bo_unreference(vbo);
 232}
 233
 234static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
 235				  struct vmw_plane_state *vps)
 236{
 237	u32 i;
 238
 239	if (!vps->cursor.bo)
 240		return;
 241
 242	vmw_du_cursor_plane_unmap_cm(vps);
 243
 244	/* Look for a free slot to return this mob to the cache. */
 245	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
 246		if (!vcp->cursor_mobs[i]) {
 247			vcp->cursor_mobs[i] = vps->cursor.bo;
 248			vps->cursor.bo = NULL;
 249			return;
 250		}
 251	}
 252
 253	/* Cache is full: See if this mob is bigger than an existing mob. */
 254	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
 255		if (vcp->cursor_mobs[i]->tbo.base.size <
 256		    vps->cursor.bo->tbo.base.size) {
 257			vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
 258			vcp->cursor_mobs[i] = vps->cursor.bo;
 259			vps->cursor.bo = NULL;
 260			return;
 261		}
 262	}
 263
 264	/* Destroy it if it's not worth caching. */
 265	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
 266}
 267
 268static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
 269				 struct vmw_plane_state *vps)
 270{
 271	struct vmw_private *dev_priv = vcp->base.dev->dev_private;
 272	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
 273	u32 i;
 274	u32 cursor_max_dim, mob_max_size;
 275	int ret;
 276
 277	if (!dev_priv->has_mob ||
 278	    (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
 279		return -EINVAL;
 280
 281	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
 282	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
 283
 284	if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
 285	    vps->base.crtc_h > cursor_max_dim)
 286		return -EINVAL;
 287
 288	if (vps->cursor.bo) {
 289		if (vps->cursor.bo->tbo.base.size >= size)
 290			return 0;
 291		vmw_du_put_cursor_mob(vcp, vps);
 292	}
 293
 294	/* Look for an unused mob in the cache. */
 295	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
 296		if (vcp->cursor_mobs[i] &&
 297		    vcp->cursor_mobs[i]->tbo.base.size >= size) {
 298			vps->cursor.bo = vcp->cursor_mobs[i];
 299			vcp->cursor_mobs[i] = NULL;
 300			return 0;
 301		}
 302	}
 303	/* Create a new mob if we can't find an existing one. */
 304	ret = vmw_bo_create_and_populate(dev_priv, size,
 305					 VMW_BO_DOMAIN_MOB,
 306					 &vps->cursor.bo);
 307
 308	if (ret != 0)
 309		return ret;
 310
 311	/* Fence the mob creation so we are guaranteed to have the mob */
 312	ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
 313	if (ret != 0)
 314		goto teardown;
 315
 316	vmw_bo_fence_single(&vps->cursor.bo->tbo, NULL);
 317	ttm_bo_unreserve(&vps->cursor.bo->tbo);
 318	return 0;
 319
 320teardown:
 321	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
 322	return ret;
 323}
 324
 325
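/*
 * vmw_cursor_update_position - Move and show/hide the device cursor.
 *
 * Uses the newest mechanism the device supports: the CURSOR4 register
 * set when SVGA_CAP2_EXTRA_REGS is present, the fifo cursor-bypass-3
 * registers when enabled, and the legacy SVGA_REG_CURSOR* registers
 * otherwise.
 */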
 326static void vmw_cursor_update_position(struct vmw_private *dev_priv,
 327				       bool show, int x, int y)
 328{
 329	const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
 330					     : SVGA_CURSOR_ON_HIDE;
 331	uint32_t count;
 332
 333	spin_lock(&dev_priv->cursor_lock);
 334	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
 335		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
 336		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
 337		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
 338		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
 339		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
 340	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
 341		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
 342		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
 343		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
 344		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
 345		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
 346	} else {
 347		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
 348		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
 349		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
 350	}
 351	spin_unlock(&dev_priv->cursor_lock);
 352}
 353
 354void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 355			  struct ttm_object_file *tfile,
 356			  struct ttm_buffer_object *bo,
 357			  SVGA3dCmdHeader *header)
 358{
 359	struct ttm_bo_kmap_obj map;
 360	unsigned long kmap_offset;
 361	unsigned long kmap_num;
 362	SVGA3dCopyBox *box;
 363	unsigned box_count;
 364	void *virtual;
 365	bool is_iomem;
 366	struct vmw_dma_cmd {
 367		SVGA3dCmdHeader header;
 368		SVGA3dCmdSurfaceDMA dma;
 369	} *cmd;
 370	int i, ret;
 371	const struct SVGA3dSurfaceDesc *desc =
 372		vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
 373	const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
 374
 375	cmd = container_of(header, struct vmw_dma_cmd, header);
 376
 377	/* No snooper installed, nothing to copy */
 378	if (!srf->snooper.image)
 379		return;
 380
 381	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
 382		DRM_ERROR("face and mipmap for cursors should never be != 0\n");
 383		return;
 384	}
 385
 386	if (cmd->header.size < 64) {
 387		DRM_ERROR("at least one full copy box must be given\n");
 388		return;
 389	}
 390
 391	box = (SVGA3dCopyBox *)&cmd[1];
 392	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
 393			sizeof(SVGA3dCopyBox);
 394
 395	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
 396	    box->x != 0    || box->y != 0    || box->z != 0    ||
 397	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
 398	    box->d != 1    || box_count != 1 ||
 399	    box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
 400		/* TODO handle non-page-aligned offsets */
 401		/* TODO handle more dst & src != 0 */
 402		/* TODO handle more than one copy */
 403		DRM_ERROR("Can't snoop dma request for cursor!\n");
 404		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
 405			  box->srcx, box->srcy, box->srcz,
 406			  box->x, box->y, box->z,
 407			  box->w, box->h, box->d, box_count,
 408			  cmd->dma.guest.ptr.offset);
 409		return;
 410	}
 411
 412	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
 413	kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
 414
 415	ret = ttm_bo_reserve(bo, true, false, NULL);
 416	if (unlikely(ret != 0)) {
 417		DRM_ERROR("reserve failed\n");
 418		return;
 419	}
 420
 421	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
 422	if (unlikely(ret != 0))
 423		goto err_unreserve;
 424
 425	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 426
 427	if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
 428		memcpy(srf->snooper.image, virtual,
 429		       VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
 430	} else {
 431		/* Image is unsigned pointer. */
 432		for (i = 0; i < box->h; i++)
 433			memcpy(srf->snooper.image + i * image_pitch,
 434			       virtual + i * cmd->dma.guest.pitch,
 435			       box->w * desc->pitchBytesPerBlock);
 436	}
 437
 438	srf->snooper.age++;
 439
 440	ttm_bo_kunmap(&map);
 441err_unreserve:
 442	ttm_bo_unreserve(bo);
 443}
 444
 445/**
 446 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 447 *
 448 * @dev_priv: Pointer to the device private struct.
 449 *
 450 * Clears all legacy hotspots.
 451 */
 452void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
 453{
 454	struct drm_device *dev = &dev_priv->drm;
 455	struct vmw_display_unit *du;
 456	struct drm_crtc *crtc;
 457
 458	drm_modeset_lock_all(dev);
 459	drm_for_each_crtc(crtc, dev) {
 460		du = vmw_crtc_to_du(crtc);
 461
 462		du->hotspot_x = 0;
 463		du->hotspot_y = 0;
 464	}
 465	drm_modeset_unlock_all(dev);
 466}
 467
 468void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
 469{
 470	struct drm_device *dev = &dev_priv->drm;
 471	struct vmw_display_unit *du;
 472	struct drm_crtc *crtc;
 473
 474	mutex_lock(&dev->mode_config.mutex);
 475
 476	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 477		du = vmw_crtc_to_du(crtc);
 478		if (!du->cursor_surface ||
 479		    du->cursor_age == du->cursor_surface->snooper.age ||
 480		    !du->cursor_surface->snooper.image)
 481			continue;
 482
 483		du->cursor_age = du->cursor_surface->snooper.age;
 484		vmw_send_define_cursor_cmd(dev_priv,
 485					   du->cursor_surface->snooper.image,
 486					   VMW_CURSOR_SNOOP_WIDTH,
 487					   VMW_CURSOR_SNOOP_HEIGHT,
 488					   du->hotspot_x + du->core_hotspot_x,
 489					   du->hotspot_y + du->core_hotspot_y);
 490	}
 491
 492	mutex_unlock(&dev->mode_config.mutex);
 493}
 494
 495
 496void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
 497{
 498	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
 499	u32 i;
 500
 501	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
 502
 503	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
 504		vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
 505
 506	drm_plane_cleanup(plane);
 507}
 508
 509
 510void vmw_du_primary_plane_destroy(struct drm_plane *plane)
 511{
 512	drm_plane_cleanup(plane);
 513
 514	/* Planes are static in our case so we don't free it */
 515}
 516
 517
 518/**
 519 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
 520 *
 521 * @vps: plane state associated with the display surface
 522 * @unreference: true if we also want to unreference the display surface.
 523 */
 524void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
 525			     bool unreference)
 526{
 527	if (vps->surf) {
 528		if (vps->pinned) {
 529			vmw_resource_unpin(&vps->surf->res);
 530			vps->pinned--;
 531		}
 532
 533		if (unreference) {
 534			if (vps->pinned)
 535				DRM_ERROR("Surface still pinned\n");
 536			vmw_surface_unreference(&vps->surf);
 537		}
 538	}
 539}
 540
 541
 542/**
 543 * vmw_du_plane_cleanup_fb - Unpins the plane surface
 544 *
 545 * @plane:  display plane
 546 * @old_state: Contains the FB to clean up
 547 *
 548 * Unpins the framebuffer surface
 551 */
 552void
 553vmw_du_plane_cleanup_fb(struct drm_plane *plane,
 554			struct drm_plane_state *old_state)
 555{
 556	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
 557
 558	vmw_du_plane_unpin_surf(vps, false);
 559}
 560
 561
 562/**
 563 * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
 564 *
 565 * @vps: plane_state
 566 *
 567 * Returns 0 on success
 568 */
 569
 570static int
 571vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
 572{
 573	int ret;
 574	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
 575	struct ttm_buffer_object *bo;
 576
 577	if (!vps->cursor.bo)
 578		return -EINVAL;
 579
 580	bo = &vps->cursor.bo->tbo;
 581
 582	if (bo->base.size < size)
 583		return -EINVAL;
 584
 585	if (vps->cursor.bo->map.virtual)
 586		return 0;
 587
 588	ret = ttm_bo_reserve(bo, false, false, NULL);
 589	if (unlikely(ret != 0))
 590		return -ENOMEM;
 591
 592	vmw_bo_map_and_cache(vps->cursor.bo);
 593
 594	ttm_bo_unreserve(bo);
 595
 596	if (unlikely(ret != 0))
 597		return -ENOMEM;
 598
 599	return 0;
 600}
 601
 602
 603/**
 604 * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
 605 *
 606 * @vps: state of the cursor plane
 607 *
 608 * Returns 0 on success
 609 */
 610
 611static int
 612vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
 613{
 614	int ret = 0;
 615	struct vmw_bo *vbo = vps->cursor.bo;
 616
 617	if (!vbo || !vbo->map.virtual)
 618		return 0;
 619
 620	ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
 621	if (likely(ret == 0)) {
 622		vmw_bo_unmap(vbo);
 623		ttm_bo_unreserve(&vbo->tbo);
 624	}
 625
 626	return ret;
 627}
 628
 629
 630/**
 631 * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
 632 *
 633 * @plane: cursor plane
 634 * @old_state: contains the state to clean up
 635 *
 636 * Unmaps all cursor bo mappings and unpins the cursor surface
 639 */
 640void
 641vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
 642			       struct drm_plane_state *old_state)
 643{
 644	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
 645	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
 646	bool is_iomem;
 647
 648	if (vps->surf_mapped) {
 649		vmw_bo_unmap(vps->surf->res.guest_memory_bo);
 650		vps->surf_mapped = false;
 651	}
 652
 653	if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem)) {
 654		const int ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
 655
 656		if (likely(ret == 0)) {
 657			ttm_bo_kunmap(&vps->bo->map);
 658			ttm_bo_unreserve(&vps->bo->tbo);
 659		}
 660	}
 661
 662	vmw_du_cursor_plane_unmap_cm(vps);
 663	vmw_du_put_cursor_mob(vcp, vps);
 664
 665	vmw_du_plane_unpin_surf(vps, false);
 666
 667	if (vps->surf) {
 668		vmw_surface_unreference(&vps->surf);
 669		vps->surf = NULL;
 670	}
 671
 672	if (vps->bo) {
 673		vmw_bo_unreference(&vps->bo);
 674		vps->bo = NULL;
 675	}
 676}
 677
 678
 679/**
 680 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 681 *
 682 * @plane:  display plane
 683 * @new_state: info on the new plane state, including the FB
 684 *
 685 * Returns 0 on success
 686 */
 687int
 688vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
 689			       struct drm_plane_state *new_state)
 690{
 691	struct drm_framebuffer *fb = new_state->fb;
 692	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
 693	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
 694	int ret = 0;
 695
 696	if (vps->surf) {
 697		vmw_surface_unreference(&vps->surf);
 698		vps->surf = NULL;
 699	}
 700
 701	if (vps->bo) {
 702		vmw_bo_unreference(&vps->bo);
 703		vps->bo = NULL;
 704	}
 705
 706	if (fb) {
 707		if (vmw_framebuffer_to_vfb(fb)->bo) {
 708			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
 709			vmw_bo_reference(vps->bo);
 710		} else {
 711			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
 712			vmw_surface_reference(vps->surf);
 713		}
 714	}
 715
 716	if (!vps->surf && vps->bo) {
 717		const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
 718
 719		/*
 720		 * Not using vmw_bo_map_and_cache() helper here as we need to
 721		 * reserve the ttm_buffer_object first which
 722		 * vmw_bo_map_and_cache() omits.
 723		 */
 724		ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
 725
 726		if (unlikely(ret != 0))
 727			return -ENOMEM;
 728
 729		ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);
 730
 731		ttm_bo_unreserve(&vps->bo->tbo);
 732
 733		if (unlikely(ret != 0))
 734			return -ENOMEM;
 735	} else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {
 736
 737		WARN_ON(vps->surf->snooper.image);
 738		ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
 739				     NULL);
 740		if (unlikely(ret != 0))
 741			return -ENOMEM;
 742		vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
 743		ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
 744		vps->surf_mapped = true;
 745	}
 746
 747	if (vps->surf || vps->bo) {
 748		vmw_du_get_cursor_mob(vcp, vps);
 749		vmw_du_cursor_plane_map_cm(vps);
 750	}
 751
 752	return 0;
 753}
 754
 755
 756void
 757vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
 758				  struct drm_atomic_state *state)
 759{
 760	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 761									   plane);
 762	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
 763									   plane);
 764	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
 765	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
 766	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 767	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
 768	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
 769	s32 hotspot_x, hotspot_y;
 770
 771	hotspot_x = du->hotspot_x + new_state->hotspot_x;
 772	hotspot_y = du->hotspot_y + new_state->hotspot_y;
 773
 774	du->cursor_surface = vps->surf;
 775	du->cursor_bo = vps->bo;
 776
 777	if (!vps->surf && !vps->bo) {
 778		vmw_cursor_update_position(dev_priv, false, 0, 0);
 779		return;
 780	}
 781
 782	vps->cursor.hotspot_x = hotspot_x;
 783	vps->cursor.hotspot_y = hotspot_y;
 784
 785	if (vps->surf) {
 786		du->cursor_age = du->cursor_surface->snooper.age;
 787	}
 788
 789	if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
 790		/*
 791		 * If it hasn't changed, avoid making the device do extra
 792		 * work by keeping the old cursor active.
 793		 */
 794		struct vmw_cursor_plane_state tmp = old_vps->cursor;
 795		old_vps->cursor = vps->cursor;
 796		vps->cursor = tmp;
 797	} else {
 798		void *image = vmw_du_cursor_plane_acquire_image(vps);
 799		if (image)
 800			vmw_cursor_update_image(dev_priv, vps, image,
 801						new_state->crtc_w,
 802						new_state->crtc_h,
 803						hotspot_x, hotspot_y);
 804	}
 805
 806	du->cursor_x = new_state->crtc_x + du->set_gui_x;
 807	du->cursor_y = new_state->crtc_y + du->set_gui_y;
 808
 809	vmw_cursor_update_position(dev_priv, true,
 810				   du->cursor_x + hotspot_x,
 811				   du->cursor_y + hotspot_y);
 812
 813	du->core_hotspot_x = hotspot_x - du->hotspot_x;
 814	du->core_hotspot_y = hotspot_y - du->hotspot_y;
 815}
 816
 817
 818/**
 819 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 820 *
 821 * @plane: display plane
 822 * @state: info on the new plane state, including the FB
 823 *
 824 * Check if the new state is settable given the current state.  Other
 825 * than what the atomic helper checks, we care about crtc fitting
 826 * the FB and maintaining one active framebuffer.
 827 *
 828 * Returns 0 on success
 829 */
 830int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
 831				      struct drm_atomic_state *state)
 832{
 833	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 834									   plane);
 835	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
 836									   plane);
 837	struct drm_crtc_state *crtc_state = NULL;
 838	struct drm_framebuffer *new_fb = new_state->fb;
 839	struct drm_framebuffer *old_fb = old_state->fb;
 840	int ret;
 841
 842	/*
 843	 * Ignore damage clips if the framebuffer attached to the plane's state
 844	 * has changed since the last plane update (page-flip). In this case, a
 845	 * full plane update should happen because uploads are done per-buffer.
 846	 */
 847	if (old_fb != new_fb)
 848		new_state->ignore_damage_clips = true;
 849
 850	if (new_state->crtc)
 851		crtc_state = drm_atomic_get_new_crtc_state(state,
 852							   new_state->crtc);
 853
 854	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
 855						  DRM_PLANE_NO_SCALING,
 856						  DRM_PLANE_NO_SCALING,
 857						  false, true);
 858
 859	if (!ret && new_fb) {
 860		struct drm_crtc *crtc = new_state->crtc;
 861		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 862
 863		vmw_connector_state_to_vcs(du->connector.state);
 864	}
 865
 866
 867	return ret;
 868}
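
/*
 * For context on the damage-clip handling above: the clips originate in
 * user space, typically via the DIRTYFB ioctl. A minimal illustrative
 * sketch using libdrm (fd and fb_id are assumed to be a valid DRM fd and
 * framebuffer id; the helper itself is hypothetical):
 *
 *	#include <stdint.h>
 *	#include <xf86drm.h>
 *	#include <xf86drmMode.h>
 *
 *	static int flush_damage(int fd, uint32_t fb_id)
 *	{
 *		drmModeClip clip = { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 };
 *
 *		return drmModeDirtyFB(fd, fb_id, &clip, 1);
 *	}
 *
 * When the framebuffer attached to the plane changes between commits, the
 * check above discards such clips and forces a full-plane upload instead.
 */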
 869
 870
 871/**
 872 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 873 *
 874 * @plane: cursor plane
 875 * @state: info on the new plane state
 876 *
 877 * This is a chance to fail if the new cursor state does not fit
 878 * our requirements.
 879 *
 880 * Returns 0 on success
 881 */
 882int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
 883				     struct drm_atomic_state *state)
 884{
 885	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 886									   plane);
 887	int ret = 0;
 888	struct drm_crtc_state *crtc_state = NULL;
 889	struct vmw_surface *surface = NULL;
 890	struct drm_framebuffer *fb = new_state->fb;
 891
 892	if (new_state->crtc)
 893		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
 894							   new_state->crtc);
 895
 896	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
 897						  DRM_PLANE_NO_SCALING,
 898						  DRM_PLANE_NO_SCALING,
 899						  true, true);
 900	if (ret)
 901		return ret;
 902
 903	/* Turning off */
 904	if (!fb)
 905		return 0;
 906
 907	/* A lot of the code assumes this */
 908	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
 909		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
 910			  new_state->crtc_w, new_state->crtc_h);
 911		return -EINVAL;
 912	}
 913
 914	if (!vmw_framebuffer_to_vfb(fb)->bo) {
 915		surface = vmw_framebuffer_to_vfbs(fb)->surface;
 916
 917		WARN_ON(!surface);
 918
 919		if (!surface ||
 920		    (!surface->snooper.image && !surface->res.guest_memory_bo)) {
 921			DRM_ERROR("surface not suitable for cursor\n");
 922			return -EINVAL;
 923		}
 924	}
 925
 926	return 0;
 927}
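
/*
 * The fixed 64x64 requirement enforced above is discoverable from user
 * space through the cursor size capabilities, which are expected to match
 * this check. A minimal sketch using libdrm (fd is assumed to be a valid
 * DRM fd; the helper itself is hypothetical):
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *	#include <xf86drm.h>
 *
 *	static void print_cursor_size(int fd)
 *	{
 *		uint64_t w = 0, h = 0;
 *
 *		drmGetCap(fd, DRM_CAP_CURSOR_WIDTH, &w);
 *		drmGetCap(fd, DRM_CAP_CURSOR_HEIGHT, &h);
 *		printf("cursor: %llu x %llu\n",
 *		       (unsigned long long)w, (unsigned long long)h);
 *	}
 */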
 928
 929
 930int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
 931			     struct drm_atomic_state *state)
 932{
 933	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
 934									 crtc);
 935	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
 936	int connector_mask = drm_connector_mask(&du->connector);
 937	bool has_primary = new_state->plane_mask &
 938			   drm_plane_mask(crtc->primary);
 939
 940	/* We always want to have an active plane with an active CRTC */
 941	if (has_primary != new_state->enable)
 942		return -EINVAL;
 943
 944
 945	if (new_state->connector_mask != connector_mask &&
 946	    new_state->connector_mask != 0) {
 947		DRM_ERROR("Invalid connectors configuration\n");
 948		return -EINVAL;
 949	}
 950
 951	/*
 952	 * Our virtual device does not have a dot clock, so use the logical
 953	 * clock value as the dot clock.
 954	 */
 955	if (new_state->mode.crtc_clock == 0)
 956		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;
 957
 958	return 0;
 959}
 960
 961
 962void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
 963			      struct drm_atomic_state *state)
 964{
 965}
 966
 967
 968void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
 969			      struct drm_atomic_state *state)
 970{
 971}
 972
 973
 974/**
 975 * vmw_du_crtc_duplicate_state - duplicate crtc state
 976 * @crtc: DRM crtc
 977 *
 978 * Allocates and returns a copy of the crtc state (both common and
 979 * vmw-specific) for the specified crtc.
 980 *
 981 * Returns: The newly allocated crtc state, or NULL on failure.
 982 */
 983struct drm_crtc_state *
 984vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
 985{
 986	struct drm_crtc_state *state;
 987	struct vmw_crtc_state *vcs;
 988
 989	if (WARN_ON(!crtc->state))
 990		return NULL;
 991
 992	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
 993
 994	if (!vcs)
 995		return NULL;
 996
 997	state = &vcs->base;
 998
 999	__drm_atomic_helper_crtc_duplicate_state(crtc, state);
1000
1001	return state;
1002}
1003
1004
1005/**
1006 * vmw_du_crtc_reset - creates a blank vmw crtc state
1007 * @crtc: DRM crtc
1008 *
1009 * Resets the atomic state for @crtc by freeing the state pointer (which
1010 * might be NULL, e.g. at driver load time) and allocating a new empty state
1011 * object.
1012 */
1013void vmw_du_crtc_reset(struct drm_crtc *crtc)
1014{
1015	struct vmw_crtc_state *vcs;
1016
1017
1018	if (crtc->state) {
1019		__drm_atomic_helper_crtc_destroy_state(crtc->state);
1020
1021		kfree(vmw_crtc_state_to_vcs(crtc->state));
1022	}
1023
1024	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1025
1026	if (!vcs) {
1027		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
1028		return;
1029	}
1030
1031	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
1032}
1033
1034
1035/**
1036 * vmw_du_crtc_destroy_state - destroy crtc state
1037 * @crtc: DRM crtc
1038 * @state: state object to destroy
1039 *
1040 * Destroys the crtc state (both common and vmw-specific) for the
1041 * specified crtc.
1042 */
1043void
1044vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
1045			  struct drm_crtc_state *state)
1046{
1047	drm_atomic_helper_crtc_destroy_state(crtc, state);
1048}
1049
1050
1051/**
1052 * vmw_du_plane_duplicate_state - duplicate plane state
1053 * @plane: drm plane
1054 *
1055 * Allocates and returns a copy of the plane state (both common and
1056 * vmw-specific) for the specified plane.
1057 *
1058 * Returns: The newly allocated plane state, or NULL on failure.
1059 */
1060struct drm_plane_state *
1061vmw_du_plane_duplicate_state(struct drm_plane *plane)
1062{
1063	struct drm_plane_state *state;
1064	struct vmw_plane_state *vps;
1065
1066	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
1067
1068	if (!vps)
1069		return NULL;
1070
1071	vps->pinned = 0;
1072	vps->cpp = 0;
1073
1074	memset(&vps->cursor, 0, sizeof(vps->cursor));
1075
1076	/* Each ref counted resource needs to be acquired again */
1077	if (vps->surf)
1078		(void) vmw_surface_reference(vps->surf);
1079
1080	if (vps->bo)
1081		(void) vmw_bo_reference(vps->bo);
1082
1083	state = &vps->base;
1084
1085	__drm_atomic_helper_plane_duplicate_state(plane, state);
1086
1087	return state;
1088}
1089
1090
1091/**
1092 * vmw_du_plane_reset - creates a blank vmw plane state
1093 * @plane: drm plane
1094 *
1095 * Resets the atomic state for @plane by freeing the state pointer (which might
1096 * be NULL, e.g. at driver load time) and allocating a new empty state object.
1097 */
1098void vmw_du_plane_reset(struct drm_plane *plane)
1099{
1100	struct vmw_plane_state *vps;
1101
1102	if (plane->state)
1103		vmw_du_plane_destroy_state(plane, plane->state);
1104
1105	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
1106
1107	if (!vps) {
1108		DRM_ERROR("Cannot allocate vmw_plane_state\n");
1109		return;
1110	}
1111
1112	__drm_atomic_helper_plane_reset(plane, &vps->base);
1113}
1114
1115
1116/**
1117 * vmw_du_plane_destroy_state - destroy plane state
1118 * @plane: DRM plane
1119 * @state: state object to destroy
1120 *
1121 * Destroys the plane state (both common and vmw-specific) for the
1122 * specified plane.
1123 */
1124void
1125vmw_du_plane_destroy_state(struct drm_plane *plane,
1126			   struct drm_plane_state *state)
1127{
1128	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
1129
1130	/* Should have been freed by cleanup_fb */
1131	if (vps->surf)
1132		vmw_surface_unreference(&vps->surf);
1133
1134	if (vps->bo)
1135		vmw_bo_unreference(&vps->bo);
1136
1137	drm_atomic_helper_plane_destroy_state(plane, state);
1138}
1139
1140
1141/**
1142 * vmw_du_connector_duplicate_state - duplicate connector state
1143 * @connector: DRM connector
1144 *
1145 * Allocates and returns a copy of the connector state (both common and
1146 * vmw-specific) for the specified connector.
1147 *
1148 * Returns: The newly allocated connector state, or NULL on failure.
1149 */
1150struct drm_connector_state *
1151vmw_du_connector_duplicate_state(struct drm_connector *connector)
1152{
1153	struct drm_connector_state *state;
1154	struct vmw_connector_state *vcs;
1155
1156	if (WARN_ON(!connector->state))
1157		return NULL;
1158
1159	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
1160
1161	if (!vcs)
1162		return NULL;
1163
1164	state = &vcs->base;
1165
1166	__drm_atomic_helper_connector_duplicate_state(connector, state);
1167
1168	return state;
1169}
1170
1171
1172/**
1173 * vmw_du_connector_reset - creates a blank vmw connector state
1174 * @connector: DRM connector
1175 *
1176 * Resets the atomic state for @connector by freeing the state pointer (which
1177 * might be NULL, e.g. at driver load time) and allocating a new empty state
1178 * object.
1179 */
1180void vmw_du_connector_reset(struct drm_connector *connector)
1181{
1182	struct vmw_connector_state *vcs;
1183
1184
1185	if (connector->state) {
1186		__drm_atomic_helper_connector_destroy_state(connector->state);
1187
1188		kfree(vmw_connector_state_to_vcs(connector->state));
1189	}
1190
1191	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1192
1193	if (!vcs) {
1194		DRM_ERROR("Cannot allocate vmw_connector_state\n");
1195		return;
1196	}
1197
1198	__drm_atomic_helper_connector_reset(connector, &vcs->base);
1199}
1200
1201
1202/**
1203 * vmw_du_connector_destroy_state - destroy connector state
1204 * @connector: DRM connector
1205 * @state: state object to destroy
1206 *
1207 * Destroys the connector state (both common and vmw-specific) for the
1208 * specified connector.
1209 */
1210void
1211vmw_du_connector_destroy_state(struct drm_connector *connector,
1212			  struct drm_connector_state *state)
1213{
1214	drm_atomic_helper_connector_destroy_state(connector, state);
1215}
1216/*
1217 * Generic framebuffer code
1218 */
1219
1220/*
1221 * Surface framebuffer code
1222 */
1223
1224static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
1225{
1226	struct vmw_framebuffer_surface *vfbs =
1227		vmw_framebuffer_to_vfbs(framebuffer);
1228
1229	drm_framebuffer_cleanup(framebuffer);
1230	vmw_surface_unreference(&vfbs->surface);
1231
1232	kfree(vfbs);
1233}
1234
1235/**
1236 * vmw_kms_readback - Perform a readback from the screen system to
1237 * a buffer-object backed framebuffer.
1238 *
1239 * @dev_priv: Pointer to the device private structure.
1240 * @file_priv: Pointer to a struct drm_file identifying the caller.
1241 * Must be set to NULL if @user_fence_rep is NULL.
1242 * @vfb: Pointer to the buffer-object backed framebuffer.
1243 * @user_fence_rep: User-space provided structure for fence information.
1244 * Must be set to non-NULL if @file_priv is non-NULL.
1245 * @vclips: Array of clip rects.
1246 * @num_clips: Number of clip rects in @vclips.
1247 *
1248 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
1249 * interrupted.
1250 */
1251int vmw_kms_readback(struct vmw_private *dev_priv,
1252		     struct drm_file *file_priv,
1253		     struct vmw_framebuffer *vfb,
1254		     struct drm_vmw_fence_rep __user *user_fence_rep,
1255		     struct drm_vmw_rect *vclips,
1256		     uint32_t num_clips)
1257{
1258	switch (dev_priv->active_display_unit) {
1259	case vmw_du_screen_object:
1260		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
1261					    user_fence_rep, vclips, num_clips,
1262					    NULL);
1263	case vmw_du_screen_target:
1264		return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
1265					     user_fence_rep, NULL, vclips, num_clips,
1266					     1, NULL);
1267	default:
1268		WARN_ONCE(true,
1269			  "Readback called with invalid display system.\n");
1270	}
1271
1272	return -ENOSYS;
1273}
1274
1275
1276static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
1277	.destroy = vmw_framebuffer_surface_destroy,
1278	.dirty = drm_atomic_helper_dirtyfb,
1279};
1280
1281static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
1282					   struct vmw_surface *surface,
1283					   struct vmw_framebuffer **out,
1284					   const struct drm_mode_fb_cmd2
1285					   *mode_cmd,
1286					   bool is_bo_proxy)
1287
1288{
1289	struct drm_device *dev = &dev_priv->drm;
1290	struct vmw_framebuffer_surface *vfbs;
1291	enum SVGA3dSurfaceFormat format;
1292	int ret;
1293
1294	/* 3D is only supported on HWv8 and newer hosts */
1295	if (dev_priv->active_display_unit == vmw_du_legacy)
1296		return -ENOSYS;
1297
1298	/*
1299	 * Sanity checks.
1300	 */
1301
1302	if (!drm_any_plane_has_format(&dev_priv->drm,
1303				      mode_cmd->pixel_format,
1304				      mode_cmd->modifier[0])) {
1305		drm_dbg(&dev_priv->drm,
1306			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1307			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1308		return -EINVAL;
1309	}
1310
1311	/* Surface must be marked as a scanout. */
1312	if (unlikely(!surface->metadata.scanout))
1313		return -EINVAL;
1314
1315	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
1316		     surface->metadata.num_sizes != 1 ||
1317		     surface->metadata.base_size.width < mode_cmd->width ||
1318		     surface->metadata.base_size.height < mode_cmd->height ||
1319		     surface->metadata.base_size.depth != 1)) {
1320		DRM_ERROR("Incompatible surface dimensions "
1321			  "for requested mode.\n");
1322		return -EINVAL;
1323	}
1324
1325	switch (mode_cmd->pixel_format) {
1326	case DRM_FORMAT_ARGB8888:
1327		format = SVGA3D_A8R8G8B8;
1328		break;
1329	case DRM_FORMAT_XRGB8888:
1330		format = SVGA3D_X8R8G8B8;
1331		break;
1332	case DRM_FORMAT_RGB565:
1333		format = SVGA3D_R5G6B5;
1334		break;
1335	case DRM_FORMAT_XRGB1555:
1336		format = SVGA3D_A1R5G5B5;
1337		break;
1338	default:
1339		DRM_ERROR("Invalid pixel format: %p4cc\n",
1340			  &mode_cmd->pixel_format);
1341		return -EINVAL;
1342	}
1343
1344	/*
1345	 * For DX, surface format validation is done when surface->scanout
1346	 * is set.
1347	 */
1348	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
1349		DRM_ERROR("Invalid surface format for requested mode.\n");
1350		return -EINVAL;
1351	}
1352
1353	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
1354	if (!vfbs) {
1355		ret = -ENOMEM;
1356		goto out_err1;
1357	}
1358
1359	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
1360	vfbs->surface = vmw_surface_reference(surface);
1361	vfbs->base.user_handle = mode_cmd->handles[0];
1362	vfbs->is_bo_proxy = is_bo_proxy;
1363
1364	*out = &vfbs->base;
1365
1366	ret = drm_framebuffer_init(dev, &vfbs->base.base,
1367				   &vmw_framebuffer_surface_funcs);
1368	if (ret)
1369		goto out_err2;
1370
1371	return 0;
1372
1373out_err2:
1374	vmw_surface_unreference(&surface);
1375	kfree(vfbs);
1376out_err1:
1377	return ret;
1378}
1379
1380/*
1381 * Buffer-object framebuffer code
1382 */
1383
1384static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
1385					    struct drm_file *file_priv,
1386					    unsigned int *handle)
1387{
1388	struct vmw_framebuffer_bo *vfbd =
1389			vmw_framebuffer_to_vfbd(fb);
1390
1391	return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
1392}
1393
1394static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
1395{
1396	struct vmw_framebuffer_bo *vfbd =
1397		vmw_framebuffer_to_vfbd(framebuffer);
1398
1399	drm_framebuffer_cleanup(framebuffer);
1400	vmw_bo_unreference(&vfbd->buffer);
1401
1402	kfree(vfbd);
1403}
1404
1405static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
1406	.create_handle = vmw_framebuffer_bo_create_handle,
1407	.destroy = vmw_framebuffer_bo_destroy,
1408	.dirty = drm_atomic_helper_dirtyfb,
1409};
1410
1411/**
1412 * vmw_create_bo_proxy - create a proxy surface for the buffer object
1413 *
1414 * @dev: DRM device
1415 * @mode_cmd: parameters for the new surface
1416 * @bo_mob: MOB backing the buffer object
1417 * @srf_out: newly created surface
1418 *
1419 * When the content FB is a buffer object, we create a surface as a proxy to the
1420 * same buffer.  This way we can do a surface copy rather than a surface DMA.
1421 * This is a more efficient approach.
1422 *
1423 * RETURNS:
1424 * 0 on success, error code otherwise
1425 */
1426static int vmw_create_bo_proxy(struct drm_device *dev,
1427			       const struct drm_mode_fb_cmd2 *mode_cmd,
1428			       struct vmw_bo *bo_mob,
1429			       struct vmw_surface **srf_out)
1430{
1431	struct vmw_surface_metadata metadata = {0};
1432	uint32_t format;
1433	struct vmw_resource *res;
1434	unsigned int bytes_pp;
1435	int ret;
1436
1437	switch (mode_cmd->pixel_format) {
1438	case DRM_FORMAT_ARGB8888:
1439	case DRM_FORMAT_XRGB8888:
1440		format = SVGA3D_X8R8G8B8;
1441		bytes_pp = 4;
1442		break;
1443
1444	case DRM_FORMAT_RGB565:
1445	case DRM_FORMAT_XRGB1555:
1446		format = SVGA3D_R5G6B5;
1447		bytes_pp = 2;
1448		break;
1449
1450	case 8:
1451		format = SVGA3D_P8;
1452		bytes_pp = 1;
1453		break;
1454
1455	default:
1456		DRM_ERROR("Invalid framebuffer format %p4cc\n",
1457			  &mode_cmd->pixel_format);
1458		return -EINVAL;
1459	}
1460
1461	metadata.format = format;
1462	metadata.mip_levels[0] = 1;
1463	metadata.num_sizes = 1;
1464	metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
1465	metadata.base_size.height =  mode_cmd->height;
1466	metadata.base_size.depth = 1;
1467	metadata.scanout = true;
1468
1469	ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
1470	if (ret) {
1471		DRM_ERROR("Failed to allocate proxy content buffer\n");
1472		return ret;
1473	}
1474
1475	res = &(*srf_out)->res;
1476
1477	/* Reserve and switch the backing mob. */
1478	mutex_lock(&res->dev_priv->cmdbuf_mutex);
1479	(void) vmw_resource_reserve(res, false, true);
1480	vmw_user_bo_unref(&res->guest_memory_bo);
1481	res->guest_memory_bo = vmw_user_bo_ref(bo_mob);
1482	res->guest_memory_offset = 0;
1483	vmw_resource_unreserve(res, false, false, false, NULL, 0);
1484	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1485
1486	return 0;
1487}
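
/*
 * A worked example of the sizing above: an XRGB8888 mode with
 * pitches[0] == 7680 gives bytes_pp == 4, so the proxy surface is created
 * 7680 / 4 == 1920 pixels wide, while the height comes straight from
 * mode_cmd->height. Deriving the width from the pitch rather than from
 * mode_cmd->width makes the surface cover any padding at the end of each
 * scanline as well.
 */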
1488
1489
1490
1491static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
1492				      struct vmw_bo *bo,
1493				      struct vmw_framebuffer **out,
1494				      const struct drm_mode_fb_cmd2
1495				      *mode_cmd)
1496
1497{
1498	struct drm_device *dev = &dev_priv->drm;
1499	struct vmw_framebuffer_bo *vfbd;
1500	unsigned int requested_size;
1501	int ret;
1502
1503	requested_size = mode_cmd->height * mode_cmd->pitches[0];
1504	if (unlikely(requested_size > bo->tbo.base.size)) {
1505		DRM_ERROR("Screen buffer object size is too small "
1506			  "for requested mode.\n");
1507		return -EINVAL;
1508	}
1509
1510	if (!drm_any_plane_has_format(&dev_priv->drm,
1511				      mode_cmd->pixel_format,
1512				      mode_cmd->modifier[0])) {
1513		drm_dbg(&dev_priv->drm,
1514			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1515			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1516		return -EINVAL;
1517	}
1518
1519	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
1520	if (!vfbd) {
1521		ret = -ENOMEM;
1522		goto out_err1;
1523	}
1524
1525	vfbd->base.base.obj[0] = &bo->tbo.base;
1526	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
1527	vfbd->base.bo = true;
1528	vfbd->buffer = vmw_bo_reference(bo);
1529	vfbd->base.user_handle = mode_cmd->handles[0];
1530	*out = &vfbd->base;
1531
1532	ret = drm_framebuffer_init(dev, &vfbd->base.base,
1533				   &vmw_framebuffer_bo_funcs);
1534	if (ret)
1535		goto out_err2;
1536
1537	return 0;
1538
1539out_err2:
1540	vmw_bo_unreference(&bo);
1541	kfree(vfbd);
1542out_err1:
1543	return ret;
1544}
1545
1546
1547/**
1548 * vmw_kms_srf_ok - check if a surface can be created
1549 *
1550 * @dev_priv: Pointer to device private struct.
1551 * @width: requested width
1552 * @height: requested height
1553 *
1554 * Surfaces must not exceed the maximum texture size.
1555 */
1556static bool
1557vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
1558{
1559	if (width  > dev_priv->texture_max_width ||
1560	    height > dev_priv->texture_max_height)
1561		return false;
1562
1563	return true;
1564}
1565
1566/**
1567 * vmw_kms_new_framebuffer - Create a new framebuffer.
1568 *
1569 * @dev_priv: Pointer to device private struct.
1570 * @bo: Pointer to buffer object to wrap the kms framebuffer around.
1571 * Either @bo or @surface must be NULL.
1572 * @surface: Pointer to a surface to wrap the kms framebuffer around.
1573 * Either @bo or @surface must be NULL.
1574 * @only_2d: No presents will occur to this buffer object based framebuffer.
1575 * This helps the code to do some important optimizations.
1576 * @mode_cmd: Frame-buffer metadata.
1577 */
1578struct vmw_framebuffer *
1579vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1580			struct vmw_bo *bo,
1581			struct vmw_surface *surface,
1582			bool only_2d,
1583			const struct drm_mode_fb_cmd2 *mode_cmd)
1584{
1585	struct vmw_framebuffer *vfb = NULL;
1586	bool is_bo_proxy = false;
1587	int ret;
1588
1589	/*
1590	 * We cannot use the SurfaceDMA command in a non-accelerated VM;
1591	 * therefore, wrap the buffer object in a surface so we can use the
1592	 * SurfaceCopy command.
1593	 */
1594	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
1595	    bo && only_2d &&
1596	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
1597	    dev_priv->active_display_unit == vmw_du_screen_target) {
1598		ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
1599					  bo, &surface);
1600		if (ret)
1601			return ERR_PTR(ret);
1602
1603		is_bo_proxy = true;
1604	}
1605
1606	/* Create the new framebuffer depending on what we have */
1607	if (surface) {
1608		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
1609						      mode_cmd,
1610						      is_bo_proxy);
1611		/*
1612		 * vmw_create_bo_proxy() adds a reference that is no longer
1613		 * needed
1614		 */
1615		if (is_bo_proxy)
1616			vmw_surface_unreference(&surface);
1617	} else if (bo) {
1618		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
1619						 mode_cmd);
1620	} else {
1621		BUG();
1622	}
1623
1624	if (ret)
1625		return ERR_PTR(ret);
1626
1627	return vfb;
1628}
1629
1630/*
1631 * Generic Kernel modesetting functions
1632 */
1633
1634static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1635						 struct drm_file *file_priv,
1636						 const struct drm_mode_fb_cmd2 *mode_cmd)
1637{
1638	struct vmw_private *dev_priv = vmw_priv(dev);
1639	struct vmw_framebuffer *vfb = NULL;
1640	struct vmw_surface *surface = NULL;
1641	struct vmw_bo *bo = NULL;
1642	int ret;
1643
1644	/* returns either a bo or surface */
1645	ret = vmw_user_lookup_handle(dev_priv, file_priv,
1646				     mode_cmd->handles[0],
1647				     &surface, &bo);
1648	if (ret) {
1649		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
1650			  mode_cmd->handles[0], mode_cmd->handles[0]);
1651		goto err_out;
1652	}
1653
1654
1655	if (!bo &&
1656	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
1657		DRM_ERROR("Surface size cannot exceed %dx%d\n",
1658			dev_priv->texture_max_width,
1659			dev_priv->texture_max_height);
1660		goto err_out;
1661	}
1662
1663
1664	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
1665				      !(dev_priv->capabilities & SVGA_CAP_3D),
1666				      mode_cmd);
1667	if (IS_ERR(vfb)) {
1668		ret = PTR_ERR(vfb);
1669		goto err_out;
1670	}
1671
1672err_out:
1673	/* vmw_user_lookup_handle takes one ref, and so does new_fb */
1674	if (bo)
1675		vmw_user_bo_unref(&bo);
1676	if (surface)
1677		vmw_surface_unreference(&surface);
1678
1679	if (ret) {
1680		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
1681		return ERR_PTR(ret);
1682	}
1683
1684	return &vfb->base;
1685}
1686
1687/**
1688 * vmw_kms_check_display_memory - Validates display memory required for a
1689 * topology
1690 * @dev: DRM device
1691 * @num_rects: number of drm_rect in rects
1692 * @rects: array of drm_rect representing the topology to validate, indexed by
1693 * crtc index.
1694 *
1695 * Returns:
1696 * 0 on success otherwise negative error code
1697 */
1698static int vmw_kms_check_display_memory(struct drm_device *dev,
1699					uint32_t num_rects,
1700					struct drm_rect *rects)
1701{
1702	struct vmw_private *dev_priv = vmw_priv(dev);
1703	struct drm_rect bounding_box = {0};
1704	u64 total_pixels = 0, pixel_mem, bb_mem;
1705	int i;
1706
1707	for (i = 0; i < num_rects; i++) {
1708		/*
1709		 * For STDU, only the individual screen (screen target) is limited
1710		 * by the SCREENTARGET_MAX_WIDTH/HEIGHT registers.
1711		 */
1712		if (dev_priv->active_display_unit == vmw_du_screen_target &&
1713		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
1714		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
1715			VMW_DEBUG_KMS("Screen size not supported.\n");
1716			return -EINVAL;
1717		}
1718
1719		/* Bounding box upper left is at (0,0). */
1720		if (rects[i].x2 > bounding_box.x2)
1721			bounding_box.x2 = rects[i].x2;
1722
1723		if (rects[i].y2 > bounding_box.y2)
1724			bounding_box.y2 = rects[i].y2;
1725
1726		total_pixels += (u64) drm_rect_width(&rects[i]) *
1727			(u64) drm_rect_height(&rects[i]);
1728	}
1729
1730	/* Virtual svga device primary limits are always in 32-bpp. */
1731	pixel_mem = total_pixels * 4;
1732
1733	/*
1734	 * For HV10 and below, prim_bb_mem is the VRAM size. When
1735	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the VRAM size
1736	 * is the limit on the primary bounding box.
1737	 */
1738	if (pixel_mem > dev_priv->max_primary_mem) {
1739		VMW_DEBUG_KMS("Combined output size too large.\n");
1740		return -EINVAL;
1741	}
1742
1743	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
1744	if (dev_priv->active_display_unit != vmw_du_screen_target ||
1745	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
1746		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
1747
1748		if (bb_mem > dev_priv->max_primary_mem) {
1749			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
1750			return -EINVAL;
1751		}
1752	}
1753
1754	return 0;
1755}
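
/*
 * A worked example of the accounting above, assuming two 1920x1080
 * outputs: total_pixels = 2 * 1920 * 1080 = 4147200, so
 * pixel_mem = 4147200 * 4 = 16588800 bytes (~15.8 MiB). Placed side by
 * side, the bounding box is 3840x1080 and bb_mem = 3840 * 1080 * 4 =
 * 16588800 bytes as well; placed diagonally, the bounding box grows to
 * 3840x2160 and bb_mem = 33177600 bytes (~31.6 MiB) even though
 * pixel_mem is unchanged, which is exactly what the separate bb_mem
 * check is there to catch.
 */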
1756
1757/**
1758 * vmw_crtc_state_and_lock - Return new or current crtc state with locked
1759 * crtc mutex
1760 * @state: The atomic state pointer containing the new atomic state
1761 * @crtc: The crtc
1762 *
1763 * This function returns the new crtc state if it's part of the state update.
1764 * Otherwise returns the current crtc state. It also makes sure that the
1765 * crtc mutex is locked.
1766 *
1767 * Returns: A valid crtc state pointer or NULL. It may also return an
1768 * error pointer, in particular -EDEADLK if locking needs to be rerun.
1769 */
1770static struct drm_crtc_state *
1771vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
1772{
1773	struct drm_crtc_state *crtc_state;
1774
1775	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1776	if (crtc_state) {
1777		lockdep_assert_held(&crtc->mutex.mutex.base);
1778	} else {
1779		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
1780
1781		if (ret != 0 && ret != -EALREADY)
1782			return ERR_PTR(ret);
1783
1784		crtc_state = crtc->state;
1785	}
1786
1787	return crtc_state;
1788}
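
/*
 * A caller that sees -EDEADLK from the function above is expected to back
 * off and retry under its acquire context. A minimal sketch of the
 * canonical DRM pattern (hypothetical caller; vmw_du_update_layout()
 * below follows the same shape):
 *
 *	struct drm_modeset_acquire_ctx ctx;
 *	int ret;
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 *retry:
 *	ret = drm_modeset_lock(&crtc->mutex, &ctx);
 *	if (ret == -EDEADLK) {
 *		drm_modeset_backoff(&ctx);
 *		goto retry;
 *	}
 *
 *	... operate on the locked crtc ...
 *
 *	drm_modeset_drop_locks(&ctx);
 *	drm_modeset_acquire_fini(&ctx);
 */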
1789
1790/**
1791 * vmw_kms_check_implicit - Verify that all implicit display units scan out
1792 * from the same fb after the new state is committed.
1793 * @dev: The drm_device.
1794 * @state: The new state to be checked.
1795 *
1796 * Returns:
1797 *   Zero on success,
1798 *   -EINVAL on invalid state,
1799 *   -EDEADLK if modeset locking needs to be rerun.
1800 */
1801static int vmw_kms_check_implicit(struct drm_device *dev,
1802				  struct drm_atomic_state *state)
1803{
1804	struct drm_framebuffer *implicit_fb = NULL;
1805	struct drm_crtc *crtc;
1806	struct drm_crtc_state *crtc_state;
1807	struct drm_plane_state *plane_state;
1808
1809	drm_for_each_crtc(crtc, dev) {
1810		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1811
1812		if (!du->is_implicit)
1813			continue;
1814
1815		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1816		if (IS_ERR(crtc_state))
1817			return PTR_ERR(crtc_state);
1818
1819		if (!crtc_state || !crtc_state->enable)
1820			continue;
1821
1822		/*
1823		 * Can't move primary planes across crtcs, so this is OK.
1824		 * It also means we don't need to take the plane mutex.
1825		 */
1826		plane_state = du->primary.state;
1827		if (plane_state->crtc != crtc)
1828			continue;
1829
1830		if (!implicit_fb)
1831			implicit_fb = plane_state->fb;
1832		else if (implicit_fb != plane_state->fb)
1833			return -EINVAL;
1834	}
1835
1836	return 0;
1837}
1838
1839/**
1840 * vmw_kms_check_topology - Validates topology in drm_atomic_state
1841 * @dev: DRM device
1842 * @state: the driver state object
1843 *
1844 * Returns:
1845 * 0 on success otherwise negative error code
1846 */
1847static int vmw_kms_check_topology(struct drm_device *dev,
1848				  struct drm_atomic_state *state)
1849{
1850	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1851	struct drm_rect *rects;
1852	struct drm_crtc *crtc;
1853	uint32_t i;
1854	int ret = 0;
1855
1856	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
1857			GFP_KERNEL);
1858	if (!rects)
1859		return -ENOMEM;
1860
1861	drm_for_each_crtc(crtc, dev) {
1862		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1863		struct drm_crtc_state *crtc_state;
1864
1865		i = drm_crtc_index(crtc);
1866
1867		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1868		if (IS_ERR(crtc_state)) {
1869			ret = PTR_ERR(crtc_state);
1870			goto clean;
1871		}
1872
1873		if (!crtc_state)
1874			continue;
1875
1876		if (crtc_state->enable) {
1877			rects[i].x1 = du->gui_x;
1878			rects[i].y1 = du->gui_y;
1879			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
1880			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
1881		} else {
1882			rects[i].x1 = 0;
1883			rects[i].y1 = 0;
1884			rects[i].x2 = 0;
1885			rects[i].y2 = 0;
1886		}
1887	}
1888
1889	/* Determine change to topology due to new atomic state */
1890	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
1891				      new_crtc_state, i) {
1892		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1893		struct drm_connector *connector;
1894		struct drm_connector_state *conn_state;
1895		struct vmw_connector_state *vmw_conn_state;
1896
1897		if (!du->pref_active && new_crtc_state->enable) {
1898			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
1899			ret = -EINVAL;
1900			goto clean;
1901		}
1902
1903		/*
1904		 * For vmwgfx each crtc has only one connector attached, and it
1905		 * is not changed, so we don't really need to check
1906		 * crtc->connector_mask and iterate over it.
1907		 */
1908		connector = &du->connector;
1909		conn_state = drm_atomic_get_connector_state(state, connector);
1910		if (IS_ERR(conn_state)) {
1911			ret = PTR_ERR(conn_state);
1912			goto clean;
1913		}
1914
1915		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
1916		vmw_conn_state->gui_x = du->gui_x;
1917		vmw_conn_state->gui_y = du->gui_y;
1918	}
1919
1920	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
1921					   rects);
1922
1923clean:
1924	kfree(rects);
1925	return ret;
1926}
1927
1928/**
1929 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
1930 *
1931 * @dev: DRM device
1932 * @state: the driver state object
1933 *
1934 * This is a simple wrapper around drm_atomic_helper_check() that also
1935 * lets us assign a value to mode->crtc_clock so that
1936 * drm_calc_timestamping_constants() won't throw an error message.
1937 *
1938 * Returns:
1939 * Zero for success or -errno
1940 */
1941static int
1942vmw_kms_atomic_check_modeset(struct drm_device *dev,
1943			     struct drm_atomic_state *state)
1944{
1945	struct drm_crtc *crtc;
1946	struct drm_crtc_state *crtc_state;
1947	bool need_modeset = false;
1948	int i, ret;
1949
1950	ret = drm_atomic_helper_check(dev, state);
1951	if (ret)
1952		return ret;
1953
1954	ret = vmw_kms_check_implicit(dev, state);
1955	if (ret) {
1956		VMW_DEBUG_KMS("Invalid implicit state\n");
1957		return ret;
1958	}
1959
1960	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1961		if (drm_atomic_crtc_needs_modeset(crtc_state))
1962			need_modeset = true;
1963	}
1964
1965	if (need_modeset)
1966		return vmw_kms_check_topology(dev, state);
1967
1968	return ret;
1969}
1970
1971static const struct drm_mode_config_funcs vmw_kms_funcs = {
1972	.fb_create = vmw_kms_fb_create,
1973	.atomic_check = vmw_kms_atomic_check_modeset,
1974	.atomic_commit = drm_atomic_helper_commit,
1975};
1976
1977static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1978				   struct drm_file *file_priv,
1979				   struct vmw_framebuffer *vfb,
1980				   struct vmw_surface *surface,
1981				   uint32_t sid,
1982				   int32_t destX, int32_t destY,
1983				   struct drm_vmw_rect *clips,
1984				   uint32_t num_clips)
1985{
1986	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1987					    &surface->res, destX, destY,
1988					    num_clips, 1, NULL, NULL);
1989}
1990
1991
1992int vmw_kms_present(struct vmw_private *dev_priv,
1993		    struct drm_file *file_priv,
1994		    struct vmw_framebuffer *vfb,
1995		    struct vmw_surface *surface,
1996		    uint32_t sid,
1997		    int32_t destX, int32_t destY,
1998		    struct drm_vmw_rect *clips,
1999		    uint32_t num_clips)
2000{
2001	int ret;
2002
2003	switch (dev_priv->active_display_unit) {
2004	case vmw_du_screen_target:
2005		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
2006						 &surface->res, destX, destY,
2007						 num_clips, 1, NULL, NULL);
2008		break;
2009	case vmw_du_screen_object:
2010		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
2011					      sid, destX, destY, clips,
2012					      num_clips);
2013		break;
2014	default:
2015		WARN_ONCE(true,
2016			  "Present called with invalid display system.\n");
2017		ret = -ENOSYS;
2018		break;
2019	}
2020	if (ret)
2021		return ret;
2022
2023	vmw_cmd_flush(dev_priv, false);
2024
2025	return 0;
2026}
2027
2028static void
2029vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
2030{
2031	if (dev_priv->hotplug_mode_update_property)
2032		return;
2033
2034	dev_priv->hotplug_mode_update_property =
2035		drm_property_create_range(&dev_priv->drm,
2036					  DRM_MODE_PROP_IMMUTABLE,
2037					  "hotplug_mode_update", 0, 1);
2038}
2039
2040int vmw_kms_init(struct vmw_private *dev_priv)
2041{
2042	struct drm_device *dev = &dev_priv->drm;
2043	int ret;
2044	static const char *display_unit_names[] = {
2045		"Invalid",
2046		"Legacy",
2047		"Screen Object",
2048		"Screen Target",
2049		"Invalid (max)"
2050	};
2051
2052	drm_mode_config_init(dev);
2053	dev->mode_config.funcs = &vmw_kms_funcs;
2054	dev->mode_config.min_width = 1;
2055	dev->mode_config.min_height = 1;
2056	dev->mode_config.max_width = dev_priv->texture_max_width;
2057	dev->mode_config.max_height = dev_priv->texture_max_height;
2058	dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
2059
2060	drm_mode_create_suggested_offset_properties(dev);
2061	vmw_kms_create_hotplug_mode_update_property(dev_priv);
2062
2063	ret = vmw_kms_stdu_init_display(dev_priv);
2064	if (ret) {
2065		ret = vmw_kms_sou_init_display(dev_priv);
2066		if (ret) /* Fallback */
2067			ret = vmw_kms_ldu_init_display(dev_priv);
2068	}
2069	BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
2070	drm_info(&dev_priv->drm, "%s display unit initialized\n",
2071		 display_unit_names[dev_priv->active_display_unit]);
2072
2073	return ret;
2074}
2075
2076int vmw_kms_close(struct vmw_private *dev_priv)
2077{
2078	int ret = 0;
2079
2080	/*
2081	 * Docs say we should take the lock before calling this function,
2082	 * but since it destroys encoders and our destructor calls
2083	 * drm_encoder_cleanup, which takes the lock, we would deadlock.
2084	 */
2085	drm_mode_config_cleanup(&dev_priv->drm);
2086	if (dev_priv->active_display_unit == vmw_du_legacy)
2087		ret = vmw_kms_ldu_close_display(dev_priv);
2088
2089	return ret;
2090}
2091
2092int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
2093				struct drm_file *file_priv)
2094{
2095	struct drm_vmw_cursor_bypass_arg *arg = data;
2096	struct vmw_display_unit *du;
2097	struct drm_crtc *crtc;
2098	int ret = 0;
2099
2100	mutex_lock(&dev->mode_config.mutex);
2101	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
2102
2103		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2104			du = vmw_crtc_to_du(crtc);
2105			du->hotspot_x = arg->xhot;
2106			du->hotspot_y = arg->yhot;
2107		}
2108
2109		mutex_unlock(&dev->mode_config.mutex);
2110		return 0;
2111	}
2112
2113	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
2114	if (!crtc) {
2115		ret = -ENOENT;
2116		goto out;
2117	}
2118
2119	du = vmw_crtc_to_du(crtc);
2120
2121	du->hotspot_x = arg->xhot;
2122	du->hotspot_y = arg->yhot;
2123
2124out:
2125	mutex_unlock(&dev->mode_config.mutex);
2126
2127	return ret;
2128}
2129
2130int vmw_kms_write_svga(struct vmw_private *vmw_priv,
2131			unsigned width, unsigned height, unsigned pitch,
2132			unsigned bpp, unsigned depth)
2133{
2134	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
2135		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
2136	else if (vmw_fifo_have_pitchlock(vmw_priv))
2137		vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
2138	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
2139	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
2140	if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
2141		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
2142
2143	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
2144		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
2145			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
2146		return -EINVAL;
2147	}
2148
2149	return 0;
2150}
2151
2152bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
2153				uint32_t pitch,
2154				uint32_t height)
2155{
2156	return ((u64) pitch * (u64) height) < (u64)
2157		((dev_priv->active_display_unit == vmw_du_screen_target) ?
2158		 dev_priv->max_primary_mem : dev_priv->vram_size);
2159}
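
/*
 * A worked example of the check above: a 1920x1200 mode at 32bpp has a
 * pitch of 1920 * 4 = 7680 bytes, so pitch * height = 7680 * 1200 =
 * 9216000 bytes (~8.8 MiB), which fits e.g. a 16 MiB VRAM configuration.
 * A 4096x2160 mode at 32bpp needs 4096 * 4 * 2160 = 35389440 bytes
 * (~33.8 MiB) and would be rejected against that same limit.
 */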
2160
2161/**
2162 * vmw_du_update_layout - Update the display unit with topology from resolution
2163 * plugin and generate DRM uevent
2164 * @dev_priv: device private
2165 * @num_rects: number of drm_rect in rects
2166 * @rects: topology to update
2167 */
2168static int vmw_du_update_layout(struct vmw_private *dev_priv,
2169				unsigned int num_rects, struct drm_rect *rects)
2170{
2171	struct drm_device *dev = &dev_priv->drm;
2172	struct vmw_display_unit *du;
2173	struct drm_connector *con;
2174	struct drm_connector_list_iter conn_iter;
2175	struct drm_modeset_acquire_ctx ctx;
2176	struct drm_crtc *crtc;
2177	int ret;
2178
2179	/* Currently gui_x/y is protected with the crtc mutex */
2180	mutex_lock(&dev->mode_config.mutex);
2181	drm_modeset_acquire_init(&ctx, 0);
2182retry:
2183	drm_for_each_crtc(crtc, dev) {
2184		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2185		if (ret < 0) {
2186			if (ret == -EDEADLK) {
2187				drm_modeset_backoff(&ctx);
2188				goto retry;
2189			}
2190			goto out_fini;
2191		}
2192	}
2193
2194	drm_connector_list_iter_begin(dev, &conn_iter);
2195	drm_for_each_connector_iter(con, &conn_iter) {
2196		du = vmw_connector_to_du(con);
2197		if (num_rects > du->unit) {
2198			du->pref_width = drm_rect_width(&rects[du->unit]);
2199			du->pref_height = drm_rect_height(&rects[du->unit]);
2200			du->pref_active = true;
2201			du->gui_x = rects[du->unit].x1;
2202			du->gui_y = rects[du->unit].y1;
2203		} else {
2204			du->pref_width  = VMWGFX_MIN_INITIAL_WIDTH;
2205			du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
2206			du->pref_active = false;
2207			du->gui_x = 0;
2208			du->gui_y = 0;
2209		}
2210	}
2211	drm_connector_list_iter_end(&conn_iter);
2212
2213	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2214		du = vmw_connector_to_du(con);
2215		if (num_rects > du->unit) {
2216			drm_object_property_set_value
2217			  (&con->base, dev->mode_config.suggested_x_property,
2218			   du->gui_x);
2219			drm_object_property_set_value
2220			  (&con->base, dev->mode_config.suggested_y_property,
2221			   du->gui_y);
2222		} else {
2223			drm_object_property_set_value
2224			  (&con->base, dev->mode_config.suggested_x_property,
2225			   0);
2226			drm_object_property_set_value
2227			  (&con->base, dev->mode_config.suggested_y_property,
2228			   0);
2229		}
2230		con->status = vmw_du_connector_detect(con, true);
2231	}
2232out_fini:
2233	drm_modeset_drop_locks(&ctx);
2234	drm_modeset_acquire_fini(&ctx);
2235	mutex_unlock(&dev->mode_config.mutex);
2236
2237	drm_sysfs_hotplug_event(dev);
2238
2239	return 0;
2240}
2241
2242int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2243			  u16 *r, u16 *g, u16 *b,
2244			  uint32_t size,
2245			  struct drm_modeset_acquire_ctx *ctx)
2246{
2247	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2248	int i;
2249
2250	for (i = 0; i < size; i++) {
2251		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2252			  r[i], g[i], b[i]);
2253		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2254		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2255		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2256	}
2257
2258	return 0;
2259}
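
/*
 * Note on the register layout above: the palette holds three 8-bit
 * components per entry, so gamma entry i lands at SVGA_PALETTE_BASE + 3 * i
 * (red), + 3 * i + 1 (green) and + 3 * i + 2 (blue), and the 16-bit values
 * supplied by DRM are truncated to their high byte by the >> 8 shifts.
 */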
2260
2261int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2262{
2263	return 0;
2264}
2265
2266enum drm_connector_status
2267vmw_du_connector_detect(struct drm_connector *connector, bool force)
2268{
2269	uint32_t num_displays;
2270	struct drm_device *dev = connector->dev;
2271	struct vmw_private *dev_priv = vmw_priv(dev);
2272	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2273
2274	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2275
2276	return ((vmw_connector_to_du(connector)->unit < num_displays &&
2277		 du->pref_active) ?
2278		connector_status_connected : connector_status_disconnected);
2279}
2280
2281static struct drm_display_mode vmw_kms_connector_builtin[] = {
2282	/* 640x480@60Hz */
2283	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
2284		   752, 800, 0, 480, 489, 492, 525, 0,
2285		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2286	/* 800x600@60Hz */
2287	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
2288		   968, 1056, 0, 600, 601, 605, 628, 0,
2289		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2290	/* 1024x768@60Hz */
2291	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
2292		   1184, 1344, 0, 768, 771, 777, 806, 0,
2293		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2294	/* 1152x864@75Hz */
2295	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
2296		   1344, 1600, 0, 864, 865, 868, 900, 0,
2297		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2298	/* 1280x720@60Hz */
2299	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
2300		   1472, 1664, 0, 720, 723, 728, 748, 0,
2301		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2302	/* 1280x768@60Hz */
2303	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
2304		   1472, 1664, 0, 768, 771, 778, 798, 0,
2305		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2306	/* 1280x800@60Hz */
2307	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
2308		   1480, 1680, 0, 800, 803, 809, 831, 0,
2309		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2310	/* 1280x960@60Hz */
2311	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
2312		   1488, 1800, 0, 960, 961, 964, 1000, 0,
2313		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2314	/* 1280x1024@60Hz */
2315	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
2316		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
2317		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2318	/* 1360x768@60Hz */
2319	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
2320		   1536, 1792, 0, 768, 771, 777, 795, 0,
2321		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2322	/* 1400x1050@60Hz */
2323	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
2324		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
2325		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2326	/* 1440x900@60Hz */
2327	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
2328		   1672, 1904, 0, 900, 903, 909, 934, 0,
2329		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2330	/* 1600x1200@60Hz */
2331	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
2332		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
2333		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2334	/* 1680x1050@60Hz */
2335	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
2336		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
2337		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2338	/* 1792x1344@60Hz */
2339	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2340		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
2341		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2342	/* 1856x1392@60Hz */
2343	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2344		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
2345		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2346	/* 1920x1080@60Hz */
2347	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
2348		   2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
2349		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2350	/* 1920x1200@60Hz */
2351	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2352		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
2353		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2354	/* 1920x1440@60Hz */
2355	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2356		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
2357		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2358	/* 2560x1440@60Hz */
2359	{ DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
2360		   2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
2361		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2362	/* 2560x1600@60Hz */
2363	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
2364		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
2365		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2366	/* 2880x1800@60Hz */
2367	{ DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
2368		   2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
2369		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2370	/* 3840x2160@60Hz */
2371	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
2372		   3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
2373		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2374	/* 3840x2400@60Hz */
2375	{ DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
2376		   3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
2377		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2378	/* Terminate */
2379	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
2380};
2381
2382/**
2383 * vmw_guess_mode_timing - Provide fake timings for a
2384 * 60Hz vrefresh mode.
2385 *
2386 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2387 * members filled in.
2388 */
2389void vmw_guess_mode_timing(struct drm_display_mode *mode)
2390{
2391	mode->hsync_start = mode->hdisplay + 50;
2392	mode->hsync_end = mode->hsync_start + 50;
2393	mode->htotal = mode->hsync_end + 50;
2394
2395	mode->vsync_start = mode->vdisplay + 50;
2396	mode->vsync_end = mode->vsync_start + 50;
2397	mode->vtotal = mode->vsync_end + 50;
2398
2399	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2400}
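
/*
 * A worked example of the formula above: for a 1024x768 request,
 * htotal = 1024 + 150 = 1174 and vtotal = 768 + 150 = 918, so
 * clock = 1174 * 918 / 100 * 6 = 64662 kHz with integer arithmetic.
 * The resulting refresh rate is 64662000 / (1174 * 918) ~= 60 Hz,
 * which is what the divide-by-100, multiply-by-6 sequence is
 * approximating.
 */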
2401
2402
2403int vmw_du_connector_fill_modes(struct drm_connector *connector,
2404				uint32_t max_width, uint32_t max_height)
2405{
2406	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2407	struct drm_device *dev = connector->dev;
2408	struct vmw_private *dev_priv = vmw_priv(dev);
2409	struct drm_display_mode *mode = NULL;
2410	struct drm_display_mode *bmode;
2411	struct drm_display_mode prefmode = { DRM_MODE("preferred",
2412		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2413		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2414		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2415	};
2416	int i;
2417	u32 assumed_bpp = 4;
2418
2419	if (dev_priv->assume_16bpp)
2420		assumed_bpp = 2;
2421
2422	max_width  = min(max_width,  dev_priv->texture_max_width);
2423	max_height = min(max_height, dev_priv->texture_max_height);
2424
2425	/*
2426	 * For STDU, a mode is additionally limited by the
2427	 * SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
2428	 */
2429	if (dev_priv->active_display_unit == vmw_du_screen_target) {
2430		max_width  = min(max_width,  dev_priv->stdu_max_width);
2431		max_height = min(max_height, dev_priv->stdu_max_height);
2432	}
2433
2434	/* Add preferred mode */
2435	mode = drm_mode_duplicate(dev, &prefmode);
2436	if (!mode)
2437		return 0;
2438	mode->hdisplay = du->pref_width;
2439	mode->vdisplay = du->pref_height;
2440	vmw_guess_mode_timing(mode);
2441	drm_mode_set_name(mode);
2442
2443	if (vmw_kms_validate_mode_vram(dev_priv,
2444					mode->hdisplay * assumed_bpp,
2445					mode->vdisplay)) {
2446		drm_mode_probed_add(connector, mode);
2447	} else {
2448		drm_mode_destroy(dev, mode);
2449		mode = NULL;
2450	}
2451
2452	if (du->pref_mode) {
2453		list_del_init(&du->pref_mode->head);
2454		drm_mode_destroy(dev, du->pref_mode);
2455	}
2456
2457	/* mode might be NULL here; this is intended */
2458	du->pref_mode = mode;
2459
2460	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
2461		bmode = &vmw_kms_connector_builtin[i];
2462		if (bmode->hdisplay > max_width ||
2463		    bmode->vdisplay > max_height)
2464			continue;
2465
2466		if (!vmw_kms_validate_mode_vram(dev_priv,
2467						bmode->hdisplay * assumed_bpp,
2468						bmode->vdisplay))
2469			continue;
2470
2471		mode = drm_mode_duplicate(dev, bmode);
2472		if (!mode)
2473			return 0;
2474
2475		drm_mode_probed_add(connector, mode);
2476	}
2477
2478	drm_connector_list_update(connector);
2479	/* Move the preferred mode first, to help apps pick the right mode. */
2480	drm_mode_sort(&connector->modes);
2481
2482	return 1;
2483}
2484
2485/**
2486 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2487 * @dev: drm device for the ioctl
2488 * @data: data pointer for the ioctl
2489 * @file_priv: drm file for the ioctl call
2490 *
2491 * Update preferred topology of display unit as per ioctl request. The topology
2492 * is expressed as array of drm_vmw_rect.
2493 * e.g.
2494 * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2495 *
2496 * NOTE:
2497 * The x and y offset (upper left) in drm_vmw_rect cannot be less than 0. Besides
2498 * the device limit on topology, x + w and y + h (lower right) cannot be greater
2499 * than INT_MAX, so a topology beyond these limits will return an error.
2500 *
2501 * Returns:
2502 * Zero on success, negative errno on failure.
2503 */
2504int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2505				struct drm_file *file_priv)
2506{
2507	struct vmw_private *dev_priv = vmw_priv(dev);
2508	struct drm_mode_config *mode_config = &dev->mode_config;
2509	struct drm_vmw_update_layout_arg *arg =
2510		(struct drm_vmw_update_layout_arg *)data;
2511	void __user *user_rects;
2512	struct drm_vmw_rect *rects;
2513	struct drm_rect *drm_rects;
2514	unsigned rects_size;
2515	int ret, i;
2516
2517	if (!arg->num_outputs) {
2518		struct drm_rect def_rect = {0, 0,
2519					    VMWGFX_MIN_INITIAL_WIDTH,
2520					    VMWGFX_MIN_INITIAL_HEIGHT};
2521		vmw_du_update_layout(dev_priv, 1, &def_rect);
2522		return 0;
2523	}
2524
2525	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2526	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2527			GFP_KERNEL);
2528	if (unlikely(!rects))
2529		return -ENOMEM;
2530
2531	user_rects = (void __user *)(unsigned long)arg->rects;
2532	ret = copy_from_user(rects, user_rects, rects_size);
2533	if (unlikely(ret != 0)) {
2534		DRM_ERROR("Failed to get rects.\n");
2535		ret = -EFAULT;
2536		goto out_free;
2537	}
2538
2539	drm_rects = (struct drm_rect *)rects;
2540
2541	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2542	for (i = 0; i < arg->num_outputs; i++) {
2543		struct drm_vmw_rect curr_rect;
2544
2545		/* Verify user-space rects for overflow, as the kernel uses drm_rect */
2546		if ((rects[i].x + rects[i].w > INT_MAX) ||
2547		    (rects[i].y + rects[i].h > INT_MAX)) {
2548			ret = -ERANGE;
2549			goto out_free;
2550		}
2551
2552		curr_rect = rects[i];
2553		drm_rects[i].x1 = curr_rect.x;
2554		drm_rects[i].y1 = curr_rect.y;
2555		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2556		drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2557
2558		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
2559			      drm_rects[i].x1, drm_rects[i].y1,
2560			      drm_rects[i].x2, drm_rects[i].y2);
2561
2562		/*
2563		 * Currently this check limits the topology to
2564		 * mode_config->max (which actually is the maximum texture size
2565		 * supported by the virtual device). This limit is here to address
2566		 * window managers that create one big framebuffer for the whole
2567		 * topology.
2568		 */
2569		if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
2570		    drm_rects[i].x2 > mode_config->max_width ||
2571		    drm_rects[i].y2 > mode_config->max_height) {
2572			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2573				      drm_rects[i].x1, drm_rects[i].y1,
2574				      drm_rects[i].x2, drm_rects[i].y2);
2575			ret = -EINVAL;
2576			goto out_free;
2577		}
2578	}
2579
2580	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2581
2582	if (ret == 0)
2583		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2584
2585out_free:
2586	kfree(rects);
2587	return ret;
2588}
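
/*
 * Illustrative sketch, kept compiled out: how a user-space client might
 * exercise DRM_VMW_UPDATE_LAYOUT for the example topology in the
 * kernel-doc above. Only @num_outputs and @rects are taken from the
 * handler; the request macro and example_update_layout() are assumptions
 * made for illustration.
 */
#if 0	/* user-space example, not driver code */
#include <stdint.h>
#include <xf86drm.h>		/* drmIoctl() from libdrm */
#include "vmwgfx_drm.h"

#define EXAMPLE_IOCTL_VMW_UPDATE_LAYOUT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
		struct drm_vmw_update_layout_arg)

static int example_update_layout(int fd)
{
	/* [0 0 640 480] [640 0 800 600] [0 480 640 480] */
	struct drm_vmw_rect rects[3] = {
		{ .x = 0,   .y = 0,   .w = 640, .h = 480 },
		{ .x = 640, .y = 0,   .w = 800, .h = 600 },
		{ .x = 0,   .y = 480, .w = 640, .h = 480 },
	};
	struct drm_vmw_update_layout_arg arg = {
		.num_outputs = 3,
		.rects = (uintptr_t)rects,
	};

	/* x + w and y + h must stay at or below INT_MAX, see NOTE above. */
	return drmIoctl(fd, EXAMPLE_IOCTL_VMW_UPDATE_LAYOUT, &arg);
}
#endif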
2589
2590/**
2591 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2592 * on a set of cliprects and a set of display units.
2593 *
2594 * @dev_priv: Pointer to a device private structure.
2595 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2596 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
2597 * Cliprects are given in framebuffer coordinates.
2598 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2599 * be NULL. Cliprects are given in source coordinates.
2600 * @dest_x: X coordinate offset for the crtc / destination clip rects.
2601 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2602 * @num_clips: Number of cliprects in the @clips or @vclips array.
2603 * @increment: Integer with which to increment the clip counter when looping.
2604 * Used to skip a predetermined number of clip rects.
2605 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
 *
 * Returns: 0 on success, or -ENOMEM if FIFO space cannot be reserved.
2606 */
2607int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2608			 struct vmw_framebuffer *framebuffer,
2609			 const struct drm_clip_rect *clips,
2610			 const struct drm_vmw_rect *vclips,
2611			 s32 dest_x, s32 dest_y,
2612			 int num_clips,
2613			 int increment,
2614			 struct vmw_kms_dirty *dirty)
2615{
2616	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2617	struct drm_crtc *crtc;
2618	u32 num_units = 0;
2619	u32 i, k;
2620
2621	dirty->dev_priv = dev_priv;
2622
2623	/* If crtc is passed, no need to iterate over other display units */
2624	if (dirty->crtc) {
2625		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2626	} else {
2627		list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
2628				    head) {
2629			struct drm_plane *plane = crtc->primary;
2630
2631			if (plane->state->fb == &framebuffer->base)
2632				units[num_units++] = vmw_crtc_to_du(crtc);
2633		}
2634	}
2635
2636	for (k = 0; k < num_units; k++) {
2637		struct vmw_display_unit *unit = units[k];
2638		s32 crtc_x = unit->crtc.x;
2639		s32 crtc_y = unit->crtc.y;
2640		s32 crtc_width = unit->crtc.mode.hdisplay;
2641		s32 crtc_height = unit->crtc.mode.vdisplay;
2642		const struct drm_clip_rect *clips_ptr = clips;
2643		const struct drm_vmw_rect *vclips_ptr = vclips;
2644
2645		dirty->unit = unit;
2646		if (dirty->fifo_reserve_size > 0) {
2647			dirty->cmd = VMW_CMD_RESERVE(dev_priv,
2648						      dirty->fifo_reserve_size);
2649			if (!dirty->cmd)
2650				return -ENOMEM;
2651
2652			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2653		}
2654		dirty->num_hits = 0;
2655		for (i = 0; i < num_clips; i++, clips_ptr += increment,
2656		       vclips_ptr += increment) {
2657			s32 clip_left;
2658			s32 clip_top;
2659
2660			/*
2661			 * Select clip array type. Note that integer type
2662			 * in @clips is unsigned short, whereas in @vclips
2663			 * it's 32-bit.
2664			 */
2665			if (clips) {
2666				dirty->fb_x = (s32) clips_ptr->x1;
2667				dirty->fb_y = (s32) clips_ptr->y1;
2668				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2669					crtc_x;
2670				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2671					crtc_y;
2672			} else {
2673				dirty->fb_x = vclips_ptr->x;
2674				dirty->fb_y = vclips_ptr->y;
2675				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2676					dest_x - crtc_x;
2677				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2678					dest_y - crtc_y;
2679			}
2680
2681			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2682			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2683
2684			/* Skip this clip if it's outside the crtc region */
2685			if (dirty->unit_x1 >= crtc_width ||
2686			    dirty->unit_y1 >= crtc_height ||
2687			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2688				continue;
2689
2690			/* Clip right and bottom to crtc limits */
2691			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2692					       crtc_width);
2693			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2694					       crtc_height);
2695
2696			/* Clip left and top to crtc limits */
2697			clip_left = min_t(s32, dirty->unit_x1, 0);
2698			clip_top = min_t(s32, dirty->unit_y1, 0);
2699			dirty->unit_x1 -= clip_left;
2700			dirty->unit_y1 -= clip_top;
2701			dirty->fb_x -= clip_left;
2702			dirty->fb_y -= clip_top;
2703
2704			dirty->clip(dirty);
2705		}
2706
2707		dirty->fifo_commit(dirty);
2708	}
2709
2710	return 0;
2711}
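
/*
 * Minimal usage sketch, kept compiled out: a vmw_kms_dirty closure whose
 * callbacks merely count intersecting clips. Real callers reserve FIFO
 * space via @fifo_reserve_size and encode device commands in ->clip();
 * the example_* names are placeholders.
 */
#if 0
static void example_count_clip(struct vmw_kms_dirty *dirty)
{
	/* Called once for every clip that intersects the display unit. */
	dirty->num_hits++;
}

static void example_noop_commit(struct vmw_kms_dirty *dirty)
{
	/* fifo_reserve_size was 0, so there is nothing to commit. */
}

static int example_count_dirty(struct vmw_private *dev_priv,
			       struct vmw_framebuffer *vfb,
			       const struct drm_clip_rect *clips,
			       int num_clips)
{
	struct vmw_kms_dirty dirty = {
		.clip = example_count_clip,
		.fifo_commit = example_noop_commit,
		.fifo_reserve_size = 0,
	};

	return vmw_kms_helper_dirty(dev_priv, vfb, clips, NULL, 0, 0,
				    num_clips, 1, &dirty);
}
#endif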
2712
2713/**
2714 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
2715 * cleanup and fencing
2716 * @dev_priv: Pointer to the device-private struct
2717 * @file_priv: Pointer identifying the client when user-space fencing is used
2718 * @ctx: Pointer to the validation context
2719 * @out_fence: If non-NULL, returned refcounted fence-pointer
2720 * @user_fence_rep: If non-NULL, pointer to user-space address area
2721 * in which to copy user-space fence info
2722 */
2723void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2724				      struct drm_file *file_priv,
2725				      struct vmw_validation_context *ctx,
2726				      struct vmw_fence_obj **out_fence,
2727				      struct drm_vmw_fence_rep __user *
2728				      user_fence_rep)
2729{
2730	struct vmw_fence_obj *fence = NULL;
2731	uint32_t handle = 0;
2732	int ret = 0;
2733
2734	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2735	    out_fence)
2736		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2737						 file_priv ? &handle : NULL);
2738	vmw_validation_done(ctx, fence);
2739	if (file_priv)
2740		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2741					    ret, user_fence_rep, fence,
2742					    handle, -1);
2743	if (out_fence)
2744		*out_fence = fence;
2745	else
2746		vmw_fence_obj_unreference(&fence);
2747}
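
/*
 * Usage sketch, kept compiled out: the prepare/submit/finish pattern this
 * helper completes, mirroring vmw_du_helper_plane_update() below. Error
 * handling is elided and buf/fence are placeholders; the function names
 * are the ones used elsewhere in this file.
 */
#if 0
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);

	vmw_validation_add_bo(&val_ctx, buf);		/* collect buffers */
	vmw_validation_prepare(&val_ctx, NULL, true);	/* reserve + validate */
	/* ... reserve FIFO space, encode commands, vmw_cmd_commit() ... */
	vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx,
					 &fence, NULL);	/* fence + cleanup */
	/* On failure before commit, call vmw_validation_revert() and
	 * vmw_validation_unref_lists() instead, as in the helper below. */
#endif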
2748
2749/**
2750 * vmw_kms_update_proxy - Helper function to update a proxy surface from
2751 * its backing MOB.
2752 *
2753 * @res: Pointer to the surface resource
2754 * @clips: Clip rects in framebuffer (surface) space.
2755 * @num_clips: Number of clips in @clips.
2756 * @increment: Integer with which to increment the clip counter when looping.
2757 * Used to skip a predetermined number of clip rects.
2758 *
2759 * This function makes sure the proxy surface is updated from its backing MOB
2760 * using the region given by @clips. The surface resource @res and its backing
2761 * MOB need to be reserved and validated at the time of the call.
 *
 * Returns: 0 on success, or a negative error code on failure.
2762 */
2763int vmw_kms_update_proxy(struct vmw_resource *res,
2764			 const struct drm_clip_rect *clips,
2765			 unsigned num_clips,
2766			 int increment)
2767{
2768	struct vmw_private *dev_priv = res->dev_priv;
2769	struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
2770	struct {
2771		SVGA3dCmdHeader header;
2772		SVGA3dCmdUpdateGBImage body;
2773	} *cmd;
2774	SVGA3dBox *box;
2775	size_t copy_size = 0;
2776	int i;
2777
2778	if (!clips)
2779		return 0;
2780
2781	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
2782	if (!cmd)
2783		return -ENOMEM;
2784
2785	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
2786		box = &cmd->body.box;
2787
2788		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2789		cmd->header.size = sizeof(cmd->body);
2790		cmd->body.image.sid = res->id;
2791		cmd->body.image.face = 0;
2792		cmd->body.image.mipmap = 0;
2793
2794		if (clips->x1 > size->width || clips->x2 > size->width ||
2795		    clips->y1 > size->height || clips->y2 > size->height) {
2796			DRM_ERROR("Invalid clips outside of framebuffer.\n");
			/* Release the reserved FIFO space before bailing out. */
			vmw_cmd_commit(dev_priv, 0);
2797			return -EINVAL;
2798		}
2799
2800		box->x = clips->x1;
2801		box->y = clips->y1;
2802		box->z = 0;
2803		box->w = clips->x2 - clips->x1;
2804		box->h = clips->y2 - clips->y1;
2805		box->d = 1;
2806
2807		copy_size += sizeof(*cmd);
2808	}
2809
2810	vmw_cmd_commit(dev_priv, copy_size);
2811
2812	return 0;
2813}
2814
2815/**
2816 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2817 * property.
2818 *
2819 * @dev_priv: Pointer to a device private struct.
2820 *
2821 * Sets up the implicit placement property unless it's already set up.
2822 */
2823void
2824vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
2825{
2826	if (dev_priv->implicit_placement_property)
2827		return;
2828
2829	dev_priv->implicit_placement_property =
2830		drm_property_create_range(&dev_priv->drm,
2831					  DRM_MODE_PROP_IMMUTABLE,
2832					  "implicit_placement", 0, 1);
2833}
2834
2835/**
2836 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2837 *
2838 * @dev: Pointer to the drm device
2839 * Return: 0 on success. Negative error code on failure.
2840 */
2841int vmw_kms_suspend(struct drm_device *dev)
2842{
2843	struct vmw_private *dev_priv = vmw_priv(dev);
2844
2845	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2846	if (IS_ERR(dev_priv->suspend_state)) {
2847		int ret = PTR_ERR(dev_priv->suspend_state);
2848
2849		DRM_ERROR("Failed kms suspend: %d\n", ret);
2850		dev_priv->suspend_state = NULL;
2851
2852		return ret;
2853	}
2854
2855	return 0;
2856}
2857
2858
2859/**
2860 * vmw_kms_resume - Re-enable modesetting and restore state
2861 *
2862 * @dev: Pointer to the drm device
2863 * Return: 0 on success. Negative error code on failure.
2864 *
2865 * State is resumed from a previous vmw_kms_suspend(). It's illegal
2866 * to call this function without a previous vmw_kms_suspend().
2867 */
2868int vmw_kms_resume(struct drm_device *dev)
2869{
2870	struct vmw_private *dev_priv = vmw_priv(dev);
2871	int ret;
2872
2873	if (WARN_ON(!dev_priv->suspend_state))
2874		return 0;
2875
2876	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
2877	dev_priv->suspend_state = NULL;
2878
2879	return ret;
2880}
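
/*
 * Pairing sketch, kept compiled out: how hypothetical PM hooks would use
 * the two helpers above. The hook names are illustrative only.
 */
#if 0
static int example_pm_freeze(struct drm_device *dev)
{
	return vmw_kms_suspend(dev);	/* saves the atomic state */
}

static int example_pm_restore(struct drm_device *dev)
{
	return vmw_kms_resume(dev);	/* replays the saved state */
}
#endif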
2881
2882/**
2883 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
2884 *
2885 * @dev: Pointer to the drm device
2886 */
2887void vmw_kms_lost_device(struct drm_device *dev)
2888{
2889	drm_atomic_helper_shutdown(dev);
2890}
2891
2892/**
2893 * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
2894 * @update: The closure structure.
2895 *
2896 * Call this helper after setting the callbacks in &vmw_du_update_plane to do
2897 * a plane update on a display unit.
2898 *
2899 * Return: 0 on success or a negative error code on failure.
2900 */
2901int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
2902{
2903	struct drm_plane_state *state = update->plane->state;
2904	struct drm_plane_state *old_state = update->old_state;
2905	struct drm_atomic_helper_damage_iter iter;
2906	struct drm_rect clip;
2907	struct drm_rect bb;
2908	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
2909	uint32_t reserved_size = 0;
2910	uint32_t submit_size = 0;
2911	uint32_t curr_size = 0;
2912	uint32_t num_hits = 0;
2913	void *cmd_start;
2914	char *cmd_next;
2915	int ret;
2916
2917	/*
2918	 * Iterate in advance to check whether a plane update is really needed,
2919	 * and to count the clips within the plane src for the FIFO allocation.
2920	 */
2921	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2922	drm_atomic_for_each_plane_damage(&iter, &clip)
2923		num_hits++;
2924
2925	if (num_hits == 0)
2926		return 0;
2927
2928	if (update->vfb->bo) {
2929		struct vmw_framebuffer_bo *vfbbo =
2930			container_of(update->vfb, typeof(*vfbbo), base);
2931
2932		/*
2933		 * For screen targets we want a mappable bo; for everything else we
2934		 * want an accelerated, i.e. host-backed (VRAM or GMR), bo. If the
2935		 * display unit is not a screen target, MOBs shouldn't be available.
2936		 */
2937		if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
2938			vmw_bo_placement_set(vfbbo->buffer,
2939					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
2940					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
2941		} else {
2942			WARN_ON(update->dev_priv->has_mob);
2943			vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
2944		}
2945		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
2946	} else {
2947		struct vmw_framebuffer_surface *vfbs =
2948			container_of(update->vfb, typeof(*vfbs), base);
2949
2950		ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
2951						  0, VMW_RES_DIRTY_NONE, NULL,
2952						  NULL);
2953	}
2954
2955	if (ret)
2956		return ret;
2957
2958	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
2959	if (ret)
2960		goto out_unref;
2961
2962	reserved_size = update->calc_fifo_size(update, num_hits);
2963	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
2964	if (!cmd_start) {
2965		ret = -ENOMEM;
2966		goto out_revert;
2967	}
2968
2969	cmd_next = cmd_start;
2970
2971	if (update->post_prepare) {
2972		curr_size = update->post_prepare(update, cmd_next);
2973		cmd_next += curr_size;
2974		submit_size += curr_size;
2975	}
2976
2977	if (update->pre_clip) {
2978		curr_size = update->pre_clip(update, cmd_next, num_hits);
2979		cmd_next += curr_size;
2980		submit_size += curr_size;
2981	}
2982
2983	bb.x1 = INT_MAX;
2984	bb.y1 = INT_MAX;
2985	bb.x2 = INT_MIN;
2986	bb.y2 = INT_MIN;
2987
2988	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2989	drm_atomic_for_each_plane_damage(&iter, &clip) {
2990		uint32_t fb_x = clip.x1;
2991		uint32_t fb_y = clip.y1;
2992
2993		vmw_du_translate_to_crtc(state, &clip);
2994		if (update->clip) {
2995			curr_size = update->clip(update, cmd_next, &clip, fb_x,
2996						 fb_y);
2997			cmd_next += curr_size;
2998			submit_size += curr_size;
2999		}
3000		bb.x1 = min_t(int, bb.x1, clip.x1);
3001		bb.y1 = min_t(int, bb.y1, clip.y1);
3002		bb.x2 = max_t(int, bb.x2, clip.x2);
3003		bb.y2 = max_t(int, bb.y2, clip.y2);
3004	}
3005
3006	curr_size = update->post_clip(update, cmd_next, &bb);
3007	submit_size += curr_size;
3008
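	/*
	 * Defensive check: if the callbacks produced more command data
	 * than was reserved, commit nothing rather than an overflowed
	 * stream.
	 */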
3009	if (reserved_size < submit_size)
3010		submit_size = 0;
3011
3012	vmw_cmd_commit(update->dev_priv, submit_size);
3013
3014	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
3015					 update->out_fence, NULL);
3016	return ret;
3017
3018out_revert:
3019	vmw_validation_revert(&val_ctx);
3020
3021out_unref:
3022	vmw_validation_unref_lists(&val_ctx);
3023	return ret;
3024}
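
/*
 * Wiring sketch, kept compiled out: the closure fields a display-unit
 * implementation fills in before calling vmw_du_helper_plane_update().
 * Only fields dereferenced by the helper above are shown; the example_*
 * callbacks stand in for the unit-specific command encoders.
 */
#if 0
	struct vmw_du_update_plane update = {
		.plane = plane,
		.old_state = old_state,
		.dev_priv = dev_priv,
		.vfb = vfb,
		.mutex = NULL,				/* optional lock */
		.intr = true,				/* interruptible waits */
		.out_fence = NULL,			/* or &fence for a ref */
		.calc_fifo_size = example_calc_fifo_size,
		.post_prepare = NULL,			/* optional */
		.pre_clip = NULL,			/* optional */
		.clip = example_clip,			/* per-damage encoder */
		.post_clip = example_post_clip,		/* always called */
	};
	int ret = vmw_du_helper_plane_update(&update);
#endif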