v3.1
   1/**************************************************************************
   2 *
   3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include "vmwgfx_kms.h"
  29
  30/* Might need a hrtimer here? */
  31#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
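     /*
      * Roughly one presentation per HZ/60 jiffies (~60 Hz); the clamp
      * keeps the delay at no less than one jiffy so it cannot reach
      * zero on a (hypothetical) configuration with HZ < 60.  With
      * HZ=250, for example, this works out to 4 jiffies, i.e. ~16 ms.
      */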
  32
  33static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
  34static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);
  35
  36void vmw_display_unit_cleanup(struct vmw_display_unit *du)
  37{
  38	if (du->cursor_surface)
  39		vmw_surface_unreference(&du->cursor_surface);
  40	if (du->cursor_dmabuf)
  41		vmw_dmabuf_unreference(&du->cursor_dmabuf);
  42	drm_crtc_cleanup(&du->crtc);
  43	drm_encoder_cleanup(&du->encoder);
  44	drm_connector_cleanup(&du->connector);
  45}
  46
  47/*
  48 * Display Unit Cursor functions
  49 */
  50
  51int vmw_cursor_update_image(struct vmw_private *dev_priv,
  52			    u32 *image, u32 width, u32 height,
  53			    u32 hotspotX, u32 hotspotY)
  54{
  55	struct {
  56		u32 cmd;
  57		SVGAFifoCmdDefineAlphaCursor cursor;
  58	} *cmd;
  59	u32 image_size = width * height * 4;
  60	u32 cmd_size = sizeof(*cmd) + image_size;
  61
  62	if (!image)
  63		return -EINVAL;
  64
  65	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
  66	if (unlikely(cmd == NULL)) {
  67		DRM_ERROR("Fifo reserve failed.\n");
  68		return -ENOMEM;
  69	}
  70
  71	memset(cmd, 0, sizeof(*cmd));
  72
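     	/*
     	 * SVGA_CMD_DEFINE_ALPHA_CURSOR is a variable-length command:
     	 * the 32-bit ARGB image (width * height * 4 bytes) follows
     	 * the fixed-size header, hence the copy into &cmd[1].
     	 */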
  73	memcpy(&cmd[1], image, image_size);
  74
  75	cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
  76	cmd->cursor.id = cpu_to_le32(0);
  77	cmd->cursor.width = cpu_to_le32(width);
  78	cmd->cursor.height = cpu_to_le32(height);
  79	cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
  80	cmd->cursor.hotspotY = cpu_to_le32(hotspotY);
  81
  82	vmw_fifo_commit(dev_priv, cmd_size);
  83
  84	return 0;
  85}
  86
  87void vmw_cursor_update_position(struct vmw_private *dev_priv,
  88				bool show, int x, int y)
  89{
  90	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
  91	uint32_t count;
  92
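     	/*
     	 * Cursor bypass: state goes through dedicated FIFO register
     	 * slots rather than FIFO commands.  Incrementing
     	 * SVGA_FIFO_CURSOR_COUNT below presumably acts as a change
     	 * counter that tells the host a new update is available.
     	 */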
  93	iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
  94	iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
  95	iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
  96	count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
  97	iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
  98}
  99
 100int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 101			   uint32_t handle, uint32_t width, uint32_t height)
 102{
 103	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
 104	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 105	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 106	struct vmw_surface *surface = NULL;
 107	struct vmw_dma_buffer *dmabuf = NULL;
 108	int ret;
 109
 110	if (handle) {
 111		ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
 112						     handle, &surface);
 113		if (!ret) {
 114			if (!surface->snooper.image) {
 115				DRM_ERROR("surface not suitable for cursor\n");
 116				return -EINVAL;
 117			}
 118		} else {
 119			ret = vmw_user_dmabuf_lookup(tfile,
 120						     handle, &dmabuf);
 121			if (ret) {
 122				DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
 123				return -EINVAL;
 124			}
 125		}
 126	}
 127
 128	/* takedown old cursor */
 129	if (du->cursor_surface) {
 130		du->cursor_surface->snooper.crtc = NULL;
 131		vmw_surface_unreference(&du->cursor_surface);
 132	}
 133	if (du->cursor_dmabuf)
 134		vmw_dmabuf_unreference(&du->cursor_dmabuf);
 135
 136	/* setup new image */
 137	if (surface) {
 138		/* vmw_user_surface_lookup takes one reference */
 139		du->cursor_surface = surface;
 140
 141		du->cursor_surface->snooper.crtc = crtc;
 142		du->cursor_age = du->cursor_surface->snooper.age;
 143		vmw_cursor_update_image(dev_priv, surface->snooper.image,
 144					64, 64, du->hotspot_x, du->hotspot_y);
 145	} else if (dmabuf) {
 146		struct ttm_bo_kmap_obj map;
 147		unsigned long kmap_offset;
 148		unsigned long kmap_num;
 149		void *virtual;
 150		bool dummy;
 151
  152		/* vmw_user_dmabuf_lookup takes one reference */
 153		du->cursor_dmabuf = dmabuf;
 154
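     		/*
     		 * The legacy cursor image is a fixed 64x64 32-bit ARGB
     		 * block, so 64*64*4 = 16 KiB (four 4 KiB pages) is
     		 * mapped from the start of the buffer.
     		 */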
 155		kmap_offset = 0;
 156		kmap_num = (64*64*4) >> PAGE_SHIFT;
 157
 158		ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
 159		if (unlikely(ret != 0)) {
 160			DRM_ERROR("reserve failed\n");
 161			return -EINVAL;
 162		}
 163
 164		ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
 165		if (unlikely(ret != 0))
 166			goto err_unreserve;
 167
 168		virtual = ttm_kmap_obj_virtual(&map, &dummy);
 169		vmw_cursor_update_image(dev_priv, virtual, 64, 64,
 170					du->hotspot_x, du->hotspot_y);
 171
 172		ttm_bo_kunmap(&map);
 173err_unreserve:
 174		ttm_bo_unreserve(&dmabuf->base);
 175
 176	} else {
 177		vmw_cursor_update_position(dev_priv, false, 0, 0);
 178		return 0;
 179	}
 180
 181	vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);
  182
 183	return 0;
 184}
 185
 186int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 187{
 188	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
 189	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
  190	bool shown = du->cursor_surface || du->cursor_dmabuf;
 191
 192	du->cursor_x = x + crtc->x;
 193	du->cursor_y = y + crtc->y;
 194
 195	vmw_cursor_update_position(dev_priv, shown,
 196				   du->cursor_x, du->cursor_y);
 197
 198	return 0;
 199}
 200
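     /*
      * Cursor "snooping": a surface DMA upload that targets a cursor
      * surface is intercepted here and its image data copied into
      * srf->snooper.image, so the device cursor can later be redefined
      * from that copy (see vmw_kms_cursor_post_execbuf()).
      */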
 201void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 202			  struct ttm_object_file *tfile,
 203			  struct ttm_buffer_object *bo,
 204			  SVGA3dCmdHeader *header)
 205{
 206	struct ttm_bo_kmap_obj map;
 207	unsigned long kmap_offset;
 208	unsigned long kmap_num;
 209	SVGA3dCopyBox *box;
 210	unsigned box_count;
 211	void *virtual;
 212	bool dummy;
 213	struct vmw_dma_cmd {
 214		SVGA3dCmdHeader header;
 215		SVGA3dCmdSurfaceDMA dma;
 216	} *cmd;
 217	int ret;
 218
 219	cmd = container_of(header, struct vmw_dma_cmd, header);
 220
 221	/* No snooper installed */
 222	if (!srf->snooper.image)
 223		return;
 224
 225	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
 226		DRM_ERROR("face and mipmap for cursors should never != 0\n");
 227		return;
 228	}
 229
 230	if (cmd->header.size < 64) {
 231		DRM_ERROR("at least one full copy box must be given\n");
 232		return;
 233	}
 234
 235	box = (SVGA3dCopyBox *)&cmd[1];
 236	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
 237			sizeof(SVGA3dCopyBox);
 238
 239	if (cmd->dma.guest.pitch != (64 * 4) ||
 240	    cmd->dma.guest.ptr.offset % PAGE_SIZE ||
 241	    box->x != 0    || box->y != 0    || box->z != 0    ||
 242	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
 243	    box->w != 64   || box->h != 64   || box->d != 1    ||
 244	    box_count != 1) {
  245		/* TODO handle non-page-aligned offsets */
  246		/* TODO handle partial uploads and pitch != 256 */
  247		/* TODO handle more than one copy (size != 64) */
 248		DRM_ERROR("lazy programmer, can't handle weird stuff\n");
 249		return;
 250	}
 251
 252	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
 253	kmap_num = (64*64*4) >> PAGE_SHIFT;
 254
 255	ret = ttm_bo_reserve(bo, true, false, false, 0);
 256	if (unlikely(ret != 0)) {
 257		DRM_ERROR("reserve failed\n");
 258		return;
 259	}
 260
 261	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
 262	if (unlikely(ret != 0))
 263		goto err_unreserve;
 264
 265	virtual = ttm_kmap_obj_virtual(&map, &dummy);
 266
 267	memcpy(srf->snooper.image, virtual, 64*64*4);
 268	srf->snooper.age++;
 269
 270	/* we can't call this function from this function since execbuf has
 271	 * reserved fifo space.
 272	 *
 273	 * if (srf->snooper.crtc)
 274	 *	vmw_ldu_crtc_cursor_update_image(dev_priv,
 275	 *					 srf->snooper.image, 64, 64,
 276	 *					 du->hotspot_x, du->hotspot_y);
 277	 */
 278
 279	ttm_bo_kunmap(&map);
 280err_unreserve:
 281	ttm_bo_unreserve(bo);
 282}
 283
 284void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
 285{
 286	struct drm_device *dev = dev_priv->dev;
 287	struct vmw_display_unit *du;
 288	struct drm_crtc *crtc;
 289
 290	mutex_lock(&dev->mode_config.mutex);
 291
 292	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 293		du = vmw_crtc_to_du(crtc);
 294		if (!du->cursor_surface ||
 295		    du->cursor_age == du->cursor_surface->snooper.age)
 296			continue;
 297
 298		du->cursor_age = du->cursor_surface->snooper.age;
 299		vmw_cursor_update_image(dev_priv,
 300					du->cursor_surface->snooper.image,
 301					64, 64, du->hotspot_x, du->hotspot_y);
 302	}
 303
 304	mutex_unlock(&dev->mode_config.mutex);
 305}
 306
 307/*
 308 * Generic framebuffer code
  309 */
 310
 311int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
 312				  struct drm_file *file_priv,
 313				  unsigned int *handle)
 314{
  315	if (handle)
  316		*handle = 0;
 317
 318	return 0;
 319}
 320
 321/*
 322 * Surface framebuffer code
 323 */
 324
 325#define vmw_framebuffer_to_vfbs(x) \
 326	container_of(x, struct vmw_framebuffer_surface, base.base)
 327
 328struct vmw_framebuffer_surface {
 329	struct vmw_framebuffer base;
 330	struct vmw_surface *surface;
 331	struct vmw_dma_buffer *buffer;
 332	struct delayed_work d_work;
 333	struct mutex work_lock;
 334	bool present_fs;
 335	struct list_head head;
 336	struct drm_master *master;
 337};
 338
 339/**
 340 * vmw_kms_idle_workqueues - Flush workqueues on this master
 341 *
  342 * @vmaster: Pointer identifying the master, for the surfaces of which
 343 * we idle the dirty work queues.
 344 *
 345 * This function should be called with the ttm lock held in exclusive mode
 346 * to idle all dirty work queues before the fifo is taken down.
 347 *
 348 * The work task may actually requeue itself, but after the flush returns we're
 349 * sure that there's nothing to present, since the ttm lock is held in
 350 * exclusive mode, so the fifo will never get used.
 351 */
 352
 353void vmw_kms_idle_workqueues(struct vmw_master *vmaster)
 354{
 355	struct vmw_framebuffer_surface *entry;
 356
 357	mutex_lock(&vmaster->fb_surf_mutex);
 358	list_for_each_entry(entry, &vmaster->fb_surf, head) {
 359		if (cancel_delayed_work_sync(&entry->d_work))
 360			(void) entry->d_work.work.func(&entry->d_work.work);
 361
 362		(void) cancel_delayed_work_sync(&entry->d_work);
 363	}
 364	mutex_unlock(&vmaster->fb_surf_mutex);
 365}
 366
 367void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
 368{
 369	struct vmw_framebuffer_surface *vfbs =
 370		vmw_framebuffer_to_vfbs(framebuffer);
 371	struct vmw_master *vmaster = vmw_master(vfbs->master);
  372
 373
 374	mutex_lock(&vmaster->fb_surf_mutex);
 375	list_del(&vfbs->head);
 376	mutex_unlock(&vmaster->fb_surf_mutex);
 377
 378	cancel_delayed_work_sync(&vfbs->d_work);
 379	drm_master_put(&vfbs->master);
 380	drm_framebuffer_cleanup(framebuffer);
 381	vmw_surface_unreference(&vfbs->surface);
 382
 383	kfree(vfbs);
 384}
 385
 386static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
 387{
 388	struct delayed_work *d_work =
 389		container_of(work, struct delayed_work, work);
 390	struct vmw_framebuffer_surface *vfbs =
 391		container_of(d_work, struct vmw_framebuffer_surface, d_work);
 392	struct vmw_surface *surf = vfbs->surface;
 393	struct drm_framebuffer *framebuffer = &vfbs->base.base;
 394	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
 395
 396	struct {
 397		SVGA3dCmdHeader header;
 398		SVGA3dCmdPresent body;
 399		SVGA3dCopyRect cr;
 400	} *cmd;
 401
 402	/**
 403	 * Strictly we should take the ttm_lock in read mode before accessing
 404	 * the fifo, to make sure the fifo is present and up. However,
 405	 * instead we flush all workqueues under the ttm lock in exclusive mode
 406	 * before taking down the fifo.
 407	 */
 408	mutex_lock(&vfbs->work_lock);
 409	if (!vfbs->present_fs)
 410		goto out_unlock;
 411
 412	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 413	if (unlikely(cmd == NULL))
 414		goto out_resched;
 415
 416	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
 417	cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
 418	cmd->body.sid = cpu_to_le32(surf->res.id);
 419	cmd->cr.x = cpu_to_le32(0);
 420	cmd->cr.y = cpu_to_le32(0);
 421	cmd->cr.srcx = cmd->cr.x;
 422	cmd->cr.srcy = cmd->cr.y;
 423	cmd->cr.w = cpu_to_le32(framebuffer->width);
 424	cmd->cr.h = cpu_to_le32(framebuffer->height);
 425	vfbs->present_fs = false;
 426	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 427out_resched:
 428	/**
 429	 * Will not re-add if already pending.
 430	 */
 431	schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
 432out_unlock:
 433	mutex_unlock(&vfbs->work_lock);
 434}
 435
 436
 437int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 438				  struct drm_file *file_priv,
 439				  unsigned flags, unsigned color,
 440				  struct drm_clip_rect *clips,
 441				  unsigned num_clips)
 442{
 443	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
 444	struct vmw_master *vmaster = vmw_master(file_priv->master);
 445	struct vmw_framebuffer_surface *vfbs =
 446		vmw_framebuffer_to_vfbs(framebuffer);
 447	struct vmw_surface *surf = vfbs->surface;
 448	struct drm_clip_rect norect;
 449	SVGA3dCopyRect *cr;
 450	int i, inc = 1;
 451	int ret;
 452
 453	struct {
 454		SVGA3dCmdHeader header;
 455		SVGA3dCmdPresent body;
 456		SVGA3dCopyRect cr;
 457	} *cmd;
 458
 459	if (unlikely(vfbs->master != file_priv->master))
 460		return -EINVAL;
 461
 462	ret = ttm_read_lock(&vmaster->lock, true);
 463	if (unlikely(ret != 0))
 464		return ret;
 465
 466	if (!num_clips ||
 467	    !(dev_priv->fifo.capabilities &
 468	      SVGA_FIFO_CAP_SCREEN_OBJECT)) {
 469		int ret;
 470
 471		mutex_lock(&vfbs->work_lock);
 472		vfbs->present_fs = true;
 473		ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
 474		mutex_unlock(&vfbs->work_lock);
 475		if (ret) {
 476			/**
  477			 * No work pending; force an immediate present.
 478			 */
 479			vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
 480		}
 481		ttm_read_unlock(&vmaster->lock);
 482		return 0;
 483	}
 484
 485	if (!num_clips) {
 486		num_clips = 1;
 487		clips = &norect;
 488		norect.x1 = norect.y1 = 0;
 489		norect.x2 = framebuffer->width;
 490		norect.y2 = framebuffer->height;
 491	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
 492		num_clips /= 2;
 493		inc = 2; /* skip source rects */
 494	}
 495
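     	/*
     	 * sizeof(*cmd) already includes one SVGA3dCopyRect, so only
     	 * (num_clips - 1) additional rects are reserved after it.
     	 */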
 496	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
 497	if (unlikely(cmd == NULL)) {
 498		DRM_ERROR("Fifo reserve failed.\n");
 499		ttm_read_unlock(&vmaster->lock);
 500		return -ENOMEM;
 501	}
 502
 503	memset(cmd, 0, sizeof(*cmd));
 504
 505	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
 506	cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips * sizeof(cmd->cr));
 507	cmd->body.sid = cpu_to_le32(surf->res.id);
 508
 509	for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
 510		cr->x = cpu_to_le16(clips->x1);
 511		cr->y = cpu_to_le16(clips->y1);
 512		cr->srcx = cr->x;
 513		cr->srcy = cr->y;
 514		cr->w = cpu_to_le16(clips->x2 - clips->x1);
 515		cr->h = cpu_to_le16(clips->y2 - clips->y1);
 516	}
 517
 518	vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
 519	ttm_read_unlock(&vmaster->lock);
 520	return 0;
  521}
 522
 523static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
 524	.destroy = vmw_framebuffer_surface_destroy,
 525	.dirty = vmw_framebuffer_surface_dirty,
 526	.create_handle = vmw_framebuffer_create_handle,
 527};
 528
 529static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 530					   struct drm_file *file_priv,
 531					   struct vmw_surface *surface,
 532					   struct vmw_framebuffer **out,
 533					   const struct drm_mode_fb_cmd
 534					   *mode_cmd)
 535
 536{
 537	struct drm_device *dev = dev_priv->dev;
 538	struct vmw_framebuffer_surface *vfbs;
 539	enum SVGA3dSurfaceFormat format;
 540	struct vmw_master *vmaster = vmw_master(file_priv->master);
 541	int ret;
 542
 543	/*
 544	 * Sanity checks.
 545	 */
 546
 547	if (unlikely(surface->mip_levels[0] != 1 ||
 548		     surface->num_sizes != 1 ||
 549		     surface->sizes[0].width < mode_cmd->width ||
 550		     surface->sizes[0].height < mode_cmd->height ||
 551		     surface->sizes[0].depth != 1)) {
 552		DRM_ERROR("Incompatible surface dimensions "
 553			  "for requested mode.\n");
 554		return -EINVAL;
 555	}
 556
 557	switch (mode_cmd->depth) {
 558	case 32:
 559		format = SVGA3D_A8R8G8B8;
 560		break;
 561	case 24:
 562		format = SVGA3D_X8R8G8B8;
 563		break;
 564	case 16:
 565		format = SVGA3D_R5G6B5;
 566		break;
 567	case 15:
 568		format = SVGA3D_A1R5G5B5;
 569		break;
 570	default:
 571		DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
 572		return -EINVAL;
 573	}
 574
 575	if (unlikely(format != surface->format)) {
 576		DRM_ERROR("Invalid surface format for requested mode.\n");
 577		return -EINVAL;
 578	}
 579
 580	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
 581	if (!vfbs) {
 582		ret = -ENOMEM;
 583		goto out_err1;
 584	}
 585
 586	ret = drm_framebuffer_init(dev, &vfbs->base.base,
 587				   &vmw_framebuffer_surface_funcs);
 588	if (ret)
 589		goto out_err2;
 590
 591	if (!vmw_surface_reference(surface)) {
 592		DRM_ERROR("failed to reference surface %p\n", surface);
 593		goto out_err3;
 594	}
 595
 596	/* XXX get the first 3 from the surface info */
 597	vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
 598	vfbs->base.base.pitch = mode_cmd->pitch;
 599	vfbs->base.base.depth = mode_cmd->depth;
 600	vfbs->base.base.width = mode_cmd->width;
 601	vfbs->base.base.height = mode_cmd->height;
 602	vfbs->base.pin = &vmw_surface_dmabuf_pin;
 603	vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
 604	vfbs->surface = surface;
 605	vfbs->master = drm_master_get(file_priv->master);
 606	mutex_init(&vfbs->work_lock);
 607
 608	mutex_lock(&vmaster->fb_surf_mutex);
 609	INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
 610	list_add_tail(&vfbs->head, &vmaster->fb_surf);
 611	mutex_unlock(&vmaster->fb_surf_mutex);
 612
 613	*out = &vfbs->base;
 614
 615	return 0;
 616
 617out_err3:
 618	drm_framebuffer_cleanup(&vfbs->base.base);
 619out_err2:
 620	kfree(vfbs);
 621out_err1:
 622	return ret;
 623}
 624
 625/*
 626 * Dmabuf framebuffer code
 627 */
 628
 629#define vmw_framebuffer_to_vfbd(x) \
 630	container_of(x, struct vmw_framebuffer_dmabuf, base.base)
 631
 632struct vmw_framebuffer_dmabuf {
 633	struct vmw_framebuffer base;
 634	struct vmw_dma_buffer *buffer;
 635};
 636
 637void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
 638{
 639	struct vmw_framebuffer_dmabuf *vfbd =
 640		vmw_framebuffer_to_vfbd(framebuffer);
 641
 642	drm_framebuffer_cleanup(framebuffer);
 643	vmw_dmabuf_unreference(&vfbd->buffer);
 644
 645	kfree(vfbd);
 646}
 647
 648int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 649				 struct drm_file *file_priv,
 650				 unsigned flags, unsigned color,
 651				 struct drm_clip_rect *clips,
 652				 unsigned num_clips)
 653{
 654	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
 655	struct vmw_master *vmaster = vmw_master(file_priv->master);
 656	struct drm_clip_rect norect;
 657	int ret;
 658	struct {
 659		uint32_t header;
 660		SVGAFifoCmdUpdate body;
 661	} *cmd;
 662	int i, increment = 1;
 663
 664	ret = ttm_read_lock(&vmaster->lock, true);
 665	if (unlikely(ret != 0))
 666		return ret;
 667
 668	if (!num_clips) {
 669		num_clips = 1;
 670		clips = &norect;
 671		norect.x1 = norect.y1 = 0;
 672		norect.x2 = framebuffer->width;
 673		norect.y2 = framebuffer->height;
 674	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
 675		num_clips /= 2;
 676		increment = 2;
 677	}
 678
 679	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
 680	if (unlikely(cmd == NULL)) {
 681		DRM_ERROR("Fifo reserve failed.\n");
 682		ttm_read_unlock(&vmaster->lock);
 683		return -ENOMEM;
 684	}
 685
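     	/* Each clip rect becomes one 2D SVGA_CMD_UPDATE of the screen. */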
 686	for (i = 0; i < num_clips; i++, clips += increment) {
 687		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
 688		cmd[i].body.x = cpu_to_le32(clips->x1);
 689		cmd[i].body.y = cpu_to_le32(clips->y1);
 690		cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
 691		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
 692	}
 693
 694	vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
 695	ttm_read_unlock(&vmaster->lock);
 696
 697	return 0;
 698}
 699
 700static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
 701	.destroy = vmw_framebuffer_dmabuf_destroy,
 702	.dirty = vmw_framebuffer_dmabuf_dirty,
 703	.create_handle = vmw_framebuffer_create_handle,
 704};
 705
 706static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
 707{
 708	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
 709	struct vmw_framebuffer_surface *vfbs =
 710		vmw_framebuffer_to_vfbs(&vfb->base);
 711	unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height;
 712	int ret;
 713
 714	vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL);
 715	if (unlikely(vfbs->buffer == NULL))
 716		return -ENOMEM;
 717
 718	vmw_overlay_pause_all(dev_priv);
 719	ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size,
 720			       &vmw_vram_ne_placement,
 721			       false, &vmw_dmabuf_bo_free);
 722	vmw_overlay_resume_all(dev_priv);
 723	if (unlikely(ret != 0))
 724		vfbs->buffer = NULL;
 725
 726	return ret;
 727}
 728
 729static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
 730{
 731	struct ttm_buffer_object *bo;
 732	struct vmw_framebuffer_surface *vfbs =
 733		vmw_framebuffer_to_vfbs(&vfb->base);
 734
 735	if (unlikely(vfbs->buffer == NULL))
 736		return 0;
 737
 738	bo = &vfbs->buffer->base;
 739	ttm_bo_unref(&bo);
 740	vfbs->buffer = NULL;
 741
 742	return 0;
 743}
 744
 745static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
 746{
 747	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
 748	struct vmw_framebuffer_dmabuf *vfbd =
 749		vmw_framebuffer_to_vfbd(&vfb->base);
 750	int ret;
 751
 752
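     	/*
     	 * The legacy display unit scans out from the start of VRAM, so
     	 * the buffer is moved there; overlays are paused around the
     	 * move since they may hold VRAM placements that would
     	 * otherwise conflict.
     	 */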
 753	vmw_overlay_pause_all(dev_priv);
 754
 755	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);
 756
 757	vmw_overlay_resume_all(dev_priv);
 758
 759	WARN_ON(ret != 0);
 760
 761	return 0;
 762}
 763
 764static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
 765{
 766	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
 767	struct vmw_framebuffer_dmabuf *vfbd =
 768		vmw_framebuffer_to_vfbd(&vfb->base);
 769
 770	if (!vfbd->buffer) {
 771		WARN_ON(!vfbd->buffer);
 772		return 0;
 773	}
 774
 775	return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
 776}
 777
 778static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
 779					  struct vmw_dma_buffer *dmabuf,
 780					  struct vmw_framebuffer **out,
 781					  const struct drm_mode_fb_cmd
 782					  *mode_cmd)
 783
 784{
 785	struct drm_device *dev = dev_priv->dev;
 786	struct vmw_framebuffer_dmabuf *vfbd;
 787	unsigned int requested_size;
 788	int ret;
 789
 790	requested_size = mode_cmd->height * mode_cmd->pitch;
 791	if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
 792		DRM_ERROR("Screen buffer object size is too small "
 793			  "for requested mode.\n");
 794		return -EINVAL;
 795	}
 796
 797	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
 798	if (!vfbd) {
 799		ret = -ENOMEM;
 800		goto out_err1;
 801	}
 802
 803	ret = drm_framebuffer_init(dev, &vfbd->base.base,
 804				   &vmw_framebuffer_dmabuf_funcs);
 805	if (ret)
 806		goto out_err2;
 807
 808	if (!vmw_dmabuf_reference(dmabuf)) {
 809		DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
 810		goto out_err3;
 811	}
 812
 813	vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
 814	vfbd->base.base.pitch = mode_cmd->pitch;
 815	vfbd->base.base.depth = mode_cmd->depth;
 816	vfbd->base.base.width = mode_cmd->width;
 817	vfbd->base.base.height = mode_cmd->height;
 818	vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
 819	vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
 820	vfbd->buffer = dmabuf;
 821	*out = &vfbd->base;
 822
 823	return 0;
 824
 825out_err3:
 826	drm_framebuffer_cleanup(&vfbd->base.base);
 827out_err2:
 828	kfree(vfbd);
 829out_err1:
 830	return ret;
 831}
 832
 833/*
 834 * Generic Kernel modesetting functions
 835 */
 836
 837static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 838						 struct drm_file *file_priv,
 839						 struct drm_mode_fb_cmd *mode_cmd)
 840{
 841	struct vmw_private *dev_priv = vmw_priv(dev);
 842	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 843	struct vmw_framebuffer *vfb = NULL;
 844	struct vmw_surface *surface = NULL;
 845	struct vmw_dma_buffer *bo = NULL;
 846	u64 required_size;
 847	int ret;
 848
 849	/**
 850	 * This code should be conditioned on Screen Objects not being used.
 851	 * If screen objects are used, we can allocate a GMR to hold the
 852	 * requested framebuffer.
 853	 */
 854
 855	required_size = mode_cmd->pitch * mode_cmd->height;
 856	if (unlikely(required_size > (u64) dev_priv->vram_size)) {
 857		DRM_ERROR("VRAM size is too small for requested mode.\n");
 858		return NULL;
 859	}
 860
 861	/**
 862	 * End conditioned code.
 863	 */
 864
 865	ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
 866					     mode_cmd->handle, &surface);
 867	if (ret)
 868		goto try_dmabuf;
 869
 870	if (!surface->scanout)
 871		goto err_not_scanout;
 872
 873	ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
 874					      &vfb, mode_cmd);
 875
 876	/* vmw_user_surface_lookup takes one ref so does new_fb */
 877	vmw_surface_unreference(&surface);
 878
 879	if (ret) {
 880		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
 881		return ERR_PTR(ret);
 882	}
  883	return &vfb->base;
 884
 885try_dmabuf:
 886	DRM_INFO("%s: trying buffer\n", __func__);
 887
 888	ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
 889	if (ret) {
 890		DRM_ERROR("failed to find buffer: %i\n", ret);
 891		return ERR_PTR(-ENOENT);
 892	}
 893
 894	ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
 895					     mode_cmd);
 896
 897	/* vmw_user_dmabuf_lookup takes one ref so does new_fb */
 898	vmw_dmabuf_unreference(&bo);
  899
 900	if (ret) {
 901		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
 902		return ERR_PTR(ret);
 903	}
 904
 905	return &vfb->base;
 906
 907err_not_scanout:
 908	DRM_ERROR("surface not marked as scanout\n");
 909	/* vmw_user_surface_lookup takes one ref */
 910	vmw_surface_unreference(&surface);
 911
 912	return ERR_PTR(-EINVAL);
 913}
 914
 915static struct drm_mode_config_funcs vmw_kms_funcs = {
 916	.fb_create = vmw_kms_fb_create,
 917};
 918
 919int vmw_kms_init(struct vmw_private *dev_priv)
 920{
 921	struct drm_device *dev = dev_priv->dev;
 922	int ret;
 923
 924	drm_mode_config_init(dev);
 925	dev->mode_config.funcs = &vmw_kms_funcs;
 926	dev->mode_config.min_width = 1;
 927	dev->mode_config.min_height = 1;
 928	/* assumed largest fb size */
 929	dev->mode_config.max_width = 8192;
 930	dev->mode_config.max_height = 8192;
 931
 932	ret = vmw_kms_init_legacy_display_system(dev_priv);
 933
 934	return 0;
 935}
 936
 937int vmw_kms_close(struct vmw_private *dev_priv)
 938{
 939	/*
  940	 * Docs say we should take the lock before calling this function,
 941	 * but since it destroys encoders and our destructor calls
 942	 * drm_encoder_cleanup which takes the lock we deadlock.
 943	 */
 944	drm_mode_config_cleanup(dev_priv->dev);
 945	vmw_kms_close_legacy_display_system(dev_priv);
 946	return 0;
 947}
 948
 949int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
 950				struct drm_file *file_priv)
 951{
 952	struct drm_vmw_cursor_bypass_arg *arg = data;
 953	struct vmw_display_unit *du;
 954	struct drm_mode_object *obj;
 955	struct drm_crtc *crtc;
 956	int ret = 0;
 957
 958
 959	mutex_lock(&dev->mode_config.mutex);
 960	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
 961
 962		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 963			du = vmw_crtc_to_du(crtc);
 964			du->hotspot_x = arg->xhot;
 965			du->hotspot_y = arg->yhot;
 966		}
 967
 968		mutex_unlock(&dev->mode_config.mutex);
 969		return 0;
 970	}
 971
 972	obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
 973	if (!obj) {
 974		ret = -EINVAL;
 975		goto out;
 976	}
 977
 978	crtc = obj_to_crtc(obj);
 979	du = vmw_crtc_to_du(crtc);
 980
 981	du->hotspot_x = arg->xhot;
 982	du->hotspot_y = arg->yhot;
 983
 984out:
 985	mutex_unlock(&dev->mode_config.mutex);
 986
 987	return ret;
 988}
 989
 990void vmw_kms_write_svga(struct vmw_private *vmw_priv,
 991			unsigned width, unsigned height, unsigned pitch,
  992			unsigned bpp, unsigned depth)
 993{
 994	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
 995		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
 996	else if (vmw_fifo_have_pitchlock(vmw_priv))
 997		iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
 998	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
 999	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
 1000	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
1001	vmw_write(vmw_priv, SVGA_REG_DEPTH, depth);
1002	vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
1003	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
1004	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
1005}
1006
1007int vmw_kms_save_vga(struct vmw_private *vmw_priv)
1008{
1009	struct vmw_vga_topology_state *save;
1010	uint32_t i;
1011
1012	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
1013	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
1014	vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
1015	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
1016	vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
1017	vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
1018	vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
1019	vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
1020	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
1021		vmw_priv->vga_pitchlock =
1022		  vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
1023	else if (vmw_fifo_have_pitchlock(vmw_priv))
1024		vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
1025						       SVGA_FIFO_PITCHLOCK);
1026
1027	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
1028		return 0;
1029
1030	vmw_priv->num_displays = vmw_read(vmw_priv,
1031					  SVGA_REG_NUM_GUEST_DISPLAYS);
1032
1033	if (vmw_priv->num_displays == 0)
1034		vmw_priv->num_displays = 1;
1035
1036	for (i = 0; i < vmw_priv->num_displays; ++i) {
1037		save = &vmw_priv->vga_save[i];
1038		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
1039		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
1040		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
1041		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
1042		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
1043		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
1044		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
1045		if (i == 0 && vmw_priv->num_displays == 1 &&
1046		    save->width == 0 && save->height == 0) {
1047
1048			/*
1049			 * It should be fairly safe to assume that these
1050			 * values are uninitialized.
1051			 */
1052
1053			save->width = vmw_priv->vga_width - save->pos_x;
1054			save->height = vmw_priv->vga_height - save->pos_y;
1055		}
1056	}
1057
1058	return 0;
1059}
1060
1061int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
1062{
1063	struct vmw_vga_topology_state *save;
1064	uint32_t i;
1065
1066	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
1067	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
1068	vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
1069	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
1070	vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
1071	vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
1072	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
1073	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
1074	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
1075		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
1076			  vmw_priv->vga_pitchlock);
1077	else if (vmw_fifo_have_pitchlock(vmw_priv))
1078		iowrite32(vmw_priv->vga_pitchlock,
1079			  vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
1080
1081	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
 1082		return 0;
1083
1084	for (i = 0; i < vmw_priv->num_displays; ++i) {
1085		save = &vmw_priv->vga_save[i];
1086		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
1087		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
1088		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
1089		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
1090		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
1091		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
1092		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
1093	}
1094
1095	return 0;
1096}
1097
1098int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
1099				struct drm_file *file_priv)
1100{
1101	struct vmw_private *dev_priv = vmw_priv(dev);
1102	struct drm_vmw_update_layout_arg *arg =
1103		(struct drm_vmw_update_layout_arg *)data;
1104	struct vmw_master *vmaster = vmw_master(file_priv->master);
1105	void __user *user_rects;
1106	struct drm_vmw_rect *rects;
1107	unsigned rects_size;
1108	int ret;
1109
1110	ret = ttm_read_lock(&vmaster->lock, true);
1111	if (unlikely(ret != 0))
1112		return ret;
1113
1114	if (!arg->num_outputs) {
1115		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
1116		vmw_kms_ldu_update_layout(dev_priv, 1, &def_rect);
1117		goto out_unlock;
1118	}
1119
1120	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
1121	rects = kzalloc(rects_size, GFP_KERNEL);
1122	if (unlikely(!rects)) {
1123		ret = -ENOMEM;
1124		goto out_unlock;
1125	}
1126
1127	user_rects = (void __user *)(unsigned long)arg->rects;
1128	ret = copy_from_user(rects, user_rects, rects_size);
1129	if (unlikely(ret != 0)) {
1130		DRM_ERROR("Failed to get rects.\n");
1131		ret = -EFAULT;
1132		goto out_free;
1133	}
1134
1135	vmw_kms_ldu_update_layout(dev_priv, arg->num_outputs, rects);
1136
1137out_free:
1138	kfree(rects);
1139out_unlock:
1140	ttm_read_unlock(&vmaster->lock);
1141	return ret;
1142}
1143
1144bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
1145				uint32_t pitch,
1146				uint32_t height)
1147{
1148	return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
1149}
1150
1151u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
1152{
1153	return 0;
1154}
v6.2
   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include "vmwgfx_kms.h"
  29#include "vmw_surface_cache.h"
  30
  31#include <drm/drm_atomic.h>
  32#include <drm/drm_atomic_helper.h>
  33#include <drm/drm_damage_helper.h>
  34#include <drm/drm_fourcc.h>
  35#include <drm/drm_rect.h>
  36#include <drm/drm_sysfs.h>
  37
  38void vmw_du_cleanup(struct vmw_display_unit *du)
  39{
  40	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
  41	drm_plane_cleanup(&du->primary);
  42	if (vmw_cmd_supported(dev_priv))
  43		drm_plane_cleanup(&du->cursor.base);
  44
  45	drm_connector_unregister(&du->connector);
  46	drm_crtc_cleanup(&du->crtc);
  47	drm_encoder_cleanup(&du->encoder);
  48	drm_connector_cleanup(&du->connector);
  49}
  50
  51/*
  52 * Display Unit Cursor functions
  53 */
  54
  55static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
  56static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
  57				  struct vmw_plane_state *vps,
  58				  u32 *image, u32 width, u32 height,
  59				  u32 hotspotX, u32 hotspotY);
  60
  61struct vmw_svga_fifo_cmd_define_cursor {
  62	u32 cmd;
  63	SVGAFifoCmdDefineAlphaCursor cursor;
  64};
  65
  66/**
  67 * vmw_send_define_cursor_cmd - queue a define cursor command
  68 * @dev_priv: the private driver struct
  69 * @image: buffer which holds the cursor image
  70 * @width: width of the mouse cursor image
  71 * @height: height of the mouse cursor image
  72 * @hotspotX: the horizontal position of mouse hotspot
  73 * @hotspotY: the vertical position of mouse hotspot
  74 */
  75static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
  76				       u32 *image, u32 width, u32 height,
  77				       u32 hotspotX, u32 hotspotY)
  78{
  79	struct vmw_svga_fifo_cmd_define_cursor *cmd;
  80	const u32 image_size = width * height * sizeof(*image);
  81	const u32 cmd_size = sizeof(*cmd) + image_size;
  82
  83	/* Try to reserve fifocmd space and swallow any failures;
  84	   such reservations cannot be left unconsumed for long
  85	   under the risk of clogging other fifocmd users, so
   86	   we treat reservations separately from the way we treat
  87	   other fallible KMS-atomic resources at prepare_fb */
  88	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
  89
  90	if (unlikely(!cmd))
  91		return;
  92
  93	memset(cmd, 0, sizeof(*cmd));
  94
  95	memcpy(&cmd[1], image, image_size);
  96
  97	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
  98	cmd->cursor.id = 0;
  99	cmd->cursor.width = width;
 100	cmd->cursor.height = height;
 101	cmd->cursor.hotspotX = hotspotX;
 102	cmd->cursor.hotspotY = hotspotY;
 103
 104	vmw_cmd_commit_flush(dev_priv, cmd_size);
 105}
 106
 107/**
 108 * vmw_cursor_update_image - update the cursor image on the provided plane
 109 * @dev_priv: the private driver struct
 110 * @vps: the plane state of the cursor plane
 111 * @image: buffer which holds the cursor image
 112 * @width: width of the mouse cursor image
 113 * @height: height of the mouse cursor image
 114 * @hotspotX: the horizontal position of mouse hotspot
 115 * @hotspotY: the vertical position of mouse hotspot
 116 */
 117static void vmw_cursor_update_image(struct vmw_private *dev_priv,
 118				    struct vmw_plane_state *vps,
 119				    u32 *image, u32 width, u32 height,
 120				    u32 hotspotX, u32 hotspotY)
 121{
 122	if (vps->cursor.bo)
 123		vmw_cursor_update_mob(dev_priv, vps, image,
 124				      vps->base.crtc_w, vps->base.crtc_h,
 125				      hotspotX, hotspotY);
 126
 127	else
 128		vmw_send_define_cursor_cmd(dev_priv, image, width, height,
 129					   hotspotX, hotspotY);
 130}
 131
 132
 133/**
  134 * vmw_cursor_update_mob - Update cursor via the CursorMob mechanism
 135 *
 136 * Called from inside vmw_du_cursor_plane_atomic_update to actually
 137 * make the cursor-image live.
 138 *
 139 * @dev_priv: device to work with
 140 * @vps: the plane state of the cursor plane
 141 * @image: cursor source data to fill the MOB with
 142 * @width: source data width
 143 * @height: source data height
 144 * @hotspotX: cursor hotspot x
 145 * @hotspotY: cursor hotspot Y
 146 */
 147static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
 148				  struct vmw_plane_state *vps,
 149				  u32 *image, u32 width, u32 height,
 150				  u32 hotspotX, u32 hotspotY)
 151{
 152	SVGAGBCursorHeader *header;
 153	SVGAGBAlphaCursorHeader *alpha_header;
 154	const u32 image_size = width * height * sizeof(*image);
 155	bool dummy;
 156
 157	header = ttm_kmap_obj_virtual(&vps->cursor.map, &dummy);
 158	alpha_header = &header->header.alphaHeader;
 159
 160	memset(header, 0, sizeof(*header));
 161
 162	header->type = SVGA_ALPHA_CURSOR;
 163	header->sizeInBytes = image_size;
 164
 165	alpha_header->hotspotX = hotspotX;
 166	alpha_header->hotspotY = hotspotY;
 167	alpha_header->width = width;
 168	alpha_header->height = height;
 169
 170	memcpy(header + 1, image, image_size);
 171	vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
 172		  vps->cursor.bo->resource->start);
 173}
 174
 175
 176static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
 177{
 178	return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
 179}
 180
 181/**
 182 * vmw_du_cursor_plane_acquire_image -- Acquire the image data
 183 * @vps: cursor plane state
 184 */
 185static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
 186{
 187	bool dummy;
 188	if (vps->surf) {
 189		if (vps->surf_mapped)
 190			return vmw_bo_map_and_cache(vps->surf->res.backup);
 191		return vps->surf->snooper.image;
 192	} else if (vps->bo)
 193		return ttm_kmap_obj_virtual(&vps->bo->map, &dummy);
 194	return NULL;
 195}
 196
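     /*
      * Compare old and new cursor plane state (dimensions, hotspot and,
      * where both images are mappable, the pixel data itself) so that
      * unchanged cursors need not be redefined on every atomic update.
      */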
 197static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
 198					    struct vmw_plane_state *new_vps)
 199{
 200	void *old_image;
 201	void *new_image;
 202	u32 size;
 203	bool changed;
 204
 205	if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
 206	    old_vps->base.crtc_h != new_vps->base.crtc_h)
 207	    return true;
 208
 209	if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
 210	    old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
 211	    return true;
 212
 213	size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
 214
 215	old_image = vmw_du_cursor_plane_acquire_image(old_vps);
 216	new_image = vmw_du_cursor_plane_acquire_image(new_vps);
 217
 218	changed = false;
 219	if (old_image && new_image)
 220		changed = memcmp(old_image, new_image, size) != 0;
 221
 222	return changed;
 223}
 224
 225static void vmw_du_destroy_cursor_mob(struct ttm_buffer_object **bo)
 226{
 227	if (!(*bo))
 228		return;
 229
 230	ttm_bo_unpin(*bo);
 231	ttm_bo_put(*bo);
 232	kfree(*bo);
 233	*bo = NULL;
 234}
 235
 236static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
 237				  struct vmw_plane_state *vps)
 238{
 239	u32 i;
 240
 241	if (!vps->cursor.bo)
 242		return;
 243
 244	vmw_du_cursor_plane_unmap_cm(vps);
 245
 246	/* Look for a free slot to return this mob to the cache. */
 247	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
 248		if (!vcp->cursor_mobs[i]) {
 249			vcp->cursor_mobs[i] = vps->cursor.bo;
 250			vps->cursor.bo = NULL;
 251			return;
 252		}
 253	}
 254
 255	/* Cache is full: See if this mob is bigger than an existing mob. */
 256	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
 257		if (vcp->cursor_mobs[i]->base.size <
 258		    vps->cursor.bo->base.size) {
 259			vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
 260			vcp->cursor_mobs[i] = vps->cursor.bo;
 261			vps->cursor.bo = NULL;
 262			return;
 263		}
 264	}
 265
 266	/* Destroy it if it's not worth caching. */
 267	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
 268}
 269
 270static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
 271				 struct vmw_plane_state *vps)
 272{
 273	struct vmw_private *dev_priv = vcp->base.dev->dev_private;
 274	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
 275	u32 i;
 276	u32 cursor_max_dim, mob_max_size;
 277	int ret;
 278
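     	/*
     	 * A cursor MOB needs both general MOB support and the
     	 * dedicated SVGA_CAP2_CURSOR_MOB capability; the device also
     	 * advertises limits on MOB size and cursor dimensions, which
     	 * are checked below.
     	 */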
 279	if (!dev_priv->has_mob ||
 280	    (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
 281		return -EINVAL;
 282
 283	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
 284	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
 285
 286	if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
 287	    vps->base.crtc_h > cursor_max_dim)
 288		return -EINVAL;
 289
 290	if (vps->cursor.bo) {
 291		if (vps->cursor.bo->base.size >= size)
 292			return 0;
 293		vmw_du_put_cursor_mob(vcp, vps);
 294	}
 295
 296	/* Look for an unused mob in the cache. */
 297	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
 298		if (vcp->cursor_mobs[i] &&
 299		    vcp->cursor_mobs[i]->base.size >= size) {
 300			vps->cursor.bo = vcp->cursor_mobs[i];
 301			vcp->cursor_mobs[i] = NULL;
 302			return 0;
 303		}
 304	}
 305	/* Create a new mob if we can't find an existing one. */
 306	ret = vmw_bo_create_kernel(dev_priv, size, &vmw_mob_placement,
 307				   &vps->cursor.bo);
 308
 309	if (ret != 0)
 310		return ret;
 311
  312	/* Fence the mob creation so we are guaranteed to have the mob */
 313	ret = ttm_bo_reserve(vps->cursor.bo, false, false, NULL);
 314	if (ret != 0)
 315		goto teardown;
 316
 317	vmw_bo_fence_single(vps->cursor.bo, NULL);
 318	ttm_bo_unreserve(vps->cursor.bo);
 319	return 0;
 320
 321teardown:
 322	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
 323	return ret;
 324}
  325
 326
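     /*
      * Three cursor-update paths, newest first: the CURSOR4 register
      * set (SVGA_CAP2_EXTRA_REGS), the FIFO cursor-bypass-3 registers,
      * and finally the legacy SVGA_REG_CURSOR* registers.
      */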
 327static void vmw_cursor_update_position(struct vmw_private *dev_priv,
 328				       bool show, int x, int y)
 329{
 330	const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
 331					     : SVGA_CURSOR_ON_HIDE;
 332	uint32_t count;
 333
 334	spin_lock(&dev_priv->cursor_lock);
 335	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
 336		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
 337		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
 338		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
 339		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
 340		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
 341	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
 342		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
 343		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
 344		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
 345		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
 346		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
 347	} else {
 348		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
 349		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
 350		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
 351	}
 352	spin_unlock(&dev_priv->cursor_lock);
 353}
 354
 355void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 356			  struct ttm_object_file *tfile,
 357			  struct ttm_buffer_object *bo,
 358			  SVGA3dCmdHeader *header)
 359{
 360	struct ttm_bo_kmap_obj map;
 361	unsigned long kmap_offset;
 362	unsigned long kmap_num;
 363	SVGA3dCopyBox *box;
 364	unsigned box_count;
 365	void *virtual;
 366	bool dummy;
 367	struct vmw_dma_cmd {
 368		SVGA3dCmdHeader header;
 369		SVGA3dCmdSurfaceDMA dma;
 370	} *cmd;
 371	int i, ret;
 372	const struct SVGA3dSurfaceDesc *desc =
 373		vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
 374	const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
 375
 376	cmd = container_of(header, struct vmw_dma_cmd, header);
 377
 378	/* No snooper installed, nothing to copy */
 379	if (!srf->snooper.image)
 380		return;
 381
 382	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
 383		DRM_ERROR("face and mipmap for cursors should never != 0\n");
 384		return;
 385	}
 386
 387	if (cmd->header.size < 64) {
 388		DRM_ERROR("at least one full copy box must be given\n");
 389		return;
 390	}
 391
 392	box = (SVGA3dCopyBox *)&cmd[1];
 393	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
 394			sizeof(SVGA3dCopyBox);
 395
 396	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
 397	    box->x != 0    || box->y != 0    || box->z != 0    ||
 398	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
 399	    box->d != 1    || box_count != 1 ||
 400	    box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
  401		/* TODO handle non-page-aligned offsets */
  402		/* TODO handle nonzero dst & src coordinates */
  403		/* TODO handle more than one copy box */
 404		DRM_ERROR("Can't snoop dma request for cursor!\n");
 405		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
 406			  box->srcx, box->srcy, box->srcz,
 407			  box->x, box->y, box->z,
 408			  box->w, box->h, box->d, box_count,
 409			  cmd->dma.guest.ptr.offset);
 410		return;
 411	}
 412
 413	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
 414	kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
 415
 416	ret = ttm_bo_reserve(bo, true, false, NULL);
 417	if (unlikely(ret != 0)) {
 418		DRM_ERROR("reserve failed\n");
 419		return;
 420	}
 421
 422	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
 423	if (unlikely(ret != 0))
 424		goto err_unreserve;
 425
 426	virtual = ttm_kmap_obj_virtual(&map, &dummy);
 427
 428	if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
 429		memcpy(srf->snooper.image, virtual,
 430		       VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
 431	} else {
 432		/* Image is unsigned pointer. */
 433		for (i = 0; i < box->h; i++)
 434			memcpy(srf->snooper.image + i * image_pitch,
 435			       virtual + i * cmd->dma.guest.pitch,
 436			       box->w * desc->pitchBytesPerBlock);
 437	}
 438
 439	srf->snooper.age++;
 440
 441	ttm_bo_kunmap(&map);
 442err_unreserve:
 443	ttm_bo_unreserve(bo);
 444}
 445
 446/**
 447 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 448 *
 449 * @dev_priv: Pointer to the device private struct.
 450 *
 451 * Clears all legacy hotspots.
 452 */
 453void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
 454{
 455	struct drm_device *dev = &dev_priv->drm;
 456	struct vmw_display_unit *du;
 457	struct drm_crtc *crtc;
 458
 459	drm_modeset_lock_all(dev);
 460	drm_for_each_crtc(crtc, dev) {
 461		du = vmw_crtc_to_du(crtc);
 462
 463		du->hotspot_x = 0;
 464		du->hotspot_y = 0;
 465	}
 466	drm_modeset_unlock_all(dev);
 467}
 468
 469void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
 470{
 471	struct drm_device *dev = &dev_priv->drm;
 472	struct vmw_display_unit *du;
 473	struct drm_crtc *crtc;
 474
 475	mutex_lock(&dev->mode_config.mutex);
 476
 477	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 478		du = vmw_crtc_to_du(crtc);
 479		if (!du->cursor_surface ||
 480		    du->cursor_age == du->cursor_surface->snooper.age ||
 481		    !du->cursor_surface->snooper.image)
 482			continue;
 483
 484		du->cursor_age = du->cursor_surface->snooper.age;
 485		vmw_send_define_cursor_cmd(dev_priv,
 486					   du->cursor_surface->snooper.image,
 487					   VMW_CURSOR_SNOOP_WIDTH,
 488					   VMW_CURSOR_SNOOP_HEIGHT,
 489					   du->hotspot_x + du->core_hotspot_x,
 490					   du->hotspot_y + du->core_hotspot_y);
 491	}
 492
 493	mutex_unlock(&dev->mode_config.mutex);
 494}
 495
 496
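/**
 * vmw_du_cursor_plane_destroy - Hides the cursor and destroys the plane
 *
 * @plane: cursor plane to destroy
 *
 * Hides the hardware cursor, destroys the cached cursor mobs and cleans
 * up the DRM plane.
 */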
 497void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
 498{
 499	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
 500	u32 i;
 501
 502	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
 503
 504	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
 505		vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
 506
 507	drm_plane_cleanup(plane);
 508}
 509
 510
 511void vmw_du_primary_plane_destroy(struct drm_plane *plane)
 512{
 513	drm_plane_cleanup(plane);
 514
 515	/* Planes are static in our case so there is nothing to free */
 516}
 517
 518
 519/**
 520 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
 521 *
 522 * @vps: plane state associated with the display surface
 523 * @unreference: true if we also want to unreference the display surface.
 524 */
 525void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
 526			     bool unreference)
 527{
 528	if (vps->surf) {
 529		if (vps->pinned) {
 530			vmw_resource_unpin(&vps->surf->res);
 531			vps->pinned--;
 532		}
 533
 534		if (unreference) {
 535			if (vps->pinned)
 536				DRM_ERROR("Surface still pinned\n");
 537			vmw_surface_unreference(&vps->surf);
 538		}
 539	}
 540}
 541
 542
 543/**
 544 * vmw_du_plane_cleanup_fb - Unpins the plane surface
 545 *
 546 * @plane: display plane
 547 * @old_state: Contains the FB to clean up
 548 *
 549 * Unpins the framebuffer surface.
 550 */
 553void
 554vmw_du_plane_cleanup_fb(struct drm_plane *plane,
 555			struct drm_plane_state *old_state)
 556{
 557	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
 558
 559	vmw_du_plane_unpin_surf(vps, false);
 560}
 561
 562
 563/**
 564 * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
 565 *
 566 * @vps: plane state holding the cursor mob to map
 567 *
 568 * Returns 0 on success
 569 */
 571static int
 572vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
 573{
 574	int ret;
 575	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
 576	struct ttm_buffer_object *bo = vps->cursor.bo;
 577
 578	if (!bo)
 579		return -EINVAL;
 580
 581	if (bo->base.size < size)
 582		return -EINVAL;
 583
 584	if (vps->cursor.mapped)
 585		return 0;
 586
 587	ret = ttm_bo_reserve(bo, false, false, NULL);
 588
 589	if (unlikely(ret != 0))
 590		return -ENOMEM;
 591
 592	ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vps->cursor.map);
 593
 594	/*
 595	 * Wait for the mob bind to finish, so that the first write to
 596	 * SVGA_REG_CURSOR_MOBID is done with a buffer that the device
 597	 * has already seen.
 598	 */
 600	(void) ttm_bo_wait(bo, false, false);
 601
 602	ttm_bo_unreserve(bo);
 603
 604	if (unlikely(ret != 0))
 605		return -ENOMEM;
 606
 607	vps->cursor.mapped = true;
 608
 609	return 0;
 610}
 611
 612
 613/**
 614 * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
 615 *
 616 * @vps: state of the cursor plane
 617 *
 618 * Returns 0 on success
 619 */
 621static int
 622vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
 623{
 624	int ret = 0;
 625	struct ttm_buffer_object *bo = vps->cursor.bo;
 626
 627	if (!vps->cursor.mapped)
 628		return 0;
 629
 630	if (!bo)
 631		return 0;
 632
 633	ret = ttm_bo_reserve(bo, true, false, NULL);
 634	if (likely(ret == 0)) {
 635		ttm_bo_kunmap(&vps->cursor.map);
 636		ttm_bo_unreserve(bo);
 637		vps->cursor.mapped = false;
 638	}
 639
 640	return ret;
 641}
 642
 643
 644/**
 645 * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
 646 *
 647 * @plane: cursor plane
 648 * @old_state: contains the state to clean up
 649 *
 650 * Unmaps all cursor bo mappings and unpins the cursor surface.
 651 */
 654void
 655vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
 656			       struct drm_plane_state *old_state)
 657{
 658	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
 659	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
 660	bool dummy;
 661
 662	if (vps->surf_mapped) {
 663		vmw_bo_unmap(vps->surf->res.backup);
 664		vps->surf_mapped = false;
 665	}
 666
 667	if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &dummy)) {
 668		const int ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
 669
 670		if (likely(ret == 0)) {
 671			if (atomic_read(&vps->bo->base_mapped_count) == 0)
 672			    ttm_bo_kunmap(&vps->bo->map);
 673			ttm_bo_unreserve(&vps->bo->base);
 674		}
 675	}
 676
 677	vmw_du_cursor_plane_unmap_cm(vps);
 678	vmw_du_put_cursor_mob(vcp, vps);
 679
 680	vmw_du_plane_unpin_surf(vps, false);
 681
 682	if (vps->surf) {
 683		vmw_surface_unreference(&vps->surf);
 684		vps->surf = NULL;
 685	}
 686
 687	if (vps->bo) {
 688		vmw_bo_unreference(&vps->bo);
 689		vps->bo = NULL;
 690	}
 691}
 692
 693
 694/**
 695 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 696 *
 697 * @plane:  display plane
 698 * @new_state: info on the new plane state, including the FB
 699 *
 700 * Returns 0 on success
 701 */
 702int
 703vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
 704			       struct drm_plane_state *new_state)
 705{
 706	struct drm_framebuffer *fb = new_state->fb;
 707	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
 708	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
 709	int ret = 0;
 710
 711	if (vps->surf) {
 712		vmw_surface_unreference(&vps->surf);
 713		vps->surf = NULL;
 714	}
 715
 716	if (vps->bo) {
 717		vmw_bo_unreference(&vps->bo);
 718		vps->bo = NULL;
 719	}
 720
 721	if (fb) {
 722		if (vmw_framebuffer_to_vfb(fb)->bo) {
 723			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
 724			vmw_bo_reference(vps->bo);
 725		} else {
 726			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
 727			vmw_surface_reference(vps->surf);
 728		}
 729	}
 730
 731	if (!vps->surf && vps->bo) {
 732		const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
 733
 734		/*
 735		 * Not using vmw_bo_map_and_cache() helper here as we need to
 736		 * reserve the ttm_buffer_object first which
 737		 * vmw_bo_map_and_cache() omits.
 738		 */
 739		ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
 740
 741		if (unlikely(ret != 0))
 742			return -ENOMEM;
 743
 744		ret = ttm_bo_kmap(&vps->bo->base, 0, PFN_UP(size), &vps->bo->map);
 745
 746		if (likely(ret == 0))
 747			atomic_inc(&vps->bo->base_mapped_count);
 748
 749		ttm_bo_unreserve(&vps->bo->base);
 750
 751		if (unlikely(ret != 0))
 752			return -ENOMEM;
 753	} else if (vps->surf && !vps->bo && vps->surf->res.backup) {
 754
 755		WARN_ON(vps->surf->snooper.image);
 756		ret = ttm_bo_reserve(&vps->surf->res.backup->base, true, false,
 757				     NULL);
 758		if (unlikely(ret != 0))
 759			return -ENOMEM;
 760		vmw_bo_map_and_cache(vps->surf->res.backup);
 761		ttm_bo_unreserve(&vps->surf->res.backup->base);
 762		vps->surf_mapped = true;
 763	}
 764
 765	if (vps->surf || vps->bo) {
 766		vmw_du_get_cursor_mob(vcp, vps);
 767		vmw_du_cursor_plane_map_cm(vps);
 768	}
 769
 770	return 0;
 771}
 772
 773
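/**
 * vmw_du_cursor_plane_atomic_update - Commit a new cursor plane state
 *
 * @plane: cursor plane
 * @state: atomic state containing the old and new plane states
 *
 * Sends a new cursor image to the device when the cursor has changed;
 * otherwise keeps the previously defined cursor active. Finally updates
 * the cursor position from the new state.
 */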
 774void
 775vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
 776				  struct drm_atomic_state *state)
 777{
 778	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 779									   plane);
 780	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
 781									   plane);
 782	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
 783	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
 784	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 785	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
 786	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
 787	s32 hotspot_x, hotspot_y;
 788	bool dummy;
 789
 790	hotspot_x = du->hotspot_x;
 791	hotspot_y = du->hotspot_y;
 792
 793	if (new_state->fb) {
 794		hotspot_x += new_state->fb->hot_x;
 795		hotspot_y += new_state->fb->hot_y;
 796	}
 797
 798	du->cursor_surface = vps->surf;
 799	du->cursor_bo = vps->bo;
 800
 801	if (!vps->surf && !vps->bo) {
 802		vmw_cursor_update_position(dev_priv, false, 0, 0);
 803		return;
 804	}
 805
 806	vps->cursor.hotspot_x = hotspot_x;
 807	vps->cursor.hotspot_y = hotspot_y;
 808
 809	if (vps->surf) {
 810		du->cursor_age = du->cursor_surface->snooper.age;
 811	}
 812
 813	if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
 814		/*
 815		 * If it hasn't changed, avoid making the device do extra
 816		 * work by keeping the old cursor active.
 817		 */
 818		struct vmw_cursor_plane_state tmp = old_vps->cursor;
 819		old_vps->cursor = vps->cursor;
 820		vps->cursor = tmp;
 821	} else {
 822		void *image = vmw_du_cursor_plane_acquire_image(vps);
 823		if (image)
 824			vmw_cursor_update_image(dev_priv, vps, image,
 825						new_state->crtc_w,
 826						new_state->crtc_h,
 827						hotspot_x, hotspot_y);
 828	}
 829
 830	if (vps->bo) {
 831		if (ttm_kmap_obj_virtual(&vps->bo->map, &dummy))
 832			atomic_dec(&vps->bo->base_mapped_count);
 833	}
 834
 835	du->cursor_x = new_state->crtc_x + du->set_gui_x;
 836	du->cursor_y = new_state->crtc_y + du->set_gui_y;
 837
 838	vmw_cursor_update_position(dev_priv, true,
 839				   du->cursor_x + hotspot_x,
 840				   du->cursor_y + hotspot_y);
 841
 842	du->core_hotspot_x = hotspot_x - du->hotspot_x;
 843	du->core_hotspot_y = hotspot_y - du->hotspot_y;
 844}
 845
 846
 847/**
 848 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 849 *
 850 * @plane: display plane
 851 * @state: info on the new plane state, including the FB
 852 *
 853 * Check if the new state is settable given the current state.  Other
 854 * than what the atomic helper checks, we care about crtc fitting
 855 * the FB and maintaining one active framebuffer.
 856 *
 857 * Returns 0 on success
 858 */
 859int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
 860				      struct drm_atomic_state *state)
 861{
 862	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 863									   plane);
 864	struct drm_crtc_state *crtc_state = NULL;
 865	struct drm_framebuffer *new_fb = new_state->fb;
 866	int ret;
 867
 868	if (new_state->crtc)
 869		crtc_state = drm_atomic_get_new_crtc_state(state,
 870							   new_state->crtc);
 871
 872	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
 873						  DRM_PLANE_NO_SCALING,
 874						  DRM_PLANE_NO_SCALING,
 875						  false, true);
 876
 877	if (!ret && new_fb) {
 878		struct drm_crtc *crtc = new_state->crtc;
 879		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 880
 881		vmw_connector_state_to_vcs(du->connector.state);
 882	}
 883
 884
 885	return ret;
 886}
 887
 888
 889/**
 890 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 891 *
 892 * @plane: cursor plane
 893 * @state: info on the new plane state
 894 *
 895 * This is a chance to fail if the new cursor state does not fit
 896 * our requirements.
 897 *
 898 * Returns 0 on success
 899 */
 900int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
 901				     struct drm_atomic_state *state)
 902{
 903	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 904									   plane);
 905	int ret = 0;
 906	struct drm_crtc_state *crtc_state = NULL;
 907	struct vmw_surface *surface = NULL;
 908	struct drm_framebuffer *fb = new_state->fb;
 909
 910	if (new_state->crtc)
 911		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
 912							   new_state->crtc);
 913
 914	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
 915						  DRM_PLANE_NO_SCALING,
 916						  DRM_PLANE_NO_SCALING,
 917						  true, true);
 918	if (ret)
 919		return ret;
 920
 921	/* Turning off */
 922	if (!fb)
 923		return 0;
 924
 925	/* A lot of the code assumes this */
 926	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
 927		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
 928			  new_state->crtc_w, new_state->crtc_h);
 929		return -EINVAL;
 930	}
 931
 932	if (!vmw_framebuffer_to_vfb(fb)->bo) {
 933		surface = vmw_framebuffer_to_vfbs(fb)->surface;
 934
 935		WARN_ON(!surface);
 936
 937		if (!surface ||
 938		    (!surface->snooper.image && !surface->res.backup)) {
 939			DRM_ERROR("surface not suitable for cursor\n");
 940			return -EINVAL;
 941		}
 942	}
 943
 944	return 0;
 945}
 946
 947
 948int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
 949			     struct drm_atomic_state *state)
 950{
 951	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
 952									 crtc);
 953	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
 954	int connector_mask = drm_connector_mask(&du->connector);
 955	bool has_primary = new_state->plane_mask &
 956			   drm_plane_mask(crtc->primary);
 957
 958	/* We always want to have an active plane with an active CRTC */
 959	if (has_primary != new_state->enable)
 960		return -EINVAL;
 961
 962
 963	if (new_state->connector_mask != connector_mask &&
 964	    new_state->connector_mask != 0) {
 965		DRM_ERROR("Invalid connectors configuration\n");
 966		return -EINVAL;
 967	}
 968
 969	/*
 970	 * Our virtual device does not have a dot clock, so use the logical
 971	 * clock value as the dot clock.
 972	 */
 973	if (new_state->mode.crtc_clock == 0)
 974		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;
 975
 976	return 0;
 977}
 978
 979
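/*
 * The atomic begin/flush hooks are intentionally empty: all scanout
 * updates for vmwgfx happen in the plane update hooks.
 */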
 980void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
 981			      struct drm_atomic_state *state)
 982{
 983}
 984
 985
 986void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
 987			      struct drm_atomic_state *state)
 988{
 989}
 990
 991
 992/**
 993 * vmw_du_crtc_duplicate_state - duplicate crtc state
 994 * @crtc: DRM crtc
 995 *
 996 * Allocates and returns a copy of the crtc state (both common and
 997 * vmw-specific) for the specified crtc.
 998 *
 999 * Returns: The newly allocated crtc state, or NULL on failure.
1000 */
1001struct drm_crtc_state *
1002vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
1003{
1004	struct drm_crtc_state *state;
1005	struct vmw_crtc_state *vcs;
1006
1007	if (WARN_ON(!crtc->state))
1008		return NULL;
1009
1010	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
1011
1012	if (!vcs)
1013		return NULL;
1014
1015	state = &vcs->base;
1016
1017	__drm_atomic_helper_crtc_duplicate_state(crtc, state);
1018
1019	return state;
1020}
1021
1022
1023/**
1024 * vmw_du_crtc_reset - creates a blank vmw crtc state
1025 * @crtc: DRM crtc
1026 *
1027 * Resets the atomic state for @crtc by freeing the state pointer (which
1028 * might be NULL, e.g. at driver load time) and allocating a new empty state
1029 * object.
1030 */
1031void vmw_du_crtc_reset(struct drm_crtc *crtc)
1032{
1033	struct vmw_crtc_state *vcs;
1034
1035
1036	if (crtc->state) {
1037		__drm_atomic_helper_crtc_destroy_state(crtc->state);
1038
1039		kfree(vmw_crtc_state_to_vcs(crtc->state));
1040	}
1041
1042	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1043
1044	if (!vcs) {
1045		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
1046		return;
1047	}
1048
1049	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
1050}
1051
1052
1053/**
1054 * vmw_du_crtc_destroy_state - destroy crtc state
1055 * @crtc: DRM crtc
1056 * @state: state object to destroy
1057 *
1058 * Destroys the crtc state (both common and vmw-specific) for the
1059 * specified crtc.
1060 */
1061void
1062vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
1063			  struct drm_crtc_state *state)
1064{
1065	drm_atomic_helper_crtc_destroy_state(crtc, state);
1066}
1067
1068
1069/**
1070 * vmw_du_plane_duplicate_state - duplicate plane state
1071 * @plane: drm plane
1072 *
1073 * Allocates and returns a copy of the plane state (both common and
1074 * vmw-specific) for the specified plane.
1075 *
1076 * Returns: The newly allocated plane state, or NULL on failure.
1077 */
1078struct drm_plane_state *
1079vmw_du_plane_duplicate_state(struct drm_plane *plane)
1080{
1081	struct drm_plane_state *state;
1082	struct vmw_plane_state *vps;
1083
1084	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
1085
1086	if (!vps)
1087		return NULL;
1088
1089	vps->pinned = 0;
1090	vps->cpp = 0;
1091
1092	memset(&vps->cursor, 0, sizeof(vps->cursor));
1093
1094	/* Each ref counted resource needs to be acquired again */
1095	if (vps->surf)
1096		(void) vmw_surface_reference(vps->surf);
1097
1098	if (vps->bo)
1099		(void) vmw_bo_reference(vps->bo);
1100
1101	state = &vps->base;
1102
1103	__drm_atomic_helper_plane_duplicate_state(plane, state);
1104
1105	return state;
1106}
1107
1108
1109/**
1110 * vmw_du_plane_reset - creates a blank vmw plane state
1111 * @plane: drm plane
1112 *
1113 * Resets the atomic state for @plane by freeing the state pointer (which might
1114 * be NULL, e.g. at driver load time) and allocating a new empty state object.
1115 */
1116void vmw_du_plane_reset(struct drm_plane *plane)
1117{
1118	struct vmw_plane_state *vps;
1119
1120	if (plane->state)
1121		vmw_du_plane_destroy_state(plane, plane->state);
1122
1123	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
1124
1125	if (!vps) {
1126		DRM_ERROR("Cannot allocate vmw_plane_state\n");
1127		return;
1128	}
1129
1130	__drm_atomic_helper_plane_reset(plane, &vps->base);
1131}
1132
1133
1134/**
1135 * vmw_du_plane_destroy_state - destroy plane state
1136 * @plane: DRM plane
1137 * @state: state object to destroy
1138 *
1139 * Destroys the plane state (both common and vmw-specific) for the
1140 * specified plane.
1141 */
1142void
1143vmw_du_plane_destroy_state(struct drm_plane *plane,
1144			   struct drm_plane_state *state)
1145{
1146	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
1147
1148	/* Should have been freed by cleanup_fb */
1149	if (vps->surf)
1150		vmw_surface_unreference(&vps->surf);
1151
1152	if (vps->bo)
1153		vmw_bo_unreference(&vps->bo);
1154
1155	drm_atomic_helper_plane_destroy_state(plane, state);
1156}
1157
1158
1159/**
1160 * vmw_du_connector_duplicate_state - duplicate connector state
1161 * @connector: DRM connector
1162 *
1163 * Allocates and returns a copy of the connector state (both common and
1164 * vmw-specific) for the specified connector.
1165 *
1166 * Returns: The newly allocated connector state, or NULL on failure.
1167 */
1168struct drm_connector_state *
1169vmw_du_connector_duplicate_state(struct drm_connector *connector)
1170{
1171	struct drm_connector_state *state;
1172	struct vmw_connector_state *vcs;
1173
1174	if (WARN_ON(!connector->state))
1175		return NULL;
1176
1177	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
1178
1179	if (!vcs)
1180		return NULL;
1181
1182	state = &vcs->base;
1183
1184	__drm_atomic_helper_connector_duplicate_state(connector, state);
1185
1186	return state;
1187}
1188
1189
1190/**
1191 * vmw_du_connector_reset - creates a blank vmw connector state
1192 * @connector: DRM connector
1193 *
1194 * Resets the atomic state for @connector by freeing the state pointer (which
1195 * might be NULL, e.g. at driver load time) and allocating a new empty state
1196 * object.
1197 */
1198void vmw_du_connector_reset(struct drm_connector *connector)
1199{
1200	struct vmw_connector_state *vcs;
1201
1202
1203	if (connector->state) {
1204		__drm_atomic_helper_connector_destroy_state(connector->state);
1205
1206		kfree(vmw_connector_state_to_vcs(connector->state));
1207	}
1208
1209	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1210
1211	if (!vcs) {
1212		DRM_ERROR("Cannot allocate vmw_connector_state\n");
1213		return;
1214	}
1215
1216	__drm_atomic_helper_connector_reset(connector, &vcs->base);
1217}
1218
1219
1220/**
1221 * vmw_du_connector_destroy_state - destroy connector state
1222 * @connector: DRM connector
1223 * @state: state object to destroy
1224 *
1225 * Destroys the connector state (both common and vmw-specific) for the
1226 * specified connector.
1227 */
1228void
1229vmw_du_connector_destroy_state(struct drm_connector *connector,
1230			  struct drm_connector_state *state)
1231{
1232	drm_atomic_helper_connector_destroy_state(connector, state);
1233}
1234/*
1235 * Generic framebuffer code
1236 */
1237
1238/*
1239 * Surface framebuffer code
1240 */
1241
1242static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
1243{
1244	struct vmw_framebuffer_surface *vfbs =
1245		vmw_framebuffer_to_vfbs(framebuffer);
1246
1247	drm_framebuffer_cleanup(framebuffer);
1248	vmw_surface_unreference(&vfbs->surface);
1249
1250	kfree(vfbs);
1251}
1252
1253/**
1254 * vmw_kms_readback - Perform a readback from the screen system to
1255 * a buffer-object backed framebuffer.
1256 *
1257 * @dev_priv: Pointer to the device private structure.
1258 * @file_priv: Pointer to a struct drm_file identifying the caller.
1259 * Must be set to NULL if @user_fence_rep is NULL.
1260 * @vfb: Pointer to the buffer-object backed framebuffer.
1261 * @user_fence_rep: User-space provided structure for fence information.
1262 * Must be set to non-NULL if @file_priv is non-NULL.
1263 * @vclips: Array of clip rects.
1264 * @num_clips: Number of clip rects in @vclips.
1265 *
1266 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
1267 * interrupted.
1268 */
1269int vmw_kms_readback(struct vmw_private *dev_priv,
1270		     struct drm_file *file_priv,
1271		     struct vmw_framebuffer *vfb,
1272		     struct drm_vmw_fence_rep __user *user_fence_rep,
1273		     struct drm_vmw_rect *vclips,
1274		     uint32_t num_clips)
1275{
1276	switch (dev_priv->active_display_unit) {
1277	case vmw_du_screen_object:
1278		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
1279					    user_fence_rep, vclips, num_clips,
1280					    NULL);
1281	case vmw_du_screen_target:
1282		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
1283					user_fence_rep, NULL, vclips, num_clips,
1284					1, false, true, NULL);
1285	default:
1286		WARN_ONCE(true,
1287			  "Readback called with invalid display system.\n");
1288	}
1289
1290	return -ENOSYS;
1291}
1292
1293
1294static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
1295	.destroy = vmw_framebuffer_surface_destroy,
1296	.dirty = drm_atomic_helper_dirtyfb,
1297};
1298
1299static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
1300					   struct vmw_surface *surface,
1301					   struct vmw_framebuffer **out,
1302					   const struct drm_mode_fb_cmd2
1303					   *mode_cmd,
1304					   bool is_bo_proxy)
1306{
1307	struct drm_device *dev = &dev_priv->drm;
1308	struct vmw_framebuffer_surface *vfbs;
1309	enum SVGA3dSurfaceFormat format;
1310	int ret;
1311
1312	/* 3D is only supported on HWv8 and newer hosts */
1313	if (dev_priv->active_display_unit == vmw_du_legacy)
1314		return -ENOSYS;
1315
1316	/*
1317	 * Sanity checks.
1318	 */
1319
1320	if (!drm_any_plane_has_format(&dev_priv->drm,
1321				      mode_cmd->pixel_format,
1322				      mode_cmd->modifier[0])) {
1323		drm_dbg(&dev_priv->drm,
1324			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1325			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1326		return -EINVAL;
1327	}
1328
1329	/* Surface must be marked as a scanout. */
1330	if (unlikely(!surface->metadata.scanout))
1331		return -EINVAL;
1332
1333	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
1334		     surface->metadata.num_sizes != 1 ||
1335		     surface->metadata.base_size.width < mode_cmd->width ||
1336		     surface->metadata.base_size.height < mode_cmd->height ||
1337		     surface->metadata.base_size.depth != 1)) {
1338		DRM_ERROR("Incompatible surface dimensions "
1339			  "for requested mode.\n");
1340		return -EINVAL;
1341	}
1342
1343	switch (mode_cmd->pixel_format) {
1344	case DRM_FORMAT_ARGB8888:
1345		format = SVGA3D_A8R8G8B8;
1346		break;
1347	case DRM_FORMAT_XRGB8888:
1348		format = SVGA3D_X8R8G8B8;
1349		break;
1350	case DRM_FORMAT_RGB565:
1351		format = SVGA3D_R5G6B5;
1352		break;
1353	case DRM_FORMAT_XRGB1555:
1354		format = SVGA3D_A1R5G5B5;
1355		break;
1356	default:
1357		DRM_ERROR("Invalid pixel format: %p4cc\n",
1358			  &mode_cmd->pixel_format);
1359		return -EINVAL;
1360	}
1361
1362	/*
1363	 * For DX, surface format validation is done when surface->scanout
1364	 * is set.
1365	 */
1366	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
1367		DRM_ERROR("Invalid surface format for requested mode.\n");
1368		return -EINVAL;
1369	}
1370
1371	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
1372	if (!vfbs) {
1373		ret = -ENOMEM;
1374		goto out_err1;
1375	}
1376
1377	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
1378	vfbs->surface = vmw_surface_reference(surface);
1379	vfbs->base.user_handle = mode_cmd->handles[0];
1380	vfbs->is_bo_proxy = is_bo_proxy;
1381
1382	*out = &vfbs->base;
1383
1384	ret = drm_framebuffer_init(dev, &vfbs->base.base,
1385				   &vmw_framebuffer_surface_funcs);
1386	if (ret)
1387		goto out_err2;
1388
1389	return 0;
1390
1391out_err2:
1392	vmw_surface_unreference(&surface);
1393	kfree(vfbs);
1394out_err1:
1395	return ret;
1396}
1397
1398/*
1399 * Buffer-object framebuffer code
1400 */
1401
1402static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
1403					    struct drm_file *file_priv,
1404					    unsigned int *handle)
1405{
1406	struct vmw_framebuffer_bo *vfbd =
1407			vmw_framebuffer_to_vfbd(fb);
1408
1409	return drm_gem_handle_create(file_priv, &vfbd->buffer->base.base, handle);
1410}
1411
1412static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
1413{
1414	struct vmw_framebuffer_bo *vfbd =
1415		vmw_framebuffer_to_vfbd(framebuffer);
1416
1417	drm_framebuffer_cleanup(framebuffer);
1418	vmw_bo_unreference(&vfbd->buffer);
1419
1420	kfree(vfbd);
1421}
1422
1423static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
1424				    struct drm_file *file_priv,
1425				    unsigned int flags, unsigned int color,
1426				    struct drm_clip_rect *clips,
1427				    unsigned int num_clips)
1428{
1429	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
1430	struct vmw_framebuffer_bo *vfbd =
1431		vmw_framebuffer_to_vfbd(framebuffer);
1432	struct drm_clip_rect norect;
1433	int ret, increment = 1;
1434
1435	drm_modeset_lock_all(&dev_priv->drm);
1436
1437	if (!num_clips) {
1438		num_clips = 1;
1439		clips = &norect;
1440		norect.x1 = norect.y1 = 0;
1441		norect.x2 = framebuffer->width;
1442		norect.y2 = framebuffer->height;
1443	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
1444		num_clips /= 2;
1445		increment = 2;
1446	}
1447
1448	switch (dev_priv->active_display_unit) {
1449	case vmw_du_legacy:
1450		ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
1451					      clips, num_clips, increment);
1452		break;
1453	default:
1454		ret = -EINVAL;
1455		WARN_ONCE(true, "Dirty called with invalid display system.\n");
1456		break;
1457	}
1458
1459	vmw_cmd_flush(dev_priv, false);
1460
1461	drm_modeset_unlock_all(&dev_priv->drm);
1462
1463	return ret;
1464}
1465
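/*
 * Dirty callback for buffer-object framebuffers: dispatches to the
 * FIFO-based dirty path on the legacy display unit when command
 * submission is available, and to the atomic dirtyfb helper otherwise.
 */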
1466static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
1467					struct drm_file *file_priv,
1468					unsigned int flags, unsigned int color,
1469					struct drm_clip_rect *clips,
1470					unsigned int num_clips)
1471{
1472	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
1473
1474	if (dev_priv->active_display_unit == vmw_du_legacy &&
1475	    vmw_cmd_supported(dev_priv))
1476		return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
1477						color, clips, num_clips);
1478
1479	return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color,
1480					 clips, num_clips);
1481}
1482
1483static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
1484	.create_handle = vmw_framebuffer_bo_create_handle,
1485	.destroy = vmw_framebuffer_bo_destroy,
1486	.dirty = vmw_framebuffer_bo_dirty_ext,
1487};
1488
1489/*
1490 * Pin the buffer in a location suitable for access by the
1491 * display system.
1492 */
1493static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
1494{
1495	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
1496	struct vmw_buffer_object *buf;
1497	struct ttm_placement *placement;
1498	int ret;
1499
1500	buf = vfb->bo ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
1501		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
1502
1503	if (!buf)
1504		return 0;
1505
1506	switch (dev_priv->active_display_unit) {
1507	case vmw_du_legacy:
1508		vmw_overlay_pause_all(dev_priv);
1509		ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
1510		vmw_overlay_resume_all(dev_priv);
1511		break;
1512	case vmw_du_screen_object:
1513	case vmw_du_screen_target:
1514		if (vfb->bo) {
1515			if (dev_priv->capabilities & SVGA_CAP_3D) {
1516				/*
1517				 * Use surface DMA to get content to
1518				 * screen target surface.
1519				 */
1520				placement = &vmw_vram_gmr_placement;
1521			} else {
1522				/* Use CPU blit. */
1523				placement = &vmw_sys_placement;
1524			}
1525		} else {
1526			/* Use surface / image update */
1527			placement = &vmw_mob_placement;
1528		}
1529
1530		return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
1531	default:
1532		return -EINVAL;
1533	}
1534
1535	return ret;
1536}
1537
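/* Unpin the buffer backing the framebuffer, reversing vmw_framebuffer_pin(). */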
1538static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
1539{
1540	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
1541	struct vmw_buffer_object *buf;
1542
1543	buf = vfb->bo ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
1544		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
1545
1546	if (WARN_ON(!buf))
1547		return 0;
1548
1549	return vmw_bo_unpin(dev_priv, buf, false);
1550}
1551
1552/**
1553 * vmw_create_bo_proxy - create a proxy surface for the buffer object
1554 *
1555 * @dev: DRM device
1556 * @mode_cmd: parameters for the new surface
1557 * @bo_mob: MOB backing the buffer object
1558 * @srf_out: newly created surface
1559 *
1560 * When the content FB is a buffer object, we create a surface as a proxy to the
1561 * same buffer.  This way we can do a surface copy rather than a surface DMA.
1562 * This is a more efficient approach.
1563 *
1564 * RETURNS:
1565 * 0 on success, error code otherwise
1566 */
1567static int vmw_create_bo_proxy(struct drm_device *dev,
1568			       const struct drm_mode_fb_cmd2 *mode_cmd,
1569			       struct vmw_buffer_object *bo_mob,
1570			       struct vmw_surface **srf_out)
1571{
1572	struct vmw_surface_metadata metadata = {0};
1573	uint32_t format;
1574	struct vmw_resource *res;
1575	unsigned int bytes_pp;
1576	int ret;
1577
1578	switch (mode_cmd->pixel_format) {
1579	case DRM_FORMAT_ARGB8888:
1580	case DRM_FORMAT_XRGB8888:
1581		format = SVGA3D_X8R8G8B8;
1582		bytes_pp = 4;
1583		break;
1584
1585	case DRM_FORMAT_RGB565:
1586	case DRM_FORMAT_XRGB1555:
1587		format = SVGA3D_R5G6B5;
1588		bytes_pp = 2;
1589		break;
1590
1591	case DRM_FORMAT_C8:
1592		format = SVGA3D_P8;
1593		bytes_pp = 1;
1594		break;
1595
1596	default:
1597		DRM_ERROR("Invalid framebuffer format %p4cc\n",
1598			  &mode_cmd->pixel_format);
1599		return -EINVAL;
1600	}
1601
1602	metadata.format = format;
1603	metadata.mip_levels[0] = 1;
1604	metadata.num_sizes = 1;
1605	metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
1606	metadata.base_size.height =  mode_cmd->height;
1607	metadata.base_size.depth = 1;
1608	metadata.scanout = true;
1609
1610	ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
1611	if (ret) {
1612		DRM_ERROR("Failed to allocate proxy content buffer\n");
1613		return ret;
1614	}
1615
1616	res = &(*srf_out)->res;
1617
1618	/* Reserve and switch the backing mob. */
1619	mutex_lock(&res->dev_priv->cmdbuf_mutex);
1620	(void) vmw_resource_reserve(res, false, true);
1621	vmw_bo_unreference(&res->backup);
1622	res->backup = vmw_bo_reference(bo_mob);
1623	res->backup_offset = 0;
1624	vmw_resource_unreserve(res, false, false, false, NULL, 0);
1625	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1626
1627	return 0;
1628}
1629
1630
1631
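/**
 * vmw_kms_new_framebuffer_bo - Wrap a buffer object in a vmw framebuffer
 *
 * @dev_priv: Pointer to device private struct.
 * @bo: buffer object backing the framebuffer.
 * @out: on success, set to the newly created vmw framebuffer.
 * @mode_cmd: framebuffer metadata requested by user space.
 *
 * Validates the requested size and pixel format against the buffer
 * object before initializing the DRM framebuffer.
 */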
1632static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
1633				      struct vmw_buffer_object *bo,
1634				      struct vmw_framebuffer **out,
1635				      const struct drm_mode_fb_cmd2
1636				      *mode_cmd)
1638{
1639	struct drm_device *dev = &dev_priv->drm;
1640	struct vmw_framebuffer_bo *vfbd;
1641	unsigned int requested_size;
1642	int ret;
1643
1644	requested_size = mode_cmd->height * mode_cmd->pitches[0];
1645	if (unlikely(requested_size > bo->base.base.size)) {
1646		DRM_ERROR("Screen buffer object size is too small "
1647			  "for requested mode.\n");
1648		return -EINVAL;
1649	}
1650
1651	if (!drm_any_plane_has_format(&dev_priv->drm,
1652				      mode_cmd->pixel_format,
1653				      mode_cmd->modifier[0])) {
1654		drm_dbg(&dev_priv->drm,
1655			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1656			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1657		return -EINVAL;
1658	}
1659
1660	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
1661	if (!vfbd) {
1662		ret = -ENOMEM;
1663		goto out_err1;
1664	}
1665
1666	vfbd->base.base.obj[0] = &bo->base.base;
1667	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
1668	vfbd->base.bo = true;
1669	vfbd->buffer = vmw_bo_reference(bo);
1670	vfbd->base.user_handle = mode_cmd->handles[0];
1671	*out = &vfbd->base;
1672
1673	ret = drm_framebuffer_init(dev, &vfbd->base.base,
1674				   &vmw_framebuffer_bo_funcs);
1675	if (ret)
1676		goto out_err2;
1677
1678	return 0;
1679
1680out_err2:
1681	vmw_bo_unreference(&bo);
1682	kfree(vfbd);
1683out_err1:
1684	return ret;
1685}
1686
1687
1688/**
1689 * vmw_kms_srf_ok - check if a surface can be created
1690 *
1691 * @dev_priv: Pointer to device private struct.
1692 * @width: requested width
1693 * @height: requested height
1694 *
1695 * Surfaces must not exceed the maximum texture size.
1696 */
1697static bool
1698vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
1699{
1700	if (width  > dev_priv->texture_max_width ||
1701	    height > dev_priv->texture_max_height)
1702		return false;
1703
1704	return true;
1705}
1706
1707/**
1708 * vmw_kms_new_framebuffer - Create a new framebuffer.
1709 *
1710 * @dev_priv: Pointer to device private struct.
1711 * @bo: Pointer to buffer object to wrap the kms framebuffer around.
1712 * Either @bo or @surface must be NULL.
1713 * @surface: Pointer to a surface to wrap the kms framebuffer around.
1714 * Either @bo or @surface must be NULL.
1715 * @only_2d: No presents will occur to this buffer object based framebuffer.
1716 * This helps the code to do some important optimizations.
1717 * @mode_cmd: Frame-buffer metadata.
1718 */
1719struct vmw_framebuffer *
1720vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1721			struct vmw_buffer_object *bo,
1722			struct vmw_surface *surface,
1723			bool only_2d,
1724			const struct drm_mode_fb_cmd2 *mode_cmd)
1725{
1726	struct vmw_framebuffer *vfb = NULL;
1727	bool is_bo_proxy = false;
1728	int ret;
1729
1730	/*
1731	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
1732	 * therefore, wrap the buffer object in a surface so we can use the
1733	 * SurfaceCopy command.
1734	 */
1735	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
1736	    bo && only_2d &&
1737	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
1738	    dev_priv->active_display_unit == vmw_du_screen_target) {
1739		ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
1740					  bo, &surface);
1741		if (ret)
1742			return ERR_PTR(ret);
1743
1744		is_bo_proxy = true;
1745	}
1746
1747	/* Create the new framebuffer depending on what we have */
1748	if (surface) {
1749		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
1750						      mode_cmd,
1751						      is_bo_proxy);
1752		/*
1753		 * vmw_create_bo_proxy() adds a reference that is no longer
1754		 * needed
1755		 */
1756		if (is_bo_proxy)
1757			vmw_surface_unreference(&surface);
1758	} else if (bo) {
1759		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
1760						 mode_cmd);
1761	} else {
1762		BUG();
1763	}
1764
1765	if (ret)
1766		return ERR_PTR(ret);
1767
1768	vfb->pin = vmw_framebuffer_pin;
1769	vfb->unpin = vmw_framebuffer_unpin;
1770
1771	return vfb;
1772}
1773
1774/*
1775 * Generic Kernel modesetting functions
1776 */
1777
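/*
 * vmw_kms_fb_create - Implements struct drm_mode_config_funcs.fb_create;
 * looks the user handle up as either a buffer object or a surface and
 * wraps the result in a vmw framebuffer.
 */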
1778static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1779						 struct drm_file *file_priv,
1780						 const struct drm_mode_fb_cmd2 *mode_cmd)
1781{
1782	struct vmw_private *dev_priv = vmw_priv(dev);
1783	struct vmw_framebuffer *vfb = NULL;
1784	struct vmw_surface *surface = NULL;
1785	struct vmw_buffer_object *bo = NULL;
1786	int ret;
1787
1788	/* returns either a bo or surface */
1789	ret = vmw_user_lookup_handle(dev_priv, file_priv,
1790				     mode_cmd->handles[0],
1791				     &surface, &bo);
1792	if (ret) {
1793		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
1794			  mode_cmd->handles[0], mode_cmd->handles[0]);
1795		goto err_out;
1796	}
1797
1798
1799	if (!bo &&
1800	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
1801		DRM_ERROR("Surface size cannot exceed %dx%d\n",
1802			dev_priv->texture_max_width,
1803			dev_priv->texture_max_height);
		ret = -EINVAL;
1804		goto err_out;
1805	}
1806
1807
1808	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
1809				      !(dev_priv->capabilities & SVGA_CAP_3D),
1810				      mode_cmd);
1811	if (IS_ERR(vfb)) {
1812		ret = PTR_ERR(vfb);
1813		goto err_out;
1814	}
1815
1816err_out:
1817	/* vmw_user_lookup_handle takes one ref so does new_fb */
1818	if (bo) {
1819		vmw_bo_unreference(&bo);
1820		drm_gem_object_put(&bo->base.base);
1821	}
1822	if (surface)
1823		vmw_surface_unreference(&surface);
1824
1825	if (ret) {
1826		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
1827		return ERR_PTR(ret);
1828	}
1829
1830	return &vfb->base;
1831}
1832
1833/**
1834 * vmw_kms_check_display_memory - Validates display memory required for a
1835 * topology
1836 * @dev: DRM device
1837 * @num_rects: number of drm_rect in rects
1838 * @rects: array of drm_rect, indexed by crtc index, representing the
1839 * topology to validate.
1840 *
1841 * Returns:
1842 * 0 on success otherwise negative error code
1843 */
1844static int vmw_kms_check_display_memory(struct drm_device *dev,
1845					uint32_t num_rects,
1846					struct drm_rect *rects)
1847{
1848	struct vmw_private *dev_priv = vmw_priv(dev);
1849	struct drm_rect bounding_box = {0};
1850	u64 total_pixels = 0, pixel_mem, bb_mem;
1851	int i;
1852
1853	for (i = 0; i < num_rects; i++) {
1854		/*
1855		 * For STDU only individual screen (screen target) is limited by
1856		 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
1857		 */
1858		if (dev_priv->active_display_unit == vmw_du_screen_target &&
1859		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
1860		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
1861			VMW_DEBUG_KMS("Screen size not supported.\n");
1862			return -EINVAL;
1863		}
1864
1865		/* Bounding box upper left is at (0,0). */
1866		if (rects[i].x2 > bounding_box.x2)
1867			bounding_box.x2 = rects[i].x2;
1868
1869		if (rects[i].y2 > bounding_box.y2)
1870			bounding_box.y2 = rects[i].y2;
1871
1872		total_pixels += (u64) drm_rect_width(&rects[i]) *
1873			(u64) drm_rect_height(&rects[i]);
1874	}
1875
1876	/* The virtual SVGA device's primary limits are always in 32 bpp. */
1877	pixel_mem = total_pixels * 4;
1878
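	/*
	 * Example: two 1920x1080 outputs need 2 * 1920 * 1080 * 4 bytes,
	 * i.e. roughly 16.6 MB of primary memory.
	 */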
1879	/*
1880	 * For HV10 and below, prim_bb_mem is the VRAM size: when
1881	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the VRAM
1882	 * size is the limit on the primary bounding box.
1883	 */
1884	if (pixel_mem > dev_priv->max_primary_mem) {
1885		VMW_DEBUG_KMS("Combined output size too large.\n");
1886		return -EINVAL;
1887	}
1888
1889	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
1890	if (dev_priv->active_display_unit != vmw_du_screen_target ||
1891	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
1892		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
1893
1894		if (bb_mem > dev_priv->max_primary_mem) {
1895			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
1896			return -EINVAL;
1897		}
1898	}
1899
1900	return 0;
1901}
1902
1903/**
1904 * vmw_crtc_state_and_lock - Return new or current crtc state with locked
1905 * crtc mutex
1906 * @state: The atomic state pointer containing the new atomic state
1907 * @crtc: The crtc
1908 *
1909 * This function returns the new crtc state if it's part of the state update.
1910 * Otherwise returns the current crtc state. It also makes sure that the
1911 * crtc mutex is locked.
1912 *
1913 * Returns: A valid crtc state pointer or NULL. It may also return a
1914 * pointer error, in particular -EDEADLK if locking needs to be rerun.
1915 */
1916static struct drm_crtc_state *
1917vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
1918{
1919	struct drm_crtc_state *crtc_state;
1920
1921	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1922	if (crtc_state) {
1923		lockdep_assert_held(&crtc->mutex.mutex.base);
1924	} else {
1925		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
1926
1927		if (ret != 0 && ret != -EALREADY)
1928			return ERR_PTR(ret);
1929
1930		crtc_state = crtc->state;
1931	}
1932
1933	return crtc_state;
1934}
1935
1936/**
1937 * vmw_kms_check_implicit - Verify that all implicit display units scan out
1938 * from the same fb after the new state is committed.
1939 * @dev: The drm_device.
1940 * @state: The new state to be checked.
1941 *
1942 * Returns:
1943 *   Zero on success,
1944 *   -EINVAL on invalid state,
1945 *   -EDEADLK if modeset locking needs to be rerun.
1946 */
1947static int vmw_kms_check_implicit(struct drm_device *dev,
1948				  struct drm_atomic_state *state)
1949{
1950	struct drm_framebuffer *implicit_fb = NULL;
1951	struct drm_crtc *crtc;
1952	struct drm_crtc_state *crtc_state;
1953	struct drm_plane_state *plane_state;
1954
1955	drm_for_each_crtc(crtc, dev) {
1956		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1957
1958		if (!du->is_implicit)
1959			continue;
1960
1961		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1962		if (IS_ERR(crtc_state))
1963			return PTR_ERR(crtc_state);
1964
1965		if (!crtc_state || !crtc_state->enable)
1966			continue;
1967
1968		/*
1969		 * Can't move primary planes across crtcs, so this is OK.
1970		 * It also means we don't need to take the plane mutex.
1971		 */
1972		plane_state = du->primary.state;
1973		if (plane_state->crtc != crtc)
1974			continue;
1975
1976		if (!implicit_fb)
1977			implicit_fb = plane_state->fb;
1978		else if (implicit_fb != plane_state->fb)
1979			return -EINVAL;
1980	}
1981
1982	return 0;
1983}
1984
1985/**
1986 * vmw_kms_check_topology - Validates topology in drm_atomic_state
1987 * @dev: DRM device
1988 * @state: the driver state object
1989 *
1990 * Returns:
1991 * 0 on success otherwise negative error code
1992 */
1993static int vmw_kms_check_topology(struct drm_device *dev,
1994				  struct drm_atomic_state *state)
1995{
1996	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1997	struct drm_rect *rects;
1998	struct drm_crtc *crtc;
1999	uint32_t i;
2000	int ret = 0;
2001
2002	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
2003			GFP_KERNEL);
2004	if (!rects)
2005		return -ENOMEM;
2006
2007	drm_for_each_crtc(crtc, dev) {
2008		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
2009		struct drm_crtc_state *crtc_state;
2010
2011		i = drm_crtc_index(crtc);
2012
2013		crtc_state = vmw_crtc_state_and_lock(state, crtc);
2014		if (IS_ERR(crtc_state)) {
2015			ret = PTR_ERR(crtc_state);
2016			goto clean;
2017		}
2018
2019		if (!crtc_state)
2020			continue;
2021
2022		if (crtc_state->enable) {
2023			rects[i].x1 = du->gui_x;
2024			rects[i].y1 = du->gui_y;
2025			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
2026			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
2027		} else {
2028			rects[i].x1 = 0;
2029			rects[i].y1 = 0;
2030			rects[i].x2 = 0;
2031			rects[i].y2 = 0;
2032		}
2033	}
2034
2035	/* Determine change to topology due to new atomic state */
2036	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
2037				      new_crtc_state, i) {
2038		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
2039		struct drm_connector *connector;
2040		struct drm_connector_state *conn_state;
2041		struct vmw_connector_state *vmw_conn_state;
2042
2043		if (!du->pref_active && new_crtc_state->enable) {
2044			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
2045			ret = -EINVAL;
2046			goto clean;
2047		}
2048
2049		/*
2050		 * For vmwgfx each crtc has only one connector attached and it
2051		 * is not changed so don't really need to check the
2052		 * crtc->connector_mask and iterate over it.
2053		 */
2054		connector = &du->connector;
2055		conn_state = drm_atomic_get_connector_state(state, connector);
2056		if (IS_ERR(conn_state)) {
2057			ret = PTR_ERR(conn_state);
2058			goto clean;
2059		}
2060
2061		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
2062		vmw_conn_state->gui_x = du->gui_x;
2063		vmw_conn_state->gui_y = du->gui_y;
2064	}
2065
2066	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
2067					   rects);
2068
2069clean:
2070	kfree(rects);
2071	return ret;
2072}
2073
2074/**
2075 * vmw_kms_atomic_check_modeset- validate state object for modeset changes
2076 *
2077 * @dev: DRM device
2078 * @state: the driver state object
2079 *
2080 * This is a simple wrapper around drm_atomic_helper_check() that also
2081 * assigns a value to mode->crtc_clock so that
2082 * drm_calc_timestamping_constants() won't throw an error message.
2083 *
2084 * Returns:
2085 * Zero for success or -errno
2086 */
2087static int
2088vmw_kms_atomic_check_modeset(struct drm_device *dev,
2089			     struct drm_atomic_state *state)
2090{
2091	struct drm_crtc *crtc;
2092	struct drm_crtc_state *crtc_state;
2093	bool need_modeset = false;
2094	int i, ret;
2095
2096	ret = drm_atomic_helper_check(dev, state);
2097	if (ret)
2098		return ret;
2099
2100	ret = vmw_kms_check_implicit(dev, state);
2101	if (ret) {
2102		VMW_DEBUG_KMS("Invalid implicit state\n");
2103		return ret;
2104	}
2105
2106	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
2107		if (drm_atomic_crtc_needs_modeset(crtc_state))
2108			need_modeset = true;
2109	}
2110
2111	if (need_modeset)
2112		return vmw_kms_check_topology(dev, state);
2113
2114	return ret;
2115}
2116
2117static const struct drm_mode_config_funcs vmw_kms_funcs = {
2118	.fb_create = vmw_kms_fb_create,
2119	.atomic_check = vmw_kms_atomic_check_modeset,
2120	.atomic_commit = drm_atomic_helper_commit,
2121};
2122
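/*
 * Present helper for the screen-object display unit, implemented as a
 * surface dirty on the screen-object path.
 */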
2123static int vmw_kms_generic_present(struct vmw_private *dev_priv,
2124				   struct drm_file *file_priv,
2125				   struct vmw_framebuffer *vfb,
2126				   struct vmw_surface *surface,
2127				   uint32_t sid,
2128				   int32_t destX, int32_t destY,
2129				   struct drm_vmw_rect *clips,
2130				   uint32_t num_clips)
2131{
2132	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
2133					    &surface->res, destX, destY,
2134					    num_clips, 1, NULL, NULL);
2135}
2136
2137
2138int vmw_kms_present(struct vmw_private *dev_priv,
2139		    struct drm_file *file_priv,
2140		    struct vmw_framebuffer *vfb,
2141		    struct vmw_surface *surface,
2142		    uint32_t sid,
2143		    int32_t destX, int32_t destY,
2144		    struct drm_vmw_rect *clips,
2145		    uint32_t num_clips)
2146{
2147	int ret;
2148
2149	switch (dev_priv->active_display_unit) {
2150	case vmw_du_screen_target:
2151		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
2152						 &surface->res, destX, destY,
2153						 num_clips, 1, NULL, NULL);
2154		break;
2155	case vmw_du_screen_object:
2156		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
2157					      sid, destX, destY, clips,
2158					      num_clips);
2159		break;
2160	default:
2161		WARN_ONCE(true,
2162			  "Present called with invalid display system.\n");
2163		ret = -ENOSYS;
2164		break;
2165	}
2166	if (ret)
2167		return ret;
2168
2169	vmw_cmd_flush(dev_priv, false);
2170
2171	return 0;
2172}
2173
2174static void
2175vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
2176{
2177	if (dev_priv->hotplug_mode_update_property)
2178		return;
2179
2180	dev_priv->hotplug_mode_update_property =
2181		drm_property_create_range(&dev_priv->drm,
2182					  DRM_MODE_PROP_IMMUTABLE,
2183					  "hotplug_mode_update", 0, 1);
2184}
2185
2186int vmw_kms_init(struct vmw_private *dev_priv)
2187{
2188	struct drm_device *dev = &dev_priv->drm;
2189	int ret;
2190	static const char *display_unit_names[] = {
2191		"Invalid",
2192		"Legacy",
2193		"Screen Object",
2194		"Screen Target",
2195		"Invalid (max)"
2196	};
2197
2198	drm_mode_config_init(dev);
2199	dev->mode_config.funcs = &vmw_kms_funcs;
2200	dev->mode_config.min_width = 1;
2201	dev->mode_config.min_height = 1;
2202	dev->mode_config.max_width = dev_priv->texture_max_width;
2203	dev->mode_config.max_height = dev_priv->texture_max_height;
2204	dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
2205	dev->mode_config.prefer_shadow_fbdev = !dev_priv->has_mob;
2206
2207	drm_mode_create_suggested_offset_properties(dev);
2208	vmw_kms_create_hotplug_mode_update_property(dev_priv);
2209
2210	ret = vmw_kms_stdu_init_display(dev_priv);
2211	if (ret) {
2212		ret = vmw_kms_sou_init_display(dev_priv);
2213		if (ret) /* Fallback */
2214			ret = vmw_kms_ldu_init_display(dev_priv);
2215	}
2216	BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
2217	drm_info(&dev_priv->drm, "%s display unit initialized\n",
2218		 display_unit_names[dev_priv->active_display_unit]);
2219
2220	return ret;
2221}
2222
2223int vmw_kms_close(struct vmw_private *dev_priv)
2224{
2225	int ret = 0;
2226
2227	/*
2228	 * Docs say we should take the lock before calling this function,
2229	 * but since it destroys encoders and our destructor calls
2230	 * drm_encoder_cleanup(), which takes the lock, we would deadlock.
2231	 */
2232	drm_mode_config_cleanup(&dev_priv->drm);
2233	if (dev_priv->active_display_unit == vmw_du_legacy)
2234		ret = vmw_kms_ldu_close_display(dev_priv);
2235
2236	return ret;
2237}
2238
2239int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
2240				struct drm_file *file_priv)
2241{
2242	struct drm_vmw_cursor_bypass_arg *arg = data;
2243	struct vmw_display_unit *du;
2244	struct drm_crtc *crtc;
2245	int ret = 0;
2246
2247	mutex_lock(&dev->mode_config.mutex);
2248	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
2249
2250		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2251			du = vmw_crtc_to_du(crtc);
2252			du->hotspot_x = arg->xhot;
2253			du->hotspot_y = arg->yhot;
2254		}
2255
2256		mutex_unlock(&dev->mode_config.mutex);
2257		return 0;
2258	}
2259
2260	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
2261	if (!crtc) {
2262		ret = -ENOENT;
2263		goto out;
2264	}
2265
2266	du = vmw_crtc_to_du(crtc);
2267
2268	du->hotspot_x = arg->xhot;
2269	du->hotspot_y = arg->yhot;
2270
2271out:
2272	mutex_unlock(&dev->mode_config.mutex);
2273
2274	return ret;
2275}
2276
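/**
 * vmw_kms_write_svga - Program the device registers for a display mode
 *
 * @vmw_priv: Pointer to device private struct.
 * @width: mode width in pixels.
 * @height: mode height in pixels.
 * @pitch: scanline pitch in bytes.
 * @bpp: bits per pixel.
 * @depth: color depth, which must match what the host expects for @bpp.
 *
 * Returns 0 on success, -EINVAL if the host reports a different depth.
 */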
2277int vmw_kms_write_svga(struct vmw_private *vmw_priv,
2278			unsigned width, unsigned height, unsigned pitch,
2279			unsigned bpp, unsigned depth)
2280{
2281	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
2282		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
2283	else if (vmw_fifo_have_pitchlock(vmw_priv))
2284		vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
2285	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
2286	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
2287	if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
2288		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
2289
2290	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
2291		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
2292			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
2293		return -EINVAL;
2294	}
2295
2296	return 0;
2297}
2298
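/*
 * A mode fits if pitch * height stays below the primary memory limit
 * (screen target) or the VRAM size (other display units). For example, a
 * 1920x1080 mode at 32 bpp needs 1920 * 4 * 1080 bytes, about 7.9 MB.
 */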
2299bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
2300				uint32_t pitch,
2301				uint32_t height)
2302{
2303	return ((u64) pitch * (u64) height) < (u64)
2304		((dev_priv->active_display_unit == vmw_du_screen_target) ?
2305		 dev_priv->max_primary_mem : dev_priv->vram_size);
2306}
2307
2308/**
2309 * vmw_du_update_layout - Update the display unit with topology from resolution
2310 * plugin and generate DRM uevent
2311 * @dev_priv: device private
2312 * @num_rects: number of drm_rect in rects
2313 * @rects: topology to update
2314 */
2315static int vmw_du_update_layout(struct vmw_private *dev_priv,
2316				unsigned int num_rects, struct drm_rect *rects)
2317{
2318	struct drm_device *dev = &dev_priv->drm;
2319	struct vmw_display_unit *du;
2320	struct drm_connector *con;
2321	struct drm_connector_list_iter conn_iter;
2322	struct drm_modeset_acquire_ctx ctx;
2323	struct drm_crtc *crtc;
2324	int ret;
2325
2326	/* Currently gui_x/y is protected with the crtc mutex */
2327	mutex_lock(&dev->mode_config.mutex);
2328	drm_modeset_acquire_init(&ctx, 0);
2329retry:
2330	drm_for_each_crtc(crtc, dev) {
2331		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2332		if (ret < 0) {
2333			if (ret == -EDEADLK) {
2334				drm_modeset_backoff(&ctx);
2335				goto retry;
2336			}
2337			goto out_fini;
2338		}
2339	}
2340
2341	drm_connector_list_iter_begin(dev, &conn_iter);
2342	drm_for_each_connector_iter(con, &conn_iter) {
2343		du = vmw_connector_to_du(con);
2344		if (num_rects > du->unit) {
2345			du->pref_width = drm_rect_width(&rects[du->unit]);
2346			du->pref_height = drm_rect_height(&rects[du->unit]);
2347			du->pref_active = true;
2348			du->gui_x = rects[du->unit].x1;
2349			du->gui_y = rects[du->unit].y1;
2350		} else {
2351			du->pref_width  = VMWGFX_MIN_INITIAL_WIDTH;
2352			du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
2353			du->pref_active = false;
2354			du->gui_x = 0;
2355			du->gui_y = 0;
2356		}
2357	}
2358	drm_connector_list_iter_end(&conn_iter);
2359
2360	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2361		du = vmw_connector_to_du(con);
2362		if (num_rects > du->unit) {
2363			drm_object_property_set_value
2364			  (&con->base, dev->mode_config.suggested_x_property,
2365			   du->gui_x);
2366			drm_object_property_set_value
2367			  (&con->base, dev->mode_config.suggested_y_property,
2368			   du->gui_y);
2369		} else {
2370			drm_object_property_set_value
2371			  (&con->base, dev->mode_config.suggested_x_property,
2372			   0);
2373			drm_object_property_set_value
2374			  (&con->base, dev->mode_config.suggested_y_property,
2375			   0);
2376		}
2377		con->status = vmw_du_connector_detect(con, true);
2378	}
2379out_fini:
2380	drm_modeset_drop_locks(&ctx);
2381	drm_modeset_acquire_fini(&ctx);
2382	mutex_unlock(&dev->mode_config.mutex);
2383
2384	drm_sysfs_hotplug_event(dev);
2385
2386	return 0;
2387}
2388
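/* Only the upper 8 bits of each 16-bit gamma component reach the palette. */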
2389int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2390			  u16 *r, u16 *g, u16 *b,
2391			  uint32_t size,
2392			  struct drm_modeset_acquire_ctx *ctx)
2393{
2394	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2395	int i;
2396
2397	for (i = 0; i < size; i++) {
2398		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2399			  r[i], g[i], b[i]);
2400		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2401		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2402		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2403	}
2404
2405	return 0;
2406}
2407
2408int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2409{
2410	return 0;
2411}
2412
2413enum drm_connector_status
2414vmw_du_connector_detect(struct drm_connector *connector, bool force)
2415{
2416	uint32_t num_displays;
2417	struct drm_device *dev = connector->dev;
2418	struct vmw_private *dev_priv = vmw_priv(dev);
2419	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2420
2421	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2422
2423	return ((vmw_connector_to_du(connector)->unit < num_displays &&
2424		 du->pref_active) ?
2425		connector_status_connected : connector_status_disconnected);
2426}
2427
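/*
 * Built-in display modes offered on vmwgfx connectors, typically filtered
 * against VRAM limits before being exposed to user space.
 */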
2428static struct drm_display_mode vmw_kms_connector_builtin[] = {
2429	/* 640x480@60Hz */
2430	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
2431		   752, 800, 0, 480, 489, 492, 525, 0,
2432		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2433	/* 800x600@60Hz */
2434	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
2435		   968, 1056, 0, 600, 601, 605, 628, 0,
2436		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2437	/* 1024x768@60Hz */
2438	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
2439		   1184, 1344, 0, 768, 771, 777, 806, 0,
2440		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2441	/* 1152x864@75Hz */
2442	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
2443		   1344, 1600, 0, 864, 865, 868, 900, 0,
2444		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2445	/* 1280x720@60Hz */
2446	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
2447		   1472, 1664, 0, 720, 723, 728, 748, 0,
2448		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2449	/* 1280x768@60Hz */
2450	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
2451		   1472, 1664, 0, 768, 771, 778, 798, 0,
2452		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2453	/* 1280x800@60Hz */
2454	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
2455		   1480, 1680, 0, 800, 803, 809, 831, 0,
2456		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2457	/* 1280x960@60Hz */
2458	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
2459		   1488, 1800, 0, 960, 961, 964, 1000, 0,
2460		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2461	/* 1280x1024@60Hz */
2462	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
2463		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
2464		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2465	/* 1360x768@60Hz */
2466	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
2467		   1536, 1792, 0, 768, 771, 777, 795, 0,
2468		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2469	/* 1400x1050@60Hz */
2470	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
2471		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
2472		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2473	/* 1440x900@60Hz */
2474	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
2475		   1672, 1904, 0, 900, 903, 909, 934, 0,
2476		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2477	/* 1600x1200@60Hz */
2478	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
2479		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
2480		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2481	/* 1680x1050@60Hz */
2482	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
2483		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
2484		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2485	/* 1792x1344@60Hz */
2486	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2487		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
2488		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2489	/* 1856x1392@60Hz */
2490	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2491		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
2492		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2493	/* 1920x1080@60Hz */
2494	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
2495		   2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
2496		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2497	/* 1920x1200@60Hz */
2498	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2499		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
2500		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2501	/* 1920x1440@60Hz */
2502	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2503		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
2504		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2505	/* 2560x1440@60Hz */
2506	{ DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
2507		   2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
2508		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2509	/* 2560x1600@60Hz */
2510	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
2511		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
2512		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2513	/* 2880x1800@60Hz */
2514	{ DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
2515		   2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
2516		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2517	/* 3840x2160@60Hz */
2518	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
2519		   3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
2520		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2521	/* 3840x2400@60Hz */
2522	{ DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
2523		   3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
2524		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2525	/* Terminate */
2526	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
2527};
2528
2529/**
2530 * vmw_guess_mode_timing - Provide fake timings for a
2531 * 60Hz vrefresh mode.
2532 *
2533 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2534 * members filled in.
2535 */
2536void vmw_guess_mode_timing(struct drm_display_mode *mode)
2537{
2538	mode->hsync_start = mode->hdisplay + 50;
2539	mode->hsync_end = mode->hsync_start + 50;
2540	mode->htotal = mode->hsync_end + 50;
2541
2542	mode->vsync_start = mode->vdisplay + 50;
2543	mode->vsync_end = mode->vsync_start + 50;
2544	mode->vtotal = mode->vsync_end + 50;
2545
2546	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2547}
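
/*
 * Worked example (illustrative, not part of the driver): with the 150
 * pixels/lines of fake blanking added above, a 1024x768 mode gets
 *
 *	htotal = 1024 + 150 = 1174
 *	vtotal =  768 + 150 =  918
 *	clock  = 1174 * 918 / 100 * 6 = 64662 kHz
 *
 * which corresponds to 64662000 / (1174 * 918) ~= 60 Hz vrefresh;
 * the "/ 100 * 6" encodes the 60 / 1000 Hz-to-kHz conversion in
 * integer arithmetic.
 */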
2548
2549
2550int vmw_du_connector_fill_modes(struct drm_connector *connector,
2551				uint32_t max_width, uint32_t max_height)
2552{
2553	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2554	struct drm_device *dev = connector->dev;
2555	struct vmw_private *dev_priv = vmw_priv(dev);
2556	struct drm_display_mode *mode = NULL;
2557	struct drm_display_mode *bmode;
2558	struct drm_display_mode prefmode = { DRM_MODE("preferred",
2559		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2560		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2561		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2562	};
2563	int i;
2564	u32 assumed_bpp = 4;
2565
2566	if (dev_priv->assume_16bpp)
2567		assumed_bpp = 2;
2568
2569	max_width  = min(max_width,  dev_priv->texture_max_width);
2570	max_height = min(max_height, dev_priv->texture_max_height);
2571
2572	/*
2573	 * For STDU, a mode is additionally limited by the
2574	 * SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
2575	 */
2576	if (dev_priv->active_display_unit == vmw_du_screen_target) {
2577		max_width  = min(max_width,  dev_priv->stdu_max_width);
2578		max_height = min(max_height, dev_priv->stdu_max_height);
2579	}
2580
2581	/* Add preferred mode */
2582	mode = drm_mode_duplicate(dev, &prefmode);
2583	if (!mode)
2584		return 0;
2585	mode->hdisplay = du->pref_width;
2586	mode->vdisplay = du->pref_height;
2587	vmw_guess_mode_timing(mode);
2588	drm_mode_set_name(mode);
2589
2590	if (vmw_kms_validate_mode_vram(dev_priv,
2591					mode->hdisplay * assumed_bpp,
2592					mode->vdisplay)) {
2593		drm_mode_probed_add(connector, mode);
2594	} else {
2595		drm_mode_destroy(dev, mode);
2596		mode = NULL;
2597	}
2598
2599	if (du->pref_mode) {
2600		list_del_init(&du->pref_mode->head);
2601		drm_mode_destroy(dev, du->pref_mode);
2602	}
2603
2604	/* mode might be NULL here, this is intended */
2605	du->pref_mode = mode;
2606
2607	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
2608		bmode = &vmw_kms_connector_builtin[i];
2609		if (bmode->hdisplay > max_width ||
2610		    bmode->vdisplay > max_height)
2611			continue;
2612
2613		if (!vmw_kms_validate_mode_vram(dev_priv,
2614						bmode->hdisplay * assumed_bpp,
2615						bmode->vdisplay))
2616			continue;
2617
2618		mode = drm_mode_duplicate(dev, bmode);
2619		if (!mode)
2620			return 0;
2621
2622		drm_mode_probed_add(connector, mode);
2623	}
2624
2625	drm_connector_list_update(connector);
2626	/* Move the preferred mode first, to help apps pick the right mode. */
2627	drm_mode_sort(&connector->modes);
2628
2629	return 1;
2630}
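
/*
 * Note on the vmw_kms_validate_mode_vram() calls above (illustrative
 * numbers, not from the driver): the second argument is the mode's
 * pitch in bytes (hdisplay * assumed_bpp), so e.g. a 1920x1080 mode at
 * the assumed 4 bytes per pixel is only added to the connector's mode
 * list if roughly
 *
 *	1920 * 4 * 1080 = 8294400 bytes (~7.9 MiB)
 *
 * of VRAM are available for the primary surface.
 */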
2631
2632/**
2633 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2634 * @dev: drm device for the ioctl
2635 * @data: data pointer for the ioctl
2636 * @file_priv: drm file for the ioctl call
2637 *
2638 * Update preferred topology of display unit as per ioctl request. The topology
2639 * is expressed as array of drm_vmw_rect.
2640 * e.g.
2641 * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2642 *
2643 * NOTE:
2644 * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
2645 * Besides the device limits on the topology, x + w and y + h (lower right)
2646 * cannot be greater than INT_MAX; a topology beyond these limits returns an error.
2647 *
2648 * Returns:
2649 * Zero on success, negative errno on failure.
2650 */
2651int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2652				struct drm_file *file_priv)
2653{
2654	struct vmw_private *dev_priv = vmw_priv(dev);
2655	struct drm_mode_config *mode_config = &dev->mode_config;
2656	struct drm_vmw_update_layout_arg *arg =
2657		(struct drm_vmw_update_layout_arg *)data;
2658	void __user *user_rects;
2659	struct drm_vmw_rect *rects;
2660	struct drm_rect *drm_rects;
2661	unsigned int rects_size;
2662	int ret, i;
2663
2664	if (!arg->num_outputs) {
2665		struct drm_rect def_rect = {0, 0,
2666					    VMWGFX_MIN_INITIAL_WIDTH,
2667					    VMWGFX_MIN_INITIAL_HEIGHT};
2668		vmw_du_update_layout(dev_priv, 1, &def_rect);
2669		return 0;
2670	}
2671
2672	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2673	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2674			GFP_KERNEL);
2675	if (unlikely(!rects))
2676		return -ENOMEM;
2677
2678	user_rects = (void __user *)(unsigned long)arg->rects;
2679	ret = copy_from_user(rects, user_rects, rects_size);
2680	if (unlikely(ret != 0)) {
2681		DRM_ERROR("Failed to get rects.\n");
2682		ret = -EFAULT;
2683		goto out_free;
2684	}
2685
2686	drm_rects = (struct drm_rect *)rects;
2687
2688	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2689	for (i = 0; i < arg->num_outputs; i++) {
2690		struct drm_vmw_rect curr_rect;
2691
2692		/* Verify user-space rects for overflow as the kernel uses drm_rect */
2693		if ((rects[i].x + rects[i].w > INT_MAX) ||
2694		    (rects[i].y + rects[i].h > INT_MAX)) {
2695			ret = -ERANGE;
2696			goto out_free;
2697		}
2698
2699		curr_rect = rects[i];
2700		drm_rects[i].x1 = curr_rect.x;
2701		drm_rects[i].y1 = curr_rect.y;
2702		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2703		drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2704
2705		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
2706			      drm_rects[i].x1, drm_rects[i].y1,
2707			      drm_rects[i].x2, drm_rects[i].y2);
2708
2709		/*
2710		 * Currently this check limits the topology to
2711		 * mode_config->max_width/height (which actually is the max
2712		 * texture size supported by the virtual device). This limit is
2713		 * here to accommodate window managers that create one big
2714		 * framebuffer for the whole topology.
2715		 */
2716		if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
2717		    drm_rects[i].x2 > mode_config->max_width ||
2718		    drm_rects[i].y2 > mode_config->max_height) {
2719			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2720				      drm_rects[i].x1, drm_rects[i].y1,
2721				      drm_rects[i].x2, drm_rects[i].y2);
2722			ret = -EINVAL;
2723			goto out_free;
2724		}
2725	}
2726
2727	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2728
2729	if (ret == 0)
2730		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2731
2732out_free:
2733	kfree(rects);
2734	return ret;
2735}
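
/*
 * Example of the conversion performed above (illustrative, not part of
 * the driver): the ioctl rectangle from the kernel-doc example,
 * [640 0 800 600], i.e. {x = 640, y = 0, w = 800, h = 600}, becomes
 * the struct drm_rect {x1 = 640, y1 = 0, x2 = 1440, y2 = 600} that is
 * handed to vmw_kms_check_display_memory() and vmw_du_update_layout().
 */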
2736
2737/**
2738 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2739 * on a set of cliprects and a set of display units.
2740 *
2741 * @dev_priv: Pointer to a device private structure.
2742 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2743 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
2744 * Cliprects are given in framebuffer coordinates.
2745 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2746 * be NULL. Cliprects are given in source coordinates.
2747 * @dest_x: X coordinate offset for the crtc / destination clip rects.
2748 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2749 * @num_clips: Number of cliprects in the @clips or @vclips array.
2750 * @increment: Integer with which to increment the clip counter when looping.
2751 * Used to skip a predetermined number of clip rects.
2752 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2753 */
2754int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2755			 struct vmw_framebuffer *framebuffer,
2756			 const struct drm_clip_rect *clips,
2757			 const struct drm_vmw_rect *vclips,
2758			 s32 dest_x, s32 dest_y,
2759			 int num_clips,
2760			 int increment,
2761			 struct vmw_kms_dirty *dirty)
2762{
2763	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2764	struct drm_crtc *crtc;
2765	u32 num_units = 0;
2766	u32 i, k;
2767
2768	dirty->dev_priv = dev_priv;
2769
2770	/* If crtc is passed, no need to iterate over other display units */
2771	if (dirty->crtc) {
2772		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2773	} else {
2774		list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
2775				    head) {
2776			struct drm_plane *plane = crtc->primary;
2777
2778			if (plane->state->fb == &framebuffer->base)
2779				units[num_units++] = vmw_crtc_to_du(crtc);
2780		}
2781	}
2782
2783	for (k = 0; k < num_units; k++) {
2784		struct vmw_display_unit *unit = units[k];
2785		s32 crtc_x = unit->crtc.x;
2786		s32 crtc_y = unit->crtc.y;
2787		s32 crtc_width = unit->crtc.mode.hdisplay;
2788		s32 crtc_height = unit->crtc.mode.vdisplay;
2789		const struct drm_clip_rect *clips_ptr = clips;
2790		const struct drm_vmw_rect *vclips_ptr = vclips;
2791
2792		dirty->unit = unit;
2793		if (dirty->fifo_reserve_size > 0) {
2794			dirty->cmd = VMW_CMD_RESERVE(dev_priv,
2795						      dirty->fifo_reserve_size);
2796			if (!dirty->cmd)
2797				return -ENOMEM;
2798
2799			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2800		}
2801		dirty->num_hits = 0;
2802		for (i = 0; i < num_clips; i++, clips_ptr += increment,
2803		       vclips_ptr += increment) {
2804			s32 clip_left;
2805			s32 clip_top;
2806
2807			/*
2808			 * Select clip array type. Note that integer type
2809			 * in @clips is unsigned short, whereas in @vclips
2810			 * it's 32-bit.
2811			 */
2812			if (clips) {
2813				dirty->fb_x = (s32) clips_ptr->x1;
2814				dirty->fb_y = (s32) clips_ptr->y1;
2815				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2816					crtc_x;
2817				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2818					crtc_y;
2819			} else {
2820				dirty->fb_x = vclips_ptr->x;
2821				dirty->fb_y = vclips_ptr->y;
2822				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2823					dest_x - crtc_x;
2824				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2825					dest_y - crtc_y;
2826			}
2827
2828			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2829			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2830
2831			/* Skip this clip if it's outside the crtc region */
2832			if (dirty->unit_x1 >= crtc_width ||
2833			    dirty->unit_y1 >= crtc_height ||
2834			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2835				continue;
2836
2837			/* Clip right and bottom to crtc limits */
2838			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2839					       crtc_width);
2840			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2841					       crtc_height);
2842
2843			/* Clip left and top to crtc limits */
2844			clip_left = min_t(s32, dirty->unit_x1, 0);
2845			clip_top = min_t(s32, dirty->unit_y1, 0);
2846			dirty->unit_x1 -= clip_left;
2847			dirty->unit_y1 -= clip_top;
2848			dirty->fb_x -= clip_left;
2849			dirty->fb_y -= clip_top;
2850
2851			dirty->clip(dirty);
2852		}
2853
2854		dirty->fifo_commit(dirty);
2855	}
2856
2857	return 0;
2858}
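
/*
 * Worked example of the clipping above (illustrative, not part of the
 * driver): take a crtc at (crtc_x, crtc_y) = (640, 0) with an 800x600
 * mode, dest_x = dest_y = 0 and a framebuffer clip (600, 100)-(700, 200).
 * The translation gives unit coordinates (-40, 100)-(60, 200); the clip
 * is not skipped (it overlaps the crtc), the right/bottom edges are
 * already within limits, and the left edge is trimmed:
 *
 *	clip_left = min(-40, 0) = -40
 *	unit_x1   = -40 - (-40) = 0
 *	fb_x      = 600 - (-40) = 640
 *
 * so dirty->clip() sees unit rect (0, 100)-(60, 200) sourced from
 * framebuffer offset (640, 100).
 */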
2859
2860/**
2861 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
2862 * cleanup and fencing
2863 * @dev_priv: Pointer to the device-private struct
2864 * @file_priv: Pointer identifying the client when user-space fencing is used
2865 * @ctx: Pointer to the validation context
2866 * @out_fence: If non-NULL, returned refcounted fence-pointer
2867 * @user_fence_rep: If non-NULL, pointer to user-space address area
2868 * in which to copy user-space fence info
2869 */
2870void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2871				      struct drm_file *file_priv,
2872				      struct vmw_validation_context *ctx,
2873				      struct vmw_fence_obj **out_fence,
2874				      struct drm_vmw_fence_rep __user *
2875				      user_fence_rep)
2876{
2877	struct vmw_fence_obj *fence = NULL;
2878	uint32_t handle = 0;
2879	int ret = 0;
2880
2881	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2882	    out_fence)
2883		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2884						 file_priv ? &handle : NULL);
2885	vmw_validation_done(ctx, fence);
2886	if (file_priv)
2887		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2888					    ret, user_fence_rep, fence,
2889					    handle, -1);
2890	if (out_fence)
2891		*out_fence = fence;
2892	else
2893		vmw_fence_obj_unreference(&fence);
2894}
2895
2896/**
2897 * vmw_kms_update_proxy - Helper function to update a proxy surface from
2898 * its backing MOB.
2899 *
2900 * @res: Pointer to the surface resource
2901 * @clips: Clip rects in framebuffer (surface) space.
2902 * @num_clips: Number of clips in @clips.
2903 * @increment: Integer with which to increment the clip counter when looping.
2904 * Used to skip a predetermined number of clip rects.
2905 *
2906 * This function makes sure the proxy surface is updated from its backing MOB
2907 * using the region given by @clips. The surface resource @res and its backing
2908 * MOB needs to be reserved and validated on call.
2909 */
2910int vmw_kms_update_proxy(struct vmw_resource *res,
2911			 const struct drm_clip_rect *clips,
2912			 unsigned num_clips,
2913			 int increment)
2914{
2915	struct vmw_private *dev_priv = res->dev_priv;
2916	struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
2917	struct {
2918		SVGA3dCmdHeader header;
2919		SVGA3dCmdUpdateGBImage body;
2920	} *cmd;
2921	SVGA3dBox *box;
2922	size_t copy_size = 0;
2923	int i;
2924
2925	if (!clips)
2926		return 0;
2927
2928	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
2929	if (!cmd)
2930		return -ENOMEM;
2931
2932	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
2933		box = &cmd->body.box;
2934
2935		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2936		cmd->header.size = sizeof(cmd->body);
2937		cmd->body.image.sid = res->id;
2938		cmd->body.image.face = 0;
2939		cmd->body.image.mipmap = 0;
2940
2941		if (clips->x1 > size->width || clips->x2 > size->width ||
2942		    clips->y1 > size->height || clips->y2 > size->height) {
2943			DRM_ERROR("Invalid clip outside of framebuffer.\n");
2944			return -EINVAL;
2945		}
2946
2947		box->x = clips->x1;
2948		box->y = clips->y1;
2949		box->z = 0;
2950		box->w = clips->x2 - clips->x1;
2951		box->h = clips->y2 - clips->y1;
2952		box->d = 1;
2953
2954		copy_size += sizeof(*cmd);
2955	}
2956
2957	vmw_cmd_commit(dev_priv, copy_size);
2958
2959	return 0;
2960}
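
/*
 * Example of the clip-to-box conversion above (illustrative, not part
 * of the driver): a clip rect (x1, y1)-(x2, y2) = (16, 32)-(48, 64)
 * yields an SVGA3dBox of {x = 16, y = 32, z = 0, w = 32, h = 32, d = 1},
 * i.e. a one-slice 2D region of the proxy surface to update from its
 * backing MOB.
 */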
2961
2962/**
2963 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2964 * property.
2965 *
2966 * @dev_priv: Pointer to a device private struct.
2967 *
2968 * Sets up the implicit placement property unless it's already set up.
2969 */
2970void
2971vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
2972{
2973	if (dev_priv->implicit_placement_property)
2974		return;
2975
2976	dev_priv->implicit_placement_property =
2977		drm_property_create_range(&dev_priv->drm,
2978					  DRM_MODE_PROP_IMMUTABLE,
2979					  "implicit_placement", 0, 1);
2980}
2981
2982/**
2983 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2984 *
2985 * @dev: Pointer to the drm device
2986 * Return: 0 on success. Negative error code on failure.
2987 */
2988int vmw_kms_suspend(struct drm_device *dev)
2989{
2990	struct vmw_private *dev_priv = vmw_priv(dev);
2991
2992	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2993	if (IS_ERR(dev_priv->suspend_state)) {
2994		int ret = PTR_ERR(dev_priv->suspend_state);
2995
2996		DRM_ERROR("Failed kms suspend: %d\n", ret);
2997		dev_priv->suspend_state = NULL;
2998
2999		return ret;
3000	}
3001
3002	return 0;
3003}
3004
3005
3006/**
3007 * vmw_kms_resume - Re-enable modesetting and restore state
3008 *
3009 * @dev: Pointer to the drm device
3010 * Return: 0 on success. Negative error code on failure.
3011 *
3012 * State is resumed from a previous vmw_kms_suspend(). It's illegal
3013 * to call this function without a previous vmw_kms_suspend().
3014 */
3015int vmw_kms_resume(struct drm_device *dev)
3016{
3017	struct vmw_private *dev_priv = vmw_priv(dev);
3018	int ret;
3019
3020	if (WARN_ON(!dev_priv->suspend_state))
3021		return 0;
3022
3023	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
3024	dev_priv->suspend_state = NULL;
3025
3026	return ret;
3027}
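
/*
 * Sketch of the intended pairing (illustrative; the actual PM hooks
 * live elsewhere in the driver): a freeze/restore sequence is expected
 * to look like
 *
 *	ret = vmw_kms_suspend(&dev_priv->drm);	/- saves suspend_state
 *	... device is powered down / migrated ...
 *	ret = vmw_kms_resume(&dev_priv->drm);	/- consumes suspend_state
 *
 * Calling vmw_kms_resume() without a prior successful vmw_kms_suspend()
 * triggers the WARN_ON above and is a no-op.
 */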
3028
3029/**
3030 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
3031 *
3032 * @dev: Pointer to the drm device
3033 */
3034void vmw_kms_lost_device(struct drm_device *dev)
3035{
3036	drm_atomic_helper_shutdown(dev);
3037}
3038
3039/**
3040 * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
3041 * @update: The closure structure.
3042 *
3043 * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
3044 * update on display unit.
3045 *
3046 * Return: 0 on success or a negative error code on failure.
3047 */
3048int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
3049{
3050	struct drm_plane_state *state = update->plane->state;
3051	struct drm_plane_state *old_state = update->old_state;
3052	struct drm_atomic_helper_damage_iter iter;
3053	struct drm_rect clip;
3054	struct drm_rect bb;
3055	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
3056	uint32_t reserved_size = 0;
3057	uint32_t submit_size = 0;
3058	uint32_t curr_size = 0;
3059	uint32_t num_hits = 0;
3060	void *cmd_start;
3061	char *cmd_next;
3062	int ret;
3063
3064	/*
3065	 * Iterate in advance to check if really need plane update and find the
3066	 * number of clips that actually are in plane src for fifo allocation.
3067	 */
3068	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
3069	drm_atomic_for_each_plane_damage(&iter, &clip)
3070		num_hits++;
3071
3072	if (num_hits == 0)
3073		return 0;
3074
3075	if (update->vfb->bo) {
3076		struct vmw_framebuffer_bo *vfbbo =
3077			container_of(update->vfb, typeof(*vfbbo), base);
3078
3079		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
3080					    update->cpu_blit);
3081	} else {
3082		struct vmw_framebuffer_surface *vfbs =
3083			container_of(update->vfb, typeof(*vfbs), base);
3084
3085		ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
3086						  0, VMW_RES_DIRTY_NONE, NULL,
3087						  NULL);
3088	}
3089
3090	if (ret)
3091		return ret;
3092
3093	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
3094	if (ret)
3095		goto out_unref;
3096
3097	reserved_size = update->calc_fifo_size(update, num_hits);
3098	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
3099	if (!cmd_start) {
3100		ret = -ENOMEM;
3101		goto out_revert;
3102	}
3103
3104	cmd_next = cmd_start;
3105
3106	if (update->post_prepare) {
3107		curr_size = update->post_prepare(update, cmd_next);
3108		cmd_next += curr_size;
3109		submit_size += curr_size;
3110	}
3111
3112	if (update->pre_clip) {
3113		curr_size = update->pre_clip(update, cmd_next, num_hits);
3114		cmd_next += curr_size;
3115		submit_size += curr_size;
3116	}
3117
3118	bb.x1 = INT_MAX;
3119	bb.y1 = INT_MAX;
3120	bb.x2 = INT_MIN;
3121	bb.y2 = INT_MIN;
3122
3123	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
3124	drm_atomic_for_each_plane_damage(&iter, &clip) {
3125		uint32_t fb_x = clip.x1;
3126		uint32_t fb_y = clip.y1;
3127
3128		vmw_du_translate_to_crtc(state, &clip);
3129		if (update->clip) {
3130			curr_size = update->clip(update, cmd_next, &clip, fb_x,
3131						 fb_y);
3132			cmd_next += curr_size;
3133			submit_size += curr_size;
3134		}
3135		bb.x1 = min_t(int, bb.x1, clip.x1);
3136		bb.y1 = min_t(int, bb.y1, clip.y1);
3137		bb.x2 = max_t(int, bb.x2, clip.x2);
3138		bb.y2 = max_t(int, bb.y2, clip.y2);
3139	}
3140
3141	curr_size = update->post_clip(update, cmd_next, &bb);
3142	submit_size += curr_size;
3143
3144	if (reserved_size < submit_size)
3145		submit_size = 0;
3146
3147	vmw_cmd_commit(update->dev_priv, submit_size);
3148
3149	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
3150					 update->out_fence, NULL);
3151	return ret;
3152
3153out_revert:
3154	vmw_validation_revert(&val_ctx);
3155
3156out_unref:
3157	vmw_validation_unref_lists(&val_ctx);
3158	return ret;
3159}
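
/*
 * Usage sketch for the helper above (illustrative, not a verbatim
 * caller; the my_du_* callbacks are hypothetical): a display unit fills
 * in a struct vmw_du_update_plane with its command-building callbacks
 * and then invokes the helper, which reserves calc_fifo_size() bytes of
 * FIFO space and emits, in order, post_prepare(), pre_clip(), one
 * clip() per damage rect and finally post_clip() with the damage
 * bounding box:
 *
 *	struct vmw_du_update_plane update = {
 *		.plane          = plane,
 *		.old_state      = old_state,
 *		.dev_priv       = dev_priv,
 *		.vfb            = vmw_framebuffer_to_vfb(plane->state->fb),
 *		.calc_fifo_size = my_du_calc_fifo_size,
 *		.pre_clip       = my_du_pre_clip,
 *		.clip           = my_du_clip,
 *		.post_clip      = my_du_post_clip,
 *	};
 *
 *	ret = vmw_du_helper_plane_update(&update);
 */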