   1/**************************************************************************
   2 *
   3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include "vmwgfx_kms.h"
  29
  30
   31/* Might need an hrtimer here? */
  32#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
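/*
 * For example, with HZ == 250 this evaluates to 250 / 60 == 4 jiffies
 * (roughly 62 updates per second); with HZ < 60 the integer division
 * would yield 0, which the fallback clamps to a minimum of 1 jiffy.
 */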
  33
  34void vmw_du_cleanup(struct vmw_display_unit *du)
  35{
  36	if (du->cursor_surface)
  37		vmw_surface_unreference(&du->cursor_surface);
  38	if (du->cursor_dmabuf)
  39		vmw_dmabuf_unreference(&du->cursor_dmabuf);
  40	drm_connector_unregister(&du->connector);
  41	drm_crtc_cleanup(&du->crtc);
  42	drm_encoder_cleanup(&du->encoder);
  43	drm_connector_cleanup(&du->connector);
  44}
  45
  46/*
  47 * Display Unit Cursor functions
  48 */
  49
  50int vmw_cursor_update_image(struct vmw_private *dev_priv,
  51			    u32 *image, u32 width, u32 height,
  52			    u32 hotspotX, u32 hotspotY)
  53{
  54	struct {
  55		u32 cmd;
  56		SVGAFifoCmdDefineAlphaCursor cursor;
  57	} *cmd;
  58	u32 image_size = width * height * 4;
  59	u32 cmd_size = sizeof(*cmd) + image_size;
  60
  61	if (!image)
  62		return -EINVAL;
  63
  64	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
  65	if (unlikely(cmd == NULL)) {
  66		DRM_ERROR("Fifo reserve failed.\n");
  67		return -ENOMEM;
  68	}
  69
  70	memset(cmd, 0, sizeof(*cmd));
  71
  72	memcpy(&cmd[1], image, image_size);
  73
  74	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
  75	cmd->cursor.id = 0;
  76	cmd->cursor.width = width;
  77	cmd->cursor.height = height;
  78	cmd->cursor.hotspotX = hotspotX;
  79	cmd->cursor.hotspotY = hotspotY;
  80
  81	vmw_fifo_commit_flush(dev_priv, cmd_size);
  82
  83	return 0;
  84}
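/*
 * Usage sketch (illustrative only): callers pass a 32-bit ARGB image,
 * four bytes per pixel, which is what the image_size computation above
 * assumes. The snooped-cursor path later in this file, for instance, does:
 *
 *	ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
 *				      64, 64, hotspot_x, hotspot_y);
 */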
  85
  86int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
  87			     struct vmw_dma_buffer *dmabuf,
  88			     u32 width, u32 height,
  89			     u32 hotspotX, u32 hotspotY)
  90{
  91	struct ttm_bo_kmap_obj map;
  92	unsigned long kmap_offset;
  93	unsigned long kmap_num;
  94	void *virtual;
  95	bool dummy;
  96	int ret;
  97
  98	kmap_offset = 0;
  99	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
 100
 101	ret = ttm_bo_reserve(&dmabuf->base, true, false, false, NULL);
 102	if (unlikely(ret != 0)) {
 103		DRM_ERROR("reserve failed\n");
 104		return -EINVAL;
 105	}
 106
 107	ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
 108	if (unlikely(ret != 0))
 109		goto err_unreserve;
 110
 111	virtual = ttm_kmap_obj_virtual(&map, &dummy);
 112	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
 113				      hotspotX, hotspotY);
 114
 115	ttm_bo_kunmap(&map);
 116err_unreserve:
 117	ttm_bo_unreserve(&dmabuf->base);
 118
 119	return ret;
 120}
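/*
 * For example, a 64x64 cursor needs 64 * 64 * 4 == 16384 bytes, so with
 * 4 KiB pages the kmap_num computation above rounds up to exactly 4 pages.
 */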
 121
 122
 123void vmw_cursor_update_position(struct vmw_private *dev_priv,
 124				bool show, int x, int y)
 125{
 126	u32 *fifo_mem = dev_priv->mmio_virt;
 127	uint32_t count;
 128
 129	vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
 130	vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
 131	vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
 132	count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
 133	vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
 134}
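/*
 * The SVGA_FIFO_CURSOR_COUNT bump appears to act as a sequence number:
 * incrementing it after the ON/X/Y writes signals the device that a new,
 * consistent set of cursor values is available. (This reading is inferred
 * from the write ordering here, not from device documentation.)
 */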
 135
 136
 137/*
 138 * vmw_du_crtc_cursor_set2 - Driver cursor_set2 callback.
 139 */
 140int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
 141			    uint32_t handle, uint32_t width, uint32_t height,
 142			    int32_t hot_x, int32_t hot_y)
 143{
 144	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
 145	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 146	struct vmw_surface *surface = NULL;
 147	struct vmw_dma_buffer *dmabuf = NULL;
 148	s32 hotspot_x, hotspot_y;
 149	int ret;
 150
 151	/*
 152	 * FIXME: Unclear whether there's any global state touched by the
 153	 * cursor_set function, especially vmw_cursor_update_position looks
 154	 * suspicious. For now take the easy route and reacquire all locks. We
 155	 * can do this since the caller in the drm core doesn't check anything
  156	 * which is protected by any locks.
 157	 */
 158	drm_modeset_unlock_crtc(crtc);
 159	drm_modeset_lock_all(dev_priv->dev);
 160	hotspot_x = hot_x + du->hotspot_x;
 161	hotspot_y = hot_y + du->hotspot_y;
 162
 163	/* A lot of the code assumes this */
 164	if (handle && (width != 64 || height != 64)) {
 165		ret = -EINVAL;
 166		goto out;
 167	}
 168
 169	if (handle) {
 170		struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 171
 172		ret = vmw_user_lookup_handle(dev_priv, tfile,
 173					     handle, &surface, &dmabuf);
 174		if (ret) {
 175			DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
 176			ret = -EINVAL;
 177			goto out;
 178		}
 179	}
 180
 181	/* need to do this before taking down old image */
 182	if (surface && !surface->snooper.image) {
 183		DRM_ERROR("surface not suitable for cursor\n");
 184		vmw_surface_unreference(&surface);
 185		ret = -EINVAL;
 186		goto out;
 187	}
 188
  189	/* take down the old cursor */
 190	if (du->cursor_surface) {
 191		du->cursor_surface->snooper.crtc = NULL;
 192		vmw_surface_unreference(&du->cursor_surface);
 193	}
 194	if (du->cursor_dmabuf)
 195		vmw_dmabuf_unreference(&du->cursor_dmabuf);
 196
  197	/* set up the new image */
 198	ret = 0;
 199	if (surface) {
 200		/* vmw_user_surface_lookup takes one reference */
 201		du->cursor_surface = surface;
 202
 203		du->cursor_surface->snooper.crtc = crtc;
 204		du->cursor_age = du->cursor_surface->snooper.age;
 205		ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
 206					      64, 64, hotspot_x, hotspot_y);
 207	} else if (dmabuf) {
  208		/* vmw_user_lookup_handle takes one reference */
 209		du->cursor_dmabuf = dmabuf;
 210
 211		ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
 212					       hotspot_x, hotspot_y);
 213	} else {
 214		vmw_cursor_update_position(dev_priv, false, 0, 0);
 215		goto out;
 216	}
 217
 218	if (!ret) {
 219		vmw_cursor_update_position(dev_priv, true,
 220					   du->cursor_x + hotspot_x,
 221					   du->cursor_y + hotspot_y);
 222		du->core_hotspot_x = hot_x;
 223		du->core_hotspot_y = hot_y;
 224	}
 225
 226out:
 227	drm_modeset_unlock_all(dev_priv->dev);
 228	drm_modeset_lock_crtc(crtc, crtc->cursor);
 229
 230	return ret;
 231}
 232
 233int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 234{
 235	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
 236	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 237	bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;
 238
 239	du->cursor_x = x + du->set_gui_x;
 240	du->cursor_y = y + du->set_gui_y;
 241
 242	/*
 243	 * FIXME: Unclear whether there's any global state touched by the
 244	 * cursor_set function, especially vmw_cursor_update_position looks
 245	 * suspicious. For now take the easy route and reacquire all locks. We
 246	 * can do this since the caller in the drm core doesn't check anything
  247	 * which is protected by any locks.
 248	 */
 249	drm_modeset_unlock_crtc(crtc);
 250	drm_modeset_lock_all(dev_priv->dev);
 251
 252	vmw_cursor_update_position(dev_priv, shown,
 253				   du->cursor_x + du->hotspot_x +
 254				   du->core_hotspot_x,
 255				   du->cursor_y + du->hotspot_y +
 256				   du->core_hotspot_y);
 257
 258	drm_modeset_unlock_all(dev_priv->dev);
 259	drm_modeset_lock_crtc(crtc, crtc->cursor);
 260
 261	return 0;
 262}
 263
 264void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 265			  struct ttm_object_file *tfile,
 266			  struct ttm_buffer_object *bo,
 267			  SVGA3dCmdHeader *header)
 268{
 269	struct ttm_bo_kmap_obj map;
 270	unsigned long kmap_offset;
 271	unsigned long kmap_num;
 272	SVGA3dCopyBox *box;
 273	unsigned box_count;
 274	void *virtual;
 275	bool dummy;
 276	struct vmw_dma_cmd {
 277		SVGA3dCmdHeader header;
 278		SVGA3dCmdSurfaceDMA dma;
 279	} *cmd;
 280	int i, ret;
 281
 282	cmd = container_of(header, struct vmw_dma_cmd, header);
 283
 284	/* No snooper installed */
 285	if (!srf->snooper.image)
 286		return;
 287
 288	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
  289		DRM_ERROR("face and mipmap for cursors should never be != 0\n");
 290		return;
 291	}
 292
 293	if (cmd->header.size < 64) {
 294		DRM_ERROR("at least one full copy box must be given\n");
 295		return;
 296	}
 297
 298	box = (SVGA3dCopyBox *)&cmd[1];
 299	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
 300			sizeof(SVGA3dCopyBox);
 301
 302	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
 303	    box->x != 0    || box->y != 0    || box->z != 0    ||
 304	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
 305	    box->d != 1    || box_count != 1) {
  306		/* TODO handle non-page-aligned offsets */
  307		/* TODO handle more dst & src != 0 */
  308		/* TODO handle more than one copy */
  309		DRM_ERROR("Can't snoop dma request for cursor!\n");
 310		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
 311			  box->srcx, box->srcy, box->srcz,
 312			  box->x, box->y, box->z,
 313			  box->w, box->h, box->d, box_count,
 314			  cmd->dma.guest.ptr.offset);
 315		return;
 316	}
 317
 318	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
 319	kmap_num = (64*64*4) >> PAGE_SHIFT;
 320
 321	ret = ttm_bo_reserve(bo, true, false, false, NULL);
 322	if (unlikely(ret != 0)) {
 323		DRM_ERROR("reserve failed\n");
 324		return;
 325	}
 326
 327	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
 328	if (unlikely(ret != 0))
 329		goto err_unreserve;
 330
 331	virtual = ttm_kmap_obj_virtual(&map, &dummy);
 332
 333	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
 334		memcpy(srf->snooper.image, virtual, 64*64*4);
 335	} else {
  336		/* Image is a u32 pointer, so i * 64 advances one 64-pixel row. */
 337		for (i = 0; i < box->h; i++)
 338			memcpy(srf->snooper.image + i * 64,
 339			       virtual + i * cmd->dma.guest.pitch,
 340			       box->w * 4);
 341	}
 342
 343	srf->snooper.age++;
 344
 345	ttm_bo_kunmap(&map);
 346err_unreserve:
 347	ttm_bo_unreserve(bo);
 348}
 349
 350/**
 351 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 352 *
 353 * @dev_priv: Pointer to the device private struct.
 354 *
 355 * Clears all legacy hotspots.
 356 */
 357void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
 358{
 359	struct drm_device *dev = dev_priv->dev;
 360	struct vmw_display_unit *du;
 361	struct drm_crtc *crtc;
 362
 363	drm_modeset_lock_all(dev);
 364	drm_for_each_crtc(crtc, dev) {
 365		du = vmw_crtc_to_du(crtc);
 366
 367		du->hotspot_x = 0;
 368		du->hotspot_y = 0;
 369	}
 370	drm_modeset_unlock_all(dev);
 371}
 372
 373void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
 374{
 375	struct drm_device *dev = dev_priv->dev;
 376	struct vmw_display_unit *du;
 377	struct drm_crtc *crtc;
 378
 379	mutex_lock(&dev->mode_config.mutex);
 380
 381	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 382		du = vmw_crtc_to_du(crtc);
 383		if (!du->cursor_surface ||
 384		    du->cursor_age == du->cursor_surface->snooper.age)
 385			continue;
 386
 387		du->cursor_age = du->cursor_surface->snooper.age;
 388		vmw_cursor_update_image(dev_priv,
 389					du->cursor_surface->snooper.image,
 390					64, 64,
 391					du->hotspot_x + du->core_hotspot_x,
 392					du->hotspot_y + du->core_hotspot_y);
 393	}
 394
 395	mutex_unlock(&dev->mode_config.mutex);
 396}
 397
 398/*
 399 * Generic framebuffer code
 400 */
 401
 402/*
 403 * Surface framebuffer code
 404 */
 405
 406static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
 407{
 408	struct vmw_framebuffer_surface *vfbs =
 409		vmw_framebuffer_to_vfbs(framebuffer);
 410
 411	drm_framebuffer_cleanup(framebuffer);
 412	vmw_surface_unreference(&vfbs->surface);
 413	if (vfbs->base.user_obj)
 414		ttm_base_object_unref(&vfbs->base.user_obj);
 415
 416	kfree(vfbs);
 417}
 418
 419static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 420				  struct drm_file *file_priv,
 421				  unsigned flags, unsigned color,
 422				  struct drm_clip_rect *clips,
 423				  unsigned num_clips)
 424{
 425	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
 426	struct vmw_framebuffer_surface *vfbs =
 427		vmw_framebuffer_to_vfbs(framebuffer);
 428	struct drm_clip_rect norect;
 429	int ret, inc = 1;
 430
 431	/* Legacy Display Unit does not support 3D */
 432	if (dev_priv->active_display_unit == vmw_du_legacy)
 433		return -EINVAL;
 434
 435	drm_modeset_lock_all(dev_priv->dev);
 436
 437	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 438	if (unlikely(ret != 0)) {
 439		drm_modeset_unlock_all(dev_priv->dev);
 440		return ret;
 441	}
 442
 443	if (!num_clips) {
 444		num_clips = 1;
 445		clips = &norect;
 446		norect.x1 = norect.y1 = 0;
 447		norect.x2 = framebuffer->width;
 448		norect.y2 = framebuffer->height;
 449	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
 450		num_clips /= 2;
 451		inc = 2; /* skip source rects */
 452	}
 453
 454	if (dev_priv->active_display_unit == vmw_du_screen_object)
 455		ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
 456						   clips, NULL, NULL, 0, 0,
 457						   num_clips, inc, NULL);
 458	else
 459		ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
 460						 clips, NULL, NULL, 0, 0,
 461						 num_clips, inc, NULL);
 462
 463	vmw_fifo_flush(dev_priv, false);
 464	ttm_read_unlock(&dev_priv->reservation_sem);
 465
 466	drm_modeset_unlock_all(dev_priv->dev);
 467
 468	return 0;
 469}
 470
 471/**
 472 * vmw_kms_readback - Perform a readback from the screen system to
 473 * a dma-buffer backed framebuffer.
 474 *
 475 * @dev_priv: Pointer to the device private structure.
 476 * @file_priv: Pointer to a struct drm_file identifying the caller.
 477 * Must be set to NULL if @user_fence_rep is NULL.
 478 * @vfb: Pointer to the dma-buffer backed framebuffer.
 479 * @user_fence_rep: User-space provided structure for fence information.
 480 * Must be set to non-NULL if @file_priv is non-NULL.
 481 * @vclips: Array of clip rects.
 482 * @num_clips: Number of clip rects in @vclips.
 483 *
 484 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 485 * interrupted.
 486 */
 487int vmw_kms_readback(struct vmw_private *dev_priv,
 488		     struct drm_file *file_priv,
 489		     struct vmw_framebuffer *vfb,
 490		     struct drm_vmw_fence_rep __user *user_fence_rep,
 491		     struct drm_vmw_rect *vclips,
 492		     uint32_t num_clips)
 493{
 494	switch (dev_priv->active_display_unit) {
 495	case vmw_du_screen_object:
 496		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
 497					    user_fence_rep, vclips, num_clips);
 498	case vmw_du_screen_target:
 499		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
 500					user_fence_rep, NULL, vclips, num_clips,
 501					1, false, true);
 502	default:
 503		WARN_ONCE(true,
 504			  "Readback called with invalid display system.\n");
  505	}
 506
 507	return -ENOSYS;
 508}
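/*
 * Usage sketch (hypothetical caller, for illustration only): reading back
 * a single 640x480 region, with @vclips given in source coordinates as
 * documented above:
 *
 *	struct drm_vmw_rect clip = { .x = 0, .y = 0, .w = 640, .h = 480 };
 *	ret = vmw_kms_readback(dev_priv, file_priv, vfb,
 *			       user_fence_rep, &clip, 1);
 */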
 509
 510
 511static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
 512	.destroy = vmw_framebuffer_surface_destroy,
 513	.dirty = vmw_framebuffer_surface_dirty,
 514};
 515
 516static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 517					   struct vmw_surface *surface,
 518					   struct vmw_framebuffer **out,
 519					   const struct drm_mode_fb_cmd
 520					   *mode_cmd,
 521					   bool is_dmabuf_proxy)
 522
 523{
 524	struct drm_device *dev = dev_priv->dev;
 525	struct vmw_framebuffer_surface *vfbs;
 526	enum SVGA3dSurfaceFormat format;
 527	int ret;
 528
 529	/* 3D is only supported on HWv8 and newer hosts */
 530	if (dev_priv->active_display_unit == vmw_du_legacy)
 531		return -ENOSYS;
 532
 533	/*
 534	 * Sanity checks.
 535	 */
 536
 537	/* Surface must be marked as a scanout. */
 538	if (unlikely(!surface->scanout))
 539		return -EINVAL;
 540
 541	if (unlikely(surface->mip_levels[0] != 1 ||
 542		     surface->num_sizes != 1 ||
 543		     surface->base_size.width < mode_cmd->width ||
 544		     surface->base_size.height < mode_cmd->height ||
 545		     surface->base_size.depth != 1)) {
 546		DRM_ERROR("Incompatible surface dimensions "
 547			  "for requested mode.\n");
 548		return -EINVAL;
 549	}
 550
 551	switch (mode_cmd->depth) {
 552	case 32:
 553		format = SVGA3D_A8R8G8B8;
 554		break;
 555	case 24:
 556		format = SVGA3D_X8R8G8B8;
 557		break;
 558	case 16:
 559		format = SVGA3D_R5G6B5;
 560		break;
 561	case 15:
 562		format = SVGA3D_A1R5G5B5;
 563		break;
 564	default:
 565		DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
 566		return -EINVAL;
 567	}
 568
 569	/*
 570	 * For DX, surface format validation is done when surface->scanout
 571	 * is set.
 572	 */
 573	if (!dev_priv->has_dx && format != surface->format) {
 574		DRM_ERROR("Invalid surface format for requested mode.\n");
 575		return -EINVAL;
 576	}
 577
 578	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
 579	if (!vfbs) {
 580		ret = -ENOMEM;
 581		goto out_err1;
 582	}
 583
 584	/* XXX get the first 3 from the surface info */
 585	vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
 586	vfbs->base.base.pitches[0] = mode_cmd->pitch;
 587	vfbs->base.base.depth = mode_cmd->depth;
 588	vfbs->base.base.width = mode_cmd->width;
 589	vfbs->base.base.height = mode_cmd->height;
 590	vfbs->surface = vmw_surface_reference(surface);
 591	vfbs->base.user_handle = mode_cmd->handle;
 592	vfbs->is_dmabuf_proxy = is_dmabuf_proxy;
 593
 594	*out = &vfbs->base;
 595
 596	ret = drm_framebuffer_init(dev, &vfbs->base.base,
 597				   &vmw_framebuffer_surface_funcs);
 598	if (ret)
 599		goto out_err2;
 600
 601	return 0;
 602
 603out_err2:
 604	vmw_surface_unreference(&surface);
 605	kfree(vfbs);
 606out_err1:
 607	return ret;
 608}
 609
 610/*
 611 * Dmabuf framebuffer code
 612 */
 613
 614static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
 615{
 616	struct vmw_framebuffer_dmabuf *vfbd =
 617		vmw_framebuffer_to_vfbd(framebuffer);
 618
 619	drm_framebuffer_cleanup(framebuffer);
 620	vmw_dmabuf_unreference(&vfbd->buffer);
 621	if (vfbd->base.user_obj)
 622		ttm_base_object_unref(&vfbd->base.user_obj);
 623
 624	kfree(vfbd);
 625}
 626
 627static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 628				 struct drm_file *file_priv,
 629				 unsigned flags, unsigned color,
 630				 struct drm_clip_rect *clips,
 631				 unsigned num_clips)
 632{
 633	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
 634	struct vmw_framebuffer_dmabuf *vfbd =
 635		vmw_framebuffer_to_vfbd(framebuffer);
 636	struct drm_clip_rect norect;
 637	int ret, increment = 1;
 638
 639	drm_modeset_lock_all(dev_priv->dev);
 640
 641	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 642	if (unlikely(ret != 0)) {
 643		drm_modeset_unlock_all(dev_priv->dev);
 644		return ret;
 645	}
 646
 647	if (!num_clips) {
 648		num_clips = 1;
 649		clips = &norect;
 650		norect.x1 = norect.y1 = 0;
 651		norect.x2 = framebuffer->width;
 652		norect.y2 = framebuffer->height;
 653	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
 654		num_clips /= 2;
 655		increment = 2;
 656	}
 657
 658	switch (dev_priv->active_display_unit) {
 659	case vmw_du_screen_target:
 660		ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
 661				       clips, NULL, num_clips, increment,
 662				       true, true);
 663		break;
 664	case vmw_du_screen_object:
 665		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
 666						  clips, NULL, num_clips,
 667						  increment, true, NULL);
 668		break;
 669	case vmw_du_legacy:
 670		ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
 671						  clips, num_clips, increment);
 672		break;
 673	default:
 674		ret = -EINVAL;
 675		WARN_ONCE(true, "Dirty called with invalid display system.\n");
 676		break;
 677	}
 678
 679	vmw_fifo_flush(dev_priv, false);
 680	ttm_read_unlock(&dev_priv->reservation_sem);
 681
 682	drm_modeset_unlock_all(dev_priv->dev);
 683
 684	return ret;
 685}
 686
 687static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
 688	.destroy = vmw_framebuffer_dmabuf_destroy,
 689	.dirty = vmw_framebuffer_dmabuf_dirty,
 690};
 691
 692/**
  693 * Pin the framebuffer's backing buffer (at the start of VRAM for legacy).
 694 */
 695static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
 696{
 697	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
 698	struct vmw_dma_buffer *buf;
 699	int ret;
 700
 701	buf = vfb->dmabuf ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
 702		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
 703
 704	if (!buf)
 705		return 0;
 706
 707	switch (dev_priv->active_display_unit) {
 708	case vmw_du_legacy:
 709		vmw_overlay_pause_all(dev_priv);
 710		ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
 711		vmw_overlay_resume_all(dev_priv);
 712		break;
 713	case vmw_du_screen_object:
 714	case vmw_du_screen_target:
 715		if (vfb->dmabuf)
 716			return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf,
 717							     false);
 718
 719		return vmw_dmabuf_pin_in_placement(dev_priv, buf,
 720						   &vmw_mob_placement, false);
 721	default:
 722		return -EINVAL;
 723	}
 724
 725	return ret;
 726}
 727
 728static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
 729{
 730	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
 731	struct vmw_dma_buffer *buf;
 732
 733	buf = vfb->dmabuf ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
 734		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
 735
 736	if (WARN_ON(!buf))
 737		return 0;
 738
 739	return vmw_dmabuf_unpin(dev_priv, buf, false);
 740}
 741
 742/**
 743 * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
 744 *
 745 * @dev: DRM device
 746 * @mode_cmd: parameters for the new surface
 747 * @dmabuf_mob: MOB backing the DMA buf
 748 * @srf_out: newly created surface
 749 *
 750 * When the content FB is a DMA buf, we create a surface as a proxy to the
 751 * same buffer.  This way we can do a surface copy rather than a surface DMA.
  752 * This is a more efficient approach.
 753 *
 754 * RETURNS:
 755 * 0 on success, error code otherwise
 756 */
 757static int vmw_create_dmabuf_proxy(struct drm_device *dev,
 758				   const struct drm_mode_fb_cmd *mode_cmd,
 759				   struct vmw_dma_buffer *dmabuf_mob,
 760				   struct vmw_surface **srf_out)
 761{
 762	uint32_t format;
 763	struct drm_vmw_size content_base_size;
 764	struct vmw_resource *res;
 765	unsigned int bytes_pp;
 766	int ret;
 767
 768	switch (mode_cmd->depth) {
 769	case 32:
 770	case 24:
 771		format = SVGA3D_X8R8G8B8;
 772		bytes_pp = 4;
 773		break;
 774
 775	case 16:
 776	case 15:
 777		format = SVGA3D_R5G6B5;
 778		bytes_pp = 2;
 779		break;
 780
 781	case 8:
 782		format = SVGA3D_P8;
 783		bytes_pp = 1;
 784		break;
 785
 786	default:
 787		DRM_ERROR("Invalid framebuffer format %d\n", mode_cmd->depth);
 788		return -EINVAL;
 789	}
 790
 791	content_base_size.width  = mode_cmd->pitch / bytes_pp;
 792	content_base_size.height = mode_cmd->height;
 793	content_base_size.depth  = 1;
 794
 795	ret = vmw_surface_gb_priv_define(dev,
 796			0, /* kernel visible only */
 797			0, /* flags */
 798			format,
 799			true, /* can be a scanout buffer */
 800			1, /* num of mip levels */
 801			0,
 802			0,
 803			content_base_size,
 804			srf_out);
 805	if (ret) {
 806		DRM_ERROR("Failed to allocate proxy content buffer\n");
 807		return ret;
 808	}
 809
 810	res = &(*srf_out)->res;
 811
 812	/* Reserve and switch the backing mob. */
 813	mutex_lock(&res->dev_priv->cmdbuf_mutex);
 814	(void) vmw_resource_reserve(res, false, true);
 815	vmw_dmabuf_unreference(&res->backup);
 816	res->backup = vmw_dmabuf_reference(dmabuf_mob);
 817	res->backup_offset = 0;
 818	vmw_resource_unreserve(res, false, NULL, 0);
 819	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 820
 821	return 0;
 822}
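/*
 * Worked example (illustrative): for a 32/24-bit depth mode with
 * mode_cmd->pitch == 5120, bytes_pp is 4 and the proxy surface width is
 * 5120 / 4 == 1280 pixels. Deriving the width from the pitch rather than
 * from mode_cmd->width keeps each scanline of the proxy surface exactly
 * congruent with a scanline of the backing buffer.
 */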
 823
 824
 825
 826static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
 827					  struct vmw_dma_buffer *dmabuf,
 828					  struct vmw_framebuffer **out,
 829					  const struct drm_mode_fb_cmd
 830					  *mode_cmd)
 831
 832{
 833	struct drm_device *dev = dev_priv->dev;
 834	struct vmw_framebuffer_dmabuf *vfbd;
 835	unsigned int requested_size;
 836	int ret;
 837
 838	requested_size = mode_cmd->height * mode_cmd->pitch;
 839	if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
 840		DRM_ERROR("Screen buffer object size is too small "
 841			  "for requested mode.\n");
 842		return -EINVAL;
 843	}
 844
 845	/* Limited framebuffer color depth support for screen objects */
 846	if (dev_priv->active_display_unit == vmw_du_screen_object) {
 847		switch (mode_cmd->depth) {
 848		case 32:
 849		case 24:
 850			/* Only support 32 bpp for 32 and 24 depth fbs */
 851			if (mode_cmd->bpp == 32)
 852				break;
 853
  854			DRM_ERROR("Invalid color depth/bpp: %d %d\n",
 855				  mode_cmd->depth, mode_cmd->bpp);
 856			return -EINVAL;
 857		case 16:
 858		case 15:
 859			/* Only support 16 bpp for 16 and 15 depth fbs */
 860			if (mode_cmd->bpp == 16)
 861				break;
 862
  863			DRM_ERROR("Invalid color depth/bpp: %d %d\n",
 864				  mode_cmd->depth, mode_cmd->bpp);
 865			return -EINVAL;
 866		default:
 867			DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
 868			return -EINVAL;
 869		}
 870	}
 871
 872	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
 873	if (!vfbd) {
 874		ret = -ENOMEM;
 875		goto out_err1;
 876	}
 877
 878	vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
 879	vfbd->base.base.pitches[0] = mode_cmd->pitch;
 880	vfbd->base.base.depth = mode_cmd->depth;
 881	vfbd->base.base.width = mode_cmd->width;
 882	vfbd->base.base.height = mode_cmd->height;
 883	vfbd->base.dmabuf = true;
 884	vfbd->buffer = vmw_dmabuf_reference(dmabuf);
 885	vfbd->base.user_handle = mode_cmd->handle;
 886	*out = &vfbd->base;
 887
 888	ret = drm_framebuffer_init(dev, &vfbd->base.base,
 889				   &vmw_framebuffer_dmabuf_funcs);
 890	if (ret)
 891		goto out_err2;
 892
 893	return 0;
 894
 895out_err2:
 896	vmw_dmabuf_unreference(&dmabuf);
 897	kfree(vfbd);
 898out_err1:
 899	return ret;
 900}
 901
 902/**
 903 * vmw_kms_new_framebuffer - Create a new framebuffer.
 904 *
 905 * @dev_priv: Pointer to device private struct.
 906 * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
 907 * Either @dmabuf or @surface must be NULL.
 908 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 909 * Either @dmabuf or @surface must be NULL.
 910 * @only_2d: No presents will occur to this dma buffer based framebuffer. This
  911 * helps the code to do some important optimizations.
 912 * @mode_cmd: Frame-buffer metadata.
 913 */
 914struct vmw_framebuffer *
 915vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
 916			struct vmw_dma_buffer *dmabuf,
 917			struct vmw_surface *surface,
 918			bool only_2d,
 919			const struct drm_mode_fb_cmd *mode_cmd)
 920{
 921	struct vmw_framebuffer *vfb = NULL;
 922	bool is_dmabuf_proxy = false;
 923	int ret;
 924
 925	/*
 926	 * We cannot use the SurfaceDMA command in an non-accelerated VM,
 927	 * therefore, wrap the DMA buf in a surface so we can use the
 928	 * SurfaceCopy command.
 929	 */
 930	if (dmabuf && only_2d &&
 931	    dev_priv->active_display_unit == vmw_du_screen_target) {
 932		ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
 933					      dmabuf, &surface);
 934		if (ret)
 935			return ERR_PTR(ret);
 936
 937		is_dmabuf_proxy = true;
 938	}
 939
  940	/* Create the new framebuffer depending on what we have */
 941	if (surface) {
 942		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
 943						      mode_cmd,
 944						      is_dmabuf_proxy);
 945
 946		/*
 947		 * vmw_create_dmabuf_proxy() adds a reference that is no longer
 948		 * needed
 949		 */
 950		if (is_dmabuf_proxy)
 951			vmw_surface_unreference(&surface);
 952	} else if (dmabuf) {
 953		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
 954						     mode_cmd);
 955	} else {
 956		BUG();
 957	}
 958
 959	if (ret)
 960		return ERR_PTR(ret);
 961
 962	vfb->pin = vmw_framebuffer_pin;
 963	vfb->unpin = vmw_framebuffer_unpin;
 964
 965	return vfb;
 966}
 967
 968/*
 969 * Generic Kernel modesetting functions
 970 */
 971
 972static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 973						 struct drm_file *file_priv,
 974						 const struct drm_mode_fb_cmd2 *mode_cmd2)
 975{
 976	struct vmw_private *dev_priv = vmw_priv(dev);
 977	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 978	struct vmw_framebuffer *vfb = NULL;
 979	struct vmw_surface *surface = NULL;
 980	struct vmw_dma_buffer *bo = NULL;
 981	struct ttm_base_object *user_obj;
 982	struct drm_mode_fb_cmd mode_cmd;
 983	int ret;
 984
 985	mode_cmd.width = mode_cmd2->width;
 986	mode_cmd.height = mode_cmd2->height;
 987	mode_cmd.pitch = mode_cmd2->pitches[0];
 988	mode_cmd.handle = mode_cmd2->handles[0];
 989	drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
 990				    &mode_cmd.bpp);
 991
 992	/**
 993	 * This code should be conditioned on Screen Objects not being used.
 994	 * If screen objects are used, we can allocate a GMR to hold the
 995	 * requested framebuffer.
 996	 */
 997
 998	if (!vmw_kms_validate_mode_vram(dev_priv,
 999					mode_cmd.pitch,
1000					mode_cmd.height)) {
 1001		DRM_ERROR("Requested mode exceeds bounding box limit.\n");
1002		return ERR_PTR(-ENOMEM);
1003	}
1004
1005	/*
1006	 * Take a reference on the user object of the resource
1007	 * backing the kms fb. This ensures that user-space handle
1008	 * lookups on that resource will always work as long as
1009	 * it's registered with a kms framebuffer. This is important,
1010	 * since vmw_execbuf_process identifies resources in the
1011	 * command stream using user-space handles.
1012	 */
1013
1014	user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
1015	if (unlikely(user_obj == NULL)) {
1016		DRM_ERROR("Could not locate requested kms frame buffer.\n");
1017		return ERR_PTR(-ENOENT);
1018	}
1019
1020	/**
1021	 * End conditioned code.
1022	 */
1023
1024	/* returns either a dmabuf or surface */
1025	ret = vmw_user_lookup_handle(dev_priv, tfile,
1026				     mode_cmd.handle,
1027				     &surface, &bo);
1028	if (ret)
1029		goto err_out;
1030
1031	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
1032				      !(dev_priv->capabilities & SVGA_CAP_3D),
1033				      &mode_cmd);
1034	if (IS_ERR(vfb)) {
1035		ret = PTR_ERR(vfb);
1036		goto err_out;
 1037	}
1038
1039err_out:
 1040	/* vmw_user_lookup_handle takes one ref, and so does new_fb */
1041	if (bo)
1042		vmw_dmabuf_unreference(&bo);
1043	if (surface)
1044		vmw_surface_unreference(&surface);
1045
1046	if (ret) {
1047		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
1048		ttm_base_object_unref(&user_obj);
1049		return ERR_PTR(ret);
1050	} else
1051		vfb->user_obj = user_obj;
1052
1053	return &vfb->base;
1054}
1055
1056static const struct drm_mode_config_funcs vmw_kms_funcs = {
1057	.fb_create = vmw_kms_fb_create,
1058};
1059
1060static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1061				   struct drm_file *file_priv,
1062				   struct vmw_framebuffer *vfb,
1063				   struct vmw_surface *surface,
1064				   uint32_t sid,
1065				   int32_t destX, int32_t destY,
1066				   struct drm_vmw_rect *clips,
1067				   uint32_t num_clips)
1068{
1069	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1070					    &surface->res, destX, destY,
1071					    num_clips, 1, NULL);
1072}
1073
1074
1075int vmw_kms_present(struct vmw_private *dev_priv,
1076		    struct drm_file *file_priv,
1077		    struct vmw_framebuffer *vfb,
1078		    struct vmw_surface *surface,
1079		    uint32_t sid,
1080		    int32_t destX, int32_t destY,
1081		    struct drm_vmw_rect *clips,
1082		    uint32_t num_clips)
1083{
1084	int ret;
1085
1086	switch (dev_priv->active_display_unit) {
1087	case vmw_du_screen_target:
1088		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
1089						 &surface->res, destX, destY,
1090						 num_clips, 1, NULL);
1091		break;
1092	case vmw_du_screen_object:
1093		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
1094					      sid, destX, destY, clips,
1095					      num_clips);
1096		break;
1097	default:
1098		WARN_ONCE(true,
1099			  "Present called with invalid display system.\n");
1100		ret = -ENOSYS;
1101		break;
1102	}
1103	if (ret)
1104		return ret;
1105
1106	vmw_fifo_flush(dev_priv, false);
1107
1108	return 0;
1109}
1110
1111static void
1112vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
1113{
1114	if (dev_priv->hotplug_mode_update_property)
1115		return;
1116
1117	dev_priv->hotplug_mode_update_property =
1118		drm_property_create_range(dev_priv->dev,
1119					  DRM_MODE_PROP_IMMUTABLE,
1120					  "hotplug_mode_update", 0, 1);
1121
1122	if (!dev_priv->hotplug_mode_update_property)
1123		return;
1124
1125}
1126
1127int vmw_kms_init(struct vmw_private *dev_priv)
1128{
1129	struct drm_device *dev = dev_priv->dev;
1130	int ret;
1131
1132	drm_mode_config_init(dev);
1133	dev->mode_config.funcs = &vmw_kms_funcs;
1134	dev->mode_config.min_width = 1;
1135	dev->mode_config.min_height = 1;
1136	dev->mode_config.max_width = dev_priv->texture_max_width;
1137	dev->mode_config.max_height = dev_priv->texture_max_height;
1138
1139	drm_mode_create_suggested_offset_properties(dev);
1140	vmw_kms_create_hotplug_mode_update_property(dev_priv);
1141
1142	ret = vmw_kms_stdu_init_display(dev_priv);
1143	if (ret) {
1144		ret = vmw_kms_sou_init_display(dev_priv);
1145		if (ret) /* Fallback */
1146			ret = vmw_kms_ldu_init_display(dev_priv);
1147	}
1148
1149	return ret;
1150}
1151
1152int vmw_kms_close(struct vmw_private *dev_priv)
1153{
1154	int ret;
1155
1156	/*
 1157	 * The docs say we should take the lock before calling this function,
1158	 * but since it destroys encoders and our destructor calls
1159	 * drm_encoder_cleanup which takes the lock we deadlock.
1160	 */
1161	drm_mode_config_cleanup(dev_priv->dev);
1162	if (dev_priv->active_display_unit == vmw_du_screen_object)
1163		ret = vmw_kms_sou_close_display(dev_priv);
1164	else if (dev_priv->active_display_unit == vmw_du_screen_target)
1165		ret = vmw_kms_stdu_close_display(dev_priv);
1166	else
1167		ret = vmw_kms_ldu_close_display(dev_priv);
1168
1169	return ret;
1170}
1171
1172int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
1173				struct drm_file *file_priv)
1174{
1175	struct drm_vmw_cursor_bypass_arg *arg = data;
1176	struct vmw_display_unit *du;
1177	struct drm_crtc *crtc;
1178	int ret = 0;
1179
1180
1181	mutex_lock(&dev->mode_config.mutex);
1182	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
1183
1184		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1185			du = vmw_crtc_to_du(crtc);
1186			du->hotspot_x = arg->xhot;
1187			du->hotspot_y = arg->yhot;
1188		}
1189
1190		mutex_unlock(&dev->mode_config.mutex);
1191		return 0;
1192	}
1193
1194	crtc = drm_crtc_find(dev, arg->crtc_id);
1195	if (!crtc) {
1196		ret = -ENOENT;
1197		goto out;
1198	}
1199
1200	du = vmw_crtc_to_du(crtc);
1201
1202	du->hotspot_x = arg->xhot;
1203	du->hotspot_y = arg->yhot;
1204
1205out:
1206	mutex_unlock(&dev->mode_config.mutex);
1207
1208	return ret;
1209}
1210
1211int vmw_kms_write_svga(struct vmw_private *vmw_priv,
1212			unsigned width, unsigned height, unsigned pitch,
1213			unsigned bpp, unsigned depth)
1214{
1215	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
1216		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
1217	else if (vmw_fifo_have_pitchlock(vmw_priv))
1218		vmw_mmio_write(pitch, vmw_priv->mmio_virt +
1219			       SVGA_FIFO_PITCHLOCK);
1220	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
1221	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
1222	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
1223
1224	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
1225		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
1226			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
1227		return -EINVAL;
1228	}
1229
1230	return 0;
1231}
1232
1233int vmw_kms_save_vga(struct vmw_private *vmw_priv)
1234{
1235	struct vmw_vga_topology_state *save;
1236	uint32_t i;
1237
1238	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
1239	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
1240	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
1241	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
1242		vmw_priv->vga_pitchlock =
1243		  vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
1244	else if (vmw_fifo_have_pitchlock(vmw_priv))
1245		vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
1246							SVGA_FIFO_PITCHLOCK);
1247
1248	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
1249		return 0;
1250
1251	vmw_priv->num_displays = vmw_read(vmw_priv,
1252					  SVGA_REG_NUM_GUEST_DISPLAYS);
1253
1254	if (vmw_priv->num_displays == 0)
1255		vmw_priv->num_displays = 1;
1256
1257	for (i = 0; i < vmw_priv->num_displays; ++i) {
1258		save = &vmw_priv->vga_save[i];
1259		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
1260		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
1261		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
1262		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
1263		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
1264		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
1265		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
1266		if (i == 0 && vmw_priv->num_displays == 1 &&
1267		    save->width == 0 && save->height == 0) {
1268
1269			/*
1270			 * It should be fairly safe to assume that these
1271			 * values are uninitialized.
1272			 */
1273
1274			save->width = vmw_priv->vga_width - save->pos_x;
1275			save->height = vmw_priv->vga_height - save->pos_y;
1276		}
1277	}
1278
1279	return 0;
1280}
1281
1282int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
1283{
1284	struct vmw_vga_topology_state *save;
1285	uint32_t i;
1286
1287	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
1288	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
1289	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
1290	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
1291		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
1292			  vmw_priv->vga_pitchlock);
1293	else if (vmw_fifo_have_pitchlock(vmw_priv))
1294		vmw_mmio_write(vmw_priv->vga_pitchlock,
1295			       vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
1296
1297	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
1298		return 0;
1299
1300	for (i = 0; i < vmw_priv->num_displays; ++i) {
1301		save = &vmw_priv->vga_save[i];
1302		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
1303		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
1304		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
1305		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
1306		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
1307		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
1308		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
1309	}
1310
1311	return 0;
1312}
1313
1314bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
1315				uint32_t pitch,
1316				uint32_t height)
1317{
1318	return ((u64) pitch * (u64) height) < (u64)
1319		((dev_priv->active_display_unit == vmw_du_screen_target) ?
1320		 dev_priv->prim_bb_mem : dev_priv->vram_size);
1321}
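/*
 * For example, a 1920x1200 mode at 32 bpp has a pitch of 1920 * 4 == 7680
 * bytes, so pitch * height is 7680 * 1200 == 9216000 bytes (~8.8 MiB),
 * which must fit within vram_size, or within prim_bb_mem when screen
 * targets are active.
 */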
1322
1323
1324/**
 1325 * Called by DRM code with vbl_lock held.
1326 */
1327u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
1328{
1329	return 0;
1330}
1331
1332/**
 1333 * Called by DRM code with vbl_lock held.
1334 */
1335int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
1336{
1337	return -ENOSYS;
1338}
1339
1340/**
 1341 * Called by DRM code with vbl_lock held.
1342 */
1343void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
1344{
1345}
1346
1347
1348/*
1349 * Small shared kms functions.
1350 */
1351
1352static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
1353			 struct drm_vmw_rect *rects)
1354{
1355	struct drm_device *dev = dev_priv->dev;
1356	struct vmw_display_unit *du;
1357	struct drm_connector *con;
1358
1359	mutex_lock(&dev->mode_config.mutex);
1360
1361#if 0
1362	{
1363		unsigned int i;
1364
1365		DRM_INFO("%s: new layout ", __func__);
1366		for (i = 0; i < num; i++)
1367			DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
1368				 rects[i].w, rects[i].h);
1369		DRM_INFO("\n");
1370	}
1371#endif
1372
1373	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
1374		du = vmw_connector_to_du(con);
1375		if (num > du->unit) {
1376			du->pref_width = rects[du->unit].w;
1377			du->pref_height = rects[du->unit].h;
1378			du->pref_active = true;
1379			du->gui_x = rects[du->unit].x;
1380			du->gui_y = rects[du->unit].y;
1381			drm_object_property_set_value
1382			  (&con->base, dev->mode_config.suggested_x_property,
1383			   du->gui_x);
1384			drm_object_property_set_value
1385			  (&con->base, dev->mode_config.suggested_y_property,
1386			   du->gui_y);
1387		} else {
1388			du->pref_width = 800;
1389			du->pref_height = 600;
1390			du->pref_active = false;
1391			drm_object_property_set_value
1392			  (&con->base, dev->mode_config.suggested_x_property,
1393			   0);
1394			drm_object_property_set_value
1395			  (&con->base, dev->mode_config.suggested_y_property,
1396			   0);
1397		}
1398		con->status = vmw_du_connector_detect(con, true);
1399	}
1400
1401	mutex_unlock(&dev->mode_config.mutex);
1402	drm_sysfs_hotplug_event(dev);
1403
1404	return 0;
1405}
1406
1407void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
1408			   u16 *r, u16 *g, u16 *b,
1409			   uint32_t start, uint32_t size)
1410{
1411	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
1412	int i;
1413
1414	for (i = 0; i < size; i++) {
1415		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
1416			  r[i], g[i], b[i]);
1417		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
1418		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
1419		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
1420	}
1421}
1422
1423int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
1424{
1425	return 0;
1426}
1427
1428enum drm_connector_status
1429vmw_du_connector_detect(struct drm_connector *connector, bool force)
1430{
1431	uint32_t num_displays;
1432	struct drm_device *dev = connector->dev;
1433	struct vmw_private *dev_priv = vmw_priv(dev);
1434	struct vmw_display_unit *du = vmw_connector_to_du(connector);
1435
1436	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
1437
1438	return ((vmw_connector_to_du(connector)->unit < num_displays &&
1439		 du->pref_active) ?
1440		connector_status_connected : connector_status_disconnected);
1441}
1442
1443static struct drm_display_mode vmw_kms_connector_builtin[] = {
1444	/* 640x480@60Hz */
1445	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
1446		   752, 800, 0, 480, 489, 492, 525, 0,
1447		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
1448	/* 800x600@60Hz */
1449	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
1450		   968, 1056, 0, 600, 601, 605, 628, 0,
1451		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1452	/* 1024x768@60Hz */
1453	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1454		   1184, 1344, 0, 768, 771, 777, 806, 0,
1455		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
1456	/* 1152x864@75Hz */
1457	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1458		   1344, 1600, 0, 864, 865, 868, 900, 0,
1459		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1460	/* 1280x768@60Hz */
1461	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
1462		   1472, 1664, 0, 768, 771, 778, 798, 0,
1463		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1464	/* 1280x800@60Hz */
1465	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
1466		   1480, 1680, 0, 800, 803, 809, 831, 0,
1467		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
1468	/* 1280x960@60Hz */
1469	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
1470		   1488, 1800, 0, 960, 961, 964, 1000, 0,
1471		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1472	/* 1280x1024@60Hz */
1473	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
1474		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
1475		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1476	/* 1360x768@60Hz */
1477	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
1478		   1536, 1792, 0, 768, 771, 777, 795, 0,
1479		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
 1480	/* 1400x1050@60Hz */
1481	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
1482		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
1483		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1484	/* 1440x900@60Hz */
1485	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
1486		   1672, 1904, 0, 900, 903, 909, 934, 0,
1487		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1488	/* 1600x1200@60Hz */
1489	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
1490		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
1491		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1492	/* 1680x1050@60Hz */
1493	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
1494		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
1495		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1496	/* 1792x1344@60Hz */
1497	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
1498		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
1499		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
 1500	/* 1856x1392@60Hz */
1501	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
1502		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
1503		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1504	/* 1920x1200@60Hz */
1505	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
1506		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
1507		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1508	/* 1920x1440@60Hz */
1509	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
1510		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
1511		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1512	/* 2560x1600@60Hz */
1513	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
1514		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
1515		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
1516	/* Terminate */
1517	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
1518};
1519
1520/**
1521 * vmw_guess_mode_timing - Provide fake timings for a
1522 * 60Hz vrefresh mode.
1523 *
 1524 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
1525 * members filled in.
1526 */
1527void vmw_guess_mode_timing(struct drm_display_mode *mode)
1528{
1529	mode->hsync_start = mode->hdisplay + 50;
1530	mode->hsync_end = mode->hsync_start + 50;
1531	mode->htotal = mode->hsync_end + 50;
1532
1533	mode->vsync_start = mode->vdisplay + 50;
1534	mode->vsync_end = mode->vsync_start + 50;
1535	mode->vtotal = mode->vsync_end + 50;
1536
1537	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
1538	mode->vrefresh = drm_mode_vrefresh(mode);
1539}
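/*
 * Worked example (illustrative): for a 1024x768 mode, htotal becomes
 * 1024 + 150 == 1174 and vtotal 768 + 150 == 918, so the fake clock is
 * 1174 * 918 / 100 * 6 == 64662 kHz, which drm_mode_vrefresh() maps back
 * to roughly 60 Hz.
 */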
1540
1541
1542int vmw_du_connector_fill_modes(struct drm_connector *connector,
1543				uint32_t max_width, uint32_t max_height)
1544{
1545	struct vmw_display_unit *du = vmw_connector_to_du(connector);
1546	struct drm_device *dev = connector->dev;
1547	struct vmw_private *dev_priv = vmw_priv(dev);
1548	struct drm_display_mode *mode = NULL;
1549	struct drm_display_mode *bmode;
1550	struct drm_display_mode prefmode = { DRM_MODE("preferred",
1551		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
1552		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1553		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
1554	};
1555	int i;
1556	u32 assumed_bpp = 2;
1557
1558	/*
1559	 * If using screen objects, then assume 32-bpp because that's what the
 1560	 * SVGA device is assuming.
1561	 */
1562	if (dev_priv->active_display_unit == vmw_du_screen_object)
1563		assumed_bpp = 4;
1564
1565	if (dev_priv->active_display_unit == vmw_du_screen_target) {
1566		max_width  = min(max_width,  dev_priv->stdu_max_width);
1567		max_height = min(max_height, dev_priv->stdu_max_height);
1568	}
1569
1570	/* Add preferred mode */
1571	mode = drm_mode_duplicate(dev, &prefmode);
1572	if (!mode)
1573		return 0;
1574	mode->hdisplay = du->pref_width;
1575	mode->vdisplay = du->pref_height;
1576	vmw_guess_mode_timing(mode);
1577
1578	if (vmw_kms_validate_mode_vram(dev_priv,
1579					mode->hdisplay * assumed_bpp,
1580					mode->vdisplay)) {
1581		drm_mode_probed_add(connector, mode);
1582	} else {
1583		drm_mode_destroy(dev, mode);
1584		mode = NULL;
1585	}
1586
1587	if (du->pref_mode) {
1588		list_del_init(&du->pref_mode->head);
1589		drm_mode_destroy(dev, du->pref_mode);
1590	}
1591
1592	/* mode might be null here, this is intended */
1593	du->pref_mode = mode;
1594
1595	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
1596		bmode = &vmw_kms_connector_builtin[i];
1597		if (bmode->hdisplay > max_width ||
1598		    bmode->vdisplay > max_height)
1599			continue;
1600
1601		if (!vmw_kms_validate_mode_vram(dev_priv,
1602						bmode->hdisplay * assumed_bpp,
1603						bmode->vdisplay))
1604			continue;
1605
1606		mode = drm_mode_duplicate(dev, bmode);
1607		if (!mode)
1608			return 0;
1609		mode->vrefresh = drm_mode_vrefresh(mode);
1610
1611		drm_mode_probed_add(connector, mode);
1612	}
1613
1614	drm_mode_connector_list_update(connector);
 1615	/* Move the preferred mode first, to help apps pick the right mode. */
1616	drm_mode_sort(&connector->modes);
1617
1618	return 1;
1619}
1620
1621int vmw_du_connector_set_property(struct drm_connector *connector,
1622				  struct drm_property *property,
1623				  uint64_t val)
1624{
1625	struct vmw_display_unit *du = vmw_connector_to_du(connector);
1626	struct vmw_private *dev_priv = vmw_priv(connector->dev);
1627
1628	if (property == dev_priv->implicit_placement_property)
1629		du->is_implicit = val;
1630
1631	return 0;
1632}
1633
1634
1635int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
1636				struct drm_file *file_priv)
1637{
1638	struct vmw_private *dev_priv = vmw_priv(dev);
1639	struct drm_vmw_update_layout_arg *arg =
1640		(struct drm_vmw_update_layout_arg *)data;
1641	void __user *user_rects;
1642	struct drm_vmw_rect *rects;
1643	unsigned rects_size;
1644	int ret;
1645	int i;
1646	u64 total_pixels = 0;
1647	struct drm_mode_config *mode_config = &dev->mode_config;
1648	struct drm_vmw_rect bounding_box = {0};
1649
1650	if (!arg->num_outputs) {
1651		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
1652		vmw_du_update_layout(dev_priv, 1, &def_rect);
1653		return 0;
1654	}
1655
1656	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
1657	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
1658			GFP_KERNEL);
1659	if (unlikely(!rects))
1660		return -ENOMEM;
1661
1662	user_rects = (void __user *)(unsigned long)arg->rects;
1663	ret = copy_from_user(rects, user_rects, rects_size);
1664	if (unlikely(ret != 0)) {
1665		DRM_ERROR("Failed to get rects.\n");
1666		ret = -EFAULT;
1667		goto out_free;
1668	}
1669
1670	for (i = 0; i < arg->num_outputs; ++i) {
1671		if (rects[i].x < 0 ||
1672		    rects[i].y < 0 ||
1673		    rects[i].x + rects[i].w > mode_config->max_width ||
1674		    rects[i].y + rects[i].h > mode_config->max_height) {
1675			DRM_ERROR("Invalid GUI layout.\n");
1676			ret = -EINVAL;
1677			goto out_free;
1678		}
1679
1680		/*
 1681		 * bounding_box.w and bounding_box.h are used as
1682		 * lower-right coordinates
1683		 */
1684		if (rects[i].x + rects[i].w > bounding_box.w)
1685			bounding_box.w = rects[i].x + rects[i].w;
1686
1687		if (rects[i].y + rects[i].h > bounding_box.h)
1688			bounding_box.h = rects[i].y + rects[i].h;
1689
1690		total_pixels += (u64) rects[i].w * (u64) rects[i].h;
1691	}
1692
1693	if (dev_priv->active_display_unit == vmw_du_screen_target) {
1694		/*
 1695		 * For Screen Targets, the limits for a topology are:
1696		 *	1. Bounding box (assuming 32bpp) must be < prim_bb_mem
1697		 *      2. Total pixels (assuming 32bpp) must be < prim_bb_mem
1698		 */
1699		u64 bb_mem    = bounding_box.w * bounding_box.h * 4;
1700		u64 pixel_mem = total_pixels * 4;
1701
1702		if (bb_mem > dev_priv->prim_bb_mem) {
1703			DRM_ERROR("Topology is beyond supported limits.\n");
1704			ret = -EINVAL;
1705			goto out_free;
1706		}
1707
1708		if (pixel_mem > dev_priv->prim_bb_mem) {
1709			DRM_ERROR("Combined output size too large\n");
1710			ret = -EINVAL;
1711			goto out_free;
1712		}
1713	}
1714
1715	vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
1716
1717out_free:
1718	kfree(rects);
1719	return ret;
1720}
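/*
 * Worked example (illustrative): two 1920x1080 outputs side by side form
 * a 3840x1080 bounding box, so bb_mem is 3840 * 1080 * 4 == 16588800
 * bytes (~15.8 MiB) and pixel_mem is 2 * 1920 * 1080 * 4 == the same
 * 16588800 bytes; both must stay below prim_bb_mem for the screen-target
 * topology to be accepted.
 */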
1721
1722/**
1723 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
1724 * on a set of cliprects and a set of display units.
1725 *
1726 * @dev_priv: Pointer to a device private structure.
1727 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
 1728 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
1729 * Cliprects are given in framebuffer coordinates.
1730 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
1731 * be NULL. Cliprects are given in source coordinates.
1732 * @dest_x: X coordinate offset for the crtc / destination clip rects.
1733 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
1734 * @num_clips: Number of cliprects in the @clips or @vclips array.
1735 * @increment: Integer with which to increment the clip counter when looping.
1736 * Used to skip a predetermined number of clip rects.
1737 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
1738 */
1739int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
1740			 struct vmw_framebuffer *framebuffer,
1741			 const struct drm_clip_rect *clips,
1742			 const struct drm_vmw_rect *vclips,
1743			 s32 dest_x, s32 dest_y,
1744			 int num_clips,
1745			 int increment,
1746			 struct vmw_kms_dirty *dirty)
1747{
1748	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
1749	struct drm_crtc *crtc;
1750	u32 num_units = 0;
1751	u32 i, k;
1752
1753	dirty->dev_priv = dev_priv;
1754
1755	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
1756		if (crtc->primary->fb != &framebuffer->base)
1757			continue;
1758		units[num_units++] = vmw_crtc_to_du(crtc);
1759	}
1760
1761	for (k = 0; k < num_units; k++) {
1762		struct vmw_display_unit *unit = units[k];
1763		s32 crtc_x = unit->crtc.x;
1764		s32 crtc_y = unit->crtc.y;
1765		s32 crtc_width = unit->crtc.mode.hdisplay;
1766		s32 crtc_height = unit->crtc.mode.vdisplay;
1767		const struct drm_clip_rect *clips_ptr = clips;
1768		const struct drm_vmw_rect *vclips_ptr = vclips;
1769
1770		dirty->unit = unit;
1771		if (dirty->fifo_reserve_size > 0) {
1772			dirty->cmd = vmw_fifo_reserve(dev_priv,
1773						      dirty->fifo_reserve_size);
1774			if (!dirty->cmd) {
1775				DRM_ERROR("Couldn't reserve fifo space "
1776					  "for dirty blits.\n");
1777				return -ENOMEM;
1778			}
1779			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
1780		}
1781		dirty->num_hits = 0;
1782		for (i = 0; i < num_clips; i++, clips_ptr += increment,
1783		       vclips_ptr += increment) {
1784			s32 clip_left;
1785			s32 clip_top;
1786
1787			/*
1788			 * Select clip array type. Note that integer type
1789			 * in @clips is unsigned short, whereas in @vclips
1790			 * it's 32-bit.
1791			 */
1792			if (clips) {
1793				dirty->fb_x = (s32) clips_ptr->x1;
1794				dirty->fb_y = (s32) clips_ptr->y1;
1795				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
1796					crtc_x;
1797				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
1798					crtc_y;
1799			} else {
1800				dirty->fb_x = vclips_ptr->x;
1801				dirty->fb_y = vclips_ptr->y;
1802				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
1803					dest_x - crtc_x;
1804				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
1805					dest_y - crtc_y;
1806			}
1807
1808			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
1809			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
1810
1811			/* Skip this clip if it's outside the crtc region */
1812			if (dirty->unit_x1 >= crtc_width ||
1813			    dirty->unit_y1 >= crtc_height ||
1814			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
1815				continue;
1816
1817			/* Clip right and bottom to crtc limits */
1818			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
1819					       crtc_width);
1820			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
1821					       crtc_height);
1822
1823			/* Clip left and top to crtc limits */
1824			clip_left = min_t(s32, dirty->unit_x1, 0);
1825			clip_top = min_t(s32, dirty->unit_y1, 0);
1826			dirty->unit_x1 -= clip_left;
1827			dirty->unit_y1 -= clip_top;
1828			dirty->fb_x -= clip_left;
1829			dirty->fb_y -= clip_top;
1830
1831			dirty->clip(dirty);
1832		}
1833
1834		dirty->fifo_commit(dirty);
1835	}
1836
1837	return 0;
1838}
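
/*
 * A minimal usage sketch for vmw_kms_helper_dirty() (hypothetical
 * callback and command names; the real users live in the screen-object
 * and screen-target code). The closure supplies a per-clip callback
 * that builds one command at dirty->cmd, and a commit callback that
 * submits whatever was built:
 *
 *	static void example_clip(struct vmw_kms_dirty *dirty)
 *	{
 *		build_example_blit(dirty->cmd, dirty->fb_x, dirty->fb_y,
 *				   dirty->unit_x1, dirty->unit_y1,
 *				   dirty->unit_x2, dirty->unit_y2);
 *		dirty->num_hits++;
 *	}
 *
 *	static void example_commit(struct vmw_kms_dirty *dirty)
 *	{
 *		vmw_fifo_commit(dirty->dev_priv,
 *				dirty->num_hits * EXAMPLE_BLIT_SIZE);
 *	}
 *
 *	struct vmw_kms_dirty dirty = {
 *		.clip = example_clip,
 *		.fifo_commit = example_commit,
 *		.fifo_reserve_size = num_clips * EXAMPLE_BLIT_SIZE,
 *	};
 *	ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, NULL,
 *				   0, 0, num_clips, 1, &dirty);
 */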
1839
1840/**
1841 * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
1842 * command submission.
1843 *
1844 * @dev_priv: Pointer to a device private structure.
1845 * @buf: The buffer object
1846 * @interruptible: Whether to perform waits as interruptible.
1847 * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
1848 * the buffer will be validated as a GMR. Already pinned buffers will not be
1849 * validated.
1850 *
1851 * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
1852 * interrupted by a signal.
1853 */
1854int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
1855				  struct vmw_dma_buffer *buf,
1856				  bool interruptible,
1857				  bool validate_as_mob)
1858{
1859	struct ttm_buffer_object *bo = &buf->base;
1860	int ret;
1861
1862	ttm_bo_reserve(bo, false, false, interruptible, NULL);
1863	ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
1864					 validate_as_mob);
1865	if (ret)
1866		ttm_bo_unreserve(bo);
1867
1868	return ret;
1869}
1870
1871/**
1872 * vmw_kms_helper_buffer_revert - Undo the actions of
1873 * vmw_kms_helper_buffer_prepare.
1874 *
1875 * @buf: Pointer to the buffer object.
1876 *
1877 * Helper to be used if an error forces the caller to undo the actions of
1878 * vmw_kms_helper_buffer_prepare.
1879 */
1880void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
1881{
1882	if (buf)
1883		ttm_bo_unreserve(&buf->base);
1884}
1885
1886/**
1887 * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
1888 * kms command submission.
1889 *
1890 * @dev_priv: Pointer to a device private structure.
1891 * @file_priv: Pointer to a struct drm_file representing the caller's
1892 * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
1893 * if non-NULL, @user_fence_rep must be non-NULL.
1894 * @buf: The buffer object.
1895 * @out_fence:  Optional pointer to a fence pointer. If non-NULL, a
1896 * ref-counted fence pointer is returned here.
1897 * @user_fence_rep: Optional pointer to a user-space provided struct
1898 * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
1899 * function copies fence data to user-space in a fail-safe manner.
1900 */
1901void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
1902				  struct drm_file *file_priv,
1903				  struct vmw_dma_buffer *buf,
1904				  struct vmw_fence_obj **out_fence,
1905				  struct drm_vmw_fence_rep __user *
1906				  user_fence_rep)
1907{
1908	struct vmw_fence_obj *fence;
1909	uint32_t handle;
1910	int ret;
1911
1912	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
1913					 file_priv ? &handle : NULL);
1914	if (buf)
1915		vmw_fence_single_bo(&buf->base, fence);
1916	if (file_priv)
1917		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
1918					    ret, user_fence_rep, fence,
1919					    handle);
1920	if (out_fence)
1921		*out_fence = fence;
1922	else
1923		vmw_fence_obj_unreference(&fence);
1924
1925	vmw_kms_helper_buffer_revert(buf);
1926}
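
/*
 * Sketch of how the buffer helpers above are meant to pair up (assumed
 * flow; build_and_submit() is hypothetical): prepare before building
 * commands, revert on error, finish after successful submission.
 *
 *	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
 *	if (ret)
 *		return ret;
 *
 *	ret = build_and_submit(dev_priv, buf);
 *	if (ret) {
 *		vmw_kms_helper_buffer_revert(buf);
 *		return ret;
 *	}
 *
 *	vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, NULL, NULL);
 */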
1927
1928
1929/**
1930 * vmw_kms_helper_resource_revert - Undo the actions of
1931 * vmw_kms_helper_resource_prepare.
1932 *
1933 * @res: Pointer to the resource. Typically a surface.
1934 *
1935 * Helper to be used if an error forces the caller to undo the actions of
1936 * vmw_kms_helper_resource_prepare.
1937 */
1938void vmw_kms_helper_resource_revert(struct vmw_resource *res)
1939{
1940	vmw_kms_helper_buffer_revert(res->backup);
1941	vmw_resource_unreserve(res, false, NULL, 0);
1942	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1943}
1944
1945/**
1946 * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
1947 * command submission.
1948 *
1949 * @res: Pointer to the resource. Typically a surface.
1950 * @interruptible: Whether to perform waits as interruptible.
1951 *
1952 * Also reserves and validates the backup buffer of a guest-backed resource.
1953 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
1954 * interrupted by a signal.
1955 */
1956int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
1957				    bool interruptible)
1958{
1959	int ret = 0;
1960
1961	if (interruptible)
1962		ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
1963	else
1964		mutex_lock(&res->dev_priv->cmdbuf_mutex);
1965
1966	if (unlikely(ret != 0))
1967		return -ERESTARTSYS;
1968
1969	ret = vmw_resource_reserve(res, interruptible, false);
1970	if (ret)
1971		goto out_unlock;
1972
1973	if (res->backup) {
1974		ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
1975						    interruptible,
1976						    res->dev_priv->has_mob);
1977		if (ret)
1978			goto out_unreserve;
1979	}
1980	ret = vmw_resource_validate(res);
1981	if (ret)
1982		goto out_revert;
1983	return 0;
1984
1985out_revert:
1986	vmw_kms_helper_buffer_revert(res->backup);
1987out_unreserve:
1988	vmw_resource_unreserve(res, false, NULL, 0);
1989out_unlock:
1990	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1991	return ret;
1992}
1993
1994/**
1995 * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
1996 * kms command submission.
1997 *
1998 * @res: Pointer to the resource. Typically a surface.
1999 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
2000 * ref-counted fence pointer is returned here.
2001 */
2002void vmw_kms_helper_resource_finish(struct vmw_resource *res,
2003			     struct vmw_fence_obj **out_fence)
2004{
2005	if (res->backup || out_fence)
2006		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
2007					     out_fence, NULL);
2008
2009	vmw_resource_unreserve(res, false, NULL, 0);
2010	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
2011}
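
/*
 * The resource helpers follow the same prepare/revert/finish pattern
 * (a sketch; emit_commands() is hypothetical). prepare() takes the
 * cmdbuf mutex and reserves and validates the resource together with
 * its backup buffer; finish() fences the backup and drops all locks:
 *
 *	ret = vmw_kms_helper_resource_prepare(res, true);
 *	if (ret)
 *		return ret;
 *
 *	ret = emit_commands(dev_priv, res);
 *	if (ret) {
 *		vmw_kms_helper_resource_revert(res);
 *		return ret;
 *	}
 *
 *	vmw_kms_helper_resource_finish(res, NULL);
 */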
2012
2013/**
2014 * vmw_kms_update_proxy - Helper function to update a proxy surface from
2015 * its backing MOB.
2016 *
2017 * @res: Pointer to the surface resource
2018 * @clips: Clip rects in framebuffer (surface) space.
2019 * @num_clips: Number of clips in @clips.
2020 * @increment: Integer with which to increment the clip counter when looping.
2021 * Used to skip a predetermined number of clip rects.
2022 *
2023 * This function makes sure the proxy surface is updated from its backing MOB
2024 * using the region given by @clips. The surface resource @res and its backing
2025 * MOB need to be reserved and validated on call.
2026 */
2027int vmw_kms_update_proxy(struct vmw_resource *res,
2028			 const struct drm_clip_rect *clips,
2029			 unsigned num_clips,
2030			 int increment)
2031{
2032	struct vmw_private *dev_priv = res->dev_priv;
2033	struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
2034	struct {
2035		SVGA3dCmdHeader header;
2036		SVGA3dCmdUpdateGBImage body;
2037	} *cmd;
2038	SVGA3dBox *box;
2039	size_t copy_size = 0;
2040	int i;
2041
2042	if (!clips)
2043		return 0;
2044
2045	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
2046	if (!cmd) {
2047		DRM_ERROR("Couldn't reserve fifo space for proxy surface "
2048			  "update.\n");
2049		return -ENOMEM;
2050	}
2051
2052	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
2053		box = &cmd->body.box;
2054
2055		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2056		cmd->header.size = sizeof(cmd->body);
2057		cmd->body.image.sid = res->id;
2058		cmd->body.image.face = 0;
2059		cmd->body.image.mipmap = 0;
2060
2061		if (clips->x1 > size->width || clips->x2 > size->width ||
2062		    clips->y1 > size->height || clips->y2 > size->height) {
2063			DRM_ERROR("Invalid clips outside of framebuffer.\n");
2064			return -EINVAL;
2065		}
2066
2067		box->x = clips->x1;
2068		box->y = clips->y1;
2069		box->z = 0;
2070		box->w = clips->x2 - clips->x1;
2071		box->h = clips->y2 - clips->y1;
2072		box->d = 1;
2073
2074		copy_size += sizeof(*cmd);
2075	}
2076
2077	vmw_fifo_commit(dev_priv, copy_size);
2078
2079	return 0;
2080}
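
/*
 * Note on the batch sizing above: one UPDATE_GB_IMAGE command is
 * reserved and built per emitted clip, with @increment acting as the
 * stride through @clips. A caller using copy-annotated clip pairs
 * therefore passes the halved clip count with increment == 2, and
 * copy_size ends up equal to the reserved sizeof(*cmd) * num_clips.
 */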
2081
2082int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
2083			    unsigned unit,
2084			    u32 max_width,
2085			    u32 max_height,
2086			    struct drm_connector **p_con,
2087			    struct drm_crtc **p_crtc,
2088			    struct drm_display_mode **p_mode)
2089{
2090	struct drm_connector *con;
2091	struct vmw_display_unit *du;
2092	struct drm_display_mode *mode;
2093	int i = 0;
2094
2095	list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
2096			    head) {
2097		if (i == unit)
2098			break;
2099
2100		++i;
2101	}
2102
2103	if (i != unit) {
2104		DRM_ERROR("Could not find initial display unit.\n");
2105		return -EINVAL;
2106	}
2107
2108	if (list_empty(&con->modes))
2109		(void) vmw_du_connector_fill_modes(con, max_width, max_height);
2110
2111	if (list_empty(&con->modes)) {
2112		DRM_ERROR("Could not find initial display mode.\n");
2113		return -EINVAL;
2114	}
2115
2116	du = vmw_connector_to_du(con);
2117	*p_con = con;
2118	*p_crtc = &du->crtc;
2119
2120	list_for_each_entry(mode, &con->modes, head) {
2121		if (mode->type & DRM_MODE_TYPE_PREFERRED)
2122			break;
2123	}
2124
2125	if (mode->type & DRM_MODE_TYPE_PREFERRED)
2126		*p_mode = mode;
2127	else {
2128		WARN_ONCE(true, "Could not find initial preferred mode.\n");
2129		*p_mode = list_first_entry(&con->modes,
2130					   struct drm_display_mode,
2131					   head);
2132	}
2133
2134	return 0;
2135}
2136
2137/**
2138 * vmw_kms_del_active - unregister a crtc binding to the implicit framebuffer
2139 *
2140 * @dev_priv: Pointer to a device private struct.
2141 * @du: The display unit of the crtc.
2142 */
2143void vmw_kms_del_active(struct vmw_private *dev_priv,
2144			struct vmw_display_unit *du)
2145{
2146	lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);
2147
2148	if (du->active_implicit) {
2149		if (--(dev_priv->num_implicit) == 0)
2150			dev_priv->implicit_fb = NULL;
2151		du->active_implicit = false;
2152	}
2153}
2154
2155/**
2156 * vmw_kms_add_active - register a crtc binding to an implicit framebuffer
2157 *
2158 * @dev_priv: Pointer to a device private struct.
2159 * @du: The display unit of the crtc.
2160 * @vfb: The implicit framebuffer
2161 *
2162 * Registers a binding to an implicit framebuffer.
2163 */
2164void vmw_kms_add_active(struct vmw_private *dev_priv,
2165			struct vmw_display_unit *du,
2166			struct vmw_framebuffer *vfb)
2167{
2168	lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);
2169
2170	WARN_ON_ONCE(!dev_priv->num_implicit && dev_priv->implicit_fb);
2171
2172	if (!du->active_implicit && du->is_implicit) {
2173		dev_priv->implicit_fb = vfb;
2174		du->active_implicit = true;
2175		dev_priv->num_implicit++;
2176	}
2177}
2178
2179/**
2180 * vmw_kms_screen_object_flippable - Check whether we can page-flip a crtc.
2181 *
2182 * @dev_priv: Pointer to device-private struct.
2183 * @crtc: The crtc we want to flip.
2184 *
2185 * Returns true or false depending on whether it's OK to flip this crtc
2186 * based on the criterion that we must not have more than one implicit
2187 * frame-buffer at any one time.
2188 */
2189bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
2190			    struct drm_crtc *crtc)
2191{
2192	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
2193
2194	lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);
2195
2196	if (!du->is_implicit)
2197		return true;
2198
2199	if (dev_priv->num_implicit != 1)
2200		return false;
2201
2202	return true;
2203}
2204
2205/**
2206 * vmw_kms_update_implicit_fb - Update the implicit fb.
2207 *
2208 * @dev_priv: Pointer to device-private struct.
2209 * @crtc: The crtc the new implicit frame-buffer is bound to.
2210 */
2211void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
2212				struct drm_crtc *crtc)
2213{
2214	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
2215	struct vmw_framebuffer *vfb;
2216
2217	lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);
2218
2219	if (!du->is_implicit)
2220		return;
2221
2222	vfb = vmw_framebuffer_to_vfb(crtc->primary->fb);
2223	WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
2224		     dev_priv->implicit_fb != vfb);
2225
2226	dev_priv->implicit_fb = vfb;
2227}
2228
2229/**
2230 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2231 * property.
2232 *
2233 * @dev_priv: Pointer to a device private struct.
2234 * @immutable: Whether the property is immutable.
2235 *
2236 * Sets up the implicit placement property unless it's already set up.
2237 */
2238void
2239vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
2240					   bool immutable)
2241{
2242	if (dev_priv->implicit_placement_property)
2243		return;
2244
2245	dev_priv->implicit_placement_property =
2246		drm_property_create_range(dev_priv->dev,
2247					  immutable ?
2248					  DRM_MODE_PROP_IMMUTABLE : 0,
2249					  "implicit_placement", 0, 1);
2250
2251}
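
/*
 * A sketch of how a display unit would expose the property created
 * above (assumed usage; the actual attach happens in the display-unit
 * init paths):
 *
 *	if (dev_priv->implicit_placement_property)
 *		drm_object_attach_property(&du->connector.base,
 *					   dev_priv->implicit_placement_property,
 *					   du->is_implicit);
 */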
v3.1
  28#include "vmwgfx_kms.h"
  29
  30/* Might need a hrtimer here? */
  31#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
  32
  33static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
  34static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);
  35
  36void vmw_display_unit_cleanup(struct vmw_display_unit *du)
  37{
  38	if (du->cursor_surface)
  39		vmw_surface_unreference(&du->cursor_surface);
  40	if (du->cursor_dmabuf)
  41		vmw_dmabuf_unreference(&du->cursor_dmabuf);
  42	drm_crtc_cleanup(&du->crtc);
  43	drm_encoder_cleanup(&du->encoder);
  44	drm_connector_cleanup(&du->connector);
  45}
  46
  47/*
  48 * Display Unit Cursor functions
  49 */
  50
  51int vmw_cursor_update_image(struct vmw_private *dev_priv,
  52			    u32 *image, u32 width, u32 height,
  53			    u32 hotspotX, u32 hotspotY)
  54{
  55	struct {
  56		u32 cmd;
  57		SVGAFifoCmdDefineAlphaCursor cursor;
  58	} *cmd;
  59	u32 image_size = width * height * 4;
  60	u32 cmd_size = sizeof(*cmd) + image_size;
  61
  62	if (!image)
  63		return -EINVAL;
  64
  65	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
  66	if (unlikely(cmd == NULL)) {
  67		DRM_ERROR("Fifo reserve failed.\n");
  68		return -ENOMEM;
  69	}
  70
  71	memset(cmd, 0, sizeof(*cmd));
  72
  73	memcpy(&cmd[1], image, image_size);
  74
  75	cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
  76	cmd->cursor.id = cpu_to_le32(0);
  77	cmd->cursor.width = cpu_to_le32(width);
  78	cmd->cursor.height = cpu_to_le32(height);
  79	cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
  80	cmd->cursor.hotspotY = cpu_to_le32(hotspotY);
  81
  82	vmw_fifo_commit(dev_priv, cmd_size);
  83
  84	return 0;
  85}
  86
  87void vmw_cursor_update_position(struct vmw_private *dev_priv,
  88				bool show, int x, int y)
  89{
  90	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
  91	uint32_t count;
  92
  93	iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
  94	iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
  95	iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
  96	count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
  97	iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
  98}
  99
 100int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 101			   uint32_t handle, uint32_t width, uint32_t height)
 102{
 103	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
 104	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 105	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 106	struct vmw_surface *surface = NULL;
 107	struct vmw_dma_buffer *dmabuf = NULL;
 108	int ret;
 109
 110	if (handle) {
 111		ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
 112						     handle, &surface);
 113		if (!ret) {
 114			if (!surface->snooper.image) {
 115				DRM_ERROR("surface not suitable for cursor\n");
 116				return -EINVAL;
 117			}
 118		} else {
 119			ret = vmw_user_dmabuf_lookup(tfile,
 120						     handle, &dmabuf);
 121			if (ret) {
 122				DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
 123				return -EINVAL;
 124			}
 125		}
 126	}
 127
 128	/* takedown old cursor */
 129	if (du->cursor_surface) {
 130		du->cursor_surface->snooper.crtc = NULL;
 131		vmw_surface_unreference(&du->cursor_surface);
 132	}
 133	if (du->cursor_dmabuf)
 134		vmw_dmabuf_unreference(&du->cursor_dmabuf);
 135
 136	/* setup new image */
 137	if (surface) {
 138		/* vmw_user_surface_lookup takes one reference */
 139		du->cursor_surface = surface;
 140
 141		du->cursor_surface->snooper.crtc = crtc;
 142		du->cursor_age = du->cursor_surface->snooper.age;
 143		vmw_cursor_update_image(dev_priv, surface->snooper.image,
 144					64, 64, du->hotspot_x, du->hotspot_y);
 145	} else if (dmabuf) {
 146		struct ttm_bo_kmap_obj map;
 147		unsigned long kmap_offset;
 148		unsigned long kmap_num;
 149		void *virtual;
 150		bool dummy;
 151
 152		/* vmw_user_dmabuf_lookup takes one reference */
 153		du->cursor_dmabuf = dmabuf;
 154
 155		kmap_offset = 0;
 156		kmap_num = (64*64*4) >> PAGE_SHIFT;
 157
 158		ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
 159		if (unlikely(ret != 0)) {
 160			DRM_ERROR("reserve failed\n");
 161			return -EINVAL;
 162		}
 163
 164		ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
 165		if (unlikely(ret != 0))
 166			goto err_unreserve;
 167
 168		virtual = ttm_kmap_obj_virtual(&map, &dummy);
 169		vmw_cursor_update_image(dev_priv, virtual, 64, 64,
 170					du->hotspot_x, du->hotspot_y);
 171
 172		ttm_bo_kunmap(&map);
 173err_unreserve:
 174		ttm_bo_unreserve(&dmabuf->base);
 175
 176	} else {
 177		vmw_cursor_update_position(dev_priv, false, 0, 0);
 178		return 0;
 179	}
 180
 181	vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);
 182
 183	return 0;
 184}
 185
 186int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 187{
 188	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
 189	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 190	bool shown = du->cursor_surface || du->cursor_dmabuf;
 191
 192	du->cursor_x = x + crtc->x;
 193	du->cursor_y = y + crtc->y;
 194
 195	vmw_cursor_update_position(dev_priv, shown,
 196				   du->cursor_x, du->cursor_y);
 197
 198	return 0;
 199}
 200
 201void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 202			  struct ttm_object_file *tfile,
 203			  struct ttm_buffer_object *bo,
 204			  SVGA3dCmdHeader *header)
 205{
 206	struct ttm_bo_kmap_obj map;
 207	unsigned long kmap_offset;
 208	unsigned long kmap_num;
 209	SVGA3dCopyBox *box;
 210	unsigned box_count;
 211	void *virtual;
 212	bool dummy;
 213	struct vmw_dma_cmd {
 214		SVGA3dCmdHeader header;
 215		SVGA3dCmdSurfaceDMA dma;
 216	} *cmd;
 217	int ret;
 218
 219	cmd = container_of(header, struct vmw_dma_cmd, header);
 220
 221	/* No snooper installed */
 222	if (!srf->snooper.image)
 223		return;
 224
 225	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
 226		DRM_ERROR("face and mipmap for cursors should never != 0\n");
 227		return;
 228	}
 229
 230	if (cmd->header.size < 64) {
 231		DRM_ERROR("at least one full copy box must be given\n");
 232		return;
 233	}
 234
 235	box = (SVGA3dCopyBox *)&cmd[1];
 236	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
 237			sizeof(SVGA3dCopyBox);
 238
 239	if (cmd->dma.guest.pitch != (64 * 4) ||
 240	    cmd->dma.guest.ptr.offset % PAGE_SIZE ||
 241	    box->x != 0    || box->y != 0    || box->z != 0    ||
 242	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
 243	    box->w != 64   || box->h != 64   || box->d != 1    ||
 244	    box_count != 1) {
 245		/* TODO handle non-page-aligned offsets */
 246		/* TODO handle partial uploads and pitch != 256 */
 247		/* TODO handle more than one copy (size != 64) */
 248		DRM_ERROR("lazy programmer, can't handle weird stuff\n");
 249		return;
 250	}
 251
 252	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
 253	kmap_num = (64*64*4) >> PAGE_SHIFT;
 254
 255	ret = ttm_bo_reserve(bo, true, false, false, 0);
 256	if (unlikely(ret != 0)) {
 257		DRM_ERROR("reserve failed\n");
 258		return;
 259	}
 260
 261	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
 262	if (unlikely(ret != 0))
 263		goto err_unreserve;
 264
 265	virtual = ttm_kmap_obj_virtual(&map, &dummy);
 266
 267	memcpy(srf->snooper.image, virtual, 64*64*4);
 268	srf->snooper.age++;
 269
 270	/* We can't update the cursor image from here, since execbuf has
 271	 * already reserved fifo space.
 272	 *
 273	 * if (srf->snooper.crtc)
 274	 *	vmw_ldu_crtc_cursor_update_image(dev_priv,
 275	 *					 srf->snooper.image, 64, 64,
 276	 *					 du->hotspot_x, du->hotspot_y);
 277	 */
 278
 279	ttm_bo_kunmap(&map);
 280err_unreserve:
 281	ttm_bo_unreserve(bo);
 282}
 283
 284void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
 285{
 286	struct drm_device *dev = dev_priv->dev;
 287	struct vmw_display_unit *du;
 288	struct drm_crtc *crtc;
 289
 290	mutex_lock(&dev->mode_config.mutex);
 291
 292	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 293		du = vmw_crtc_to_du(crtc);
 294		if (!du->cursor_surface ||
 295		    du->cursor_age == du->cursor_surface->snooper.age)
 296			continue;
 297
 298		du->cursor_age = du->cursor_surface->snooper.age;
 299		vmw_cursor_update_image(dev_priv,
 300					du->cursor_surface->snooper.image,
 301					64, 64, du->hotspot_x, du->hotspot_y);
 302	}
 303
 304	mutex_unlock(&dev->mode_config.mutex);
 305}
 306
 307/*
 308 * Generic framebuffer code
 309 */
 310
 311int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
 312				  struct drm_file *file_priv,
 313				  unsigned int *handle)
 314{
 315	if (handle)
 316		*handle = 0;
 317
 318	return 0;
 319}
 320
 321/*
 322 * Surface framebuffer code
 323 */
 324
 325#define vmw_framebuffer_to_vfbs(x) \
 326	container_of(x, struct vmw_framebuffer_surface, base.base)
 327
 328struct vmw_framebuffer_surface {
 329	struct vmw_framebuffer base;
 330	struct vmw_surface *surface;
 331	struct vmw_dma_buffer *buffer;
 332	struct delayed_work d_work;
 333	struct mutex work_lock;
 334	bool present_fs;
 335	struct list_head head;
 336	struct drm_master *master;
 337};
 338
 339/**
 340 * vmw_kms_idle_workqueues - Flush workqueues on this master
 341 *
 342 * @vmaster: Pointer identifying the master, for the surfaces of which
 343 * we idle the dirty work queues.
 344 *
 345 * This function should be called with the ttm lock held in exclusive mode
 346 * to idle all dirty work queues before the fifo is taken down.
 347 *
 348 * The work task may actually requeue itself, but after the flush returns we're
 349 * sure that there's nothing to present, since the ttm lock is held in
 350 * exclusive mode, so the fifo will never get used.
 351 */
 352
 353void vmw_kms_idle_workqueues(struct vmw_master *vmaster)
 354{
 355	struct vmw_framebuffer_surface *entry;
 356
 357	mutex_lock(&vmaster->fb_surf_mutex);
 358	list_for_each_entry(entry, &vmaster->fb_surf, head) {
 359		if (cancel_delayed_work_sync(&entry->d_work))
 360			(void) entry->d_work.work.func(&entry->d_work.work);
 361
 362		(void) cancel_delayed_work_sync(&entry->d_work);
 363	}
 364	mutex_unlock(&vmaster->fb_surf_mutex);
 365}
 366
 367void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
 368{
 369	struct vmw_framebuffer_surface *vfbs =
 370		vmw_framebuffer_to_vfbs(framebuffer);
 371	struct vmw_master *vmaster = vmw_master(vfbs->master);
 372
 374	mutex_lock(&vmaster->fb_surf_mutex);
 375	list_del(&vfbs->head);
 376	mutex_unlock(&vmaster->fb_surf_mutex);
 377
 378	cancel_delayed_work_sync(&vfbs->d_work);
 379	drm_master_put(&vfbs->master);
 380	drm_framebuffer_cleanup(framebuffer);
 381	vmw_surface_unreference(&vfbs->surface);
 382
 383	kfree(vfbs);
 384}
 385
 386static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
 387{
 388	struct delayed_work *d_work =
 389		container_of(work, struct delayed_work, work);
 390	struct vmw_framebuffer_surface *vfbs =
 391		container_of(d_work, struct vmw_framebuffer_surface, d_work);
 392	struct vmw_surface *surf = vfbs->surface;
 393	struct drm_framebuffer *framebuffer = &vfbs->base.base;
 394	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
 395
 396	struct {
 397		SVGA3dCmdHeader header;
 398		SVGA3dCmdPresent body;
 399		SVGA3dCopyRect cr;
 400	} *cmd;
 401
 402	/**
 403	 * Strictly we should take the ttm_lock in read mode before accessing
 404	 * the fifo, to make sure the fifo is present and up. However,
 405	 * instead we flush all workqueues under the ttm lock in exclusive mode
 406	 * before taking down the fifo.
 407	 */
 408	mutex_lock(&vfbs->work_lock);
 409	if (!vfbs->present_fs)
 410		goto out_unlock;
 411
 412	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 413	if (unlikely(cmd == NULL))
 414		goto out_resched;
 415
 416	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
 417	cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
 418	cmd->body.sid = cpu_to_le32(surf->res.id);
 419	cmd->cr.x = cpu_to_le32(0);
 420	cmd->cr.y = cpu_to_le32(0);
 421	cmd->cr.srcx = cmd->cr.x;
 422	cmd->cr.srcy = cmd->cr.y;
 423	cmd->cr.w = cpu_to_le32(framebuffer->width);
 424	cmd->cr.h = cpu_to_le32(framebuffer->height);
 425	vfbs->present_fs = false;
 426	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 427out_resched:
 428	/**
 429	 * Will not re-add if already pending.
 430	 */
 431	schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
 432out_unlock:
 433	mutex_unlock(&vfbs->work_lock);
 434}
 435
 436
 437int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 438				  struct drm_file *file_priv,
 439				  unsigned flags, unsigned color,
 440				  struct drm_clip_rect *clips,
 441				  unsigned num_clips)
 442{
 443	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
 444	struct vmw_master *vmaster = vmw_master(file_priv->master);
 445	struct vmw_framebuffer_surface *vfbs =
 446		vmw_framebuffer_to_vfbs(framebuffer);
 447	struct vmw_surface *surf = vfbs->surface;
 448	struct drm_clip_rect norect;
 449	SVGA3dCopyRect *cr;
 450	int i, inc = 1;
 451	int ret;
 452
 453	struct {
 454		SVGA3dCmdHeader header;
 455		SVGA3dCmdPresent body;
 456		SVGA3dCopyRect cr;
 457	} *cmd;
 458
 459	if (unlikely(vfbs->master != file_priv->master))
 460		return -EINVAL;
 461
 462	ret = ttm_read_lock(&vmaster->lock, true);
 463	if (unlikely(ret != 0))
 464		return ret;
 465
 466	if (!num_clips ||
 467	    !(dev_priv->fifo.capabilities &
 468	      SVGA_FIFO_CAP_SCREEN_OBJECT)) {
 469		int ret;
 470
 471		mutex_lock(&vfbs->work_lock);
 472		vfbs->present_fs = true;
 473		ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
 474		mutex_unlock(&vfbs->work_lock);
 475		if (ret) {
 476			/**
 477			 * No work was pending; force an immediate present.
 478			 */
 479			vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
 480		}
 481		ttm_read_unlock(&vmaster->lock);
 482		return 0;
 483	}
 484
 485	if (!num_clips) {
 486		num_clips = 1;
 487		clips = &norect;
 488		norect.x1 = norect.y1 = 0;
 489		norect.x2 = framebuffer->width;
 490		norect.y2 = framebuffer->height;
 491	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
 492		num_clips /= 2;
 493		inc = 2; /* skip source rects */
 494	}
 495
 496	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
 497	if (unlikely(cmd == NULL)) {
 498		DRM_ERROR("Fifo reserve failed.\n");
 499		ttm_read_unlock(&vmaster->lock);
 500		return -ENOMEM;
 501	}
 502
 503	memset(cmd, 0, sizeof(*cmd));
 504
 505	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
 506	cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips * sizeof(cmd->cr));
 507	cmd->body.sid = cpu_to_le32(surf->res.id);
 508
 509	for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
 510		cr->x = cpu_to_le16(clips->x1);
 511		cr->y = cpu_to_le16(clips->y1);
 512		cr->srcx = cr->x;
 513		cr->srcy = cr->y;
 514		cr->w = cpu_to_le16(clips->x2 - clips->x1);
 515		cr->h = cpu_to_le16(clips->y2 - clips->y1);
 516	}
 517
 518	vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
 519	ttm_read_unlock(&vmaster->lock);
 520	return 0;
 521}
 522
 523static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
 524	.destroy = vmw_framebuffer_surface_destroy,
 525	.dirty = vmw_framebuffer_surface_dirty,
 526	.create_handle = vmw_framebuffer_create_handle,
 527};
 528
 529static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 530					   struct drm_file *file_priv,
 531					   struct vmw_surface *surface,
 532					   struct vmw_framebuffer **out,
 533					   const struct drm_mode_fb_cmd
 534					   *mode_cmd)
 535
 536{
 537	struct drm_device *dev = dev_priv->dev;
 538	struct vmw_framebuffer_surface *vfbs;
 539	enum SVGA3dSurfaceFormat format;
 540	struct vmw_master *vmaster = vmw_master(file_priv->master);
 541	int ret;
 542
 543	/*
 544	 * Sanity checks.
 545	 */
 546
 547	if (unlikely(surface->mip_levels[0] != 1 ||
 548		     surface->num_sizes != 1 ||
 549		     surface->sizes[0].width < mode_cmd->width ||
 550		     surface->sizes[0].height < mode_cmd->height ||
 551		     surface->sizes[0].depth != 1)) {
 552		DRM_ERROR("Incompatible surface dimensions "
 553			  "for requested mode.\n");
 554		return -EINVAL;
 555	}
 556
 557	switch (mode_cmd->depth) {
 558	case 32:
 559		format = SVGA3D_A8R8G8B8;
 560		break;
 561	case 24:
 562		format = SVGA3D_X8R8G8B8;
 563		break;
 564	case 16:
 565		format = SVGA3D_R5G6B5;
 566		break;
 567	case 15:
 568		format = SVGA3D_A1R5G5B5;
 569		break;
 570	default:
 571		DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
 572		return -EINVAL;
 573	}
 574
 575	if (unlikely(format != surface->format)) {
 576		DRM_ERROR("Invalid surface format for requested mode.\n");
 577		return -EINVAL;
 578	}
 579
 580	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
 581	if (!vfbs) {
 582		ret = -ENOMEM;
 583		goto out_err1;
 584	}
 585
 586	ret = drm_framebuffer_init(dev, &vfbs->base.base,
 587				   &vmw_framebuffer_surface_funcs);
 588	if (ret)
 589		goto out_err2;
 590
 591	if (!vmw_surface_reference(surface)) {
 592		DRM_ERROR("failed to reference surface %p\n", surface);
		ret = -EINVAL;
 593		goto out_err3;
 594	}
 595
 596	/* XXX get the first 3 from the surface info */
 597	vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
 598	vfbs->base.base.pitch = mode_cmd->pitch;
 599	vfbs->base.base.depth = mode_cmd->depth;
 600	vfbs->base.base.width = mode_cmd->width;
 601	vfbs->base.base.height = mode_cmd->height;
 602	vfbs->base.pin = &vmw_surface_dmabuf_pin;
 603	vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
 604	vfbs->surface = surface;
 605	vfbs->master = drm_master_get(file_priv->master);
 606	mutex_init(&vfbs->work_lock);
 607
 608	mutex_lock(&vmaster->fb_surf_mutex);
 609	INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
 610	list_add_tail(&vfbs->head, &vmaster->fb_surf);
 611	mutex_unlock(&vmaster->fb_surf_mutex);
 612
 613	*out = &vfbs->base;
 614
 615	return 0;
 616
 617out_err3:
 618	drm_framebuffer_cleanup(&vfbs->base.base);
 619out_err2:
 620	kfree(vfbs);
 621out_err1:
 622	return ret;
 623}
 624
 625/*
 626 * Dmabuf framebuffer code
 627 */
 628
 629#define vmw_framebuffer_to_vfbd(x) \
 630	container_of(x, struct vmw_framebuffer_dmabuf, base.base)
 631
 632struct vmw_framebuffer_dmabuf {
 633	struct vmw_framebuffer base;
 634	struct vmw_dma_buffer *buffer;
 635};
 636
 637void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
 638{
 639	struct vmw_framebuffer_dmabuf *vfbd =
 640		vmw_framebuffer_to_vfbd(framebuffer);
 641
 642	drm_framebuffer_cleanup(framebuffer);
 643	vmw_dmabuf_unreference(&vfbd->buffer);
 644
 645	kfree(vfbd);
 646}
 647
 648int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 649				 struct drm_file *file_priv,
 650				 unsigned flags, unsigned color,
 651				 struct drm_clip_rect *clips,
 652				 unsigned num_clips)
 653{
 654	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
 655	struct vmw_master *vmaster = vmw_master(file_priv->master);
 656	struct drm_clip_rect norect;
 657	int ret;
 658	struct {
 659		uint32_t header;
 660		SVGAFifoCmdUpdate body;
 661	} *cmd;
 662	int i, increment = 1;
 663
 664	ret = ttm_read_lock(&vmaster->lock, true);
 665	if (unlikely(ret != 0))
 666		return ret;
 667
 668	if (!num_clips) {
 669		num_clips = 1;
 670		clips = &norect;
 671		norect.x1 = norect.y1 = 0;
 672		norect.x2 = framebuffer->width;
 673		norect.y2 = framebuffer->height;
 674	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
 675		num_clips /= 2;
 676		increment = 2;
 677	}
 678
 679	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
 680	if (unlikely(cmd == NULL)) {
 681		DRM_ERROR("Fifo reserve failed.\n");
 682		ttm_read_unlock(&vmaster->lock);
 683		return -ENOMEM;
 684	}
 685
 686	for (i = 0; i < num_clips; i++, clips += increment) {
 687		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
 688		cmd[i].body.x = cpu_to_le32(clips->x1);
 689		cmd[i].body.y = cpu_to_le32(clips->y1);
 690		cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
 691		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
 692	}
 693
 694	vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
 695	ttm_read_unlock(&vmaster->lock);
 696
 697	return 0;
 698}
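
/*
 * Clip layout sketch for the DRM_MODE_FB_DIRTY_ANNOTATE_COPY path
 * above: user space passes destination/source rect pairs, e.g.
 *
 *	clips[] = { dst0, src0, dst1, src1 }	(num_clips == 4)
 *
 * so the count is halved and the loop steps through the array with
 * increment == 2, emitting one SVGA_CMD_UPDATE per destination rect.
 */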
 699
 700static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
 701	.destroy = vmw_framebuffer_dmabuf_destroy,
 702	.dirty = vmw_framebuffer_dmabuf_dirty,
 703	.create_handle = vmw_framebuffer_create_handle,
 704};
 705
 706static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
 707{
 708	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
 709	struct vmw_framebuffer_surface *vfbs =
 710		vmw_framebuffer_to_vfbs(&vfb->base);
 711	unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height;
 712	int ret;
 713
 714	vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL);
 715	if (unlikely(vfbs->buffer == NULL))
 716		return -ENOMEM;
 717
 718	vmw_overlay_pause_all(dev_priv);
 719	ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size,
 720			       &vmw_vram_ne_placement,
 721			       false, &vmw_dmabuf_bo_free);
 722	vmw_overlay_resume_all(dev_priv);
 723	if (unlikely(ret != 0))
 724		vfbs->buffer = NULL;
 725
 726	return ret;
 727}
 728
 729static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
 730{
 731	struct ttm_buffer_object *bo;
 732	struct vmw_framebuffer_surface *vfbs =
 733		vmw_framebuffer_to_vfbs(&vfb->base);
 734
 735	if (unlikely(vfbs->buffer == NULL))
 736		return 0;
 737
 738	bo = &vfbs->buffer->base;
 739	ttm_bo_unref(&bo);
 740	vfbs->buffer = NULL;
 741
 742	return 0;
 743}
 744
 745static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
 746{
 747	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
 748	struct vmw_framebuffer_dmabuf *vfbd =
 749		vmw_framebuffer_to_vfbd(&vfb->base);
 750	int ret;
 751
 752
 753	vmw_overlay_pause_all(dev_priv);
 754
 755	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);
 756
 757	vmw_overlay_resume_all(dev_priv);
 758
 759	WARN_ON(ret != 0);
 760
 761	return 0;
 762}
 763
 764static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
 765{
 766	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
 767	struct vmw_framebuffer_dmabuf *vfbd =
 768		vmw_framebuffer_to_vfbd(&vfb->base);
 769
 770	if (!vfbd->buffer) {
 771		WARN_ON(!vfbd->buffer);
 772		return 0;
 773	}
 774
 775	return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
 776}
 777
 778static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
 779					  struct vmw_dma_buffer *dmabuf,
 780					  struct vmw_framebuffer **out,
 781					  const struct drm_mode_fb_cmd
 782					  *mode_cmd)
 783
 784{
 785	struct drm_device *dev = dev_priv->dev;
 786	struct vmw_framebuffer_dmabuf *vfbd;
 787	unsigned int requested_size;
 788	int ret;
 789
 790	requested_size = mode_cmd->height * mode_cmd->pitch;
 791	if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
 792		DRM_ERROR("Screen buffer object size is too small "
 793			  "for requested mode.\n");
 794		return -EINVAL;
 795	}
 796
 797	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
 798	if (!vfbd) {
 799		ret = -ENOMEM;
 800		goto out_err1;
 801	}
 802
 803	ret = drm_framebuffer_init(dev, &vfbd->base.base,
 804				   &vmw_framebuffer_dmabuf_funcs);
 805	if (ret)
 806		goto out_err2;
 807
 808	if (!vmw_dmabuf_reference(dmabuf)) {
 809		DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
		ret = -EINVAL;
 810		goto out_err3;
 811	}
 812
 813	vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
 814	vfbd->base.base.pitch = mode_cmd->pitch;
 815	vfbd->base.base.depth = mode_cmd->depth;
 816	vfbd->base.base.width = mode_cmd->width;
 817	vfbd->base.base.height = mode_cmd->height;
 818	vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
 819	vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
 820	vfbd->buffer = dmabuf;
 821	*out = &vfbd->base;
 822
 823	return 0;
 824
 825out_err3:
 826	drm_framebuffer_cleanup(&vfbd->base.base);
 827out_err2:
 828	kfree(vfbd);
 829out_err1:
 830	return ret;
 831}
 832
 833/*
 834 * Generic Kernel modesetting functions
 835 */
 836
 837static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 838						 struct drm_file *file_priv,
 839						 struct drm_mode_fb_cmd *mode_cmd)
 840{
 841	struct vmw_private *dev_priv = vmw_priv(dev);
 842	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 843	struct vmw_framebuffer *vfb = NULL;
 844	struct vmw_surface *surface = NULL;
 845	struct vmw_dma_buffer *bo = NULL;
 846	u64 required_size;
 847	int ret;
 848
 849	/**
 850	 * This code should be conditioned on Screen Objects not being used.
 851	 * If screen objects are used, we can allocate a GMR to hold the
 852	 * requested framebuffer.
 853	 */
 854
 855	required_size = mode_cmd->pitch * mode_cmd->height;
 856	if (unlikely(required_size > (u64) dev_priv->vram_size)) {
 857		DRM_ERROR("VRAM size is too small for requested mode.\n");
 858		return ERR_PTR(-ENOMEM);
 859	}
 860
 861	/**
 862	 * End conditioned code.
 863	 */
 864
 865	ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
 866					     mode_cmd->handle, &surface);
 867	if (ret)
 868		goto try_dmabuf;
 869
 870	if (!surface->scanout)
 871		goto err_not_scanout;
 872
 873	ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
 874					      &vfb, mode_cmd);
 875
 876	/* vmw_user_surface_lookup takes one ref so does new_fb */
 877	vmw_surface_unreference(&surface);
 878
 879	if (ret) {
 880		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
 881		return ERR_PTR(ret);
 882	}
 883	return &vfb->base;
 884
 885try_dmabuf:
 886	DRM_INFO("%s: trying buffer\n", __func__);
 887
 888	ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
 889	if (ret) {
 890		DRM_ERROR("failed to find buffer: %i\n", ret);
 891		return ERR_PTR(-ENOENT);
 892	}
 893
 894	ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
 895					     mode_cmd);
 896
 897	/* vmw_user_dmabuf_lookup takes one ref so does new_fb */
 898	vmw_dmabuf_unreference(&bo);
 899
 900	if (ret) {
 901		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
 902		return ERR_PTR(ret);
 903	}
 904
 905	return &vfb->base;
 906
 907err_not_scanout:
 908	DRM_ERROR("surface not marked as scanout\n");
 909	/* vmw_user_surface_lookup takes one ref */
 910	vmw_surface_unreference(&surface);
 911
 912	return ERR_PTR(-EINVAL);
 913}
 914
 915static struct drm_mode_config_funcs vmw_kms_funcs = {
 916	.fb_create = vmw_kms_fb_create,
 917};
 918
 919int vmw_kms_init(struct vmw_private *dev_priv)
 920{
 921	struct drm_device *dev = dev_priv->dev;
 922	int ret;
 923
 924	drm_mode_config_init(dev);
 925	dev->mode_config.funcs = &vmw_kms_funcs;
 926	dev->mode_config.min_width = 1;
 927	dev->mode_config.min_height = 1;
 928	/* assumed largest fb size */
 929	dev->mode_config.max_width = 8192;
 930	dev->mode_config.max_height = 8192;
 931
 932	ret = vmw_kms_init_legacy_display_system(dev_priv);
 933
 934	return 0;
 935}
 936
 937int vmw_kms_close(struct vmw_private *dev_priv)
 938{
 939	/*
 940	 * Docs says we should take the lock before calling this function
 941	 * but since it destroys encoders and our destructor calls
 942	 * drm_encoder_cleanup which takes the lock we deadlock.
 943	 */
 944	drm_mode_config_cleanup(dev_priv->dev);
 945	vmw_kms_close_legacy_display_system(dev_priv);
 946	return 0;
 947}
 948
 949int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
 950				struct drm_file *file_priv)
 951{
 952	struct drm_vmw_cursor_bypass_arg *arg = data;
 953	struct vmw_display_unit *du;
 954	struct drm_mode_object *obj;
 955	struct drm_crtc *crtc;
 956	int ret = 0;
 957
 958
 959	mutex_lock(&dev->mode_config.mutex);
 960	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
 961
 962		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 963			du = vmw_crtc_to_du(crtc);
 964			du->hotspot_x = arg->xhot;
 965			du->hotspot_y = arg->yhot;
 966		}
 967
 968		mutex_unlock(&dev->mode_config.mutex);
 969		return 0;
 970	}
 971
 972	obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
 973	if (!obj) {
 974		ret = -EINVAL;
 975		goto out;
 976	}
 977
 978	crtc = obj_to_crtc(obj);
 979	du = vmw_crtc_to_du(crtc);
 980
 981	du->hotspot_x = arg->xhot;
 982	du->hotspot_y = arg->yhot;
 983
 984out:
 985	mutex_unlock(&dev->mode_config.mutex);
 986
 987	return ret;
 988}
 989
 990void vmw_kms_write_svga(struct vmw_private *vmw_priv,
 991			unsigned width, unsigned height, unsigned pitch,
 992			unsigned bpp, unsigned depth)
 993{
 994	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
 995		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
 996	else if (vmw_fifo_have_pitchlock(vmw_priv))
 997		iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
 998	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
 999	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
1000	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
1001	vmw_write(vmw_priv, SVGA_REG_DEPTH, depth);
1002	vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
1003	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
1004	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
1005}
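
/*
 * Example of the programming above (plain arithmetic): a 1024x768 mode
 * at 32 bits per pixel uses
 *
 *	pitch = 1024 * 32 / 8 = 4096 bytes per scanline,
 *
 * and the fixed red/green/blue masks written above describe the
 * x8r8g8b8 pixel layout the device expects.
 */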
1006
1007int vmw_kms_save_vga(struct vmw_private *vmw_priv)
1008{
1009	struct vmw_vga_topology_state *save;
1010	uint32_t i;
1011
1012	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
1013	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
1014	vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
1015	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
1016	vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
1017	vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
1018	vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
1019	vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
1020	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
1021		vmw_priv->vga_pitchlock =
1022		  vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
1023	else if (vmw_fifo_have_pitchlock(vmw_priv))
1024		vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
1025						       SVGA_FIFO_PITCHLOCK);
1026
1027	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
1028		return 0;
1029
1030	vmw_priv->num_displays = vmw_read(vmw_priv,
1031					  SVGA_REG_NUM_GUEST_DISPLAYS);
1032
1033	if (vmw_priv->num_displays == 0)
1034		vmw_priv->num_displays = 1;
1035
1036	for (i = 0; i < vmw_priv->num_displays; ++i) {
1037		save = &vmw_priv->vga_save[i];
1038		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
1039		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
1040		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
1041		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
1042		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
1043		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
1044		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
1045		if (i == 0 && vmw_priv->num_displays == 1 &&
1046		    save->width == 0 && save->height == 0) {
1047
1048			/*
1049			 * It should be fairly safe to assume that these
1050			 * values are uninitialized.
1051			 */
1052
1053			save->width = vmw_priv->vga_width - save->pos_x;
1054			save->height = vmw_priv->vga_height - save->pos_y;
1055		}
1056	}
1057
1058	return 0;
1059}
1060
1061int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
1062{
1063	struct vmw_vga_topology_state *save;
1064	uint32_t i;
1065
1066	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
1067	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
1068	vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
1069	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
1070	vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
1071	vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
1072	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
1073	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
1074	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
1075		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
1076			  vmw_priv->vga_pitchlock);
1077	else if (vmw_fifo_have_pitchlock(vmw_priv))
1078		iowrite32(vmw_priv->vga_pitchlock,
1079			  vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
1080
1081	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
1082		return 0;
1083
1084	for (i = 0; i < vmw_priv->num_displays; ++i) {
1085		save = &vmw_priv->vga_save[i];
1086		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
1087		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
1088		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
1089		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
1090		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
1091		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
1092		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
1093	}
1094
1095	return 0;
1096}
1097
1098int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
1099				struct drm_file *file_priv)
1100{
1101	struct vmw_private *dev_priv = vmw_priv(dev);
1102	struct drm_vmw_update_layout_arg *arg =
1103		(struct drm_vmw_update_layout_arg *)data;
1104	struct vmw_master *vmaster = vmw_master(file_priv->master);
1105	void __user *user_rects;
1106	struct drm_vmw_rect *rects;
1107	unsigned rects_size;
1108	int ret;
1109
1110	ret = ttm_read_lock(&vmaster->lock, true);
1111	if (unlikely(ret != 0))
1112		return ret;
1113
1114	if (!arg->num_outputs) {
1115		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
1116		vmw_kms_ldu_update_layout(dev_priv, 1, &def_rect);
1117		goto out_unlock;
1118	}
1119
1120	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
1121	rects = kzalloc(rects_size, GFP_KERNEL);
1122	if (unlikely(!rects)) {
1123		ret = -ENOMEM;
1124		goto out_unlock;
1125	}
1126
1127	user_rects = (void __user *)(unsigned long)arg->rects;
1128	ret = copy_from_user(rects, user_rects, rects_size);
1129	if (unlikely(ret != 0)) {
1130		DRM_ERROR("Failed to get rects.\n");
1131		ret = -EFAULT;
1132		goto out_free;
1133	}
1134
1135	vmw_kms_ldu_update_layout(dev_priv, arg->num_outputs, rects);
1136
1137out_free:
1138	kfree(rects);
1139out_unlock:
1140	ttm_read_unlock(&vmaster->lock);
1141	return ret;
1142}
1143
1144bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
1145				uint32_t pitch,
1146				uint32_t height)
1147{
1148	return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
1149}
1150
1151u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
1152{
1153	return 0;
1154}