   1/**************************************************************************
   2 *
   3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27#include <linux/module.h>
  28#include <linux/console.h>
  29
  30#include <drm/drmP.h>
  31#include "vmwgfx_drv.h"
  32#include "vmwgfx_binding.h"
  33#include <drm/ttm/ttm_placement.h>
  34#include <drm/ttm/ttm_bo_driver.h>
  35#include <drm/ttm/ttm_object.h>
  36#include <drm/ttm/ttm_module.h>
  37#include <linux/dma_remapping.h>
  38
  39#define VMWGFX_DRIVER_NAME "vmwgfx"
  40#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
  41#define VMWGFX_CHIP_SVGAII 0
  42#define VMW_FB_RESERVATION 0
  43
  44#define VMW_MIN_INITIAL_WIDTH 800
  45#define VMW_MIN_INITIAL_HEIGHT 600
  46
  47
  48/**
  49 * Fully encoded drm commands. Might move to vmw_drm.h
  50 */
  51
  52#define DRM_IOCTL_VMW_GET_PARAM					\
  53	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
  54		 struct drm_vmw_getparam_arg)
  55#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
  56	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
  57		union drm_vmw_alloc_dmabuf_arg)
  58#define DRM_IOCTL_VMW_UNREF_DMABUF				\
  59	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
  60		struct drm_vmw_unref_dmabuf_arg)
  61#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
  62	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
  63		 struct drm_vmw_cursor_bypass_arg)
  64
  65#define DRM_IOCTL_VMW_CONTROL_STREAM				\
  66	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
  67		 struct drm_vmw_control_stream_arg)
  68#define DRM_IOCTL_VMW_CLAIM_STREAM				\
  69	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
  70		 struct drm_vmw_stream_arg)
  71#define DRM_IOCTL_VMW_UNREF_STREAM				\
  72	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
  73		 struct drm_vmw_stream_arg)
  74
  75#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
  76	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
  77		struct drm_vmw_context_arg)
  78#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
  79	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
  80		struct drm_vmw_context_arg)
  81#define DRM_IOCTL_VMW_CREATE_SURFACE				\
  82	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
  83		 union drm_vmw_surface_create_arg)
  84#define DRM_IOCTL_VMW_UNREF_SURFACE				\
  85	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
  86		 struct drm_vmw_surface_arg)
  87#define DRM_IOCTL_VMW_REF_SURFACE				\
  88	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
  89		 union drm_vmw_surface_reference_arg)
  90#define DRM_IOCTL_VMW_EXECBUF					\
  91	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
  92		struct drm_vmw_execbuf_arg)
  93#define DRM_IOCTL_VMW_GET_3D_CAP				\
  94	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
  95		 struct drm_vmw_get_3d_cap_arg)
  96#define DRM_IOCTL_VMW_FENCE_WAIT				\
  97	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
  98		 struct drm_vmw_fence_wait_arg)
  99#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
 100	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
 101		 struct drm_vmw_fence_signaled_arg)
 102#define DRM_IOCTL_VMW_FENCE_UNREF				\
 103	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
 104		 struct drm_vmw_fence_arg)
 105#define DRM_IOCTL_VMW_FENCE_EVENT				\
 106	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
 107		 struct drm_vmw_fence_event_arg)
 108#define DRM_IOCTL_VMW_PRESENT					\
 109	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
 110		 struct drm_vmw_present_arg)
 111#define DRM_IOCTL_VMW_PRESENT_READBACK				\
 112	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
 113		 struct drm_vmw_present_readback_arg)
 114#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
 115	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
 116		 struct drm_vmw_update_layout_arg)
 117#define DRM_IOCTL_VMW_CREATE_SHADER				\
 118	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
 119		 struct drm_vmw_shader_create_arg)
 120#define DRM_IOCTL_VMW_UNREF_SHADER				\
 121	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
 122		 struct drm_vmw_shader_arg)
 123#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
 124	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
 125		 union drm_vmw_gb_surface_create_arg)
 126#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
 127	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
 128		 union drm_vmw_gb_surface_reference_arg)
 129#define DRM_IOCTL_VMW_SYNCCPU					\
 130	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
 131		 struct drm_vmw_synccpu_arg)
 132#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			\
 133	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,	\
 134		struct drm_vmw_context_arg)
 135
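/*
 * A minimal userspace sketch (illustrative only; the device path and the
 * DRM_VMW_PARAM_3D query are assumptions, and the arg struct comes from
 * the vmwgfx uapi header) showing how one of the fully encoded commands
 * above is issued:
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };
 *	if (ioctl(fd, DRM_IOCTL_VMW_GET_PARAM, &arg) == 0)
 *		printf("3D: %llu\n", (unsigned long long) arg.value);
 */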
 136/**
 137 * The core DRM version of this macro doesn't account for
 138 * DRM_COMMAND_BASE.
 139 */
 140
 141#define VMW_IOCTL_DEF(ioctl, func, flags) \
 142  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
 143
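/*
 * For instance, the VMW_GET_PARAM entry in the table below expands to
 * roughly:
 *
 *	[DRM_VMW_GET_PARAM] = {DRM_IOCTL_VMW_GET_PARAM,
 *			       DRM_AUTH | DRM_RENDER_ALLOW,
 *			       vmw_getparam_ioctl}
 *
 * so each descriptor lands at index (ioctl nr - DRM_COMMAND_BASE), which
 * is how vmw_generic_ioctl() indexes vmw_ioctls[] further down.
 */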
 144/**
 145 * Ioctl definitions.
 146 */
 147
 148static const struct drm_ioctl_desc vmw_ioctls[] = {
 149	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
 150		      DRM_AUTH | DRM_RENDER_ALLOW),
 151	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
 152		      DRM_AUTH | DRM_RENDER_ALLOW),
 153	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
 154		      DRM_RENDER_ALLOW),
 155	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
 156		      vmw_kms_cursor_bypass_ioctl,
 157		      DRM_MASTER | DRM_CONTROL_ALLOW),
 158
 159	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
 160		      DRM_MASTER | DRM_CONTROL_ALLOW),
 161	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
 162		      DRM_MASTER | DRM_CONTROL_ALLOW),
 163	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
 164		      DRM_MASTER | DRM_CONTROL_ALLOW),
 165
 166	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
 167		      DRM_AUTH | DRM_RENDER_ALLOW),
 168	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
 169		      DRM_RENDER_ALLOW),
 170	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
 171		      DRM_AUTH | DRM_RENDER_ALLOW),
 172	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
 173		      DRM_RENDER_ALLOW),
 174	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
 175		      DRM_AUTH | DRM_RENDER_ALLOW),
 176	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
 177		      DRM_RENDER_ALLOW),
 178	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
 179		      DRM_RENDER_ALLOW),
 180	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
 181		      vmw_fence_obj_signaled_ioctl,
 182		      DRM_RENDER_ALLOW),
 183	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
 184		      DRM_RENDER_ALLOW),
 185	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
 186		      DRM_AUTH | DRM_RENDER_ALLOW),
 187	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
 188		      DRM_AUTH | DRM_RENDER_ALLOW),
 189
  190	/* These ioctls allow direct access to the framebuffers, so they are marked master-only. */
 191	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
 192		      DRM_MASTER | DRM_AUTH),
 193	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
 194		      vmw_present_readback_ioctl,
 195		      DRM_MASTER | DRM_AUTH),
 196	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
 197		      vmw_kms_update_layout_ioctl,
 198		      DRM_MASTER | DRM_CONTROL_ALLOW),
 199	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
 200		      vmw_shader_define_ioctl,
 201		      DRM_AUTH | DRM_RENDER_ALLOW),
 202	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
 203		      vmw_shader_destroy_ioctl,
 204		      DRM_RENDER_ALLOW),
 205	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
 206		      vmw_gb_surface_define_ioctl,
 207		      DRM_AUTH | DRM_RENDER_ALLOW),
 208	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
 209		      vmw_gb_surface_reference_ioctl,
 210		      DRM_AUTH | DRM_RENDER_ALLOW),
 211	VMW_IOCTL_DEF(VMW_SYNCCPU,
 212		      vmw_user_dmabuf_synccpu_ioctl,
 213		      DRM_RENDER_ALLOW),
 214	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
 215		      vmw_extended_context_define_ioctl,
 216		      DRM_AUTH | DRM_RENDER_ALLOW),
 217};
 218
 219static struct pci_device_id vmw_pci_id_list[] = {
 220	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
 221	{0, 0, 0}
 222};
 223MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
 224
 225static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
 226static int vmw_force_iommu;
 227static int vmw_restrict_iommu;
 228static int vmw_force_coherent;
 229static int vmw_restrict_dma_mask;
 230
 231static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
 232static void vmw_master_init(struct vmw_master *);
 233static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 234			      void *ptr);
 235
 236MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
 237module_param_named(enable_fbdev, enable_fbdev, int, 0600);
 238MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
 239module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
 240MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
 241module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
 242MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
 243module_param_named(force_coherent, vmw_force_coherent, int, 0600);
 244MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
 245module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
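/*
 * With 0600 permissions the parameters above are also writable at runtime
 * via /sys/module/vmwgfx/parameters/<name>. A load-time sketch (values
 * are illustrative):
 *
 *	modprobe vmwgfx enable_fbdev=1 restrict_iommu=1
 */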
 246
 247
 248static void vmw_print_capabilities(uint32_t capabilities)
 249{
 250	DRM_INFO("Capabilities:\n");
 251	if (capabilities & SVGA_CAP_RECT_COPY)
 252		DRM_INFO("  Rect copy.\n");
 253	if (capabilities & SVGA_CAP_CURSOR)
 254		DRM_INFO("  Cursor.\n");
 255	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
 256		DRM_INFO("  Cursor bypass.\n");
 257	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
 258		DRM_INFO("  Cursor bypass 2.\n");
 259	if (capabilities & SVGA_CAP_8BIT_EMULATION)
 260		DRM_INFO("  8bit emulation.\n");
 261	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
 262		DRM_INFO("  Alpha cursor.\n");
 263	if (capabilities & SVGA_CAP_3D)
 264		DRM_INFO("  3D.\n");
 265	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
 266		DRM_INFO("  Extended Fifo.\n");
 267	if (capabilities & SVGA_CAP_MULTIMON)
 268		DRM_INFO("  Multimon.\n");
 269	if (capabilities & SVGA_CAP_PITCHLOCK)
 270		DRM_INFO("  Pitchlock.\n");
 271	if (capabilities & SVGA_CAP_IRQMASK)
 272		DRM_INFO("  Irq mask.\n");
 273	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
 274		DRM_INFO("  Display Topology.\n");
 275	if (capabilities & SVGA_CAP_GMR)
 276		DRM_INFO("  GMR.\n");
 277	if (capabilities & SVGA_CAP_TRACES)
 278		DRM_INFO("  Traces.\n");
 279	if (capabilities & SVGA_CAP_GMR2)
 280		DRM_INFO("  GMR2.\n");
 281	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
 282		DRM_INFO("  Screen Object 2.\n");
 283	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
 284		DRM_INFO("  Command Buffers.\n");
 285	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
 286		DRM_INFO("  Command Buffers 2.\n");
 287	if (capabilities & SVGA_CAP_GBOBJECTS)
 288		DRM_INFO("  Guest Backed Resources.\n");
 289	if (capabilities & SVGA_CAP_DX)
 290		DRM_INFO("  DX Features.\n");
 291}
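/*
 * A hypothetical log excerpt from the function above, on a host
 * advertising a typical feature set (DRM_INFO prefixes lines with
 * "[drm]"):
 *
 *	[drm] Capabilities:
 *	[drm]   Rect copy.
 *	[drm]   Cursor.
 *	[drm]   Irq mask.
 *	[drm]   GMR2.
 *	[drm]   Guest Backed Resources.
 */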
 292
 293/**
 294 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 295 *
 296 * @dev_priv: A device private structure.
 297 *
 298 * This function creates a small buffer object that holds the query
 299 * result for dummy queries emitted as query barriers.
 300 * The function will then map the first page and initialize a pending
  301 * occlusion query result structure. Finally, it will unmap the buffer.
 302 * No interruptible waits are done within this function.
 303 *
 304 * Returns an error if bo creation or initialization fails.
 305 */
 306static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 307{
 308	int ret;
 309	struct vmw_dma_buffer *vbo;
 310	struct ttm_bo_kmap_obj map;
 311	volatile SVGA3dQueryResult *result;
 312	bool dummy;
 313
 314	/*
 315	 * Create the vbo as pinned, so that a tryreserve will
 316	 * immediately succeed. This is because we're the only
 317	 * user of the bo currently.
 318	 */
 319	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
 320	if (!vbo)
 321		return -ENOMEM;
 322
 323	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
 324			      &vmw_sys_ne_placement, false,
 325			      &vmw_dmabuf_bo_free);
 326	if (unlikely(ret != 0))
 327		return ret;
 328
 329	ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
 330	BUG_ON(ret != 0);
 331	vmw_bo_pin_reserved(vbo, true);
 332
 333	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
 334	if (likely(ret == 0)) {
 335		result = ttm_kmap_obj_virtual(&map, &dummy);
 336		result->totalSize = sizeof(*result);
 337		result->state = SVGA3D_QUERYSTATE_PENDING;
 338		result->result32 = 0xff;
 339		ttm_bo_kunmap(&map);
 340	}
 341	vmw_bo_pin_reserved(vbo, false);
 342	ttm_bo_unreserve(&vbo->base);
 343
 344	if (unlikely(ret != 0)) {
 345		DRM_ERROR("Dummy query buffer map failed.\n");
 346		vmw_dmabuf_unreference(&vbo);
 347	} else
 348		dev_priv->dummy_query_bo = vbo;
 349
 350	return ret;
 351}
 352
 353/**
 354 * vmw_request_device_late - Perform late device setup
 355 *
 356 * @dev_priv: Pointer to device private.
 357 *
 358 * This function performs setup of otables and enables large command
 359 * buffer submission. These tasks are split out to a separate function
 360 * because it reverts vmw_release_device_early and is intended to be used
 361 * by an error path in the hibernation code.
 362 */
 363static int vmw_request_device_late(struct vmw_private *dev_priv)
 364{
 365	int ret;
 366
 367	if (dev_priv->has_mob) {
 368		ret = vmw_otables_setup(dev_priv);
 369		if (unlikely(ret != 0)) {
 370			DRM_ERROR("Unable to initialize "
 371				  "guest Memory OBjects.\n");
 372			return ret;
 373		}
 374	}
 375
 376	if (dev_priv->cman) {
 377		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
 378					       256*4096, 2*4096);
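		/* I.e. a 1 MiB main pool with an 8 KiB (two-page) default
		 * command buffer size, assuming the second argument is the
		 * default buffer size. */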
 379		if (ret) {
 380			struct vmw_cmdbuf_man *man = dev_priv->cman;
 381
 382			dev_priv->cman = NULL;
 383			vmw_cmdbuf_man_destroy(man);
 384		}
 385	}
 386
 387	return 0;
 388}
 389
 390static int vmw_request_device(struct vmw_private *dev_priv)
 391{
 392	int ret;
 393
 394	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
 395	if (unlikely(ret != 0)) {
 396		DRM_ERROR("Unable to initialize FIFO.\n");
 397		return ret;
 398	}
 399	vmw_fence_fifo_up(dev_priv->fman);
 400	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
 401	if (IS_ERR(dev_priv->cman)) {
 402		dev_priv->cman = NULL;
 403		dev_priv->has_dx = false;
 404	}
 405
 406	ret = vmw_request_device_late(dev_priv);
 407	if (ret)
 408		goto out_no_mob;
 409
 410	ret = vmw_dummy_query_bo_create(dev_priv);
 411	if (unlikely(ret != 0))
 412		goto out_no_query_bo;
 413
 414	return 0;
 415
 416out_no_query_bo:
 417	if (dev_priv->cman)
 418		vmw_cmdbuf_remove_pool(dev_priv->cman);
 419	if (dev_priv->has_mob) {
 420		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
 421		vmw_otables_takedown(dev_priv);
 422	}
 423	if (dev_priv->cman)
 424		vmw_cmdbuf_man_destroy(dev_priv->cman);
 425out_no_mob:
 426	vmw_fence_fifo_down(dev_priv->fman);
 427	vmw_fifo_release(dev_priv, &dev_priv->fifo);
 428	return ret;
 429}
 430
 431/**
 432 * vmw_release_device_early - Early part of fifo takedown.
 433 *
 434 * @dev_priv: Pointer to device private struct.
 435 *
 436 * This is the first part of command submission takedown, to be called before
 437 * buffer management is taken down.
 438 */
 439static void vmw_release_device_early(struct vmw_private *dev_priv)
 440{
 441	/*
  442	 * Previous teardown steps should have released
  443	 * the pinned bo.
 444	 */
 445
 446	BUG_ON(dev_priv->pinned_bo != NULL);
 447
 448	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
 449	if (dev_priv->cman)
 450		vmw_cmdbuf_remove_pool(dev_priv->cman);
 451
 452	if (dev_priv->has_mob) {
 453		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
 454		vmw_otables_takedown(dev_priv);
 455	}
 456}
 457
 458/**
 459 * vmw_release_device_late - Late part of fifo takedown.
 460 *
 461 * @dev_priv: Pointer to device private struct.
 462 *
 463 * This is the last part of the command submission takedown, to be called when
 464 * command submission is no longer needed. It may wait on pending fences.
 465 */
 466static void vmw_release_device_late(struct vmw_private *dev_priv)
 467{
 468	vmw_fence_fifo_down(dev_priv->fman);
 469	if (dev_priv->cman)
 470		vmw_cmdbuf_man_destroy(dev_priv->cman);
 471
 472	vmw_fifo_release(dev_priv, &dev_priv->fifo);
 473}
 474
 475/**
 476 * Sets the initial_[width|height] fields on the given vmw_private.
 477 *
  478 * It does so by reading the SVGA_REG_[WIDTH|HEIGHT] registers and then
  479 * clamping the values to the fb_max_[width|height] fields and to
  480 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
  481 * If the values appear to be invalid, they are set to
  482 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 483 */
 484static void vmw_get_initial_size(struct vmw_private *dev_priv)
 485{
 486	uint32_t width;
 487	uint32_t height;
 488
 489	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
 490	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);
 491
 492	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
 493	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);
 494
 495	if (width > dev_priv->fb_max_width ||
 496	    height > dev_priv->fb_max_height) {
 497
 498		/*
 499		 * This is a host error and shouldn't occur.
 500		 */
 501
 502		width = VMW_MIN_INITIAL_WIDTH;
 503		height = VMW_MIN_INITIAL_HEIGHT;
 504	}
 505
 506	dev_priv->initial_width = width;
 507	dev_priv->initial_height = height;
 508}
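/*
 * A worked example of the clamping above: with fb_max_[width|height] =
 * 8192, a host-reported 640x480 is raised to the 800x600 minimum and
 * kept, while a bogus 16384x16384 exceeds the fb_max_* limits and also
 * falls back to 800x600.
 */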
 509
 510/**
 511 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 512 * system.
 513 *
 514 * @dev_priv: Pointer to a struct vmw_private
 515 *
  516 * This function tries to determine the IOMMU setup and what actions
 517 * need to be taken by the driver to make system pages visible to the
 518 * device.
 519 * If this function decides that DMA is not possible, it returns -EINVAL.
 520 * The driver may then try to disable features of the device that require
 521 * DMA.
 522 */
 523static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 524{
 525	static const char *names[vmw_dma_map_max] = {
 526		[vmw_dma_phys] = "Using physical TTM page addresses.",
 527		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
 528		[vmw_dma_map_populate] = "Keeping DMA mappings.",
 529		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
 530#ifdef CONFIG_X86
 531	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
 532
 533#ifdef CONFIG_INTEL_IOMMU
 534	if (intel_iommu_enabled) {
 535		dev_priv->map_mode = vmw_dma_map_populate;
 536		goto out_fixup;
 537	}
 538#endif
 539
 540	if (!(vmw_force_iommu || vmw_force_coherent)) {
 541		dev_priv->map_mode = vmw_dma_phys;
 542		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
 543		return 0;
 544	}
 545
 546	dev_priv->map_mode = vmw_dma_map_populate;
 547
 548	if (dma_ops->sync_single_for_cpu)
 549		dev_priv->map_mode = vmw_dma_alloc_coherent;
 550#ifdef CONFIG_SWIOTLB
 551	if (swiotlb_nr_tbl() == 0)
 552		dev_priv->map_mode = vmw_dma_map_populate;
 553#endif
 554
 555#ifdef CONFIG_INTEL_IOMMU
 556out_fixup:
 557#endif
 558	if (dev_priv->map_mode == vmw_dma_map_populate &&
 559	    vmw_restrict_iommu)
 560		dev_priv->map_mode = vmw_dma_map_bind;
 561
 562	if (vmw_force_coherent)
 563		dev_priv->map_mode = vmw_dma_alloc_coherent;
 564
 565#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
 566	/*
 567	 * No coherent page pool
 568	 */
 569	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
 570		return -EINVAL;
 571#endif
 572
 573#else /* CONFIG_X86 */
 574	dev_priv->map_mode = vmw_dma_map_populate;
 575#endif /* CONFIG_X86 */
 576
 577	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
 578
 579	return 0;
 580}
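/*
 * For example, following the logic above: with an active Intel IOMMU the
 * driver keeps DMA mappings (vmw_dma_map_populate); with neither
 * force_dma_api nor force_coherent set and no IOMMU, it uses raw physical
 * page addresses (vmw_dma_phys).
 */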
 581
 582/**
 583 * vmw_dma_masks - set required page- and dma masks
 584 *
 585 * @dev: Pointer to struct drm-device
 586 *
 587 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 588 * restriction also for 64-bit systems.
 589 */
 590#ifdef CONFIG_INTEL_IOMMU
 591static int vmw_dma_masks(struct vmw_private *dev_priv)
 592{
 593	struct drm_device *dev = dev_priv->dev;
 594
 595	if (intel_iommu_enabled &&
 596	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
 597		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
 598		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
 599	}
 600	return 0;
 601}
 602#else
 603static int vmw_dma_masks(struct vmw_private *dev_priv)
 604{
 605	return 0;
 606}
 607#endif
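/*
 * For reference, DMA_BIT_MASK(44) is ((1ULL << 44) - 1) = 0xfffffffffff,
 * capping DMA addresses at 16 TiB. The 44-bit figure presumably stems
 * from GMR2 descriptors addressing pages with 32-bit page numbers
 * (32 + PAGE_SHIFT = 44 bits).
 */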
 608
 609static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 610{
 611	struct vmw_private *dev_priv;
 612	int ret;
 613	uint32_t svga_id;
 614	enum vmw_res_type i;
 615	bool refuse_dma = false;
 616
 617	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
 618	if (unlikely(dev_priv == NULL)) {
 619		DRM_ERROR("Failed allocating a device private struct.\n");
 620		return -ENOMEM;
 621	}
 622
 623	pci_set_master(dev->pdev);
 624
 625	dev_priv->dev = dev;
 626	dev_priv->vmw_chipset = chipset;
 627	dev_priv->last_read_seqno = (uint32_t) -100;
 628	mutex_init(&dev_priv->cmdbuf_mutex);
 629	mutex_init(&dev_priv->release_mutex);
 630	mutex_init(&dev_priv->binding_mutex);
 631	rwlock_init(&dev_priv->resource_lock);
 632	ttm_lock_init(&dev_priv->reservation_sem);
 633	spin_lock_init(&dev_priv->hw_lock);
 634	spin_lock_init(&dev_priv->waiter_lock);
 635	spin_lock_init(&dev_priv->cap_lock);
 636	spin_lock_init(&dev_priv->svga_lock);
 637
 638	for (i = vmw_res_context; i < vmw_res_max; ++i) {
 639		idr_init(&dev_priv->res_idr[i]);
 640		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
 641	}
 642
 643	mutex_init(&dev_priv->init_mutex);
 644	init_waitqueue_head(&dev_priv->fence_queue);
 645	init_waitqueue_head(&dev_priv->fifo_queue);
 646	dev_priv->fence_queue_waiters = 0;
 647	dev_priv->fifo_queue_waiters = 0;
 648
 649	dev_priv->used_memory_size = 0;
 650
 651	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
 652	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
 653	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
 654
 655	dev_priv->enable_fb = enable_fbdev;
 656
 657	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
 658	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
 659	if (svga_id != SVGA_ID_2) {
 660		ret = -ENOSYS;
 661		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
 662		goto out_err0;
 663	}
 664
 665	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
 666	ret = vmw_dma_select_mode(dev_priv);
 667	if (unlikely(ret != 0)) {
 668		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
 669		refuse_dma = true;
 670	}
 671
 672	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
 673	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
 674	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
 675	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
 676
 677	vmw_get_initial_size(dev_priv);
 678
 679	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
 680		dev_priv->max_gmr_ids =
 681			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
 682		dev_priv->max_gmr_pages =
 683			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
 684		dev_priv->memory_size =
 685			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
 686		dev_priv->memory_size -= dev_priv->vram_size;
 687	} else {
 688		/*
  689		 * An arbitrary limit of 512 MiB on surface
  690		 * memory; in practice all HWV8 hardware supports GMR2.
 691		 */
 692		dev_priv->memory_size = 512*1024*1024;
 693	}
 694	dev_priv->max_mob_pages = 0;
 695	dev_priv->max_mob_size = 0;
 696	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
 697		uint64_t mem_size =
 698			vmw_read(dev_priv,
 699				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
 700
 701		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
 702		dev_priv->prim_bb_mem =
 703			vmw_read(dev_priv,
 704				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
 705		dev_priv->max_mob_size =
 706			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
 707		dev_priv->stdu_max_width =
 708			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
 709		dev_priv->stdu_max_height =
 710			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);
 711
 712		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
 713			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
 714		dev_priv->texture_max_width = vmw_read(dev_priv,
 715						       SVGA_REG_DEV_CAP);
 716		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
 717			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
 718		dev_priv->texture_max_height = vmw_read(dev_priv,
 719							SVGA_REG_DEV_CAP);
 720	} else {
 721		dev_priv->texture_max_width = 8192;
 722		dev_priv->texture_max_height = 8192;
 723		dev_priv->prim_bb_mem = dev_priv->vram_size;
 724	}
 725
 726	vmw_print_capabilities(dev_priv->capabilities);
 727
 728	ret = vmw_dma_masks(dev_priv);
 729	if (unlikely(ret != 0))
 730		goto out_err0;
 731
 732	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
 733		DRM_INFO("Max GMR ids is %u\n",
 734			 (unsigned)dev_priv->max_gmr_ids);
 735		DRM_INFO("Max number of GMR pages is %u\n",
 736			 (unsigned)dev_priv->max_gmr_pages);
 737		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
 738			 (unsigned)dev_priv->memory_size / 1024);
 739	}
 740	DRM_INFO("Maximum display memory size is %u kiB\n",
 741		 dev_priv->prim_bb_mem / 1024);
 742	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
 743		 dev_priv->vram_start, dev_priv->vram_size / 1024);
 744	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
 745		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);
 746
 747	ret = vmw_ttm_global_init(dev_priv);
 748	if (unlikely(ret != 0))
 749		goto out_err0;
 750
 751
 752	vmw_master_init(&dev_priv->fbdev_master);
 753	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 754	dev_priv->active_master = &dev_priv->fbdev_master;
 755
 756	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
 757				       dev_priv->mmio_size, MEMREMAP_WB);
 758
 759	if (unlikely(dev_priv->mmio_virt == NULL)) {
 760		ret = -ENOMEM;
 761		DRM_ERROR("Failed mapping MMIO.\n");
 762		goto out_err3;
 763	}
 764
 765	/* Need mmio memory to check for fifo pitchlock cap. */
 766	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
 767	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
 768	    !vmw_fifo_have_pitchlock(dev_priv)) {
 769		ret = -ENOSYS;
 770		DRM_ERROR("Hardware has no pitchlock\n");
 771		goto out_err4;
 772	}
 773
 774	dev_priv->tdev = ttm_object_device_init
 775		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
 776
 777	if (unlikely(dev_priv->tdev == NULL)) {
 778		DRM_ERROR("Unable to initialize TTM object management.\n");
 779		ret = -ENOMEM;
 780		goto out_err4;
 781	}
 782
 783	dev->dev_private = dev_priv;
 784
 785	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
 786	dev_priv->stealth = (ret != 0);
 787	if (dev_priv->stealth) {
 788		/**
 789		 * Request at least the mmio PCI resource.
 790		 */
 791
 792		DRM_INFO("It appears like vesafb is loaded. "
 793			 "Ignore above error if any.\n");
 794		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
 795		if (unlikely(ret != 0)) {
 796			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
 797			goto out_no_device;
 798		}
 799	}
 800
 801	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
 802		ret = drm_irq_install(dev, dev->pdev->irq);
 803		if (ret != 0) {
 804			DRM_ERROR("Failed installing irq: %d\n", ret);
 805			goto out_no_irq;
 806		}
 807	}
 808
 809	dev_priv->fman = vmw_fence_manager_init(dev_priv);
 810	if (unlikely(dev_priv->fman == NULL)) {
 811		ret = -ENOMEM;
 812		goto out_no_fman;
 813	}
 814
 815	ret = ttm_bo_device_init(&dev_priv->bdev,
 816				 dev_priv->bo_global_ref.ref.object,
 817				 &vmw_bo_driver,
 818				 dev->anon_inode->i_mapping,
 819				 VMWGFX_FILE_PAGE_OFFSET,
 820				 false);
 821	if (unlikely(ret != 0)) {
 822		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
 823		goto out_no_bdev;
 824	}
 825
 826	/*
 827	 * Enable VRAM, but initially don't use it until SVGA is enabled and
 828	 * unhidden.
 829	 */
 830	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
 831			     (dev_priv->vram_size >> PAGE_SHIFT));
 832	if (unlikely(ret != 0)) {
 833		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
 834		goto out_no_vram;
 835	}
 836	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
 837
 838	dev_priv->has_gmr = true;
 839	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
 840	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
 841					 VMW_PL_GMR) != 0) {
 842		DRM_INFO("No GMR memory available. "
 843			 "Graphics memory resources are very limited.\n");
 844		dev_priv->has_gmr = false;
 845	}
 846
 847	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
 848		dev_priv->has_mob = true;
 849		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
 850				   VMW_PL_MOB) != 0) {
 851			DRM_INFO("No MOB memory available. "
 852				 "3D will be disabled.\n");
 853			dev_priv->has_mob = false;
 854		}
 855	}
 856
 857	if (dev_priv->has_mob) {
 858		spin_lock(&dev_priv->cap_lock);
 859		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
 860		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
 861		spin_unlock(&dev_priv->cap_lock);
 862	}
 863
 864
 865	ret = vmw_kms_init(dev_priv);
 866	if (unlikely(ret != 0))
 867		goto out_no_kms;
 868	vmw_overlay_init(dev_priv);
 869
 870	ret = vmw_request_device(dev_priv);
 871	if (ret)
 872		goto out_no_fifo;
 873
 874	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
 875
 876	if (dev_priv->enable_fb) {
 877		vmw_fifo_resource_inc(dev_priv);
 878		vmw_svga_enable(dev_priv);
 879		vmw_fb_init(dev_priv);
 880	}
 881
 882	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
 883	register_pm_notifier(&dev_priv->pm_nb);
 884
 885	return 0;
 886
 887out_no_fifo:
 888	vmw_overlay_close(dev_priv);
 889	vmw_kms_close(dev_priv);
 890out_no_kms:
 891	if (dev_priv->has_mob)
 892		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
 893	if (dev_priv->has_gmr)
 894		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
 895	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 896out_no_vram:
 897	(void)ttm_bo_device_release(&dev_priv->bdev);
 898out_no_bdev:
 899	vmw_fence_manager_takedown(dev_priv->fman);
 900out_no_fman:
 901	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
 902		drm_irq_uninstall(dev_priv->dev);
 903out_no_irq:
 904	if (dev_priv->stealth)
 905		pci_release_region(dev->pdev, 2);
 906	else
 907		pci_release_regions(dev->pdev);
 908out_no_device:
 909	ttm_object_device_release(&dev_priv->tdev);
 910out_err4:
 911	memunmap(dev_priv->mmio_virt);
 912out_err3:
 913	vmw_ttm_global_release(dev_priv);
 914out_err0:
 915	for (i = vmw_res_context; i < vmw_res_max; ++i)
 916		idr_destroy(&dev_priv->res_idr[i]);
 917
 918	if (dev_priv->ctx.staged_bindings)
 919		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
 920	kfree(dev_priv);
 921	return ret;
 922}
 923
 924static int vmw_driver_unload(struct drm_device *dev)
 925{
 926	struct vmw_private *dev_priv = vmw_priv(dev);
 927	enum vmw_res_type i;
 928
 929	unregister_pm_notifier(&dev_priv->pm_nb);
 930
 931	if (dev_priv->ctx.res_ht_initialized)
 932		drm_ht_remove(&dev_priv->ctx.res_ht);
 933	vfree(dev_priv->ctx.cmd_bounce);
 934	if (dev_priv->enable_fb) {
 935		vmw_fb_off(dev_priv);
 936		vmw_fb_close(dev_priv);
 937		vmw_fifo_resource_dec(dev_priv);
 938		vmw_svga_disable(dev_priv);
 939	}
 940
 941	vmw_kms_close(dev_priv);
 942	vmw_overlay_close(dev_priv);
 943
 944	if (dev_priv->has_gmr)
 945		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
 946	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 947
 948	vmw_release_device_early(dev_priv);
 949	if (dev_priv->has_mob)
 950		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
 951	(void) ttm_bo_device_release(&dev_priv->bdev);
 952	vmw_release_device_late(dev_priv);
 953	vmw_fence_manager_takedown(dev_priv->fman);
 954	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
 955		drm_irq_uninstall(dev_priv->dev);
 956	if (dev_priv->stealth)
 957		pci_release_region(dev->pdev, 2);
 958	else
 959		pci_release_regions(dev->pdev);
 960
 961	ttm_object_device_release(&dev_priv->tdev);
 962	memunmap(dev_priv->mmio_virt);
 963	if (dev_priv->ctx.staged_bindings)
 964		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
 965	vmw_ttm_global_release(dev_priv);
 966
 967	for (i = vmw_res_context; i < vmw_res_max; ++i)
 968		idr_destroy(&dev_priv->res_idr[i]);
 969
 970	kfree(dev_priv);
 971
 972	return 0;
 973}
 974
 975static void vmw_postclose(struct drm_device *dev,
 976			 struct drm_file *file_priv)
 977{
 978	struct vmw_fpriv *vmw_fp;
 979
 980	vmw_fp = vmw_fpriv(file_priv);
 981
 982	if (vmw_fp->locked_master) {
 983		struct vmw_master *vmaster =
 984			vmw_master(vmw_fp->locked_master);
 985
 986		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
 987		ttm_vt_unlock(&vmaster->lock);
 988		drm_master_put(&vmw_fp->locked_master);
 989	}
 990
 991	ttm_object_file_release(&vmw_fp->tfile);
 992	kfree(vmw_fp);
 993}
 994
 995static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 996{
 997	struct vmw_private *dev_priv = vmw_priv(dev);
 998	struct vmw_fpriv *vmw_fp;
 999	int ret = -ENOMEM;
1000
1001	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
1002	if (unlikely(vmw_fp == NULL))
1003		return ret;
1004
1005	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
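	/* The 10 above is assumed to be a hash order: 2^10 = 1024 buckets
	 * for this file's object lookup table. */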
1006	if (unlikely(vmw_fp->tfile == NULL))
1007		goto out_no_tfile;
1008
1009	file_priv->driver_priv = vmw_fp;
1010
1011	return 0;
1012
1013out_no_tfile:
1014	kfree(vmw_fp);
1015	return ret;
1016}
1017
1018static struct vmw_master *vmw_master_check(struct drm_device *dev,
1019					   struct drm_file *file_priv,
1020					   unsigned int flags)
1021{
1022	int ret;
1023	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
1024	struct vmw_master *vmaster;
1025
1026	if (file_priv->minor->type != DRM_MINOR_LEGACY ||
1027	    !(flags & DRM_AUTH))
1028		return NULL;
1029
1030	ret = mutex_lock_interruptible(&dev->master_mutex);
1031	if (unlikely(ret != 0))
1032		return ERR_PTR(-ERESTARTSYS);
1033
1034	if (file_priv->is_master) {
1035		mutex_unlock(&dev->master_mutex);
1036		return NULL;
1037	}
1038
1039	/*
1040	 * Check if we were previously master, but now dropped. In that
1041	 * case, allow at least render node functionality.
1042	 */
1043	if (vmw_fp->locked_master) {
1044		mutex_unlock(&dev->master_mutex);
1045
1046		if (flags & DRM_RENDER_ALLOW)
1047			return NULL;
1048
1049		DRM_ERROR("Dropped master trying to access ioctl that "
1050			  "requires authentication.\n");
1051		return ERR_PTR(-EACCES);
1052	}
1053	mutex_unlock(&dev->master_mutex);
1054
1055	/*
1056	 * Take the TTM lock. Possibly sleep waiting for the authenticating
1057	 * master to become master again, or for a SIGTERM if the
1058	 * authenticating master exits.
1059	 */
1060	vmaster = vmw_master(file_priv->master);
1061	ret = ttm_read_lock(&vmaster->lock, true);
1062	if (unlikely(ret != 0))
1063		vmaster = ERR_PTR(ret);
1064
1065	return vmaster;
1066}
1067
1068static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
1069			      unsigned long arg,
1070			      long (*ioctl_func)(struct file *, unsigned int,
1071						 unsigned long))
1072{
1073	struct drm_file *file_priv = filp->private_data;
1074	struct drm_device *dev = file_priv->minor->dev;
1075	unsigned int nr = DRM_IOCTL_NR(cmd);
1076	struct vmw_master *vmaster;
1077	unsigned int flags;
1078	long ret;
1079
1080	/*
1081	 * Do extra checking on driver private ioctls.
1082	 */
1083
1084	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
1085	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
1086		const struct drm_ioctl_desc *ioctl =
1087			&vmw_ioctls[nr - DRM_COMMAND_BASE];
1088
1089		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
1090			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
1091			if (unlikely(ret != 0))
1092				return ret;
1093
1094			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
1095				goto out_io_encoding;
1096
1097			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
1098							_IOC_SIZE(cmd));
1099		}
1100
1101		if (unlikely(ioctl->cmd != cmd))
1102			goto out_io_encoding;
1103
1104		flags = ioctl->flags;
1105	} else if (!drm_ioctl_flags(nr, &flags))
1106		return -EINVAL;
1107
1108	vmaster = vmw_master_check(dev, file_priv, flags);
1109	if (IS_ERR(vmaster)) {
1110		ret = PTR_ERR(vmaster);
1111
1112		if (ret != -ERESTARTSYS)
1113			DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
1114				 nr, ret);
1115		return ret;
1116	}
1117
1118	ret = ioctl_func(filp, cmd, arg);
1119	if (vmaster)
1120		ttm_read_unlock(&vmaster->lock);
1121
1122	return ret;
1123
1124out_io_encoding:
1125	DRM_ERROR("Invalid command format, ioctl %d\n",
1126		  nr - DRM_COMMAND_BASE);
1127
1128	return -EINVAL;
1129}
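/*
 * On the IOC_IN test above: DRM_IOW()-encoded commands carry only the
 * "write" direction bit, so (cmd & (IOC_IN | IOC_OUT)) == IOC_IN holds
 * for them. The execbuf argument struct has grown across driver versions,
 * which is why only the direction bits are validated and the user-supplied
 * _IOC_SIZE(cmd) is passed through to vmw_execbuf_ioctl().
 */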
1130
1131static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
1132			       unsigned long arg)
1133{
1134	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
1135}
1136
1137#ifdef CONFIG_COMPAT
1138static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
1139			     unsigned long arg)
1140{
1141	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
1142}
1143#endif
1144
1145static void vmw_lastclose(struct drm_device *dev)
1146{
1147}
1148
1149static void vmw_master_init(struct vmw_master *vmaster)
1150{
1151	ttm_lock_init(&vmaster->lock);
1152}
1153
1154static int vmw_master_create(struct drm_device *dev,
1155			     struct drm_master *master)
1156{
1157	struct vmw_master *vmaster;
1158
1159	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
1160	if (unlikely(vmaster == NULL))
1161		return -ENOMEM;
1162
1163	vmw_master_init(vmaster);
1164	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
1165	master->driver_priv = vmaster;
1166
1167	return 0;
1168}
1169
1170static void vmw_master_destroy(struct drm_device *dev,
1171			       struct drm_master *master)
1172{
1173	struct vmw_master *vmaster = vmw_master(master);
1174
1175	master->driver_priv = NULL;
1176	kfree(vmaster);
1177}
1178
1179static int vmw_master_set(struct drm_device *dev,
1180			  struct drm_file *file_priv,
1181			  bool from_open)
1182{
1183	struct vmw_private *dev_priv = vmw_priv(dev);
1184	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
1185	struct vmw_master *active = dev_priv->active_master;
1186	struct vmw_master *vmaster = vmw_master(file_priv->master);
1187	int ret = 0;
1188
1189	if (active) {
1190		BUG_ON(active != &dev_priv->fbdev_master);
1191		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
1192		if (unlikely(ret != 0))
1193			return ret;
1194
1195		ttm_lock_set_kill(&active->lock, true, SIGTERM);
1196		dev_priv->active_master = NULL;
1197	}
1198
1199	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
1200	if (!from_open) {
1201		ttm_vt_unlock(&vmaster->lock);
1202		BUG_ON(vmw_fp->locked_master != file_priv->master);
1203		drm_master_put(&vmw_fp->locked_master);
1204	}
1205
1206	dev_priv->active_master = vmaster;
1207	drm_sysfs_hotplug_event(dev);
1208
1209	return 0;
1210}
1211
1212static void vmw_master_drop(struct drm_device *dev,
1213			    struct drm_file *file_priv,
1214			    bool from_release)
1215{
1216	struct vmw_private *dev_priv = vmw_priv(dev);
1217	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
1218	struct vmw_master *vmaster = vmw_master(file_priv->master);
1219	int ret;
1220
1221	/**
1222	 * Make sure the master doesn't disappear while we have
1223	 * it locked.
1224	 */
1225
1226	vmw_fp->locked_master = drm_master_get(file_priv->master);
1227	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
1228	vmw_kms_legacy_hotspot_clear(dev_priv);
1229	if (unlikely((ret != 0))) {
1230		DRM_ERROR("Unable to lock TTM at VT switch.\n");
1231		drm_master_put(&vmw_fp->locked_master);
1232	}
1233
1234	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
1235
1236	if (!dev_priv->enable_fb)
1237		vmw_svga_disable(dev_priv);
1238
1239	dev_priv->active_master = &dev_priv->fbdev_master;
1240	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
1241	ttm_vt_unlock(&dev_priv->fbdev_master.lock);
1242
1243	if (dev_priv->enable_fb)
1244		vmw_fb_on(dev_priv);
1245}
1246
1247/**
1248 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
1249 *
1250 * @dev_priv: Pointer to device private struct.
1251 * Needs the reservation sem to be held in non-exclusive mode.
1252 */
1253static void __vmw_svga_enable(struct vmw_private *dev_priv)
1254{
1255	spin_lock(&dev_priv->svga_lock);
1256	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
1257		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
1258		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
1259	}
1260	spin_unlock(&dev_priv->svga_lock);
1261}
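/*
 * Note that writing SVGA_REG_ENABLE as the *value* above appears
 * intentional in this version: the register index SVGA_REG_ENABLE and
 * the flag SVGA_REG_ENABLE_ENABLE both happen to equal 1.
 */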
1262
1263/**
1264 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
1265 *
1266 * @dev_priv: Pointer to device private struct.
1267 */
1268void vmw_svga_enable(struct vmw_private *dev_priv)
1269{
1270	ttm_read_lock(&dev_priv->reservation_sem, false);
1271	__vmw_svga_enable(dev_priv);
1272	ttm_read_unlock(&dev_priv->reservation_sem);
1273}
1274
1275/**
1276 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
1277 *
1278 * @dev_priv: Pointer to device private struct.
1279 * Needs the reservation sem to be held in exclusive mode.
1280 * Will not empty VRAM. VRAM must be emptied by caller.
1281 */
1282static void __vmw_svga_disable(struct vmw_private *dev_priv)
1283{
1284	spin_lock(&dev_priv->svga_lock);
1285	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
1286		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
1287		vmw_write(dev_priv, SVGA_REG_ENABLE,
1288			  SVGA_REG_ENABLE_HIDE |
1289			  SVGA_REG_ENABLE_ENABLE);
1290	}
1291	spin_unlock(&dev_priv->svga_lock);
1292}
1293
1294/**
1295 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo
1296 * running.
1297 *
1298 * @dev_priv: Pointer to device private struct.
1299 * Will empty VRAM.
1300 */
1301void vmw_svga_disable(struct vmw_private *dev_priv)
1302{
1303	ttm_write_lock(&dev_priv->reservation_sem, false);
1304	spin_lock(&dev_priv->svga_lock);
1305	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
1306		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
1307		spin_unlock(&dev_priv->svga_lock);
1308		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
1309			DRM_ERROR("Failed evicting VRAM buffers.\n");
1310		vmw_write(dev_priv, SVGA_REG_ENABLE,
1311			  SVGA_REG_ENABLE_HIDE |
1312			  SVGA_REG_ENABLE_ENABLE);
1313	} else
1314		spin_unlock(&dev_priv->svga_lock);
1315	ttm_write_unlock(&dev_priv->reservation_sem);
1316}
1317
1318static void vmw_remove(struct pci_dev *pdev)
1319{
1320	struct drm_device *dev = pci_get_drvdata(pdev);
1321
1322	pci_disable_device(pdev);
1323	drm_put_dev(dev);
1324}
1325
1326static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
1327			      void *ptr)
1328{
1329	struct vmw_private *dev_priv =
1330		container_of(nb, struct vmw_private, pm_nb);
1331
1332	switch (val) {
1333	case PM_HIBERNATION_PREPARE:
1334		if (dev_priv->enable_fb)
1335			vmw_fb_off(dev_priv);
1336		ttm_suspend_lock(&dev_priv->reservation_sem);
1337
1338		/*
1339		 * This empties VRAM and unbinds all GMR bindings.
 1340		 * Buffer contents are moved to swappable memory.
1341		 */
1342		vmw_execbuf_release_pinned_bo(dev_priv);
1343		vmw_resource_evict_all(dev_priv);
1344		vmw_release_device_early(dev_priv);
1345		ttm_bo_swapout_all(&dev_priv->bdev);
1346		vmw_fence_fifo_down(dev_priv->fman);
1347		break;
1348	case PM_POST_HIBERNATION:
1349	case PM_POST_RESTORE:
1350		vmw_fence_fifo_up(dev_priv->fman);
1351		ttm_suspend_unlock(&dev_priv->reservation_sem);
1352		if (dev_priv->enable_fb)
1353			vmw_fb_on(dev_priv);
1354		break;
1355	case PM_RESTORE_PREPARE:
1356		break;
1357	default:
1358		break;
1359	}
1360	return 0;
1361}
1362
1363static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1364{
1365	struct drm_device *dev = pci_get_drvdata(pdev);
1366	struct vmw_private *dev_priv = vmw_priv(dev);
1367
1368	if (dev_priv->refuse_hibernation)
1369		return -EBUSY;
1370
1371	pci_save_state(pdev);
1372	pci_disable_device(pdev);
1373	pci_set_power_state(pdev, PCI_D3hot);
1374	return 0;
1375}
1376
1377static int vmw_pci_resume(struct pci_dev *pdev)
1378{
1379	pci_set_power_state(pdev, PCI_D0);
1380	pci_restore_state(pdev);
1381	return pci_enable_device(pdev);
1382}
1383
1384static int vmw_pm_suspend(struct device *kdev)
1385{
1386	struct pci_dev *pdev = to_pci_dev(kdev);
1387	struct pm_message dummy;
1388
1389	dummy.event = 0;
1390
1391	return vmw_pci_suspend(pdev, dummy);
1392}
1393
1394static int vmw_pm_resume(struct device *kdev)
1395{
1396	struct pci_dev *pdev = to_pci_dev(kdev);
1397
1398	return vmw_pci_resume(pdev);
1399}
1400
1401static int vmw_pm_freeze(struct device *kdev)
1402{
1403	struct pci_dev *pdev = to_pci_dev(kdev);
1404	struct drm_device *dev = pci_get_drvdata(pdev);
1405	struct vmw_private *dev_priv = vmw_priv(dev);
1406
1407	dev_priv->suspended = true;
1408	if (dev_priv->enable_fb)
1409		vmw_fifo_resource_dec(dev_priv);
1410
1411	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
1412		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
1413		if (dev_priv->enable_fb)
1414			vmw_fifo_resource_inc(dev_priv);
1415		WARN_ON(vmw_request_device_late(dev_priv));
1416		dev_priv->suspended = false;
1417		return -EBUSY;
1418	}
1419
1420	if (dev_priv->enable_fb)
1421		__vmw_svga_disable(dev_priv);
1422	
1423	vmw_release_device_late(dev_priv);
1424
1425	return 0;
1426}
1427
1428static int vmw_pm_restore(struct device *kdev)
1429{
1430	struct pci_dev *pdev = to_pci_dev(kdev);
1431	struct drm_device *dev = pci_get_drvdata(pdev);
1432	struct vmw_private *dev_priv = vmw_priv(dev);
1433	int ret;
1434
1435	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
1436	(void) vmw_read(dev_priv, SVGA_REG_ID);
1437
1438	if (dev_priv->enable_fb)
1439		vmw_fifo_resource_inc(dev_priv);
1440
1441	ret = vmw_request_device(dev_priv);
1442	if (ret)
1443		return ret;
1444
1445	if (dev_priv->enable_fb)
1446		__vmw_svga_enable(dev_priv);
1447
1448	dev_priv->suspended = false;
1449
1450	return 0;
1451}
1452
1453static const struct dev_pm_ops vmw_pm_ops = {
1454	.freeze = vmw_pm_freeze,
1455	.thaw = vmw_pm_restore,
1456	.restore = vmw_pm_restore,
1457	.suspend = vmw_pm_suspend,
1458	.resume = vmw_pm_resume,
1459};
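/*
 * For orientation: .freeze runs before the hibernation image is written,
 * .thaw reactivates the device in the same kernel afterwards, and
 * .restore runs in the freshly booted kernel when resuming from the
 * image; both of the latter map to vmw_pm_restore here.
 */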
1460
1461static const struct file_operations vmwgfx_driver_fops = {
1462	.owner = THIS_MODULE,
1463	.open = drm_open,
1464	.release = drm_release,
1465	.unlocked_ioctl = vmw_unlocked_ioctl,
1466	.mmap = vmw_mmap,
1467	.poll = vmw_fops_poll,
1468	.read = vmw_fops_read,
1469#if defined(CONFIG_COMPAT)
1470	.compat_ioctl = vmw_compat_ioctl,
1471#endif
1472	.llseek = noop_llseek,
1473};
1474
1475static struct drm_driver driver = {
1476	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
1477	DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
1478	.load = vmw_driver_load,
1479	.unload = vmw_driver_unload,
1480	.lastclose = vmw_lastclose,
1481	.irq_preinstall = vmw_irq_preinstall,
1482	.irq_postinstall = vmw_irq_postinstall,
1483	.irq_uninstall = vmw_irq_uninstall,
1484	.irq_handler = vmw_irq_handler,
1485	.get_vblank_counter = vmw_get_vblank_counter,
1486	.enable_vblank = vmw_enable_vblank,
1487	.disable_vblank = vmw_disable_vblank,
1488	.ioctls = vmw_ioctls,
1489	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
1490	.master_create = vmw_master_create,
1491	.master_destroy = vmw_master_destroy,
1492	.master_set = vmw_master_set,
1493	.master_drop = vmw_master_drop,
1494	.open = vmw_driver_open,
1495	.postclose = vmw_postclose,
1496	.set_busid = drm_pci_set_busid,
1497
1498	.dumb_create = vmw_dumb_create,
1499	.dumb_map_offset = vmw_dumb_map_offset,
1500	.dumb_destroy = vmw_dumb_destroy,
1501
1502	.prime_fd_to_handle = vmw_prime_fd_to_handle,
1503	.prime_handle_to_fd = vmw_prime_handle_to_fd,
1504
1505	.fops = &vmwgfx_driver_fops,
1506	.name = VMWGFX_DRIVER_NAME,
1507	.desc = VMWGFX_DRIVER_DESC,
1508	.date = VMWGFX_DRIVER_DATE,
1509	.major = VMWGFX_DRIVER_MAJOR,
1510	.minor = VMWGFX_DRIVER_MINOR,
1511	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
1512};
1513
1514static struct pci_driver vmw_pci_driver = {
1515	.name = VMWGFX_DRIVER_NAME,
1516	.id_table = vmw_pci_id_list,
1517	.probe = vmw_probe,
1518	.remove = vmw_remove,
1519	.driver = {
1520		.pm = &vmw_pm_ops
1521	}
1522};
1523
1524static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1525{
1526	return drm_get_pci_dev(pdev, ent, &driver);
1527}
1528
1529static int __init vmwgfx_init(void)
1530{
1531	int ret;
1532
1533#ifdef CONFIG_VGA_CONSOLE
1534	if (vgacon_text_force())
1535		return -EINVAL;
1536#endif
1537
1538	ret = drm_pci_init(&driver, &vmw_pci_driver);
1539	if (ret)
1540		DRM_ERROR("Failed initializing DRM.\n");
1541	return ret;
1542}
1543
1544static void __exit vmwgfx_exit(void)
1545{
1546	drm_pci_exit(&driver, &vmw_pci_driver);
1547}
1548
1549module_init(vmwgfx_init);
1550module_exit(vmwgfx_exit);
1551
1552MODULE_AUTHOR("VMware Inc. and others");
1553MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
1554MODULE_LICENSE("GPL and additional rights");
1555MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
1556	       __stringify(VMWGFX_DRIVER_MINOR) "."
1557	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
1558	       "0");
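/*
 * The version macros come from vmwgfx_drv.h; the string above expands to,
 * e.g., "2.10.0.0" if MAJOR/MINOR/PATCHLEVEL are 2/10/0.
 */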
v3.15
   1/**************************************************************************
   2 *
   3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27#include <linux/module.h>
 
  28
  29#include <drm/drmP.h>
  30#include "vmwgfx_drv.h"
 
  31#include <drm/ttm/ttm_placement.h>
  32#include <drm/ttm/ttm_bo_driver.h>
  33#include <drm/ttm/ttm_object.h>
  34#include <drm/ttm/ttm_module.h>
  35#include <linux/dma_remapping.h>
  36
  37#define VMWGFX_DRIVER_NAME "vmwgfx"
  38#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
  39#define VMWGFX_CHIP_SVGAII 0
  40#define VMW_FB_RESERVATION 0
  41
  42#define VMW_MIN_INITIAL_WIDTH 800
  43#define VMW_MIN_INITIAL_HEIGHT 600
  44
  45
  46/**
  47 * Fully encoded drm commands. Might move to vmw_drm.h
  48 */
  49
  50#define DRM_IOCTL_VMW_GET_PARAM					\
  51	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
  52		 struct drm_vmw_getparam_arg)
  53#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
  54	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
  55		union drm_vmw_alloc_dmabuf_arg)
  56#define DRM_IOCTL_VMW_UNREF_DMABUF				\
  57	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
  58		struct drm_vmw_unref_dmabuf_arg)
  59#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
  60	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
  61		 struct drm_vmw_cursor_bypass_arg)
  62
  63#define DRM_IOCTL_VMW_CONTROL_STREAM				\
  64	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
  65		 struct drm_vmw_control_stream_arg)
  66#define DRM_IOCTL_VMW_CLAIM_STREAM				\
  67	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
  68		 struct drm_vmw_stream_arg)
  69#define DRM_IOCTL_VMW_UNREF_STREAM				\
  70	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
  71		 struct drm_vmw_stream_arg)
  72
  73#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
  74	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
  75		struct drm_vmw_context_arg)
  76#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
  77	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		 struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
		 struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
		 struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
		 struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
		 struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
		 struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
		 struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
		 struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
		 struct drm_vmw_synccpu_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}

/**
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers; mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_MASTER | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_dmabuf_synccpu_ioctl,
		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
};

static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);


static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct ttm_buffer_object *bo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the bo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	ret = ttm_bo_create(&dev_priv->bdev,
			    PAGE_SIZE,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, NULL,
			    &bo);

	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, true, false, 0);
	BUG_ON(ret != 0);

	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin(bo, false);
	ttm_bo_unreserve(bo);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		ttm_bo_unref(&bo);
	} else
		dev_priv->dummy_query_bo = bo;

	return ret;
}

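/**
 * vmw_request_device - bring the device up for command submission
 *
 * @dev_priv: Pointer to a struct vmw_private.
 *
 * Initializes the command FIFO and fence processing, sets up the
 * otables when the device supports MOBs, and finally creates the
 * dummy query buffer. On failure, previously initialized facilities
 * are torn down again in reverse order.
 */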
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			goto out_no_mob;
		}
	}
	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->has_mob)
		vmw_otables_takedown(dev_priv);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

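/**
 * vmw_release_device - release the resources taken by vmw_request_device
 *
 * @dev_priv: Pointer to a struct vmw_private.
 *
 * Tears down, in reverse order, what vmw_request_device set up: the
 * dummy query buffer, the otables (if present), fence processing and
 * finally the command FIFO.
 */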
static void vmw_release_device(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	ttm_bo_unref(&dev_priv->dummy_query_bo);
	if (dev_priv->has_mob)
		vmw_otables_takedown(dev_priv);
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}


/**
 * Increase the 3d resource refcount.
 * If the count was previously zero, initialize the fifo, switching to svga
 * mode. Note that the master holds a ref as well, and may request an
 * explicit switch to svga mode if fb is not running, using @unhide_svga.
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
			bool unhide_svga)
{
	int ret = 0;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(dev_priv->num_3d_resources++ == 0)) {
		ret = vmw_request_device(dev_priv);
		if (unlikely(ret != 0))
			--dev_priv->num_3d_resources;
	} else if (unhide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) &
			  ~SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	mutex_unlock(&dev_priv->release_mutex);
	return ret;
}

/**
 * Decrease the 3d resource refcount.
 * If the count reaches zero, disable the fifo, switching to vga mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
			 bool hide_svga)
{
	int32_t n3d;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(--dev_priv->num_3d_resources == 0))
		vmw_release_device(dev_priv);
	else if (hide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) |
			  SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	n3d = (int32_t) dev_priv->num_3d_resources;
	mutex_unlock(&dev_priv->release_mutex);

	BUG_ON(n3d < 0);
}

/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading the SVGA_REG_[WIDTH|HEIGHT] registers and then
 * clamping the values against the fb_max_[width|height] fields and
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, they are reset to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
#ifdef CONFIG_X86
	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		dev_priv->map_mode = vmw_dma_map_populate;
		goto out_fixup;
	}
#endif

	if (!(vmw_force_iommu || vmw_force_coherent)) {
		dev_priv->map_mode = vmw_dma_phys;
		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
		return 0;
	}

	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
		dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
	if (dev_priv->map_mode == vmw_dma_map_populate &&
	    vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
	/*
	 * No coherent page pool
	 */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;
#endif

#else /* CONFIG_X86 */
	dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to a struct vmw_private.
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (intel_iommu_enabled &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
	}
	return 0;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	return 0;
}
#endif

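/**
 * vmw_driver_load - initialize the driver for a newly bound device
 *
 * @dev: Pointer to the struct drm_device.
 * @chipset: Chipset identifier from the matched PCI id.
 *
 * Negotiates the SVGA register ID, reads the device capabilities,
 * selects a DMA mapping mode and brings up TTM, the memory managers,
 * the MMIO mapping, interrupts, the fence manager and the KMS and
 * overlay subsystems. Fbdev emulation is started last, if enabled.
 * On error, all previously initialized state is unwound.
 */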
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->hw_mutex);
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	atomic_set(&dev_priv->fifo_queue_waiters, 0);

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		mutex_unlock(&dev_priv->hw_mutex);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
	} else
		dev_priv->prim_bb_mem = dev_priv->vram_size;

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0)) {
		mutex_unlock(&dev_priv->hw_mutex);
		goto out_err0;
	}

	if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size))
		dev_priv->prim_bb_mem = dev_priv->vram_size;

	mutex_unlock(&dev_priv->hw_mutex);

	vmw_print_capabilities(dev_priv->capabilities);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;


	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;


	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_err1;
	}

	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_err2;
	}

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
					       dev_priv->mmio_size);

	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
					 dev_priv->mmio_size);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err4;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	vmw_kms_save_vga(dev_priv);

	/* Start kms and overlay systems, needs fifo. */
	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	if (dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv, true);
		if (unlikely(ret != 0))
			goto out_no_fifo;
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	vmw_kms_restore_vga(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	iounmap(dev_priv->mmio_virt);
	arch_phys_wc_del(dev_priv->mmio_mtrr);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);
	return ret;
}

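/**
 * vmw_driver_unload - tear down the driver on device unbind
 *
 * @dev: Pointer to the struct drm_device.
 *
 * Releases everything set up by vmw_driver_load in roughly the reverse
 * order: fbdev, KMS and overlay, the fence manager, interrupts, PCI
 * regions, the TTM object device and memory managers, and finally the
 * device private structure itself.
 */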
static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	if (dev_priv->ctx.cmd_bounce)
		vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_close(dev_priv);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, false);
	}
	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	iounmap(dev_priv->mmio_virt);
	arch_phys_wc_del(dev_priv->mmio_mtrr);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
	(void)ttm_bo_device_release(&dev_priv->bdev);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);

	return 0;
}

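/**
 * vmw_preclose - called before a file on the device is closed
 *
 * @dev: Pointer to the struct drm_device.
 * @file_priv: The file about to be closed.
 *
 * Removes any pending fence events belonging to this file so that
 * they are not delivered to a stale file private.
 */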
static void vmw_preclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}

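/**
 * vmw_postclose - clean up the per-file driver state
 *
 * @dev: Pointer to the struct drm_device.
 * @file_priv: The file being closed.
 *
 * Releases a leftover locked master, if any, then destroys the compat
 * shader manager and the TTM object file before freeing the private.
 */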
static void vmw_postclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	vmw_compat_shader_man_destroy(vmw_fp->shman);
	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

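/**
 * vmw_driver_open - set up per-file driver state
 *
 * @dev: Pointer to the struct drm_device.
 * @file_priv: The file being opened.
 *
 * Allocates a struct vmw_fpriv and initializes its fence event list,
 * TTM object file and compat shader manager.
 */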
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	INIT_LIST_HEAD(&vmw_fp->fence_events);
	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
	if (IS_ERR(vmw_fp->shman))
		goto out_no_shman;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_shman:
	ttm_object_file_release(&vmw_fp->tfile);
out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

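/**
 * vmw_master_check - check whether the caller may use an authenticated ioctl
 *
 * @dev: Pointer to the struct drm_device.
 * @file_priv: The calling file.
 * @flags: The DRM flags of the ioctl being called.
 *
 * For DRM_AUTH ioctls on the legacy node, refuses dropped masters and
 * takes the TTM read lock, possibly sleeping until the authenticating
 * master becomes master again. Returns NULL if no lock was needed, the
 * locked master on success, or an ERR_PTR on failure.
 */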
static struct vmw_master *vmw_master_check(struct drm_device *dev,
					   struct drm_file *file_priv,
					   unsigned int flags)
{
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster;

	if (file_priv->minor->type != DRM_MINOR_LEGACY ||
	    !(flags & DRM_AUTH))
		return NULL;

	ret = mutex_lock_interruptible(&dev->master_mutex);
	if (unlikely(ret != 0))
		return ERR_PTR(-ERESTARTSYS);

	if (file_priv->is_master) {
		mutex_unlock(&dev->master_mutex);
		return NULL;
	}

	/*
	 * Check if we were previously master, but now dropped.
	 */
	if (vmw_fp->locked_master) {
		mutex_unlock(&dev->master_mutex);
		DRM_ERROR("Dropped master trying to access ioctl that "
			  "requires authentication.\n");
		return ERR_PTR(-EACCES);
	}
	mutex_unlock(&dev->master_mutex);

	/*
	 * Taking the drm_global_mutex after the TTM lock might deadlock.
	 */
	if (!(flags & DRM_UNLOCKED)) {
		DRM_ERROR("Refusing locked ioctl access.\n");
		return ERR_PTR(-EDEADLK);
	}

	/*
	 * Take the TTM lock. Possibly sleep waiting for the authenticating
	 * master to become master again, or for a SIGTERM if the
	 * authenticating master exits.
	 */
	vmaster = vmw_master(file_priv->master);
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		vmaster = ERR_PTR(ret);

	return vmaster;
}

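/**
 * vmw_generic_ioctl - common ioctl entry point
 *
 * @filp: The file pointer.
 * @cmd: The ioctl command.
 * @arg: The ioctl argument.
 * @ioctl_func: The core DRM ioctl handler to forward to.
 *
 * Validates driver-private ioctl numbers and command formats, performs
 * the master/authentication check, and releases the TTM read lock, if
 * taken, once the wrapped handler returns.
 */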
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	struct vmw_master *vmaster;
	unsigned int flags;
	long ret;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (unlikely(ioctl->cmd_drv != cmd)) {
			DRM_ERROR("Invalid command format, ioctl %d\n",
				  nr - DRM_COMMAND_BASE);
			return -EINVAL;
		}
		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	vmaster = vmw_master_check(dev, file_priv, flags);
	if (unlikely(IS_ERR(vmaster))) {
		DRM_INFO("IOCTL ERROR %d\n", nr);
		return PTR_ERR(vmaster);
	}

	ret = ioctl_func(filp, cmd, arg);
	if (vmaster)
		ttm_read_unlock(&vmaster->lock);

	return ret;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

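/**
 * vmw_lastclose - turn off all CRTCs when the last file is closed
 *
 * @dev: Pointer to the struct drm_device.
 *
 * Disables every CRTC by committing an empty mode set, returning the
 * device to an unconfigured display state.
 */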
static void vmw_lastclose(struct drm_device *dev)
{
	struct drm_crtc *crtc;
	struct drm_mode_set set;
	int ret;

	set.x = 0;
	set.y = 0;
	set.fb = NULL;
	set.mode = NULL;
	set.connectors = NULL;
	set.num_connectors = 0;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		set.crtc = crtc;
		ret = drm_mode_set_config_internal(&set);
		WARN_ON(ret != 0);
	}

}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
	INIT_LIST_HEAD(&vmaster->fb_surf);
	mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}


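/**
 * vmw_master_set - make the given file's master the active master
 *
 * @dev: Pointer to the struct drm_device.
 * @file_priv: The file whose master becomes active.
 * @from_open: Whether the call originates from a first open.
 *
 * If fbdev emulation is disabled, first brings up the 3D resources and
 * disables traces. The previously active (fbdev) master is locked out
 * and VRAM is evicted before the new master takes over.
 */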
static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (!dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv, true);
		if (unlikely(ret != 0))
			return ret;
		vmw_kms_save_vga(dev_priv);
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			goto out_no_active_lock;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to clean VRAM on "
				  "master drop.\n");
		}

		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;

out_no_active_lock:
	if (!dev_priv->enable_fb) {
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
	}
	return ret;
}

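/**
 * vmw_master_drop - called when the active master is dropped
 *
 * @dev: Pointer to the struct drm_device.
 * @file_priv: The file whose master is being dropped.
 * @from_release: Whether the call originates from a file release.
 *
 * Locks out the dropping master, releases pinned buffers and, if fbdev
 * emulation is disabled, evicts VRAM and switches back to VGA. The
 * fbdev master then becomes the active master again.
 */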
static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/**
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	vmw_execbuf_release_pinned_bo(dev_priv);

	if (!dev_priv->enable_fb) {
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0))
			DRM_ERROR("Unable to clean VRAM on master drop.\n");
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}

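/**
 * vmw_remove - called on PCI device removal
 *
 * @pdev: The PCI device being removed.
 *
 * Unregisters the DRM device, which in turn invokes vmw_driver_unload.
 */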
static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

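/**
 * vmwgfx_pm_notifier - PM notifier callback
 *
 * @nb: The notifier block this callback was registered with.
 * @val: The PM event.
 * @ptr: Unused.
 *
 * On suspend or hibernation prepare, takes the suspend lock and evicts
 * all resources so that buffer contents end up in swappable memory.
 * The lock is released again once the transition is over.
 */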
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ttm_suspend_lock(&dev_priv->reservation_sem);

		/**
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		vmw_execbuf_release_pinned_bo(dev_priv);
		vmw_resource_evict_all(dev_priv);
		ttm_bo_swapout_all(&dev_priv->bdev);

		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
	case PM_POST_RESTORE:
		ttm_suspend_unlock(&dev_priv->reservation_sem);

		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}

/**
 * These might not be needed with the virtual SVGA device.
 */

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->num_3d_resources != 0) {
		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");
		return -EBUSY;
	}

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

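/**
 * vmw_pm_prepare - called before a suspend or hibernation transition
 *
 * @kdev: Pointer to the struct device.
 *
 * Marks the device suspended and drops the fbdev 3D reference. If
 * other 3D resources are still active, the transition is backed out
 * again and -EBUSY is returned.
 */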
static int vmw_pm_prepare(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	/**
	 * Release 3d reference held by fbdev and potentially
	 * stop fifo.
	 */
	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_3d_resource_dec(dev_priv, true);

	if (dev_priv->num_3d_resources != 0) {

		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");

		if (dev_priv->enable_fb)
			vmw_3d_resource_inc(dev_priv, true);
		dev_priv->suspended = false;
		return -EBUSY;
	}

	return 0;
}

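/**
 * vmw_pm_complete - called after a suspend or hibernation transition
 *
 * @kdev: Pointer to the struct device.
 *
 * Re-negotiates the SVGA register ID, reclaims the fbdev 3D reference
 * dropped in vmw_pm_prepare and clears the suspended flag.
 */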
static void vmw_pm_complete(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);
	mutex_unlock(&dev_priv->hw_mutex);

	/**
	 * Reclaim 3d reference held by fbdev and potentially
	 * start fifo.
	 */
	if (dev_priv->enable_fb)
		vmw_3d_resource_inc(dev_priv, false);

	dev_priv->suspended = false;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.prepare = vmw_pm_prepare,
	.complete = vmw_pm_complete,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.preclose = vmw_preclose,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;
	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");