/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>
#include <linux/console.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
		union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
		 struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
		 struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
		 struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
		 struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		 struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
		 struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
		 struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
		 struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
		 struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
		 struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
		 struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
		 struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
		 struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,	\
		struct drm_vmw_context_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}

/**
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers; mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_dmabuf_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
};

static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO("  DX Features.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_dma_buffer *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
			      &vmw_sys_ne_placement, false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_dmabuf_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
					       256*4096, 2*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}

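/**
 * vmw_request_device - Bring up the device for command submission.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * Initializes the FIFO and fence handling, creates the command buffer
 * manager if available, performs the late setup and creates the dummy
 * query buffer object, unwinding everything on failure.
 */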
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->has_dx = false;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * vmw_get_initial_size - Set the initial_[width|height] fields on the
 * given vmw_private.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * It does so by reading the SVGA_REG_[WIDTH|HEIGHT] registers and then
 * clamping the values to the fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT] minimums.
 * If the values appear to be invalid, they are set to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
#ifdef CONFIG_X86
	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		dev_priv->map_mode = vmw_dma_map_populate;
		goto out_fixup;
	}
#endif

	if (!(vmw_force_iommu || vmw_force_coherent)) {
		dev_priv->map_mode = vmw_dma_phys;
		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
		return 0;
	}

	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
		dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
	if (dev_priv->map_mode == vmw_dma_map_populate &&
	    vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
	/*
	 * No coherent page pool
	 */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;
#endif

#else /* CONFIG_X86 */
	dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to struct vmw_private.
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (intel_iommu_enabled &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
	}
	return 0;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	return 0;
}
#endif

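/**
 * vmw_driver_load - DRM .load callback, run when the device is registered.
 *
 * @dev: Pointer to the struct drm_device.
 * @chipset: Chip id from the matching PCI id table entry.
 *
 * Allocates the device private struct, probes SVGA registers and
 * capabilities, and sets up DMA, TTM memory managers, MMIO, IRQ, fence
 * handling, KMS and, optionally, the fbdev layer.
 */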
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/*
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}

	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err3:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}

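/**
 * vmw_driver_unload - DRM .unload callback; tears down everything set up
 * by vmw_driver_load in reverse order.
 *
 * @dev: Pointer to the struct drm_device.
 */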
static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	memunmap(dev_priv->mmio_virt);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);

	return 0;
}

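/**
 * vmw_postclose - DRM file close callback. Releases a possibly locked
 * master and the per-file TTM object state.
 */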
static void vmw_postclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

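/**
 * vmw_master_check - Check whether an ioctl requires and holds master
 * privileges, taking the TTM read lock when command submission may proceed.
 *
 * Returns NULL if no master reference is needed, a pointer to the
 * read-locked master on success, or an ERR_PTR on failure.
 */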
static struct vmw_master *vmw_master_check(struct drm_device *dev,
					   struct drm_file *file_priv,
					   unsigned int flags)
{
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster;

	if (file_priv->minor->type != DRM_MINOR_LEGACY ||
	    !(flags & DRM_AUTH))
		return NULL;

	ret = mutex_lock_interruptible(&dev->master_mutex);
	if (unlikely(ret != 0))
		return ERR_PTR(-ERESTARTSYS);

	if (file_priv->is_master) {
		mutex_unlock(&dev->master_mutex);
		return NULL;
	}

	/*
	 * Check if we were previously master, but now dropped. In that
	 * case, allow at least render node functionality.
	 */
	if (vmw_fp->locked_master) {
		mutex_unlock(&dev->master_mutex);

		if (flags & DRM_RENDER_ALLOW)
			return NULL;

		DRM_ERROR("Dropped master trying to access ioctl that "
			  "requires authentication.\n");
		return ERR_PTR(-EACCES);
	}
	mutex_unlock(&dev->master_mutex);

	/*
	 * Take the TTM lock. Possibly sleep waiting for the authenticating
	 * master to become master again, or for a SIGTERM if the
	 * authenticating master exits.
	 */
	vmaster = vmw_master(file_priv->master);
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		vmaster = ERR_PTR(ret);

	return vmaster;
}

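/**
 * vmw_generic_ioctl - Common ioctl entry point for the native and compat
 * paths.
 *
 * Performs extra permission and encoding checks on the driver-private
 * ioctls, dispatches VMW_EXECBUF directly, and brackets the actual ioctl
 * call with the master/TTM read lock obtained from vmw_master_check().
 */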
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	struct vmw_master *vmaster;
	unsigned int flags;
	long ret;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
			if (unlikely(ret != 0))
				return ret;

			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
				goto out_io_encoding;

			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
							_IOC_SIZE(cmd));
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	vmaster = vmw_master_check(dev, file_priv, flags);
	if (IS_ERR(vmaster)) {
		ret = PTR_ERR(vmaster);

		if (ret != -ERESTARTSYS)
			DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
				 nr, ret);
		return ret;
	}

	ret = ioctl_func(filp, cmd, arg);
	if (vmaster)
		ttm_read_unlock(&vmaster->lock);

	return ret;

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_lastclose(struct drm_device *dev)
{
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			return ret;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;
	drm_sysfs_hotplug_event(dev);

	return 0;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);

	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	ttm_write_lock(&dev_priv->reservation_sem, false);
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		spin_unlock(&dev_priv->svga_lock);
		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	} else
		spin_unlock(&dev_priv->svga_lock);
	ttm_write_unlock(&dev_priv->reservation_sem);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	pci_disable_device(pdev);
	drm_put_dev(dev);
}

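/**
 * vmwgfx_pm_notifier - PM notifier that quiesces the device around
 * hibernation: idles, evicts and swaps out buffers on
 * PM_HIBERNATION_PREPARE, and brings fence handling back up on
 * PM_POST_HIBERNATION / PM_POST_RESTORE.
 */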
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		if (dev_priv->enable_fb)
			vmw_fb_off(dev_priv);
		ttm_suspend_lock(&dev_priv->reservation_sem);

		/*
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		vmw_execbuf_release_pinned_bo(dev_priv);
		vmw_resource_evict_all(dev_priv);
		vmw_release_device_early(dev_priv);
		ttm_bo_swapout_all(&dev_priv->bdev);
		vmw_fence_fifo_down(dev_priv->fman);
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		vmw_fence_fifo_up(dev_priv->fman);
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

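/**
 * vmw_pm_freeze - Hibernation freeze callback. Refuses to freeze while 3D
 * resources are still active; otherwise disables SVGA and releases the
 * device for command submission.
 */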
static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);

	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspended = false;
		return -EBUSY;
	}

	if (dev_priv->enable_fb)
		__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);

	return 0;
}

static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	dev_priv->suspended = false;

	return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,
	.set_busid = drm_pci_set_busid,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force())
		return -EINVAL;
#endif

	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");
v5.4
   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA
 
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include <linux/console.h>
  29#include <linux/dma-mapping.h>
  30#include <linux/module.h>
  31
  32#include <drm/drm_drv.h>
  33#include <drm/drm_ioctl.h>
  34#include <drm/drm_pci.h>
  35#include <drm/drm_sysfs.h>
  36#include <drm/ttm/ttm_bo_driver.h>
 
  37#include <drm/ttm/ttm_module.h>
  38#include <drm/ttm/ttm_placement.h>
  39
  40#include "ttm_object.h"
  41#include "vmwgfx_binding.h"
  42#include "vmwgfx_drv.h"
  43
 
  44#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
  45#define VMWGFX_CHIP_SVGAII 0
  46#define VMW_FB_RESERVATION 0
  47
  48#define VMW_MIN_INITIAL_WIDTH 800
  49#define VMW_MIN_INITIAL_HEIGHT 600
  50
  51#ifndef VMWGFX_GIT_VERSION
  52#define VMWGFX_GIT_VERSION "Unknown"
  53#endif
  54
  55#define VMWGFX_REPO "In Tree"
  56
  57#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
  58
  59
  60/**
  61 * Fully encoded drm commands. Might move to vmw_drm.h
  62 */
  63
  64#define DRM_IOCTL_VMW_GET_PARAM					\
  65	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
  66		 struct drm_vmw_getparam_arg)
  67#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
  68	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
  69		union drm_vmw_alloc_dmabuf_arg)
  70#define DRM_IOCTL_VMW_UNREF_DMABUF				\
  71	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
  72		struct drm_vmw_unref_dmabuf_arg)
  73#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
  74	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
  75		 struct drm_vmw_cursor_bypass_arg)
  76
  77#define DRM_IOCTL_VMW_CONTROL_STREAM				\
  78	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
  79		 struct drm_vmw_control_stream_arg)
  80#define DRM_IOCTL_VMW_CLAIM_STREAM				\
  81	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
  82		 struct drm_vmw_stream_arg)
  83#define DRM_IOCTL_VMW_UNREF_STREAM				\
  84	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
  85		 struct drm_vmw_stream_arg)
  86
  87#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
  88	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
  89		struct drm_vmw_context_arg)
  90#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
  91	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
  92		struct drm_vmw_context_arg)
  93#define DRM_IOCTL_VMW_CREATE_SURFACE				\
  94	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
  95		 union drm_vmw_surface_create_arg)
  96#define DRM_IOCTL_VMW_UNREF_SURFACE				\
  97	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
  98		 struct drm_vmw_surface_arg)
  99#define DRM_IOCTL_VMW_REF_SURFACE				\
 100	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
 101		 union drm_vmw_surface_reference_arg)
 102#define DRM_IOCTL_VMW_EXECBUF					\
 103	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
 104		struct drm_vmw_execbuf_arg)
 105#define DRM_IOCTL_VMW_GET_3D_CAP				\
 106	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
 107		 struct drm_vmw_get_3d_cap_arg)
 108#define DRM_IOCTL_VMW_FENCE_WAIT				\
 109	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
 110		 struct drm_vmw_fence_wait_arg)
 111#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
 112	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
 113		 struct drm_vmw_fence_signaled_arg)
 114#define DRM_IOCTL_VMW_FENCE_UNREF				\
 115	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
 116		 struct drm_vmw_fence_arg)
 117#define DRM_IOCTL_VMW_FENCE_EVENT				\
 118	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
 119		 struct drm_vmw_fence_event_arg)
 120#define DRM_IOCTL_VMW_PRESENT					\
 121	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
 122		 struct drm_vmw_present_arg)
 123#define DRM_IOCTL_VMW_PRESENT_READBACK				\
 124	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
 125		 struct drm_vmw_present_readback_arg)
 126#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
 127	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
 128		 struct drm_vmw_update_layout_arg)
 129#define DRM_IOCTL_VMW_CREATE_SHADER				\
 130	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
 131		 struct drm_vmw_shader_create_arg)
 132#define DRM_IOCTL_VMW_UNREF_SHADER				\
 133	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
 134		 struct drm_vmw_shader_arg)
 135#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
 136	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
 137		 union drm_vmw_gb_surface_create_arg)
 138#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
 139	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
 140		 union drm_vmw_gb_surface_reference_arg)
 141#define DRM_IOCTL_VMW_SYNCCPU					\
 142	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
 143		 struct drm_vmw_synccpu_arg)
 144#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			\
 145	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,	\
 146		struct drm_vmw_context_arg)
 147#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT				\
 148	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT,	\
 149		union drm_vmw_gb_surface_create_ext_arg)
 150#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT				\
 151	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT,		\
 152		union drm_vmw_gb_surface_reference_ext_arg)
 153
 154/**
 155 * The core DRM version of this macro doesn't account for
 156 * DRM_COMMAND_BASE.
 157 */
 158
 159#define VMW_IOCTL_DEF(ioctl, func, flags) \
 160  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
 161
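    /*
     * For illustration: with the macro above, an entry such as
     * VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, DRM_AUTH)
     * expands to
     * [DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE] =
     *         {DRM_IOCTL_VMW_GET_PARAM, DRM_AUTH, vmw_getparam_ioctl},
     * so the vmw_ioctls[] table below is indexed by the driver-private
     * ioctl number.
     */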
 162/**
 163 * Ioctl definitions.
 164 */
 165
 166static const struct drm_ioctl_desc vmw_ioctls[] = {
 167	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
 168		      DRM_AUTH | DRM_RENDER_ALLOW),
 169	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
 170		      DRM_AUTH | DRM_RENDER_ALLOW),
 171	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
 172		      DRM_RENDER_ALLOW),
 173	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
 174		      vmw_kms_cursor_bypass_ioctl,
 175		      DRM_MASTER),
 176
 177	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
 178		      DRM_MASTER),
 179	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
 180		      DRM_MASTER),
 181	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
 182		      DRM_MASTER),
 183
 184	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
 185		      DRM_AUTH | DRM_RENDER_ALLOW),
 186	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
 187		      DRM_RENDER_ALLOW),
 188	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
 189		      DRM_AUTH | DRM_RENDER_ALLOW),
 190	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
 191		      DRM_RENDER_ALLOW),
 192	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
 193		      DRM_AUTH | DRM_RENDER_ALLOW),
 194	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, DRM_AUTH |
 195		      DRM_RENDER_ALLOW),
 196	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
 197		      DRM_RENDER_ALLOW),
 198	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
 199		      vmw_fence_obj_signaled_ioctl,
 200		      DRM_RENDER_ALLOW),
 201	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
 202		      DRM_RENDER_ALLOW),
 203	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
 204		      DRM_AUTH | DRM_RENDER_ALLOW),
 205	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
 206		      DRM_AUTH | DRM_RENDER_ALLOW),
 207
  208	/* These allow direct access to the framebuffers; mark as master only. */
 209	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
 210		      DRM_MASTER | DRM_AUTH),
 211	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
 212		      vmw_present_readback_ioctl,
 213		      DRM_MASTER | DRM_AUTH),
 214	/*
  215	 * The permissions of the ioctl below are overridden in
  216	 * vmw_generic_ioctl(): we require either
  217	 * DRM_MASTER or capable(CAP_SYS_ADMIN).
 218	 */
 219	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
 220		      vmw_kms_update_layout_ioctl,
 221		      DRM_RENDER_ALLOW),
 222	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
 223		      vmw_shader_define_ioctl,
 224		      DRM_AUTH | DRM_RENDER_ALLOW),
 225	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
 226		      vmw_shader_destroy_ioctl,
 227		      DRM_RENDER_ALLOW),
 228	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
 229		      vmw_gb_surface_define_ioctl,
 230		      DRM_AUTH | DRM_RENDER_ALLOW),
 231	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
 232		      vmw_gb_surface_reference_ioctl,
 233		      DRM_AUTH | DRM_RENDER_ALLOW),
 234	VMW_IOCTL_DEF(VMW_SYNCCPU,
 235		      vmw_user_bo_synccpu_ioctl,
 236		      DRM_RENDER_ALLOW),
 237	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
 238		      vmw_extended_context_define_ioctl,
 239		      DRM_AUTH | DRM_RENDER_ALLOW),
 240	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
 241		      vmw_gb_surface_define_ext_ioctl,
 242		      DRM_AUTH | DRM_RENDER_ALLOW),
 243	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
 244		      vmw_gb_surface_reference_ext_ioctl,
 245		      DRM_AUTH | DRM_RENDER_ALLOW),
 246};
 247
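    /*
     * 0x15ad is the VMware PCI vendor id; device 0x0405 is the SVGA II
     * adapter exposed by VMware virtual machines.
     */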
 248static const struct pci_device_id vmw_pci_id_list[] = {
 249	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
 250	{0, 0, 0}
 251};
 252MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
 253
 254static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
 255static int vmw_force_iommu;
 256static int vmw_restrict_iommu;
 257static int vmw_force_coherent;
 258static int vmw_restrict_dma_mask;
 259static int vmw_assume_16bpp;
 260
 261static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
 262static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 263			      void *ptr);
 264
 265MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
 266module_param_named(enable_fbdev, enable_fbdev, int, 0600);
 267MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
 268module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
 269MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
 270module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
 271MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
 272module_param_named(force_coherent, vmw_force_coherent, int, 0600);
 273MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
 274module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
 275MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
 276module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
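    /*
     * Example usage: the parameters above can be given at load time, e.g.
     * "modprobe vmwgfx enable_fbdev=1 assume_16bpp=1", or, since they are
     * registered with 0600 permissions, changed at runtime through
     * /sys/module/vmwgfx/parameters/<name>.
     */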
 277
 278
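    /**
     * vmw_print_capabilities2 - Log the SVGA_CAP2_* flags the device reports
     *
     * @capabilities2: Capability flags read from SVGA_REG_CAP2.
     */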
 279static void vmw_print_capabilities2(uint32_t capabilities2)
 280{
 281	DRM_INFO("Capabilities2:\n");
 282	if (capabilities2 & SVGA_CAP2_GROW_OTABLE)
 283		DRM_INFO("  Grow oTable.\n");
 284	if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
 285		DRM_INFO("  IntraSurface copy.\n");
 286}
 287
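    /**
     * vmw_print_capabilities - Log the SVGA_CAP_* flags the device reports
     *
     * @capabilities: Capability flags read from SVGA_REG_CAPABILITIES.
     */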
 288static void vmw_print_capabilities(uint32_t capabilities)
 289{
 290	DRM_INFO("Capabilities:\n");
 291	if (capabilities & SVGA_CAP_RECT_COPY)
 292		DRM_INFO("  Rect copy.\n");
 293	if (capabilities & SVGA_CAP_CURSOR)
 294		DRM_INFO("  Cursor.\n");
 295	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
 296		DRM_INFO("  Cursor bypass.\n");
 297	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
 298		DRM_INFO("  Cursor bypass 2.\n");
 299	if (capabilities & SVGA_CAP_8BIT_EMULATION)
 300		DRM_INFO("  8bit emulation.\n");
 301	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
 302		DRM_INFO("  Alpha cursor.\n");
 303	if (capabilities & SVGA_CAP_3D)
 304		DRM_INFO("  3D.\n");
 305	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
 306		DRM_INFO("  Extended Fifo.\n");
 307	if (capabilities & SVGA_CAP_MULTIMON)
 308		DRM_INFO("  Multimon.\n");
 309	if (capabilities & SVGA_CAP_PITCHLOCK)
 310		DRM_INFO("  Pitchlock.\n");
 311	if (capabilities & SVGA_CAP_IRQMASK)
 312		DRM_INFO("  Irq mask.\n");
 313	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
 314		DRM_INFO("  Display Topology.\n");
 315	if (capabilities & SVGA_CAP_GMR)
 316		DRM_INFO("  GMR.\n");
 317	if (capabilities & SVGA_CAP_TRACES)
 318		DRM_INFO("  Traces.\n");
 319	if (capabilities & SVGA_CAP_GMR2)
 320		DRM_INFO("  GMR2.\n");
 321	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
 322		DRM_INFO("  Screen Object 2.\n");
 323	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
 324		DRM_INFO("  Command Buffers.\n");
 325	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
 326		DRM_INFO("  Command Buffers 2.\n");
 327	if (capabilities & SVGA_CAP_GBOBJECTS)
 328		DRM_INFO("  Guest Backed Resources.\n");
 329	if (capabilities & SVGA_CAP_DX)
 330		DRM_INFO("  DX Features.\n");
 331	if (capabilities & SVGA_CAP_HP_CMD_QUEUE)
 332		DRM_INFO("  HP Command Queue.\n");
 333}
 334
 335/**
 336 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 337 *
 338 * @dev_priv: A device private structure.
 339 *
 340 * This function creates a small buffer object that holds the query
 341 * result for dummy queries emitted as query barriers.
 342 * The function will then map the first page and initialize a pending
  343 * occlusion query result structure. Finally, it will unmap the buffer.
 344 * No interruptible waits are done within this function.
 345 *
 346 * Returns an error if bo creation or initialization fails.
 347 */
 348static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 349{
 350	int ret;
 351	struct vmw_buffer_object *vbo;
 352	struct ttm_bo_kmap_obj map;
 353	volatile SVGA3dQueryResult *result;
 354	bool dummy;
 355
 356	/*
 357	 * Create the vbo as pinned, so that a tryreserve will
 358	 * immediately succeed. This is because we're the only
 359	 * user of the bo currently.
 360	 */
 361	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
 362	if (!vbo)
 363		return -ENOMEM;
 364
 365	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
 366			  &vmw_sys_ne_placement, false,
 367			  &vmw_bo_bo_free);
 368	if (unlikely(ret != 0))
 369		return ret;
 370
 371	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
 372	BUG_ON(ret != 0);
 373	vmw_bo_pin_reserved(vbo, true);
 374
 375	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
 376	if (likely(ret == 0)) {
 377		result = ttm_kmap_obj_virtual(&map, &dummy);
 378		result->totalSize = sizeof(*result);
 379		result->state = SVGA3D_QUERYSTATE_PENDING;
 380		result->result32 = 0xff;
 381		ttm_bo_kunmap(&map);
 382	}
 383	vmw_bo_pin_reserved(vbo, false);
 384	ttm_bo_unreserve(&vbo->base);
 385
 386	if (unlikely(ret != 0)) {
 387		DRM_ERROR("Dummy query buffer map failed.\n");
 388		vmw_bo_unreference(&vbo);
 389	} else
 390		dev_priv->dummy_query_bo = vbo;
 391
 392	return ret;
 393}
 394
 395/**
 396 * vmw_request_device_late - Perform late device setup
 397 *
 398 * @dev_priv: Pointer to device private.
 399 *
 400 * This function performs setup of otables and enables large command
 401 * buffer submission. These tasks are split out to a separate function
 402 * because it reverts vmw_release_device_early and is intended to be used
 403 * by an error path in the hibernation code.
 404 */
 405static int vmw_request_device_late(struct vmw_private *dev_priv)
 406{
 407	int ret;
 408
 409	if (dev_priv->has_mob) {
 410		ret = vmw_otables_setup(dev_priv);
 411		if (unlikely(ret != 0)) {
 412			DRM_ERROR("Unable to initialize "
 413				  "guest Memory OBjects.\n");
 414			return ret;
 415		}
 416	}
 417
 418	if (dev_priv->cman) {
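    		/* Pool of 256*4096 bytes = 1 MiB; default size 2*4096 bytes = 8 KiB. */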
 419		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
 420					       256*4096, 2*4096);
 421		if (ret) {
 422			struct vmw_cmdbuf_man *man = dev_priv->cman;
 423
 424			dev_priv->cman = NULL;
 425			vmw_cmdbuf_man_destroy(man);
 426		}
 427	}
 428
 429	return 0;
 430}
 431
 432static int vmw_request_device(struct vmw_private *dev_priv)
 433{
 434	int ret;
 435
 436	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
 437	if (unlikely(ret != 0)) {
 438		DRM_ERROR("Unable to initialize FIFO.\n");
 439		return ret;
 440	}
 441	vmw_fence_fifo_up(dev_priv->fman);
 442	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
 443	if (IS_ERR(dev_priv->cman)) {
 444		dev_priv->cman = NULL;
 445		dev_priv->has_dx = false;
 446	}
 447
 448	ret = vmw_request_device_late(dev_priv);
 449	if (ret)
 450		goto out_no_mob;
 451
 452	ret = vmw_dummy_query_bo_create(dev_priv);
 453	if (unlikely(ret != 0))
 454		goto out_no_query_bo;
 455
 456	return 0;
 457
 458out_no_query_bo:
 459	if (dev_priv->cman)
 460		vmw_cmdbuf_remove_pool(dev_priv->cman);
 461	if (dev_priv->has_mob) {
 462		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
 463		vmw_otables_takedown(dev_priv);
 464	}
 465	if (dev_priv->cman)
 466		vmw_cmdbuf_man_destroy(dev_priv->cman);
 467out_no_mob:
 468	vmw_fence_fifo_down(dev_priv->fman);
 469	vmw_fifo_release(dev_priv, &dev_priv->fifo);
 470	return ret;
 471}
 472
 473/**
 474 * vmw_release_device_early - Early part of fifo takedown.
 475 *
 476 * @dev_priv: Pointer to device private struct.
 477 *
 478 * This is the first part of command submission takedown, to be called before
 479 * buffer management is taken down.
 480 */
 481static void vmw_release_device_early(struct vmw_private *dev_priv)
 482{
 483	/*
 484	 * Previous destructions should've released
 485	 * the pinned bo.
 486	 */
 487
 488	BUG_ON(dev_priv->pinned_bo != NULL);
 489
 490	vmw_bo_unreference(&dev_priv->dummy_query_bo);
 491	if (dev_priv->cman)
 492		vmw_cmdbuf_remove_pool(dev_priv->cman);
 493
 494	if (dev_priv->has_mob) {
 495		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
 496		vmw_otables_takedown(dev_priv);
 497	}
 498}
 499
 500/**
 501 * vmw_release_device_late - Late part of fifo takedown.
 502 *
 503 * @dev_priv: Pointer to device private struct.
 504 *
 505 * This is the last part of the command submission takedown, to be called when
 506 * command submission is no longer needed. It may wait on pending fences.
 507 */
 508static void vmw_release_device_late(struct vmw_private *dev_priv)
 509{
 510	vmw_fence_fifo_down(dev_priv->fman);
 511	if (dev_priv->cman)
 512		vmw_cmdbuf_man_destroy(dev_priv->cman);
 513
 514	vmw_fifo_release(dev_priv, &dev_priv->fifo);
 515}
 516
  517/**
  518 * vmw_get_initial_size - Set the initial_[width|height] fields.
  519 *
  520 * @dev_priv: Pointer to a struct vmw_private.
  521 *
  522 * Reads SVGA_REG_[WIDTH|HEIGHT] and clamps the values to the
  523 * fb_max_[width|height] fields and VMW_MIN_INITIAL_[WIDTH|HEIGHT].
  524 * Values that appear to be invalid are set to VMW_MIN_INITIAL_[WIDTH|HEIGHT].
  525 */
 526static void vmw_get_initial_size(struct vmw_private *dev_priv)
 527{
 528	uint32_t width;
 529	uint32_t height;
 530
 531	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
 532	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);
 533
 534	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
 535	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);
 536
 537	if (width > dev_priv->fb_max_width ||
 538	    height > dev_priv->fb_max_height) {
 539
 540		/*
 541		 * This is a host error and shouldn't occur.
 542		 */
 543
 544		width = VMW_MIN_INITIAL_WIDTH;
 545		height = VMW_MIN_INITIAL_HEIGHT;
 546	}
 547
 548	dev_priv->initial_width = width;
 549	dev_priv->initial_height = height;
 550}
 551
 552/**
 553 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 554 * system.
 555 *
 556 * @dev_priv: Pointer to a struct vmw_private
 557 *
  558 * This function tries to determine what actions need to be taken by the
 559 * driver to make system pages visible to the device.
 560 * If this function decides that DMA is not possible, it returns -EINVAL.
 561 * The driver may then try to disable features of the device that require
 562 * DMA.
 563 */
 564static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 565{
 566	static const char *names[vmw_dma_map_max] = {
 567		[vmw_dma_phys] = "Using physical TTM page addresses.",
 568		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
 569		[vmw_dma_map_populate] = "Caching DMA mappings.",
 570		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
 571
 572	if (vmw_force_coherent)
 573		dev_priv->map_mode = vmw_dma_alloc_coherent;
 574	else if (vmw_restrict_iommu)
 575		dev_priv->map_mode = vmw_dma_map_bind;
 576	else
 577		dev_priv->map_mode = vmw_dma_map_populate;
 578
 579	/* No TTM coherent page pool? FIXME: Ask TTM instead! */
  580	if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
 581	    (dev_priv->map_mode == vmw_dma_alloc_coherent))
 582		return -EINVAL;
 583
 584	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
 585	return 0;
 586}
 587
 588/**
 589 * vmw_dma_masks - set required page- and dma masks
 590 *
  591 * @dev_priv: Pointer to a struct vmw_private.
  592 *
  593 * With 32-bit builds we can only handle 32-bit PFNs. Optionally set that
  594 * restriction also for 64-bit systems.
 595 */
 596static int vmw_dma_masks(struct vmw_private *dev_priv)
 597{
 598	struct drm_device *dev = dev_priv->dev;
 599	int ret = 0;
 600
 601	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
 602	if (dev_priv->map_mode != vmw_dma_phys &&
 603	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
 604		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
 605		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
 606	}
 607
 608	return ret;
 609}
 610
 611static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 612{
 613	struct vmw_private *dev_priv;
 614	int ret;
 615	uint32_t svga_id;
 616	enum vmw_res_type i;
 617	bool refuse_dma = false;
 618	char host_log[100] = {0};
 619
 620	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
 621	if (unlikely(!dev_priv)) {
 622		DRM_ERROR("Failed allocating a device private struct.\n");
 623		return -ENOMEM;
 624	}
 625
 626	pci_set_master(dev->pdev);
 627
 628	dev_priv->dev = dev;
 629	dev_priv->vmw_chipset = chipset;
 630	dev_priv->last_read_seqno = (uint32_t) -100;
 631	mutex_init(&dev_priv->cmdbuf_mutex);
 632	mutex_init(&dev_priv->release_mutex);
 633	mutex_init(&dev_priv->binding_mutex);
 634	mutex_init(&dev_priv->global_kms_state_mutex);
 635	ttm_lock_init(&dev_priv->reservation_sem);
 636	spin_lock_init(&dev_priv->resource_lock);
 637	spin_lock_init(&dev_priv->hw_lock);
 638	spin_lock_init(&dev_priv->waiter_lock);
 639	spin_lock_init(&dev_priv->cap_lock);
 640	spin_lock_init(&dev_priv->svga_lock);
 641	spin_lock_init(&dev_priv->cursor_lock);
 642
 643	for (i = vmw_res_context; i < vmw_res_max; ++i) {
 644		idr_init(&dev_priv->res_idr[i]);
 645		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
 646	}
 647
 648	init_waitqueue_head(&dev_priv->fence_queue);
 649	init_waitqueue_head(&dev_priv->fifo_queue);
 650	dev_priv->fence_queue_waiters = 0;
 651	dev_priv->fifo_queue_waiters = 0;
 652
 653	dev_priv->used_memory_size = 0;
 654
 655	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
 656	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
 657	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
 658
 659	dev_priv->assume_16bpp = !!vmw_assume_16bpp;
 660
 661	dev_priv->enable_fb = enable_fbdev;
 662
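    	/*
    	 * Negotiate the SVGA register interface version: propose SVGA_ID_2
    	 * and read the id register back to see whether the device accepted it.
    	 */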
 663	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
 664	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
 665	if (svga_id != SVGA_ID_2) {
 666		ret = -ENOSYS;
 667		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
 668		goto out_err0;
 669	}
 670
 671	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
 672
 673	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) {
 674		dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
 675	}
 676
 677
 678	ret = vmw_dma_select_mode(dev_priv);
 679	if (unlikely(ret != 0)) {
 680		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
 681		refuse_dma = true;
 682	}
 683
 684	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
 685	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
 686	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
 687	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
 688
 689	vmw_get_initial_size(dev_priv);
 690
 691	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
 692		dev_priv->max_gmr_ids =
 693			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
 694		dev_priv->max_gmr_pages =
 695			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
 696		dev_priv->memory_size =
 697			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
 698		dev_priv->memory_size -= dev_priv->vram_size;
 699	} else {
 700		/*
 701		 * An arbitrary limit of 512MiB on surface
 702		 * memory. But all HWV8 hardware supports GMR2.
 703		 */
 704		dev_priv->memory_size = 512*1024*1024;
 705	}
 706	dev_priv->max_mob_pages = 0;
 707	dev_priv->max_mob_size = 0;
 708	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
 709		uint64_t mem_size =
 710			vmw_read(dev_priv,
 711				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
 712
 713		/*
 714		 * Workaround for low memory 2D VMs to compensate for the
 715		 * allocation taken by fbdev
 716		 */
 717		if (!(dev_priv->capabilities & SVGA_CAP_3D))
 718			mem_size *= 3;
 719
 720		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
 721		dev_priv->prim_bb_mem =
 722			vmw_read(dev_priv,
 723				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
 724		dev_priv->max_mob_size =
 725			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
 726		dev_priv->stdu_max_width =
 727			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
 728		dev_priv->stdu_max_height =
 729			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);
 730
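    		/*
    		 * SVGA_REG_DEV_CAP acts as an index/value pair: write a
    		 * SVGA3D_DEVCAP_* index, then read the capability value back.
    		 */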
 731		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
 732			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
 733		dev_priv->texture_max_width = vmw_read(dev_priv,
 734						       SVGA_REG_DEV_CAP);
 735		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
 736			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
 737		dev_priv->texture_max_height = vmw_read(dev_priv,
 738							SVGA_REG_DEV_CAP);
 739	} else {
 740		dev_priv->texture_max_width = 8192;
 741		dev_priv->texture_max_height = 8192;
 742		dev_priv->prim_bb_mem = dev_priv->vram_size;
 743	}
 744
 745	vmw_print_capabilities(dev_priv->capabilities);
 746	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
 747		vmw_print_capabilities2(dev_priv->capabilities2);
 748
 749	ret = vmw_dma_masks(dev_priv);
 750	if (unlikely(ret != 0))
 751		goto out_err0;
 752
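    	/*
    	 * Cap the DMA segment size at the smaller of the largest page-aligned
    	 * value that fits in 32 bits and SCATTERLIST_MAX_SEGMENT.
    	 */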
 753	dma_set_max_seg_size(dev->dev, min_t(unsigned int, U32_MAX & PAGE_MASK,
 754					     SCATTERLIST_MAX_SEGMENT));
 755
 756	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
 757		DRM_INFO("Max GMR ids is %u\n",
 758			 (unsigned)dev_priv->max_gmr_ids);
 759		DRM_INFO("Max number of GMR pages is %u\n",
 760			 (unsigned)dev_priv->max_gmr_pages);
 761		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
 762			 (unsigned)dev_priv->memory_size / 1024);
 763	}
 764	DRM_INFO("Maximum display memory size is %u kiB\n",
 765		 dev_priv->prim_bb_mem / 1024);
 766	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
 767		 dev_priv->vram_start, dev_priv->vram_size / 1024);
 768	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
 769		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);
 770
 771	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
 772				       dev_priv->mmio_size, MEMREMAP_WB);
 773
 774	if (unlikely(dev_priv->mmio_virt == NULL)) {
 775		ret = -ENOMEM;
 776		DRM_ERROR("Failed mapping MMIO.\n");
 777		goto out_err0;
 778	}
 779
 780	/* Need mmio memory to check for fifo pitchlock cap. */
 781	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
 782	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
 783	    !vmw_fifo_have_pitchlock(dev_priv)) {
 784		ret = -ENOSYS;
 785		DRM_ERROR("Hardware has no pitchlock\n");
 786		goto out_err4;
 787	}
 788
 789	dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
 790						&vmw_prime_dmabuf_ops);
 791
 792	if (unlikely(dev_priv->tdev == NULL)) {
 793		DRM_ERROR("Unable to initialize TTM object management.\n");
 794		ret = -ENOMEM;
 795		goto out_err4;
 796	}
 797
 798	dev->dev_private = dev_priv;
 799
 800	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
 801	dev_priv->stealth = (ret != 0);
 802	if (dev_priv->stealth) {
  803		/*
 804		 * Request at least the mmio PCI resource.
 805		 */
 806
 807		DRM_INFO("It appears like vesafb is loaded. "
 808			 "Ignore above error if any.\n");
 809		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
 810		if (unlikely(ret != 0)) {
 811			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
 812			goto out_no_device;
 813		}
 814	}
 815
 816	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
 817		ret = vmw_irq_install(dev, dev->pdev->irq);
 818		if (ret != 0) {
 819			DRM_ERROR("Failed installing irq: %d\n", ret);
 820			goto out_no_irq;
 821		}
 822	}
 823
 824	dev_priv->fman = vmw_fence_manager_init(dev_priv);
 825	if (unlikely(dev_priv->fman == NULL)) {
 826		ret = -ENOMEM;
 827		goto out_no_fman;
 828	}
 829
 830	ret = ttm_bo_device_init(&dev_priv->bdev,
 831				 &vmw_bo_driver,
 832				 dev->anon_inode->i_mapping,
 833				 false);
 834	if (unlikely(ret != 0)) {
 835		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
 836		goto out_no_bdev;
 837	}
 838
 839	/*
 840	 * Enable VRAM, but initially don't use it until SVGA is enabled and
 841	 * unhidden.
 842	 */
 843	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
 844			     (dev_priv->vram_size >> PAGE_SHIFT));
 845	if (unlikely(ret != 0)) {
 846		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
 847		goto out_no_vram;
 848	}
 849	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
 850
 851	dev_priv->has_gmr = true;
 852	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
 853	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
 854					 VMW_PL_GMR) != 0) {
 855		DRM_INFO("No GMR memory available. "
 856			 "Graphics memory resources are very limited.\n");
 857		dev_priv->has_gmr = false;
 858	}
 859
 860	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
 861		dev_priv->has_mob = true;
 862		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
 863				   VMW_PL_MOB) != 0) {
 864			DRM_INFO("No MOB memory available. "
 865				 "3D will be disabled.\n");
 866			dev_priv->has_mob = false;
 867		}
 868	}
 869
 870	if (dev_priv->has_mob) {
 871		spin_lock(&dev_priv->cap_lock);
 872		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
 873		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
 874		spin_unlock(&dev_priv->cap_lock);
 875	}
 876
 877	vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
 878	ret = vmw_kms_init(dev_priv);
 879	if (unlikely(ret != 0))
 880		goto out_no_kms;
 881	vmw_overlay_init(dev_priv);
 882
 883	ret = vmw_request_device(dev_priv);
 884	if (ret)
 885		goto out_no_fifo;
 886
 887	if (dev_priv->has_dx) {
 888		/*
 889		 * SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1
 890		 * support
 891		 */
 892		if ((dev_priv->capabilities2 & SVGA_CAP2_DX2) != 0) {
 893			vmw_write(dev_priv, SVGA_REG_DEV_CAP,
 894					SVGA3D_DEVCAP_SM41);
 895			dev_priv->has_sm4_1 = vmw_read(dev_priv,
 896							SVGA_REG_DEV_CAP);
 897		}
 898	}
 899
 900	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
 901	DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
 902		 ? "yes." : "no.");
 903	DRM_INFO("SM4_1: %s\n", dev_priv->has_sm4_1 ? "yes." : "no.");
 904
 905	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
 906		VMWGFX_REPO, VMWGFX_GIT_VERSION);
 907	vmw_host_log(host_log);
 908
 909	memset(host_log, 0, sizeof(host_log));
 910	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
 911		VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
 912		VMWGFX_DRIVER_PATCHLEVEL);
 913	vmw_host_log(host_log);
 914
 915	if (dev_priv->enable_fb) {
 916		vmw_fifo_resource_inc(dev_priv);
 917		vmw_svga_enable(dev_priv);
 918		vmw_fb_init(dev_priv);
 919	}
 920
 921	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
 922	register_pm_notifier(&dev_priv->pm_nb);
 923
 924	return 0;
 925
 926out_no_fifo:
 927	vmw_overlay_close(dev_priv);
 928	vmw_kms_close(dev_priv);
 929out_no_kms:
 930	if (dev_priv->has_mob)
 931		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
 932	if (dev_priv->has_gmr)
 933		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
 934	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 935out_no_vram:
 936	(void)ttm_bo_device_release(&dev_priv->bdev);
 937out_no_bdev:
 938	vmw_fence_manager_takedown(dev_priv->fman);
 939out_no_fman:
 940	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
 941		vmw_irq_uninstall(dev_priv->dev);
 942out_no_irq:
 943	if (dev_priv->stealth)
 944		pci_release_region(dev->pdev, 2);
 945	else
 946		pci_release_regions(dev->pdev);
 947out_no_device:
 948	ttm_object_device_release(&dev_priv->tdev);
 949out_err4:
 950	memunmap(dev_priv->mmio_virt);
 951out_err0:
 952	for (i = vmw_res_context; i < vmw_res_max; ++i)
 953		idr_destroy(&dev_priv->res_idr[i]);
 954
 955	if (dev_priv->ctx.staged_bindings)
 956		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
 957	kfree(dev_priv);
 958	return ret;
 959}
 960
 961static void vmw_driver_unload(struct drm_device *dev)
 962{
 963	struct vmw_private *dev_priv = vmw_priv(dev);
 964	enum vmw_res_type i;
 965
 966	unregister_pm_notifier(&dev_priv->pm_nb);
 967
 968	if (dev_priv->ctx.res_ht_initialized)
 969		drm_ht_remove(&dev_priv->ctx.res_ht);
 970	vfree(dev_priv->ctx.cmd_bounce);
 971	if (dev_priv->enable_fb) {
 972		vmw_fb_off(dev_priv);
 973		vmw_fb_close(dev_priv);
 974		vmw_fifo_resource_dec(dev_priv);
 975		vmw_svga_disable(dev_priv);
 976	}
 977
 978	vmw_kms_close(dev_priv);
 979	vmw_overlay_close(dev_priv);
 980
 981	if (dev_priv->has_gmr)
 982		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
 983	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 984
 985	vmw_release_device_early(dev_priv);
 986	if (dev_priv->has_mob)
 987		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
 988	(void) ttm_bo_device_release(&dev_priv->bdev);
 989	vmw_release_device_late(dev_priv);
 990	vmw_fence_manager_takedown(dev_priv->fman);
 991	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
 992		vmw_irq_uninstall(dev_priv->dev);
 993	if (dev_priv->stealth)
 994		pci_release_region(dev->pdev, 2);
 995	else
 996		pci_release_regions(dev->pdev);
 997
 998	ttm_object_device_release(&dev_priv->tdev);
 999	memunmap(dev_priv->mmio_virt);
1000	if (dev_priv->ctx.staged_bindings)
1001		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
1002
1003	for (i = vmw_res_context; i < vmw_res_max; ++i)
1004		idr_destroy(&dev_priv->res_idr[i]);
1005
1006	kfree(dev_priv);
1007}
1008
1009static void vmw_postclose(struct drm_device *dev,
1010			 struct drm_file *file_priv)
1011{
1012	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
1013
1014	ttm_object_file_release(&vmw_fp->tfile);
1015	kfree(vmw_fp);
1016}
1017
1018static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
1019{
1020	struct vmw_private *dev_priv = vmw_priv(dev);
1021	struct vmw_fpriv *vmw_fp;
1022	int ret = -ENOMEM;
1023
1024	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
1025	if (unlikely(!vmw_fp))
1026		return ret;
1027
1028	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
1029	if (unlikely(vmw_fp->tfile == NULL))
1030		goto out_no_tfile;
1031
1032	file_priv->driver_priv = vmw_fp;
1033
1034	return 0;
1035
1036out_no_tfile:
1037	kfree(vmw_fp);
1038	return ret;
1039}
1040
1041static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
1042			      unsigned long arg,
1043			      long (*ioctl_func)(struct file *, unsigned int,
1044						 unsigned long))
1045{
1046	struct drm_file *file_priv = filp->private_data;
1047	struct drm_device *dev = file_priv->minor->dev;
1048	unsigned int nr = DRM_IOCTL_NR(cmd);
1049	unsigned int flags;
1050
1051	/*
1052	 * Do extra checking on driver private ioctls.
1053	 */
1054
1055	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
1056	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
1057		const struct drm_ioctl_desc *ioctl =
1058			&vmw_ioctls[nr - DRM_COMMAND_BASE];
1059
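    		/*
    		 * VMW_EXECBUF accepts several argument struct versions, so it
    		 * is forwarded as-is instead of failing the exact command
    		 * encoding check below.
    		 */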
1060		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
1061			return ioctl_func(filp, cmd, arg);
1062		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
1063			if (!drm_is_current_master(file_priv) &&
1064			    !capable(CAP_SYS_ADMIN))
1065				return -EACCES;
1066		}
1067
1068		if (unlikely(ioctl->cmd != cmd))
1069			goto out_io_encoding;
1070
1071		flags = ioctl->flags;
1072	} else if (!drm_ioctl_flags(nr, &flags))
1073		return -EINVAL;
1074
1075	return ioctl_func(filp, cmd, arg);
1076
1077out_io_encoding:
1078	DRM_ERROR("Invalid command format, ioctl %d\n",
1079		  nr - DRM_COMMAND_BASE);
1080
1081	return -EINVAL;
1082}
1083
1084static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
1085			       unsigned long arg)
1086{
1087	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
1088}
1089
1090#ifdef CONFIG_COMPAT
1091static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
1092			     unsigned long arg)
1093{
1094	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
1095}
1096#endif
1097
1098static int vmw_master_set(struct drm_device *dev,
1099			  struct drm_file *file_priv,
1100			  bool from_open)
1101{
1102	/*
1103	 * Inform a new master that the layout may have changed while
1104	 * it was gone.
1105	 */
1106	if (!from_open)
1107		drm_sysfs_hotplug_event(dev);
1108
1109	return 0;
1110}
1111
1112static void vmw_master_drop(struct drm_device *dev,
1113			    struct drm_file *file_priv)
1114{
1115	struct vmw_private *dev_priv = vmw_priv(dev);
1116
1117	vmw_kms_legacy_hotspot_clear(dev_priv);
1118	if (!dev_priv->enable_fb)
1119		vmw_svga_disable(dev_priv);
1120}
1121
1122/**
1123 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
1124 *
1125 * @dev_priv: Pointer to device private struct.
1126 * Needs the reservation sem to be held in non-exclusive mode.
1127 */
1128static void __vmw_svga_enable(struct vmw_private *dev_priv)
1129{
1130	spin_lock(&dev_priv->svga_lock);
1131	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
1132		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
1133		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
1134	}
1135	spin_unlock(&dev_priv->svga_lock);
1136}
1137
1138/**
1139 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
1140 *
1141 * @dev_priv: Pointer to device private struct.
1142 */
1143void vmw_svga_enable(struct vmw_private *dev_priv)
1144{
1145	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
1146	__vmw_svga_enable(dev_priv);
1147	ttm_read_unlock(&dev_priv->reservation_sem);
1148}
1149
1150/**
1151 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
1152 *
1153 * @dev_priv: Pointer to device private struct.
1154 * Needs the reservation sem to be held in exclusive mode.
1155 * Will not empty VRAM. VRAM must be emptied by caller.
1156 */
1157static void __vmw_svga_disable(struct vmw_private *dev_priv)
1158{
1159	spin_lock(&dev_priv->svga_lock);
1160	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
1161		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
1162		vmw_write(dev_priv, SVGA_REG_ENABLE,
1163			  SVGA_REG_ENABLE_HIDE |
1164			  SVGA_REG_ENABLE_ENABLE);
1165	}
1166	spin_unlock(&dev_priv->svga_lock);
1167}
1168
1169/**
1170 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo
1171 * running.
1172 *
1173 * @dev_priv: Pointer to device private struct.
1174 * Will empty VRAM.
1175 */
1176void vmw_svga_disable(struct vmw_private *dev_priv)
1177{
1178	/*
1179	 * Disabling SVGA will turn off device modesetting capabilities, so
1180	 * notify KMS about that so that it doesn't cache atomic state that
1181	 * isn't valid anymore, for example crtcs turned on.
1182	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
1183	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
1184	 * end up with lock order reversal. Thus, a master may actually perform
1185	 * a new modeset just after we call vmw_kms_lost_device() and race with
1186	 * vmw_svga_disable(), but that should at worst cause atomic KMS state
1187	 * to be inconsistent with the device, causing modesetting problems.
1188	 *
1189	 */
1190	vmw_kms_lost_device(dev_priv->dev);
1191	ttm_write_lock(&dev_priv->reservation_sem, false);
1192	spin_lock(&dev_priv->svga_lock);
1193	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
1194		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
1195		spin_unlock(&dev_priv->svga_lock);
1196		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
1197			DRM_ERROR("Failed evicting VRAM buffers.\n");
1198		vmw_write(dev_priv, SVGA_REG_ENABLE,
1199			  SVGA_REG_ENABLE_HIDE |
1200			  SVGA_REG_ENABLE_ENABLE);
1201	} else
1202		spin_unlock(&dev_priv->svga_lock);
1203	ttm_write_unlock(&dev_priv->reservation_sem);
1204}
1205
1206static void vmw_remove(struct pci_dev *pdev)
1207{
1208	struct drm_device *dev = pci_get_drvdata(pdev);
1209
1210	pci_disable_device(pdev);
1211	drm_put_dev(dev);
1212}
1213
1214static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
1215			      void *ptr)
1216{
1217	struct vmw_private *dev_priv =
1218		container_of(nb, struct vmw_private, pm_nb);
1219
1220	switch (val) {
1221	case PM_HIBERNATION_PREPARE:
1222		/*
1223		 * Take the reservation sem in write mode, which will make sure
1224		 * there are no other processes holding a buffer object
1225		 * reservation, meaning we should be able to evict all buffer
1226		 * objects if needed.
1227		 * Once user-space processes have been frozen, we can release
1228		 * the lock again.
1229		 */
1230		ttm_suspend_lock(&dev_priv->reservation_sem);
1231		dev_priv->suspend_locked = true;
1232		break;
1233	case PM_POST_HIBERNATION:
1234	case PM_POST_RESTORE:
1235		if (READ_ONCE(dev_priv->suspend_locked)) {
1236			dev_priv->suspend_locked = false;
1237			ttm_suspend_unlock(&dev_priv->reservation_sem);
1238		}
1239		break;
1240	default:
1241		break;
1242	}
1243	return 0;
1244}
1245
1246static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1247{
1248	struct drm_device *dev = pci_get_drvdata(pdev);
1249	struct vmw_private *dev_priv = vmw_priv(dev);
1250
1251	if (dev_priv->refuse_hibernation)
1252		return -EBUSY;
1253
1254	pci_save_state(pdev);
1255	pci_disable_device(pdev);
1256	pci_set_power_state(pdev, PCI_D3hot);
1257	return 0;
1258}
1259
1260static int vmw_pci_resume(struct pci_dev *pdev)
1261{
1262	pci_set_power_state(pdev, PCI_D0);
1263	pci_restore_state(pdev);
1264	return pci_enable_device(pdev);
1265}
1266
1267static int vmw_pm_suspend(struct device *kdev)
1268{
1269	struct pci_dev *pdev = to_pci_dev(kdev);
1270	struct pm_message dummy;
1271
1272	dummy.event = 0;
1273
1274	return vmw_pci_suspend(pdev, dummy);
1275}
1276
1277static int vmw_pm_resume(struct device *kdev)
1278{
1279	struct pci_dev *pdev = to_pci_dev(kdev);
1280
1281	return vmw_pci_resume(pdev);
1282}
1283
1284static int vmw_pm_freeze(struct device *kdev)
1285{
1286	struct pci_dev *pdev = to_pci_dev(kdev);
1287	struct drm_device *dev = pci_get_drvdata(pdev);
1288	struct vmw_private *dev_priv = vmw_priv(dev);
1289	int ret;
1290
1291	/*
1292	 * Unlock for vmw_kms_suspend.
1293	 * No user-space processes should be running now.
1294	 */
1295	ttm_suspend_unlock(&dev_priv->reservation_sem);
1296	ret = vmw_kms_suspend(dev_priv->dev);
1297	if (ret) {
1298		ttm_suspend_lock(&dev_priv->reservation_sem);
1299		DRM_ERROR("Failed to freeze modesetting.\n");
1300		return ret;
1301	}
1302	if (dev_priv->enable_fb)
1303		vmw_fb_off(dev_priv);
1304
1305	ttm_suspend_lock(&dev_priv->reservation_sem);
1306	vmw_execbuf_release_pinned_bo(dev_priv);
1307	vmw_resource_evict_all(dev_priv);
1308	vmw_release_device_early(dev_priv);
1309	ttm_bo_swapout_all(&dev_priv->bdev);
1310	if (dev_priv->enable_fb)
1311		vmw_fifo_resource_dec(dev_priv);
1312	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
1313		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
1314		if (dev_priv->enable_fb)
1315			vmw_fifo_resource_inc(dev_priv);
1316		WARN_ON(vmw_request_device_late(dev_priv));
1317		dev_priv->suspend_locked = false;
1318		ttm_suspend_unlock(&dev_priv->reservation_sem);
1319		if (dev_priv->suspend_state)
1320			vmw_kms_resume(dev);
1321		if (dev_priv->enable_fb)
1322			vmw_fb_on(dev_priv);
1323		return -EBUSY;
1324	}
1325
1326	vmw_fence_fifo_down(dev_priv->fman);
1327	__vmw_svga_disable(dev_priv);
 1328
1329	vmw_release_device_late(dev_priv);
1330	return 0;
1331}
1332
1333static int vmw_pm_restore(struct device *kdev)
1334{
1335	struct pci_dev *pdev = to_pci_dev(kdev);
1336	struct drm_device *dev = pci_get_drvdata(pdev);
1337	struct vmw_private *dev_priv = vmw_priv(dev);
1338	int ret;
1339
1340	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
1341	(void) vmw_read(dev_priv, SVGA_REG_ID);
1342
1343	if (dev_priv->enable_fb)
1344		vmw_fifo_resource_inc(dev_priv);
1345
1346	ret = vmw_request_device(dev_priv);
1347	if (ret)
1348		return ret;
1349
1350	if (dev_priv->enable_fb)
1351		__vmw_svga_enable(dev_priv);
1352
1353	vmw_fence_fifo_up(dev_priv->fman);
1354	dev_priv->suspend_locked = false;
1355	ttm_suspend_unlock(&dev_priv->reservation_sem);
1356	if (dev_priv->suspend_state)
1357		vmw_kms_resume(dev_priv->dev);
1358
1359	if (dev_priv->enable_fb)
1360		vmw_fb_on(dev_priv);
1361
1362	return 0;
1363}
1364
1365static const struct dev_pm_ops vmw_pm_ops = {
1366	.freeze = vmw_pm_freeze,
1367	.thaw = vmw_pm_restore,
1368	.restore = vmw_pm_restore,
1369	.suspend = vmw_pm_suspend,
1370	.resume = vmw_pm_resume,
1371};
1372
1373static const struct file_operations vmwgfx_driver_fops = {
1374	.owner = THIS_MODULE,
1375	.open = drm_open,
1376	.release = drm_release,
1377	.unlocked_ioctl = vmw_unlocked_ioctl,
1378	.mmap = vmw_mmap,
1379	.poll = vmw_fops_poll,
1380	.read = vmw_fops_read,
1381#if defined(CONFIG_COMPAT)
1382	.compat_ioctl = vmw_compat_ioctl,
1383#endif
1384	.llseek = noop_llseek,
1385};
1386
1387static struct drm_driver driver = {
1388	.driver_features =
1389	DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
1390	.load = vmw_driver_load,
1391	.unload = vmw_driver_unload,
1392	.get_vblank_counter = vmw_get_vblank_counter,
1393	.enable_vblank = vmw_enable_vblank,
1394	.disable_vblank = vmw_disable_vblank,
1395	.ioctls = vmw_ioctls,
1396	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
1397	.master_set = vmw_master_set,
1398	.master_drop = vmw_master_drop,
1399	.open = vmw_driver_open,
1400	.postclose = vmw_postclose,
1401
1402	.dumb_create = vmw_dumb_create,
1403	.dumb_map_offset = vmw_dumb_map_offset,
1404	.dumb_destroy = vmw_dumb_destroy,
1405
1406	.prime_fd_to_handle = vmw_prime_fd_to_handle,
1407	.prime_handle_to_fd = vmw_prime_handle_to_fd,
1408
1409	.fops = &vmwgfx_driver_fops,
1410	.name = VMWGFX_DRIVER_NAME,
1411	.desc = VMWGFX_DRIVER_DESC,
1412	.date = VMWGFX_DRIVER_DATE,
1413	.major = VMWGFX_DRIVER_MAJOR,
1414	.minor = VMWGFX_DRIVER_MINOR,
1415	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
1416};
1417
1418static struct pci_driver vmw_pci_driver = {
1419	.name = VMWGFX_DRIVER_NAME,
1420	.id_table = vmw_pci_id_list,
1421	.probe = vmw_probe,
1422	.remove = vmw_remove,
1423	.driver = {
1424		.pm = &vmw_pm_ops
1425	}
1426};
1427
1428static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1429{
1430	return drm_get_pci_dev(pdev, ent, &driver);
1431}
1432
1433static int __init vmwgfx_init(void)
1434{
1435	int ret;
1436
1437	if (vgacon_text_force())
1438		return -EINVAL;
1439
1440	ret = pci_register_driver(&vmw_pci_driver);
1441	if (ret)
1442		DRM_ERROR("Failed initializing DRM.\n");
1443	return ret;
1444}
1445
1446static void __exit vmwgfx_exit(void)
1447{
1448	pci_unregister_driver(&vmw_pci_driver);
1449}
1450
1451module_init(vmwgfx_init);
1452module_exit(vmwgfx_exit);
1453
1454MODULE_AUTHOR("VMware Inc. and others");
1455MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
1456MODULE_LICENSE("GPL and additional rights");
1457MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
1458	       __stringify(VMWGFX_DRIVER_MINOR) "."
1459	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
1460	       "0");