v3.1
 
  1/**************************************************************************
  2 *
  3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
  4 * All Rights Reserved.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the
  8 * "Software"), to deal in the Software without restriction, including
  9 * without limitation the rights to use, copy, modify, merge, publish,
 10 * distribute, sub license, and/or sell copies of the Software, and to
 11 * permit persons to whom the Software is furnished to do so, subject to
 12 * the following conditions:
 13 *
 14 * The above copyright notice and this permission notice (including the
 15 * next paragraph) shall be included in all copies or substantial portions
 16 * of the Software.
 17 *
 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 25 *
 26 **************************************************************************/
 27
 28#include "drmP.h"
 29#include "vmwgfx_drv.h"
 30#include "ttm/ttm_placement.h"
 31#include "ttm/ttm_bo_driver.h"
 32#include "ttm/ttm_object.h"
 33#include "ttm/ttm_module.h"
 34
 35#define VMWGFX_DRIVER_NAME "vmwgfx"
 36#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
 37#define VMWGFX_CHIP_SVGAII 0
 38#define VMW_FB_RESERVATION 0
 39
 40/**
 41 * Fully encoded drm commands. Might move to vmw_drm.h
 42 */
 43
 44#define DRM_IOCTL_VMW_GET_PARAM					\
 45	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
 46		 struct drm_vmw_getparam_arg)
 47#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
 48	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
 49		union drm_vmw_alloc_dmabuf_arg)
 50#define DRM_IOCTL_VMW_UNREF_DMABUF				\
 51	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
 52		struct drm_vmw_unref_dmabuf_arg)
 53#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
 54	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
 55		 struct drm_vmw_cursor_bypass_arg)
 56
 57#define DRM_IOCTL_VMW_CONTROL_STREAM				\
 58	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
 59		 struct drm_vmw_control_stream_arg)
 60#define DRM_IOCTL_VMW_CLAIM_STREAM				\
 61	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
 62		 struct drm_vmw_stream_arg)
 63#define DRM_IOCTL_VMW_UNREF_STREAM				\
 64	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
 65		 struct drm_vmw_stream_arg)
 66
 67#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
 68	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
 69		struct drm_vmw_context_arg)
 70#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
 71	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
 72		struct drm_vmw_context_arg)
 73#define DRM_IOCTL_VMW_CREATE_SURFACE				\
 74	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
 75		 union drm_vmw_surface_create_arg)
 76#define DRM_IOCTL_VMW_UNREF_SURFACE				\
 77	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
 78		 struct drm_vmw_surface_arg)
 79#define DRM_IOCTL_VMW_REF_SURFACE				\
 80	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
 81		 union drm_vmw_surface_reference_arg)
 82#define DRM_IOCTL_VMW_EXECBUF					\
 83	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
 84		struct drm_vmw_execbuf_arg)
 85#define DRM_IOCTL_VMW_FIFO_DEBUG				\
 86	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FIFO_DEBUG,		\
 87		 struct drm_vmw_fifo_debug_arg)
 88#define DRM_IOCTL_VMW_FENCE_WAIT				\
 89	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
 90		 struct drm_vmw_fence_wait_arg)
 91#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
 92	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
 93		 struct drm_vmw_update_layout_arg)
 94
 95
 96/**
 97 * The core DRM version of this macro doesn't account for
 98 * DRM_COMMAND_BASE.
 99 */
100
101#define VMW_IOCTL_DEF(ioctl, func, flags) \
102  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
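/*
 * A quick illustration of the expansion (a sketch, not part of the original
 * file): VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, flags) becomes
 * roughly
 *
 *   [DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE] =
 *       {DRM_VMW_GET_PARAM, flags, vmw_getparam_ioctl, DRM_IOCTL_VMW_GET_PARAM},
 *
 * i.e. the designated initializer index is the driver-private ioctl number
 * with the DRM_COMMAND_BASE offset subtracted, which is exactly what the
 * comment above says the core DRM macro does not do.
 */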
103
104/**
105 * Ioctl definitions.
106 */
107
108static struct drm_ioctl_desc vmw_ioctls[] = {
109	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
110		      DRM_AUTH | DRM_UNLOCKED),
111	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
112		      DRM_AUTH | DRM_UNLOCKED),
113	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
114		      DRM_AUTH | DRM_UNLOCKED),
115	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
116		      vmw_kms_cursor_bypass_ioctl,
117		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
118
119	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
120		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
121	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
122		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
123	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
124		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
125
126	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
127		      DRM_AUTH | DRM_UNLOCKED),
128	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
129		      DRM_AUTH | DRM_UNLOCKED),
130	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
131		      DRM_AUTH | DRM_UNLOCKED),
132	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
133		      DRM_AUTH | DRM_UNLOCKED),
134	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
135		      DRM_AUTH | DRM_UNLOCKED),
136	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
137		      DRM_AUTH | DRM_UNLOCKED),
138	VMW_IOCTL_DEF(VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
139		      DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
140	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
141		      DRM_AUTH | DRM_UNLOCKED),
142	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
143		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
144};
145
146static struct pci_device_id vmw_pci_id_list[] = {
147	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
148	{0, 0, 0}
149};
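/*
 * For reference (added note, not in the original file): 0x15ad is the PCI
 * vendor ID used by VMware and 0x0405 is the SVGA II display adapter the
 * hypervisor exposes, so this single entry matches the virtual GPU handled
 * by this driver; the all-zero entry terminates the table.
 */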
150
151static int enable_fbdev;
152
153static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
154static void vmw_master_init(struct vmw_master *);
155static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
156			      void *ptr);
157
158MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
159module_param_named(enable_fbdev, enable_fbdev, int, 0600);
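/*
 * Usage sketch (added note, not in the original file): with the parameter
 * declared above, fbdev emulation can be requested at module load time,
 * e.g. "modprobe vmwgfx enable_fbdev=1"; the 0600 permission mask also
 * exposes it to root as /sys/module/vmwgfx/parameters/enable_fbdev.
 */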
160
161static void vmw_print_capabilities(uint32_t capabilities)
162{
163	DRM_INFO("Capabilities:\n");
164	if (capabilities & SVGA_CAP_RECT_COPY)
165		DRM_INFO("  Rect copy.\n");
166	if (capabilities & SVGA_CAP_CURSOR)
167		DRM_INFO("  Cursor.\n");
168	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
169		DRM_INFO("  Cursor bypass.\n");
170	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
171		DRM_INFO("  Cursor bypass 2.\n");
172	if (capabilities & SVGA_CAP_8BIT_EMULATION)
173		DRM_INFO("  8bit emulation.\n");
174	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
175		DRM_INFO("  Alpha cursor.\n");
176	if (capabilities & SVGA_CAP_3D)
177		DRM_INFO("  3D.\n");
178	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
179		DRM_INFO("  Extended Fifo.\n");
180	if (capabilities & SVGA_CAP_MULTIMON)
181		DRM_INFO("  Multimon.\n");
182	if (capabilities & SVGA_CAP_PITCHLOCK)
183		DRM_INFO("  Pitchlock.\n");
184	if (capabilities & SVGA_CAP_IRQMASK)
185		DRM_INFO("  Irq mask.\n");
186	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
187		DRM_INFO("  Display Topology.\n");
188	if (capabilities & SVGA_CAP_GMR)
189		DRM_INFO("  GMR.\n");
190	if (capabilities & SVGA_CAP_TRACES)
191		DRM_INFO("  Traces.\n");
192}
193
194static int vmw_request_device(struct vmw_private *dev_priv)
195{
196	int ret;
197
198	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
199	if (unlikely(ret != 0)) {
200		DRM_ERROR("Unable to initialize FIFO.\n");
201		return ret;
202	}
203
204	return 0;
205}
206
207static void vmw_release_device(struct vmw_private *dev_priv)
208{
209	vmw_fifo_release(dev_priv, &dev_priv->fifo);
210}
211
212int vmw_3d_resource_inc(struct vmw_private *dev_priv)
213{
214	int ret = 0;
215
216	mutex_lock(&dev_priv->release_mutex);
217	if (unlikely(dev_priv->num_3d_resources++ == 0)) {
218		ret = vmw_request_device(dev_priv);
219		if (unlikely(ret != 0))
220			--dev_priv->num_3d_resources;
221	}
222	mutex_unlock(&dev_priv->release_mutex);
223	return ret;
224}
225
226
227void vmw_3d_resource_dec(struct vmw_private *dev_priv)
228{
229	int32_t n3d;
230
231	mutex_lock(&dev_priv->release_mutex);
232	if (unlikely(--dev_priv->num_3d_resources == 0))
233		vmw_release_device(dev_priv);
234	n3d = (int32_t) dev_priv->num_3d_resources;
235	mutex_unlock(&dev_priv->release_mutex);
236
237	BUG_ON(n3d < 0);
238}
239
240static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
241{
242	struct vmw_private *dev_priv;
243	int ret;
244	uint32_t svga_id;
245
246	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
247	if (unlikely(dev_priv == NULL)) {
248		DRM_ERROR("Failed allocating a device private struct.\n");
249		return -ENOMEM;
250	}
251	memset(dev_priv, 0, sizeof(*dev_priv));
252
253	dev_priv->dev = dev;
254	dev_priv->vmw_chipset = chipset;
255	dev_priv->last_read_sequence = (uint32_t) -100;
256	mutex_init(&dev_priv->hw_mutex);
257	mutex_init(&dev_priv->cmdbuf_mutex);
258	mutex_init(&dev_priv->release_mutex);
259	rwlock_init(&dev_priv->resource_lock);
260	idr_init(&dev_priv->context_idr);
261	idr_init(&dev_priv->surface_idr);
262	idr_init(&dev_priv->stream_idr);
263	mutex_init(&dev_priv->init_mutex);
264	init_waitqueue_head(&dev_priv->fence_queue);
265	init_waitqueue_head(&dev_priv->fifo_queue);
266	atomic_set(&dev_priv->fence_queue_waiters, 0);
267	atomic_set(&dev_priv->fifo_queue_waiters, 0);
268
269	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
270	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
271	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
272
273	dev_priv->enable_fb = enable_fbdev;
274
275	mutex_lock(&dev_priv->hw_mutex);
276
277	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
278	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
279	if (svga_id != SVGA_ID_2) {
280		ret = -ENOSYS;
281		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
282		mutex_unlock(&dev_priv->hw_mutex);
283		goto out_err0;
284	}
285
286	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
287
288	if (dev_priv->capabilities & SVGA_CAP_GMR) {
289		dev_priv->max_gmr_descriptors =
290			vmw_read(dev_priv,
291				 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
292		dev_priv->max_gmr_ids =
293			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
294	}
295
296	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
297	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
298	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
299	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
300
301	mutex_unlock(&dev_priv->hw_mutex);
302
303	vmw_print_capabilities(dev_priv->capabilities);
304
305	if (dev_priv->capabilities & SVGA_CAP_GMR) {
306		DRM_INFO("Max GMR ids is %u\n",
307			 (unsigned)dev_priv->max_gmr_ids);
308		DRM_INFO("Max GMR descriptors is %u\n",
309			 (unsigned)dev_priv->max_gmr_descriptors);
310	}
311	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
312		 dev_priv->vram_start, dev_priv->vram_size / 1024);
313	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
314		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);
315
316	ret = vmw_ttm_global_init(dev_priv);
317	if (unlikely(ret != 0))
318		goto out_err0;
319
320
321	vmw_master_init(&dev_priv->fbdev_master);
322	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
323	dev_priv->active_master = &dev_priv->fbdev_master;
324
325
326	ret = ttm_bo_device_init(&dev_priv->bdev,
327				 dev_priv->bo_global_ref.ref.object,
328				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
329				 false);
330	if (unlikely(ret != 0)) {
331		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
332		goto out_err1;
333	}
334
335	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
336			     (dev_priv->vram_size >> PAGE_SHIFT));
337	if (unlikely(ret != 0)) {
338		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
339		goto out_err2;
340	}
341
342	dev_priv->has_gmr = true;
343	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
344			   dev_priv->max_gmr_ids) != 0) {
345		DRM_INFO("No GMR memory available. "
346			 "Graphics memory resources are very limited.\n");
347		dev_priv->has_gmr = false;
348	}
349
350	dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
351					   dev_priv->mmio_size, DRM_MTRR_WC);
352
353	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
354					 dev_priv->mmio_size);
355
356	if (unlikely(dev_priv->mmio_virt == NULL)) {
357		ret = -ENOMEM;
358		DRM_ERROR("Failed mapping MMIO.\n");
359		goto out_err3;
360	}
361
362	/* Need mmio memory to check for fifo pitchlock cap. */
363	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
364	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
365	    !vmw_fifo_have_pitchlock(dev_priv)) {
366		ret = -ENOSYS;
367		DRM_ERROR("Hardware has no pitchlock\n");
368		goto out_err4;
369	}
370
371	dev_priv->tdev = ttm_object_device_init
372	    (dev_priv->mem_global_ref.object, 12);
373
374	if (unlikely(dev_priv->tdev == NULL)) {
375		DRM_ERROR("Unable to initialize TTM object management.\n");
376		ret = -ENOMEM;
377		goto out_err4;
378	}
379
380	dev->dev_private = dev_priv;
381
382	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
383	dev_priv->stealth = (ret != 0);
384	if (dev_priv->stealth) {
385		/**
386		 * Request at least the mmio PCI resource.
387		 */
388
389		DRM_INFO("It appears vesafb is loaded. "
390			 "Ignore the above error if any.\n");
391		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
392		if (unlikely(ret != 0)) {
393			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
394			goto out_no_device;
395		}
396	}
397	ret = vmw_kms_init(dev_priv);
398	if (unlikely(ret != 0))
399		goto out_no_kms;
400	vmw_overlay_init(dev_priv);
401	if (dev_priv->enable_fb) {
402		ret = vmw_3d_resource_inc(dev_priv);
403		if (unlikely(ret != 0))
404			goto out_no_fifo;
405		vmw_kms_save_vga(dev_priv);
406		vmw_fb_init(dev_priv);
407		DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ?
408			 "Detected device 3D availability.\n" :
409			 "Detected no device 3D availability.\n");
410	} else {
411		DRM_INFO("Delayed 3D detection since we're not "
412			 "running the device in SVGA mode yet.\n");
413	}
414
415	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
416		ret = drm_irq_install(dev);
417		if (unlikely(ret != 0)) {
418			DRM_ERROR("Failed installing irq: %d\n", ret);
419			goto out_no_irq;
420		}
421	}
422
423	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
424	register_pm_notifier(&dev_priv->pm_nb);
425
426	return 0;
427
428out_no_irq:
429	if (dev_priv->enable_fb) {
430		vmw_fb_close(dev_priv);
431		vmw_kms_restore_vga(dev_priv);
432		vmw_3d_resource_dec(dev_priv);
433	}
434out_no_fifo:
435	vmw_overlay_close(dev_priv);
436	vmw_kms_close(dev_priv);
437out_no_kms:
438	if (dev_priv->stealth)
439		pci_release_region(dev->pdev, 2);
440	else
441		pci_release_regions(dev->pdev);
442out_no_device:
443	ttm_object_device_release(&dev_priv->tdev);
444out_err4:
445	iounmap(dev_priv->mmio_virt);
446out_err3:
447	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
448		     dev_priv->mmio_size, DRM_MTRR_WC);
449	if (dev_priv->has_gmr)
450		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
451	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
452out_err2:
453	(void)ttm_bo_device_release(&dev_priv->bdev);
454out_err1:
455	vmw_ttm_global_release(dev_priv);
456out_err0:
457	idr_destroy(&dev_priv->surface_idr);
458	idr_destroy(&dev_priv->context_idr);
459	idr_destroy(&dev_priv->stream_idr);
460	kfree(dev_priv);
461	return ret;
462}
463
464static int vmw_driver_unload(struct drm_device *dev)
465{
466	struct vmw_private *dev_priv = vmw_priv(dev);
467
468	unregister_pm_notifier(&dev_priv->pm_nb);
469
470	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
471		drm_irq_uninstall(dev_priv->dev);
472	if (dev_priv->enable_fb) {
473		vmw_fb_close(dev_priv);
474		vmw_kms_restore_vga(dev_priv);
475		vmw_3d_resource_dec(dev_priv);
476	}
477	vmw_kms_close(dev_priv);
478	vmw_overlay_close(dev_priv);
479	if (dev_priv->stealth)
480		pci_release_region(dev->pdev, 2);
481	else
482		pci_release_regions(dev->pdev);
483
484	ttm_object_device_release(&dev_priv->tdev);
485	iounmap(dev_priv->mmio_virt);
486	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
487		     dev_priv->mmio_size, DRM_MTRR_WC);
488	if (dev_priv->has_gmr)
489		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
490	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
491	(void)ttm_bo_device_release(&dev_priv->bdev);
492	vmw_ttm_global_release(dev_priv);
493	idr_destroy(&dev_priv->surface_idr);
494	idr_destroy(&dev_priv->context_idr);
495	idr_destroy(&dev_priv->stream_idr);
496
497	kfree(dev_priv);
498
499	return 0;
500}
501
502static void vmw_postclose(struct drm_device *dev,
503			 struct drm_file *file_priv)
504{
505	struct vmw_fpriv *vmw_fp;
506
507	vmw_fp = vmw_fpriv(file_priv);
508	ttm_object_file_release(&vmw_fp->tfile);
509	if (vmw_fp->locked_master)
510		drm_master_put(&vmw_fp->locked_master);
511	kfree(vmw_fp);
512}
513
514static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
515{
516	struct vmw_private *dev_priv = vmw_priv(dev);
517	struct vmw_fpriv *vmw_fp;
518	int ret = -ENOMEM;
519
520	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
521	if (unlikely(vmw_fp == NULL))
522		return ret;
523
524	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
525	if (unlikely(vmw_fp->tfile == NULL))
526		goto out_no_tfile;
527
528	file_priv->driver_priv = vmw_fp;
529
530	if (unlikely(dev_priv->bdev.dev_mapping == NULL))
531		dev_priv->bdev.dev_mapping =
532			file_priv->filp->f_path.dentry->d_inode->i_mapping;
533
534	return 0;
535
536out_no_tfile:
537	kfree(vmw_fp);
538	return ret;
539}
540
541static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
542			       unsigned long arg)
543{
544	struct drm_file *file_priv = filp->private_data;
545	struct drm_device *dev = file_priv->minor->dev;
546	unsigned int nr = DRM_IOCTL_NR(cmd);
547
548	/*
549	 * Do extra checking on driver private ioctls.
550	 */
551
552	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
553	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
554		struct drm_ioctl_desc *ioctl =
555		    &vmw_ioctls[nr - DRM_COMMAND_BASE];
556
557		if (unlikely(ioctl->cmd_drv != cmd)) {
558			DRM_ERROR("Invalid command format, ioctl %d\n",
559				  nr - DRM_COMMAND_BASE);
560			return -EINVAL;
561		}
562	}
563
564	return drm_ioctl(filp, cmd, arg);
565}
566
567static int vmw_firstopen(struct drm_device *dev)
568{
569	struct vmw_private *dev_priv = vmw_priv(dev);
570	dev_priv->is_opened = true;
571
572	return 0;
573}
574
575static void vmw_lastclose(struct drm_device *dev)
576{
577	struct vmw_private *dev_priv = vmw_priv(dev);
578	struct drm_crtc *crtc;
579	struct drm_mode_set set;
580	int ret;
581
582	/**
583	 * Do nothing on the lastclose call from drm_unload.
584	 */
585
586	if (!dev_priv->is_opened)
587		return;
588
589	dev_priv->is_opened = false;
590	set.x = 0;
591	set.y = 0;
592	set.fb = NULL;
593	set.mode = NULL;
594	set.connectors = NULL;
595	set.num_connectors = 0;
596
597	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
598		set.crtc = crtc;
599		ret = crtc->funcs->set_config(&set);
600		WARN_ON(ret != 0);
601	}
602
603}
604
605static void vmw_master_init(struct vmw_master *vmaster)
606{
607	ttm_lock_init(&vmaster->lock);
608	INIT_LIST_HEAD(&vmaster->fb_surf);
609	mutex_init(&vmaster->fb_surf_mutex);
610}
611
612static int vmw_master_create(struct drm_device *dev,
613			     struct drm_master *master)
614{
615	struct vmw_master *vmaster;
616
617	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
618	if (unlikely(vmaster == NULL))
619		return -ENOMEM;
620
621	vmw_master_init(vmaster);
622	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
623	master->driver_priv = vmaster;
624
625	return 0;
626}
627
628static void vmw_master_destroy(struct drm_device *dev,
629			       struct drm_master *master)
630{
631	struct vmw_master *vmaster = vmw_master(master);
632
633	master->driver_priv = NULL;
634	kfree(vmaster);
635}
636
637
638static int vmw_master_set(struct drm_device *dev,
639			  struct drm_file *file_priv,
640			  bool from_open)
641{
642	struct vmw_private *dev_priv = vmw_priv(dev);
643	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
644	struct vmw_master *active = dev_priv->active_master;
645	struct vmw_master *vmaster = vmw_master(file_priv->master);
646	int ret = 0;
647
648	if (!dev_priv->enable_fb) {
649		ret = vmw_3d_resource_inc(dev_priv);
650		if (unlikely(ret != 0))
651			return ret;
652		vmw_kms_save_vga(dev_priv);
653		mutex_lock(&dev_priv->hw_mutex);
654		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
655		mutex_unlock(&dev_priv->hw_mutex);
656	}
657
658	if (active) {
659		BUG_ON(active != &dev_priv->fbdev_master);
660		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
661		if (unlikely(ret != 0))
662			goto out_no_active_lock;
663
664		ttm_lock_set_kill(&active->lock, true, SIGTERM);
665		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
666		if (unlikely(ret != 0)) {
667			DRM_ERROR("Unable to clean VRAM on "
668				  "master drop.\n");
669		}
670
671		dev_priv->active_master = NULL;
672	}
673
674	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
675	if (!from_open) {
676		ttm_vt_unlock(&vmaster->lock);
677		BUG_ON(vmw_fp->locked_master != file_priv->master);
678		drm_master_put(&vmw_fp->locked_master);
679	}
680
681	dev_priv->active_master = vmaster;
682
683	return 0;
684
685out_no_active_lock:
686	if (!dev_priv->enable_fb) {
687		mutex_lock(&dev_priv->hw_mutex);
688		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
689		mutex_unlock(&dev_priv->hw_mutex);
690		vmw_kms_restore_vga(dev_priv);
691		vmw_3d_resource_dec(dev_priv);
692	}
693	return ret;
694}
695
696static void vmw_master_drop(struct drm_device *dev,
697			    struct drm_file *file_priv,
698			    bool from_release)
699{
700	struct vmw_private *dev_priv = vmw_priv(dev);
701	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
702	struct vmw_master *vmaster = vmw_master(file_priv->master);
703	int ret;
704
705	/**
706	 * Make sure the master doesn't disappear while we have
707	 * it locked.
708	 */
709
710	vmw_fp->locked_master = drm_master_get(file_priv->master);
711	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
712	vmw_kms_idle_workqueues(vmaster);
713
714	if (unlikely((ret != 0))) {
715		DRM_ERROR("Unable to lock TTM at VT switch.\n");
716		drm_master_put(&vmw_fp->locked_master);
717	}
718
719	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
720
721	if (!dev_priv->enable_fb) {
722		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
723		if (unlikely(ret != 0))
724			DRM_ERROR("Unable to clean VRAM on master drop.\n");
725		mutex_lock(&dev_priv->hw_mutex);
726		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
727		mutex_unlock(&dev_priv->hw_mutex);
728		vmw_kms_restore_vga(dev_priv);
729		vmw_3d_resource_dec(dev_priv);
730	}
731
732	dev_priv->active_master = &dev_priv->fbdev_master;
733	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
734	ttm_vt_unlock(&dev_priv->fbdev_master.lock);
735
736	if (dev_priv->enable_fb)
737		vmw_fb_on(dev_priv);
738}
739
740
741static void vmw_remove(struct pci_dev *pdev)
742{
743	struct drm_device *dev = pci_get_drvdata(pdev);
744
745	drm_put_dev(dev);
746}
747
748static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
749			      void *ptr)
750{
751	struct vmw_private *dev_priv =
752		container_of(nb, struct vmw_private, pm_nb);
753	struct vmw_master *vmaster = dev_priv->active_master;
754
755	switch (val) {
756	case PM_HIBERNATION_PREPARE:
757	case PM_SUSPEND_PREPARE:
758		ttm_suspend_lock(&vmaster->lock);
759
760		/**
761		 * This empties VRAM and unbinds all GMR bindings.
762		 * Buffer contents are moved to swappable memory.
763		 */
764		ttm_bo_swapout_all(&dev_priv->bdev);
765
766		break;
767	case PM_POST_HIBERNATION:
768	case PM_POST_SUSPEND:
769	case PM_POST_RESTORE:
770		ttm_suspend_unlock(&vmaster->lock);
771
772		break;
773	case PM_RESTORE_PREPARE:
774		break;
775	default:
776		break;
777	}
778	return 0;
779}
780
781/**
782 * These might not be needed with the virtual SVGA device.
783 */
784
785static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
786{
787	struct drm_device *dev = pci_get_drvdata(pdev);
788	struct vmw_private *dev_priv = vmw_priv(dev);
789
790	if (dev_priv->num_3d_resources != 0) {
791		DRM_INFO("Can't suspend or hibernate "
792			 "while 3D resources are active.\n");
793		return -EBUSY;
794	}
795
796	pci_save_state(pdev);
797	pci_disable_device(pdev);
798	pci_set_power_state(pdev, PCI_D3hot);
799	return 0;
800}
801
802static int vmw_pci_resume(struct pci_dev *pdev)
803{
804	pci_set_power_state(pdev, PCI_D0);
805	pci_restore_state(pdev);
806	return pci_enable_device(pdev);
807}
808
809static int vmw_pm_suspend(struct device *kdev)
810{
811	struct pci_dev *pdev = to_pci_dev(kdev);
812	struct pm_message dummy;
813
814	dummy.event = 0;
815
816	return vmw_pci_suspend(pdev, dummy);
817}
818
819static int vmw_pm_resume(struct device *kdev)
820{
821	struct pci_dev *pdev = to_pci_dev(kdev);
822
823	return vmw_pci_resume(pdev);
824}
825
826static int vmw_pm_prepare(struct device *kdev)
827{
828	struct pci_dev *pdev = to_pci_dev(kdev);
829	struct drm_device *dev = pci_get_drvdata(pdev);
830	struct vmw_private *dev_priv = vmw_priv(dev);
831
832	/**
833	 * Release 3d reference held by fbdev and potentially
834	 * stop fifo.
835	 */
836	dev_priv->suspended = true;
837	if (dev_priv->enable_fb)
838		vmw_3d_resource_dec(dev_priv);
839
840	if (dev_priv->num_3d_resources != 0) {
841
842		DRM_INFO("Can't suspend or hibernate "
843			 "while 3D resources are active.\n");
844
845		if (dev_priv->enable_fb)
846			vmw_3d_resource_inc(dev_priv);
847		dev_priv->suspended = false;
848		return -EBUSY;
849	}
850
851	return 0;
852}
853
854static void vmw_pm_complete(struct device *kdev)
855{
856	struct pci_dev *pdev = to_pci_dev(kdev);
857	struct drm_device *dev = pci_get_drvdata(pdev);
858	struct vmw_private *dev_priv = vmw_priv(dev);
859
860	/**
861	 * Reclaim 3d reference held by fbdev and potentially
862	 * start fifo.
863	 */
864	if (dev_priv->enable_fb)
865		vmw_3d_resource_inc(dev_priv);
866
867	dev_priv->suspended = false;
868}
869
870static const struct dev_pm_ops vmw_pm_ops = {
871	.prepare = vmw_pm_prepare,
872	.complete = vmw_pm_complete,
873	.suspend = vmw_pm_suspend,
874	.resume = vmw_pm_resume,
875};
876
877static struct drm_driver driver = {
878	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
879	DRIVER_MODESET,
880	.load = vmw_driver_load,
881	.unload = vmw_driver_unload,
882	.firstopen = vmw_firstopen,
883	.lastclose = vmw_lastclose,
884	.irq_preinstall = vmw_irq_preinstall,
885	.irq_postinstall = vmw_irq_postinstall,
886	.irq_uninstall = vmw_irq_uninstall,
887	.irq_handler = vmw_irq_handler,
888	.get_vblank_counter = vmw_get_vblank_counter,
889	.reclaim_buffers_locked = NULL,
890	.ioctls = vmw_ioctls,
891	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
892	.dma_quiescent = NULL,	/*vmw_dma_quiescent, */
893	.master_create = vmw_master_create,
894	.master_destroy = vmw_master_destroy,
895	.master_set = vmw_master_set,
896	.master_drop = vmw_master_drop,
897	.open = vmw_driver_open,
898	.postclose = vmw_postclose,
899	.fops = {
900		 .owner = THIS_MODULE,
901		 .open = drm_open,
902		 .release = drm_release,
903		 .unlocked_ioctl = vmw_unlocked_ioctl,
904		 .mmap = vmw_mmap,
905		 .poll = drm_poll,
906		 .fasync = drm_fasync,
907#if defined(CONFIG_COMPAT)
908		 .compat_ioctl = drm_compat_ioctl,
909#endif
910		 .llseek = noop_llseek,
911	},
912	.name = VMWGFX_DRIVER_NAME,
913	.desc = VMWGFX_DRIVER_DESC,
914	.date = VMWGFX_DRIVER_DATE,
915	.major = VMWGFX_DRIVER_MAJOR,
916	.minor = VMWGFX_DRIVER_MINOR,
917	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
918};
919
920static struct pci_driver vmw_pci_driver = {
921	.name = VMWGFX_DRIVER_NAME,
922	.id_table = vmw_pci_id_list,
923	.probe = vmw_probe,
924	.remove = vmw_remove,
925	.driver = {
926		.pm = &vmw_pm_ops
927	}
928};
929
930static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
931{
932	return drm_get_pci_dev(pdev, ent, &driver);
933}
934
935static int __init vmwgfx_init(void)
936{
937	int ret;
938	ret = drm_pci_init(&driver, &vmw_pci_driver);
939	if (ret)
940		DRM_ERROR("Failed initializing DRM.\n");
941	return ret;
942}
943
944static void __exit vmwgfx_exit(void)
945{
946	drm_pci_exit(&driver, &vmw_pci_driver);
947}
948
949module_init(vmwgfx_init);
950module_exit(vmwgfx_exit);
951
952MODULE_AUTHOR("VMware Inc. and others");
953MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
954MODULE_LICENSE("GPL and additional rights");
955MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
956	       __stringify(VMWGFX_DRIVER_MINOR) "."
957	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
958	       "0");
v5.9
   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include <linux/console.h>
  29#include <linux/dma-mapping.h>
  30#include <linux/module.h>
  31#include <linux/pci.h>
  32#include <linux/mem_encrypt.h>
  33
  34#include <drm/drm_drv.h>
  35#include <drm/drm_ioctl.h>
  36#include <drm/drm_sysfs.h>
  37#include <drm/ttm/ttm_bo_driver.h>
  38#include <drm/ttm/ttm_module.h>
  39#include <drm/ttm/ttm_placement.h>
  40
  41#include "ttm_object.h"
  42#include "vmwgfx_binding.h"
  43#include "vmwgfx_drv.h"
  44
  45#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
  46#define VMWGFX_CHIP_SVGAII 0
  47#define VMW_FB_RESERVATION 0
  48
  49#define VMW_MIN_INITIAL_WIDTH 800
  50#define VMW_MIN_INITIAL_HEIGHT 600
  51
  52#ifndef VMWGFX_GIT_VERSION
  53#define VMWGFX_GIT_VERSION "Unknown"
  54#endif
  55
  56#define VMWGFX_REPO "In Tree"
  57
  58#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
  59
  60
  61/**
  62 * Fully encoded drm commands. Might move to vmw_drm.h
  63 */
  64
  65#define DRM_IOCTL_VMW_GET_PARAM					\
  66	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
  67		 struct drm_vmw_getparam_arg)
  68#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
  69	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
  70		union drm_vmw_alloc_dmabuf_arg)
  71#define DRM_IOCTL_VMW_UNREF_DMABUF				\
  72	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
  73		struct drm_vmw_unref_dmabuf_arg)
  74#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
  75	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
  76		 struct drm_vmw_cursor_bypass_arg)
  77
  78#define DRM_IOCTL_VMW_CONTROL_STREAM				\
  79	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
  80		 struct drm_vmw_control_stream_arg)
  81#define DRM_IOCTL_VMW_CLAIM_STREAM				\
  82	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
  83		 struct drm_vmw_stream_arg)
  84#define DRM_IOCTL_VMW_UNREF_STREAM				\
  85	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
  86		 struct drm_vmw_stream_arg)
  87
  88#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
  89	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
  90		struct drm_vmw_context_arg)
  91#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
  92	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
  93		struct drm_vmw_context_arg)
  94#define DRM_IOCTL_VMW_CREATE_SURFACE				\
  95	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
  96		 union drm_vmw_surface_create_arg)
  97#define DRM_IOCTL_VMW_UNREF_SURFACE				\
  98	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
  99		 struct drm_vmw_surface_arg)
 100#define DRM_IOCTL_VMW_REF_SURFACE				\
 101	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
 102		 union drm_vmw_surface_reference_arg)
 103#define DRM_IOCTL_VMW_EXECBUF					\
 104	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
 105		struct drm_vmw_execbuf_arg)
 106#define DRM_IOCTL_VMW_GET_3D_CAP				\
 107	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
 108		 struct drm_vmw_get_3d_cap_arg)
 109#define DRM_IOCTL_VMW_FENCE_WAIT				\
 110	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
 111		 struct drm_vmw_fence_wait_arg)
 112#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
 113	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
 114		 struct drm_vmw_fence_signaled_arg)
 115#define DRM_IOCTL_VMW_FENCE_UNREF				\
 116	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
 117		 struct drm_vmw_fence_arg)
 118#define DRM_IOCTL_VMW_FENCE_EVENT				\
 119	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
 120		 struct drm_vmw_fence_event_arg)
 121#define DRM_IOCTL_VMW_PRESENT					\
 122	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
 123		 struct drm_vmw_present_arg)
 124#define DRM_IOCTL_VMW_PRESENT_READBACK				\
 125	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
 126		 struct drm_vmw_present_readback_arg)
 127#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
 128	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
 129		 struct drm_vmw_update_layout_arg)
 130#define DRM_IOCTL_VMW_CREATE_SHADER				\
 131	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
 132		 struct drm_vmw_shader_create_arg)
 133#define DRM_IOCTL_VMW_UNREF_SHADER				\
 134	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
 135		 struct drm_vmw_shader_arg)
 136#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
 137	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
 138		 union drm_vmw_gb_surface_create_arg)
 139#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
 140	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
 141		 union drm_vmw_gb_surface_reference_arg)
 142#define DRM_IOCTL_VMW_SYNCCPU					\
 143	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
 144		 struct drm_vmw_synccpu_arg)
 145#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			\
 146	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,	\
 147		struct drm_vmw_context_arg)
 148#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT				\
 149	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT,	\
 150		union drm_vmw_gb_surface_create_ext_arg)
 151#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT				\
 152	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT,		\
 153		union drm_vmw_gb_surface_reference_ext_arg)
 154#define DRM_IOCTL_VMW_MSG						\
 155	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG,			\
 156		struct drm_vmw_msg_arg)
 157
 158/**
 159 * The core DRM version of this macro doesn't account for
 160 * DRM_COMMAND_BASE.
 161 */
 162
 163#define VMW_IOCTL_DEF(ioctl, func, flags) \
 164  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
 165
 166/**
 167 * Ioctl definitions.
 168 */
 169
 170static const struct drm_ioctl_desc vmw_ioctls[] = {
 171	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
 172		      DRM_RENDER_ALLOW),
 173	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
 174		      DRM_RENDER_ALLOW),
 175	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
 176		      DRM_RENDER_ALLOW),
 177	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
 178		      vmw_kms_cursor_bypass_ioctl,
 179		      DRM_MASTER),
 180
 181	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
 182		      DRM_MASTER),
 183	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
 184		      DRM_MASTER),
 185	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
 186		      DRM_MASTER),
 187
 188	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
 189		      DRM_RENDER_ALLOW),
 190	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
 191		      DRM_RENDER_ALLOW),
 192	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
 193		      DRM_RENDER_ALLOW),
 194	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
 195		      DRM_RENDER_ALLOW),
 196	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
 197		      DRM_RENDER_ALLOW),
 198	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
 199		      DRM_RENDER_ALLOW),
 200	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
 201		      DRM_RENDER_ALLOW),
 202	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
 203		      vmw_fence_obj_signaled_ioctl,
 204		      DRM_RENDER_ALLOW),
 205	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
 206		      DRM_RENDER_ALLOW),
 207	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
 208		      DRM_RENDER_ALLOW),
 209	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
 210		      DRM_RENDER_ALLOW),
 211
 212	/* these allow direct access to the framebuffers; mark them as master only */
 213	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
 214		      DRM_MASTER | DRM_AUTH),
 215	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
 216		      vmw_present_readback_ioctl,
 217		      DRM_MASTER | DRM_AUTH),
 218	/*
 219	 * The permissions of the below ioctl are overridden in
 220	 * vmw_generic_ioctl(). We require either
 221	 * DRM_MASTER or capable(CAP_SYS_ADMIN).
 222	 */
 223	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
 224		      vmw_kms_update_layout_ioctl,
 225		      DRM_RENDER_ALLOW),
 226	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
 227		      vmw_shader_define_ioctl,
 228		      DRM_RENDER_ALLOW),
 229	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
 230		      vmw_shader_destroy_ioctl,
 231		      DRM_RENDER_ALLOW),
 232	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
 233		      vmw_gb_surface_define_ioctl,
 234		      DRM_RENDER_ALLOW),
 235	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
 236		      vmw_gb_surface_reference_ioctl,
 237		      DRM_RENDER_ALLOW),
 238	VMW_IOCTL_DEF(VMW_SYNCCPU,
 239		      vmw_user_bo_synccpu_ioctl,
 240		      DRM_RENDER_ALLOW),
 241	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
 242		      vmw_extended_context_define_ioctl,
 243		      DRM_RENDER_ALLOW),
 244	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
 245		      vmw_gb_surface_define_ext_ioctl,
 246		      DRM_RENDER_ALLOW),
 247	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
 248		      vmw_gb_surface_reference_ext_ioctl,
 249		      DRM_RENDER_ALLOW),
 250	VMW_IOCTL_DEF(VMW_MSG,
 251		      vmw_msg_ioctl,
 252		      DRM_RENDER_ALLOW),
 253};
 254
 255static const struct pci_device_id vmw_pci_id_list[] = {
 256	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
 257	{0, 0, 0}
 258};
 259MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
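/*
 * Added note (not in the original file): MODULE_DEVICE_TABLE exports the ID
 * list above in the module's alias information, so udev/modprobe can load
 * vmwgfx automatically when a matching PCI device (15ad:0405) is found.
 */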
 260
 261static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
 262static int vmw_force_iommu;
 263static int vmw_restrict_iommu;
 264static int vmw_force_coherent;
 265static int vmw_restrict_dma_mask;
 266static int vmw_assume_16bpp;
 267
 268static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
 269static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 270			      void *ptr);
 271
 272MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
 273module_param_named(enable_fbdev, enable_fbdev, int, 0600);
 274MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
 275module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
 276MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
 277module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
 278MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
 279module_param_named(force_coherent, vmw_force_coherent, int, 0600);
 280MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
 281module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
 282MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
 283module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
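/*
 * Added note (not in the original file), summarizing how these knobs are
 * consumed further down in this file: force_coherent selects
 * vmw_dma_alloc_coherent and restrict_iommu selects vmw_dma_map_bind in
 * vmw_dma_select_mode(), otherwise vmw_dma_map_populate is used, and
 * restrict_dma_mask makes vmw_dma_masks() lower the DMA mask to 44 bits
 * even on 64-bit kernels.
 */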
 284
 285
 286static void vmw_print_capabilities2(uint32_t capabilities2)
 287{
 288	DRM_INFO("Capabilities2:\n");
 289	if (capabilities2 & SVGA_CAP2_GROW_OTABLE)
 290		DRM_INFO("  Grow oTable.\n");
 291	if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
 292		DRM_INFO("  IntraSurface copy.\n");
 293	if (capabilities2 & SVGA_CAP2_DX3)
 294		DRM_INFO("  DX3.\n");
 295}
 296
 297static void vmw_print_capabilities(uint32_t capabilities)
 298{
 299	DRM_INFO("Capabilities:\n");
 300	if (capabilities & SVGA_CAP_RECT_COPY)
 301		DRM_INFO("  Rect copy.\n");
 302	if (capabilities & SVGA_CAP_CURSOR)
 303		DRM_INFO("  Cursor.\n");
 304	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
 305		DRM_INFO("  Cursor bypass.\n");
 306	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
 307		DRM_INFO("  Cursor bypass 2.\n");
 308	if (capabilities & SVGA_CAP_8BIT_EMULATION)
 309		DRM_INFO("  8bit emulation.\n");
 310	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
 311		DRM_INFO("  Alpha cursor.\n");
 312	if (capabilities & SVGA_CAP_3D)
 313		DRM_INFO("  3D.\n");
 314	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
 315		DRM_INFO("  Extended Fifo.\n");
 316	if (capabilities & SVGA_CAP_MULTIMON)
 317		DRM_INFO("  Multimon.\n");
 318	if (capabilities & SVGA_CAP_PITCHLOCK)
 319		DRM_INFO("  Pitchlock.\n");
 320	if (capabilities & SVGA_CAP_IRQMASK)
 321		DRM_INFO("  Irq mask.\n");
 322	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
 323		DRM_INFO("  Display Topology.\n");
 324	if (capabilities & SVGA_CAP_GMR)
 325		DRM_INFO("  GMR.\n");
 326	if (capabilities & SVGA_CAP_TRACES)
 327		DRM_INFO("  Traces.\n");
 328	if (capabilities & SVGA_CAP_GMR2)
 329		DRM_INFO("  GMR2.\n");
 330	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
 331		DRM_INFO("  Screen Object 2.\n");
 332	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
 333		DRM_INFO("  Command Buffers.\n");
 334	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
 335		DRM_INFO("  Command Buffers 2.\n");
 336	if (capabilities & SVGA_CAP_GBOBJECTS)
 337		DRM_INFO("  Guest Backed Resources.\n");
 338	if (capabilities & SVGA_CAP_DX)
 339		DRM_INFO("  DX Features.\n");
 340	if (capabilities & SVGA_CAP_HP_CMD_QUEUE)
 341		DRM_INFO("  HP Command Queue.\n");
 342}
 343
 344/**
 345 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 346 *
 347 * @dev_priv: A device private structure.
 348 *
 349 * This function creates a small buffer object that holds the query
 350 * result for dummy queries emitted as query barriers.
 351 * The function will then map the first page and initialize a pending
 352 * occlusion query result structure. Finally, it will unmap the buffer.
 353 * No interruptible waits are done within this function.
 354 *
 355 * Returns an error if bo creation or initialization fails.
 356 */
 357static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 358{
 359	int ret;
 360	struct vmw_buffer_object *vbo;
 361	struct ttm_bo_kmap_obj map;
 362	volatile SVGA3dQueryResult *result;
 363	bool dummy;
 364
 365	/*
 366	 * Create the vbo as pinned, so that a tryreserve will
 367	 * immediately succeed. This is because we're the only
 368	 * user of the bo currently.
 369	 */
 370	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
 371	if (!vbo)
 372		return -ENOMEM;
 373
 374	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
 375			  &vmw_sys_ne_placement, false,
 376			  &vmw_bo_bo_free);
 377	if (unlikely(ret != 0))
 378		return ret;
 379
 380	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
 381	BUG_ON(ret != 0);
 382	vmw_bo_pin_reserved(vbo, true);
 383
 384	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
 385	if (likely(ret == 0)) {
 386		result = ttm_kmap_obj_virtual(&map, &dummy);
 387		result->totalSize = sizeof(*result);
 388		result->state = SVGA3D_QUERYSTATE_PENDING;
 389		result->result32 = 0xff;
 390		ttm_bo_kunmap(&map);
 391	}
 392	vmw_bo_pin_reserved(vbo, false);
 393	ttm_bo_unreserve(&vbo->base);
 394
 395	if (unlikely(ret != 0)) {
 396		DRM_ERROR("Dummy query buffer map failed.\n");
 397		vmw_bo_unreference(&vbo);
 398	} else
 399		dev_priv->dummy_query_bo = vbo;
 400
 401	return ret;
 402}
 403
 404/**
 405 * vmw_request_device_late - Perform late device setup
 406 *
 407 * @dev_priv: Pointer to device private.
 408 *
 409 * This function performs setup of otables and enables large command
 410 * buffer submission. These tasks are split out to a separate function
 411 * because it reverts vmw_release_device_early and is intended to be used
 412 * by an error path in the hibernation code.
 413 */
 414static int vmw_request_device_late(struct vmw_private *dev_priv)
 415{
 416	int ret;
 417
 418	if (dev_priv->has_mob) {
 419		ret = vmw_otables_setup(dev_priv);
 420		if (unlikely(ret != 0)) {
 421			DRM_ERROR("Unable to initialize "
 422				  "guest Memory OBjects.\n");
 423			return ret;
 424		}
 425	}
 426
 427	if (dev_priv->cman) {
 428		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
 429					       256*4096, 2*4096);
 430		if (ret) {
 431			struct vmw_cmdbuf_man *man = dev_priv->cman;
 432
 433			dev_priv->cman = NULL;
 434			vmw_cmdbuf_man_destroy(man);
 435		}
 436	}
 437
 438	return 0;
 439}
 440
 441static int vmw_request_device(struct vmw_private *dev_priv)
 442{
 443	int ret;
 444
 445	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
 446	if (unlikely(ret != 0)) {
 447		DRM_ERROR("Unable to initialize FIFO.\n");
 448		return ret;
 449	}
 450	vmw_fence_fifo_up(dev_priv->fman);
 451	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
 452	if (IS_ERR(dev_priv->cman)) {
 453		dev_priv->cman = NULL;
 454		dev_priv->sm_type = VMW_SM_LEGACY;
 455	}
 456
 457	ret = vmw_request_device_late(dev_priv);
 458	if (ret)
 459		goto out_no_mob;
 460
 461	ret = vmw_dummy_query_bo_create(dev_priv);
 462	if (unlikely(ret != 0))
 463		goto out_no_query_bo;
 464
 465	return 0;
 466
 467out_no_query_bo:
 468	if (dev_priv->cman)
 469		vmw_cmdbuf_remove_pool(dev_priv->cman);
 470	if (dev_priv->has_mob) {
 471		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
 472		vmw_otables_takedown(dev_priv);
 473	}
 474	if (dev_priv->cman)
 475		vmw_cmdbuf_man_destroy(dev_priv->cman);
 476out_no_mob:
 477	vmw_fence_fifo_down(dev_priv->fman);
 478	vmw_fifo_release(dev_priv, &dev_priv->fifo);
 479	return ret;
 480}
 481
 482/**
 483 * vmw_release_device_early - Early part of fifo takedown.
 484 *
 485 * @dev_priv: Pointer to device private struct.
 486 *
 487 * This is the first part of command submission takedown, to be called before
 488 * buffer management is taken down.
 489 */
 490static void vmw_release_device_early(struct vmw_private *dev_priv)
 491{
 492	/*
 493	 * Previous destructions should've released
 494	 * the pinned bo.
 495	 */
 496
 497	BUG_ON(dev_priv->pinned_bo != NULL);
 498
 499	vmw_bo_unreference(&dev_priv->dummy_query_bo);
 500	if (dev_priv->cman)
 501		vmw_cmdbuf_remove_pool(dev_priv->cman);
 502
 503	if (dev_priv->has_mob) {
 504		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
 505		vmw_otables_takedown(dev_priv);
 506	}
 507}
 508
 509/**
 510 * vmw_release_device_late - Late part of fifo takedown.
 511 *
 512 * @dev_priv: Pointer to device private struct.
 513 *
 514 * This is the last part of the command submission takedown, to be called when
 515 * command submission is no longer needed. It may wait on pending fences.
 516 */
 517static void vmw_release_device_late(struct vmw_private *dev_priv)
 518{
 519	vmw_fence_fifo_down(dev_priv->fman);
 520	if (dev_priv->cman)
 521		vmw_cmdbuf_man_destroy(dev_priv->cman);
 522
 523	vmw_fifo_release(dev_priv, &dev_priv->fifo);
 524}
 525
 526/**
 527 * Sets the initial_[width|height] fields on the given vmw_private.
 528 *
 529 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 530 * clamping the value to fb_max_[width|height] fields and the
 531 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 532 * If the values appear to be invalid, set them to
 533 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 534 */
 535static void vmw_get_initial_size(struct vmw_private *dev_priv)
 536{
 537	uint32_t width;
 538	uint32_t height;
 539
 540	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
 541	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);
 542
 543	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
 544	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);
 545
 546	if (width > dev_priv->fb_max_width ||
 547	    height > dev_priv->fb_max_height) {
 548
 549		/*
 550		 * This is a host error and shouldn't occur.
 551		 */
 552
 553		width = VMW_MIN_INITIAL_WIDTH;
 554		height = VMW_MIN_INITIAL_HEIGHT;
 555	}
 556
 557	dev_priv->initial_width = width;
 558	dev_priv->initial_height = height;
 559}
 560
 561/**
 562 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 563 * system.
 564 *
 565 * @dev_priv: Pointer to a struct vmw_private
 566 *
 567 * This function tries to determine what actions need to be taken by the
 568 * driver to make system pages visible to the device.
 569 * If this function decides that DMA is not possible, it returns -EINVAL.
 570 * The driver may then try to disable features of the device that require
 571 * DMA.
 572 */
 573static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 574{
 575	static const char *names[vmw_dma_map_max] = {
 576		[vmw_dma_phys] = "Using physical TTM page addresses.",
 577		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
 578		[vmw_dma_map_populate] = "Caching DMA mappings.",
 579		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
 580
 581	/* TTM currently doesn't fully support SEV encryption. */
 582	if (mem_encrypt_active())
 583		return -EINVAL;
 584
 585	if (vmw_force_coherent)
 586		dev_priv->map_mode = vmw_dma_alloc_coherent;
 587	else if (vmw_restrict_iommu)
 588		dev_priv->map_mode = vmw_dma_map_bind;
 589	else
 590		dev_priv->map_mode = vmw_dma_map_populate;
 591
 592        if (!IS_ENABLED(CONFIG_DRM_TTM_DMA_PAGE_POOL) &&
 593	    (dev_priv->map_mode == vmw_dma_alloc_coherent))
 594		return -EINVAL;
 595
 596	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
 597	return 0;
 598}
 599
 600/**
 601 * vmw_dma_masks - set required page- and dma masks
 602 *
 603 * @dev_priv: Pointer to a struct vmw_private
 604 *
 605 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 606 * restriction also for 64-bit systems.
 607 */
 608static int vmw_dma_masks(struct vmw_private *dev_priv)
 609{
 610	struct drm_device *dev = dev_priv->dev;
 611	int ret = 0;
 612
 613	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
 614	if (dev_priv->map_mode != vmw_dma_phys &&
 615	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
 616		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
 617		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
 618	}
 619
 620	return ret;
 621}
 622
 623static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 624{
 625	struct vmw_private *dev_priv;
 626	int ret;
 627	uint32_t svga_id;
 628	enum vmw_res_type i;
 629	bool refuse_dma = false;
 630	char host_log[100] = {0};
 631
 632	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
 633	if (unlikely(!dev_priv)) {
 634		DRM_ERROR("Failed allocating a device private struct.\n");
 635		return -ENOMEM;
 636	}
 637
 638	pci_set_master(dev->pdev);
 639
 640	dev_priv->dev = dev;
 641	dev_priv->vmw_chipset = chipset;
 642	dev_priv->last_read_seqno = (uint32_t) -100;
 643	mutex_init(&dev_priv->cmdbuf_mutex);
 644	mutex_init(&dev_priv->release_mutex);
 645	mutex_init(&dev_priv->binding_mutex);
 646	mutex_init(&dev_priv->global_kms_state_mutex);
 647	ttm_lock_init(&dev_priv->reservation_sem);
 648	spin_lock_init(&dev_priv->resource_lock);
 649	spin_lock_init(&dev_priv->hw_lock);
 650	spin_lock_init(&dev_priv->waiter_lock);
 651	spin_lock_init(&dev_priv->cap_lock);
 652	spin_lock_init(&dev_priv->svga_lock);
 653	spin_lock_init(&dev_priv->cursor_lock);
 654
 655	for (i = vmw_res_context; i < vmw_res_max; ++i) {
 656		idr_init(&dev_priv->res_idr[i]);
 657		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
 658	}
 659
 660	init_waitqueue_head(&dev_priv->fence_queue);
 661	init_waitqueue_head(&dev_priv->fifo_queue);
 662	dev_priv->fence_queue_waiters = 0;
 663	dev_priv->fifo_queue_waiters = 0;
 664
 665	dev_priv->used_memory_size = 0;
 666
 667	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
 668	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
 669	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
 670
 671	dev_priv->assume_16bpp = !!vmw_assume_16bpp;
 672
 673	dev_priv->enable_fb = enable_fbdev;
 674
 675	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
 676	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
 677	if (svga_id != SVGA_ID_2) {
 678		ret = -ENOSYS;
 679		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
 680		goto out_err0;
 681	}
 682
 683	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
 684
 685	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) {
 686		dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
 687	}
 688
 689
 690	ret = vmw_dma_select_mode(dev_priv);
 691	if (unlikely(ret != 0)) {
 692		DRM_INFO("Restricting capabilities since DMA not available.\n");
 693		refuse_dma = true;
 694		if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
 695			DRM_INFO("Disabling 3D acceleration.\n");
 696	}
 697
 698	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
 699	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
 700	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
 701	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
 702
 703	vmw_get_initial_size(dev_priv);
 704
 705	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
 706		dev_priv->max_gmr_ids =
 707			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
 708		dev_priv->max_gmr_pages =
 709			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
 710		dev_priv->memory_size =
 711			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
 712		dev_priv->memory_size -= dev_priv->vram_size;
 713	} else {
 714		/*
 715		 * An arbitrary limit of 512MiB on surface
 716		 * memory. But all HWV8 hardware supports GMR2.
 717		 */
 718		dev_priv->memory_size = 512*1024*1024;
 719	}
 720	dev_priv->max_mob_pages = 0;
 721	dev_priv->max_mob_size = 0;
 722	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
 723		uint64_t mem_size;
 724
 725		if (dev_priv->capabilities2 & SVGA_CAP2_GB_MEMSIZE_2)
 726			mem_size = vmw_read(dev_priv,
 727					    SVGA_REG_GBOBJECT_MEM_SIZE_KB);
 728		else
 729			mem_size =
 730				vmw_read(dev_priv,
 731					 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
 732
 733		/*
 734		 * Workaround for low memory 2D VMs to compensate for the
 735		 * allocation taken by fbdev
 736		 */
 737		if (!(dev_priv->capabilities & SVGA_CAP_3D))
 738			mem_size *= 3;
 739
 740		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
 741		dev_priv->prim_bb_mem =
 742			vmw_read(dev_priv,
 743				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
 744		dev_priv->max_mob_size =
 745			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
 746		dev_priv->stdu_max_width =
 747			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
 748		dev_priv->stdu_max_height =
 749			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);
 750
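		/* SVGA_REG_DEV_CAP is an index/data pair: write a SVGA3D_DEVCAP_* index, then read back its value. */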
 751		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
 752			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
 753		dev_priv->texture_max_width = vmw_read(dev_priv,
 754						       SVGA_REG_DEV_CAP);
 755		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
 756			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
 757		dev_priv->texture_max_height = vmw_read(dev_priv,
 758							SVGA_REG_DEV_CAP);
 759	} else {
 760		dev_priv->texture_max_width = 8192;
 761		dev_priv->texture_max_height = 8192;
 762		dev_priv->prim_bb_mem = dev_priv->vram_size;
 763	}
 764
 765	vmw_print_capabilities(dev_priv->capabilities);
 766	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
 767		vmw_print_capabilities2(dev_priv->capabilities2);
 768
 769	ret = vmw_dma_masks(dev_priv);
 770	if (unlikely(ret != 0))
 771		goto out_err0;
 772
 773	dma_set_max_seg_size(dev->dev, min_t(unsigned int, U32_MAX & PAGE_MASK,
 774					     SCATTERLIST_MAX_SEGMENT));
 775
 776	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
 777		DRM_INFO("Max GMR ids is %u\n",
 778			 (unsigned)dev_priv->max_gmr_ids);
 779		DRM_INFO("Max number of GMR pages is %u\n",
 780			 (unsigned)dev_priv->max_gmr_pages);
 781		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
 782			 (unsigned)dev_priv->memory_size / 1024);
 783	}
 784	DRM_INFO("Maximum display memory size is %u kiB\n",
 785		 dev_priv->prim_bb_mem / 1024);
 786	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
 787		 dev_priv->vram_start, dev_priv->vram_size / 1024);
 788	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
 789		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);
 790
 791	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
 792				       dev_priv->mmio_size, MEMREMAP_WB);
 793
 794	if (unlikely(dev_priv->mmio_virt == NULL)) {
 795		ret = -ENOMEM;
 796		DRM_ERROR("Failed mapping MMIO.\n");
 797		goto out_err0;
 798	}
 799
 800	/* Need mmio memory to check for fifo pitchlock cap. */
 801	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
 802	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
 803	    !vmw_fifo_have_pitchlock(dev_priv)) {
 804		ret = -ENOSYS;
 805		DRM_ERROR("Hardware has no pitchlock\n");
 806		goto out_err4;
 807	}
 808
 809	dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
 810						&vmw_prime_dmabuf_ops);
 811
 812	if (unlikely(dev_priv->tdev == NULL)) {
 813		DRM_ERROR("Unable to initialize TTM object management.\n");
 814		ret = -ENOMEM;
 815		goto out_err4;
 816	}
 817
 818	dev->dev_private = dev_priv;
 819
 820	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
 821	dev_priv->stealth = (ret != 0);
 822	if (dev_priv->stealth) {
 823		/**
 824		 * Request at least the mmio PCI resource.
 825		 */
 826
 827		DRM_INFO("It appears like vesafb is loaded. "
 828			 "Ignore above error if any.\n");
 829		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
 830		if (unlikely(ret != 0)) {
 831			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
 832			goto out_no_device;
 833		}
 834	}
 835
 836	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
 837		ret = vmw_irq_install(dev, dev->pdev->irq);
 838		if (ret != 0) {
 839			DRM_ERROR("Failed installing irq: %d\n", ret);
 840			goto out_no_irq;
 841		}
 842	}
 843
 844	dev_priv->fman = vmw_fence_manager_init(dev_priv);
 845	if (unlikely(dev_priv->fman == NULL)) {
 846		ret = -ENOMEM;
 847		goto out_no_fman;
 848	}
 849
 850	drm_vma_offset_manager_init(&dev_priv->vma_manager,
 851				    DRM_FILE_PAGE_OFFSET_START,
 852				    DRM_FILE_PAGE_OFFSET_SIZE);
 853	ret = ttm_bo_device_init(&dev_priv->bdev,
 854				 &vmw_bo_driver,
 855				 dev->anon_inode->i_mapping,
 856				 &dev_priv->vma_manager,
 857				 false);
 858	if (unlikely(ret != 0)) {
 859		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
 860		goto out_no_bdev;
 861	}
 862
 863	/*
 864	 * Enable VRAM, but initially don't use it until SVGA is enabled and
 865	 * unhidden.
 866	 */
 867	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
 868			     (dev_priv->vram_size >> PAGE_SHIFT));
 869	if (unlikely(ret != 0)) {
 870		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
 871		goto out_no_vram;
 872	}
 873	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
 874
 875	dev_priv->has_gmr = true;
 876	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
 877	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
 878					 VMW_PL_GMR) != 0) {
 879		DRM_INFO("No GMR memory available. "
 880			 "Graphics memory resources are very limited.\n");
 881		dev_priv->has_gmr = false;
 882	}
 883
 884	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
 885		dev_priv->has_mob = true;
 886		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
 887				   VMW_PL_MOB) != 0) {
 888			DRM_INFO("No MOB memory available. "
 889				 "3D will be disabled.\n");
 890			dev_priv->has_mob = false;
 891		}
 892	}
 893
 894	if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
 895		spin_lock(&dev_priv->cap_lock);
 896		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
 897		if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
 898			dev_priv->sm_type = VMW_SM_4;
 899		spin_unlock(&dev_priv->cap_lock);
 900	}
 901
 902	vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
 903
 904	/* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
 905	if (has_sm4_context(dev_priv) &&
 906	    (dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
 907		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM41);
 908
 909		if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
 910			dev_priv->sm_type = VMW_SM_4_1;
 911
 912		if (has_sm4_1_context(dev_priv) &&
 913		    (dev_priv->capabilities2 & SVGA_CAP2_DX3)) {
 914			vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM5);
 915			if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
 916				dev_priv->sm_type = VMW_SM_5;
 917		}
 918	}
 919
 920	ret = vmw_kms_init(dev_priv);
 921	if (unlikely(ret != 0))
 922		goto out_no_kms;
 923	vmw_overlay_init(dev_priv);
 924
 925	ret = vmw_request_device(dev_priv);
 926	if (ret)
 927		goto out_no_fifo;
 928
 929	DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
 930		 ? "yes." : "no.");
 931	if (dev_priv->sm_type == VMW_SM_5)
 932		DRM_INFO("SM5 support available.\n");
 933	if (dev_priv->sm_type == VMW_SM_4_1)
 934		DRM_INFO("SM4_1 support available.\n");
 935	if (dev_priv->sm_type == VMW_SM_4)
 936		DRM_INFO("SM4 support available.\n");
 937
 938	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
 939		VMWGFX_REPO, VMWGFX_GIT_VERSION);
 940	vmw_host_log(host_log);
 941
 942	memset(host_log, 0, sizeof(host_log));
 943	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
 944		VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
 945		VMWGFX_DRIVER_PATCHLEVEL);
 946	vmw_host_log(host_log);
 947
 948	if (dev_priv->enable_fb) {
 949		vmw_fifo_resource_inc(dev_priv);
 950		vmw_svga_enable(dev_priv);
 951		vmw_fb_init(dev_priv);
 952	}
 953
 954	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
 955	register_pm_notifier(&dev_priv->pm_nb);
 956
 957	return 0;
 958
 959out_no_fifo:
 960	vmw_overlay_close(dev_priv);
 961	vmw_kms_close(dev_priv);
 962out_no_kms:
 963	if (dev_priv->has_mob)
 964		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
 965	if (dev_priv->has_gmr)
 966		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
 967	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 968out_no_vram:
 969	(void)ttm_bo_device_release(&dev_priv->bdev);
 970out_no_bdev:
 971	vmw_fence_manager_takedown(dev_priv->fman);
 972out_no_fman:
 973	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
 974		vmw_irq_uninstall(dev_priv->dev);
 975out_no_irq:
 976	if (dev_priv->stealth)
 977		pci_release_region(dev->pdev, 2);
 978	else
 979		pci_release_regions(dev->pdev);
 980out_no_device:
 981	ttm_object_device_release(&dev_priv->tdev);
 982out_err4:
 983	memunmap(dev_priv->mmio_virt);
 984out_err0:
 985	for (i = vmw_res_context; i < vmw_res_max; ++i)
 986		idr_destroy(&dev_priv->res_idr[i]);
 987
 988	if (dev_priv->ctx.staged_bindings)
 989		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
 990	kfree(dev_priv);
 991	return ret;
 992}
 993
 994static void vmw_driver_unload(struct drm_device *dev)
 995{
 996	struct vmw_private *dev_priv = vmw_priv(dev);
 997	enum vmw_res_type i;
 998
 999	unregister_pm_notifier(&dev_priv->pm_nb);
1000
1001	if (dev_priv->ctx.res_ht_initialized)
1002		drm_ht_remove(&dev_priv->ctx.res_ht);
1003	vfree(dev_priv->ctx.cmd_bounce);
1004	if (dev_priv->enable_fb) {
1005		vmw_fb_off(dev_priv);
1006		vmw_fb_close(dev_priv);
1007		vmw_fifo_resource_dec(dev_priv);
1008		vmw_svga_disable(dev_priv);
1009	}
1010
1011	vmw_kms_close(dev_priv);
1012	vmw_overlay_close(dev_priv);
1013
1014	if (dev_priv->has_gmr)
1015		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
1016	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
1017
1018	vmw_release_device_early(dev_priv);
1019	if (dev_priv->has_mob)
1020		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
1021	(void) ttm_bo_device_release(&dev_priv->bdev);
1022	drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
1023	vmw_release_device_late(dev_priv);
1024	vmw_fence_manager_takedown(dev_priv->fman);
1025	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
1026		vmw_irq_uninstall(dev_priv->dev);
1027	if (dev_priv->stealth)
1028		pci_release_region(dev->pdev, 2);
1029	else
1030		pci_release_regions(dev->pdev);
1031
1032	ttm_object_device_release(&dev_priv->tdev);
1033	memunmap(dev_priv->mmio_virt);
1034	if (dev_priv->ctx.staged_bindings)
1035		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
1036
1037	for (i = vmw_res_context; i < vmw_res_max; ++i)
1038		idr_destroy(&dev_priv->res_idr[i]);
1039
1040	kfree(dev_priv);
1041}
1042
1043static void vmw_postclose(struct drm_device *dev,
1044			 struct drm_file *file_priv)
1045{
1046	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
1047
1048	ttm_object_file_release(&vmw_fp->tfile);
1049	kfree(vmw_fp);
1050}
1051
1052static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
1053{
1054	struct vmw_private *dev_priv = vmw_priv(dev);
1055	struct vmw_fpriv *vmw_fp;
1056	int ret = -ENOMEM;
1057
1058	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
1059	if (unlikely(!vmw_fp))
1060		return ret;
1061
1062	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
1063	if (unlikely(vmw_fp->tfile == NULL))
1064		goto out_no_tfile;
1065
1066	file_priv->driver_priv = vmw_fp;
1067
1068	return 0;
1069
1070out_no_tfile:
1071	kfree(vmw_fp);
1072	return ret;
1073}
1074
1075static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
1076			      unsigned long arg,
1077			      long (*ioctl_func)(struct file *, unsigned int,
1078						 unsigned long))
1079{
1080	struct drm_file *file_priv = filp->private_data;
1081	struct drm_device *dev = file_priv->minor->dev;
1082	unsigned int nr = DRM_IOCTL_NR(cmd);
1083	unsigned int flags;
1084
1085	/*
1086	 * Do extra checking on driver private ioctls.
1087	 */
1088
1089	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
1090	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
1091		const struct drm_ioctl_desc *ioctl =
1092			&vmw_ioctls[nr - DRM_COMMAND_BASE];
1093
1094		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
1095			return ioctl_func(filp, cmd, arg);
1096		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
1097			if (!drm_is_current_master(file_priv) &&
1098			    !capable(CAP_SYS_ADMIN))
1099				return -EACCES;
1100		}
1101
1102		if (unlikely(ioctl->cmd != cmd))
1103			goto out_io_encoding;
1104
1105		flags = ioctl->flags;
1106	} else if (!drm_ioctl_flags(nr, &flags))
1107		return -EINVAL;
1108
1109	return ioctl_func(filp, cmd, arg);
1110
1111out_io_encoding:
1112	DRM_ERROR("Invalid command format, ioctl %d\n",
1113		  nr - DRM_COMMAND_BASE);
1114
1115	return -EINVAL;
1116}
1117
1118static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
1119			       unsigned long arg)
1120{
1121	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
1122}
1123
1124#ifdef CONFIG_COMPAT
1125static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
1126			     unsigned long arg)
1127{
1128	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
1129}
1130#endif
1131
1132static void vmw_master_set(struct drm_device *dev,
1133			   struct drm_file *file_priv,
1134			   bool from_open)
1135{
1136	/*
1137	 * Inform a new master that the layout may have changed while
1138	 * it was gone.
1139	 */
1140	if (!from_open)
1141		drm_sysfs_hotplug_event(dev);
1142}
1143
1144static void vmw_master_drop(struct drm_device *dev,
1145			    struct drm_file *file_priv)
1146{
1147	struct vmw_private *dev_priv = vmw_priv(dev);
1148
1149	vmw_kms_legacy_hotspot_clear(dev_priv);
1150	if (!dev_priv->enable_fb)
1151		vmw_svga_disable(dev_priv);
1152}
1153
1154/**
1155 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
1156 *
1157 * @dev_priv: Pointer to device private struct.
1158 * Needs the reservation sem to be held in non-exclusive mode.
1159 */
1160static void __vmw_svga_enable(struct vmw_private *dev_priv)
1161{
1162	spin_lock(&dev_priv->svga_lock);
1163	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
1164		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
1165		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
1166	}
1167	spin_unlock(&dev_priv->svga_lock);
1168}
1169
1170/**
1171 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
1172 *
1173 * @dev_priv: Pointer to device private struct.
1174 */
1175void vmw_svga_enable(struct vmw_private *dev_priv)
1176{
1177	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
1178	__vmw_svga_enable(dev_priv);
1179	ttm_read_unlock(&dev_priv->reservation_sem);
1180}
1181
1182/**
1183 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
1184 *
1185 * @dev_priv: Pointer to device private struct.
1186 * Needs the reservation sem to be held in exclusive mode.
1187 * Will not empty VRAM. VRAM must be emptied by caller.
1188 */
1189static void __vmw_svga_disable(struct vmw_private *dev_priv)
1190{
1191	spin_lock(&dev_priv->svga_lock);
1192	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
1193		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
1194		vmw_write(dev_priv, SVGA_REG_ENABLE,
1195			  SVGA_REG_ENABLE_HIDE |
1196			  SVGA_REG_ENABLE_ENABLE);
1197	}
1198	spin_unlock(&dev_priv->svga_lock);
1199}
1200
1201/**
1202 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo
1203 * running.
1204 *
1205 * @dev_priv: Pointer to device private struct.
1206 * Will empty VRAM.
1207 */
1208void vmw_svga_disable(struct vmw_private *dev_priv)
1209{
1210	/*
1211	 * Disabling SVGA will turn off device modesetting capabilities, so
1212	 * notify KMS about that so that it doesn't cache atomic state that
1213	 * isn't valid anymore, for example crtcs turned on.
1214	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
1215	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
1216	 * end up with lock order reversal. Thus, a master may actually perform
1217	 * a new modeset just after we call vmw_kms_lost_device() and race with
1218	 * vmw_svga_disable(), but that should at worst cause atomic KMS state
1219	 * to be inconsistent with the device, causing modesetting problems.
1220	 *
1221	 */
1222	vmw_kms_lost_device(dev_priv->dev);
1223	ttm_write_lock(&dev_priv->reservation_sem, false);
1224	spin_lock(&dev_priv->svga_lock);
1225	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
1226		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
1227		spin_unlock(&dev_priv->svga_lock);
1228		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
1229			DRM_ERROR("Failed evicting VRAM buffers.\n");
1230		vmw_write(dev_priv, SVGA_REG_ENABLE,
1231			  SVGA_REG_ENABLE_HIDE |
1232			  SVGA_REG_ENABLE_ENABLE);
1233	} else
1234		spin_unlock(&dev_priv->svga_lock);
1235	ttm_write_unlock(&dev_priv->reservation_sem);
1236}
1237
1238static void vmw_remove(struct pci_dev *pdev)
1239{
1240	struct drm_device *dev = pci_get_drvdata(pdev);
1241
1242	drm_dev_unregister(dev);
1243	vmw_driver_unload(dev);
1244	drm_dev_put(dev);
1245	pci_disable_device(pdev);
1246}
1247
1248static unsigned long
1249vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
1250		      unsigned long len, unsigned long pgoff,
1251		      unsigned long flags)
1252{
1253	struct drm_file *file_priv = file->private_data;
1254	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
1255
1256	return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
1257				     &dev_priv->vma_manager);
1258}
1259
1260static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
1261			      void *ptr)
1262{
1263	struct vmw_private *dev_priv =
1264		container_of(nb, struct vmw_private, pm_nb);
1265
1266	switch (val) {
1267	case PM_HIBERNATION_PREPARE:
1268		/*
1269		 * Take the reservation sem in write mode, which will make sure
1270		 * there are no other processes holding a buffer object
1271		 * reservation, meaning we should be able to evict all buffer
1272		 * objects if needed.
1273		 * Once user-space processes have been frozen, we can release
1274		 * the lock again.
1275		 */
1276		ttm_suspend_lock(&dev_priv->reservation_sem);
1277		dev_priv->suspend_locked = true;
1278		break;
1279	case PM_POST_HIBERNATION:
1280	case PM_POST_RESTORE:
1281		if (READ_ONCE(dev_priv->suspend_locked)) {
1282			dev_priv->suspend_locked = false;
1283			ttm_suspend_unlock(&dev_priv->reservation_sem);
1284		}
1285		break;
1286	default:
1287		break;
1288	}
1289	return 0;
1290}
1291
1292static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1293{
1294	struct drm_device *dev = pci_get_drvdata(pdev);
1295	struct vmw_private *dev_priv = vmw_priv(dev);
1296
1297	if (dev_priv->refuse_hibernation)
1298		return -EBUSY;
1299
1300	pci_save_state(pdev);
1301	pci_disable_device(pdev);
1302	pci_set_power_state(pdev, PCI_D3hot);
1303	return 0;
1304}
1305
1306static int vmw_pci_resume(struct pci_dev *pdev)
1307{
1308	pci_set_power_state(pdev, PCI_D0);
1309	pci_restore_state(pdev);
1310	return pci_enable_device(pdev);
1311}
1312
1313static int vmw_pm_suspend(struct device *kdev)
1314{
1315	struct pci_dev *pdev = to_pci_dev(kdev);
1316	struct pm_message dummy;
1317
1318	dummy.event = 0;
1319
1320	return vmw_pci_suspend(pdev, dummy);
1321}
1322
1323static int vmw_pm_resume(struct device *kdev)
1324{
1325	struct pci_dev *pdev = to_pci_dev(kdev);
1326
1327	return vmw_pci_resume(pdev);
1328}
1329
1330static int vmw_pm_freeze(struct device *kdev)
1331{
1332	struct pci_dev *pdev = to_pci_dev(kdev);
1333	struct drm_device *dev = pci_get_drvdata(pdev);
1334	struct vmw_private *dev_priv = vmw_priv(dev);
1335	int ret;
1336
1337	/*
1338	 * Unlock for vmw_kms_suspend.
1339	 * No user-space processes should be running now.
1340	 */
1341	ttm_suspend_unlock(&dev_priv->reservation_sem);
1342	ret = vmw_kms_suspend(dev_priv->dev);
1343	if (ret) {
1344		ttm_suspend_lock(&dev_priv->reservation_sem);
1345		DRM_ERROR("Failed to freeze modesetting.\n");
1346		return ret;
1347	}
1348	if (dev_priv->enable_fb)
1349		vmw_fb_off(dev_priv);
1350
1351	ttm_suspend_lock(&dev_priv->reservation_sem);
1352	vmw_execbuf_release_pinned_bo(dev_priv);
1353	vmw_resource_evict_all(dev_priv);
1354	vmw_release_device_early(dev_priv);
1355	ttm_bo_swapout_all();
1356	if (dev_priv->enable_fb)
1357		vmw_fifo_resource_dec(dev_priv);
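	/* If FIFO/3D resources are still active, back out of the freeze and return -EBUSY. */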
1358	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
1359		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
1360		if (dev_priv->enable_fb)
1361			vmw_fifo_resource_inc(dev_priv);
1362		WARN_ON(vmw_request_device_late(dev_priv));
1363		dev_priv->suspend_locked = false;
1364		ttm_suspend_unlock(&dev_priv->reservation_sem);
1365		if (dev_priv->suspend_state)
1366			vmw_kms_resume(dev);
1367		if (dev_priv->enable_fb)
1368			vmw_fb_on(dev_priv);
1369		return -EBUSY;
1370	}
1371
1372	vmw_fence_fifo_down(dev_priv->fman);
1373	__vmw_svga_disable(dev_priv);
 1374
1375	vmw_release_device_late(dev_priv);
1376	return 0;
1377}
1378
1379static int vmw_pm_restore(struct device *kdev)
1380{
1381	struct pci_dev *pdev = to_pci_dev(kdev);
1382	struct drm_device *dev = pci_get_drvdata(pdev);
1383	struct vmw_private *dev_priv = vmw_priv(dev);
1384	int ret;
1385
1386	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
1387	(void) vmw_read(dev_priv, SVGA_REG_ID);
1388
1389	if (dev_priv->enable_fb)
1390		vmw_fifo_resource_inc(dev_priv);
1391
1392	ret = vmw_request_device(dev_priv);
1393	if (ret)
1394		return ret;
1395
1396	if (dev_priv->enable_fb)
1397		__vmw_svga_enable(dev_priv);
1398
1399	vmw_fence_fifo_up(dev_priv->fman);
1400	dev_priv->suspend_locked = false;
1401	ttm_suspend_unlock(&dev_priv->reservation_sem);
1402	if (dev_priv->suspend_state)
1403		vmw_kms_resume(dev_priv->dev);
1404
1405	if (dev_priv->enable_fb)
1406		vmw_fb_on(dev_priv);
1407
1408	return 0;
1409}
1410
1411static const struct dev_pm_ops vmw_pm_ops = {
1412	.freeze = vmw_pm_freeze,
1413	.thaw = vmw_pm_restore,
1414	.restore = vmw_pm_restore,
1415	.suspend = vmw_pm_suspend,
1416	.resume = vmw_pm_resume,
1417};
1418
1419static const struct file_operations vmwgfx_driver_fops = {
1420	.owner = THIS_MODULE,
1421	.open = drm_open,
1422	.release = drm_release,
1423	.unlocked_ioctl = vmw_unlocked_ioctl,
1424	.mmap = vmw_mmap,
1425	.poll = vmw_fops_poll,
1426	.read = vmw_fops_read,
1427#if defined(CONFIG_COMPAT)
1428	.compat_ioctl = vmw_compat_ioctl,
1429#endif
1430	.llseek = noop_llseek,
1431	.get_unmapped_area = vmw_get_unmapped_area,
1432};
1433
1434static struct drm_driver driver = {
1435	.driver_features =
1436	DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
1437	.ioctls = vmw_ioctls,
1438	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
1439	.master_set = vmw_master_set,
1440	.master_drop = vmw_master_drop,
1441	.open = vmw_driver_open,
1442	.postclose = vmw_postclose,
1443
1444	.dumb_create = vmw_dumb_create,
1445	.dumb_map_offset = vmw_dumb_map_offset,
1446	.dumb_destroy = vmw_dumb_destroy,
1447
1448	.prime_fd_to_handle = vmw_prime_fd_to_handle,
1449	.prime_handle_to_fd = vmw_prime_handle_to_fd,
1450
1451	.fops = &vmwgfx_driver_fops,
1452	.name = VMWGFX_DRIVER_NAME,
1453	.desc = VMWGFX_DRIVER_DESC,
1454	.date = VMWGFX_DRIVER_DATE,
1455	.major = VMWGFX_DRIVER_MAJOR,
1456	.minor = VMWGFX_DRIVER_MINOR,
1457	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
1458};
1459
1460static struct pci_driver vmw_pci_driver = {
1461	.name = VMWGFX_DRIVER_NAME,
1462	.id_table = vmw_pci_id_list,
1463	.probe = vmw_probe,
1464	.remove = vmw_remove,
1465	.driver = {
1466		.pm = &vmw_pm_ops
1467	}
1468};
1469
1470static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1471{
1472	struct drm_device *dev;
1473	int ret;
1474
1475	ret = pci_enable_device(pdev);
1476	if (ret)
1477		return ret;
1478
1479	dev = drm_dev_alloc(&driver, &pdev->dev);
1480	if (IS_ERR(dev)) {
1481		ret = PTR_ERR(dev);
1482		goto err_pci_disable_device;
1483	}
1484
1485	dev->pdev = pdev;
1486	pci_set_drvdata(pdev, dev);
1487
1488	ret = vmw_driver_load(dev, ent->driver_data);
1489	if (ret)
1490		goto err_drm_dev_put;
1491
1492	ret = drm_dev_register(dev, ent->driver_data);
1493	if (ret)
1494		goto err_vmw_driver_unload;
1495
1496	return 0;
1497
1498err_vmw_driver_unload:
1499	vmw_driver_unload(dev);
1500err_drm_dev_put:
1501	drm_dev_put(dev);
1502err_pci_disable_device:
1503	pci_disable_device(pdev);
1504	return ret;
1505}
1506
1507static int __init vmwgfx_init(void)
1508{
1509	int ret;
1510
1511	if (vgacon_text_force())
1512		return -EINVAL;
1513
1514	ret = pci_register_driver(&vmw_pci_driver);
1515	if (ret)
1516		DRM_ERROR("Failed initializing DRM.\n");
1517	return ret;
1518}
1519
1520static void __exit vmwgfx_exit(void)
1521{
1522	pci_unregister_driver(&vmw_pci_driver);
1523}
1524
1525module_init(vmwgfx_init);
1526module_exit(vmwgfx_exit);
1527
1528MODULE_AUTHOR("VMware Inc. and others");
1529MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
1530MODULE_LICENSE("GPL and additional rights");
1531MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
1532	       __stringify(VMWGFX_DRIVER_MINOR) "."
1533	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
1534	       "0");