v3.1
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
		union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
		 struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
		 struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
		 struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
		 struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		 struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_FIFO_DEBUG				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FIFO_DEBUG,		\
		 struct drm_vmw_fifo_debug_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
		 struct drm_vmw_update_layout_arg)


/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
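
/*
 * For example, VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
 * DRM_AUTH | DRM_UNLOCKED) places its descriptor at index
 * DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE of the
 * vmw_ioctls[] table below, recording both the driver-relative command
 * number (DRM_VMW_GET_PARAM) and the fully encoded command
 * (DRM_IOCTL_VMW_GET_PARAM) that vmw_unlocked_ioctl() later checks
 * against the cmd passed in from user space.
 */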

/**
 * Ioctl definitions.
 */

static struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
		      DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
};

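/* PCI vendor id 0x15ad is VMware; device id 0x0405 is the SVGA II adapter. */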
static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};

static int enable_fbdev;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
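
/*
 * Usage example: "modprobe vmwgfx enable_fbdev=1" turns the fbdev
 * emulation on at load time; the 0600 permission also exposes the knob
 * at /sys/module/vmwgfx/parameters/enable_fbdev.
 */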

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
}

static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}

	return 0;
}

static void vmw_release_device(struct vmw_private *dev_priv)
{
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

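/*
 * Increase the 3d resource refcount; the first reference (0 -> 1)
 * brings up the fifo via vmw_request_device(), switching the device
 * to svga mode. Protected by release_mutex.
 */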
int vmw_3d_resource_inc(struct vmw_private *dev_priv)
{
	int ret = 0;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(dev_priv->num_3d_resources++ == 0)) {
		ret = vmw_request_device(dev_priv);
		if (unlikely(ret != 0))
			--dev_priv->num_3d_resources;
	}
	mutex_unlock(&dev_priv->release_mutex);
	return ret;
}

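/*
 * Decrease the 3d resource refcount; dropping the last reference
 * (1 -> 0) takes the fifo down again via vmw_release_device().
 */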
void vmw_3d_resource_dec(struct vmw_private *dev_priv)
{
	int32_t n3d;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(--dev_priv->num_3d_resources == 0))
		vmw_release_device(dev_priv);
	n3d = (int32_t) dev_priv->num_3d_resources;
	mutex_unlock(&dev_priv->release_mutex);

	BUG_ON(n3d < 0);
}

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}
	memset(dev_priv, 0, sizeof(*dev_priv));

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_sequence = (uint32_t) -100;
	mutex_init(&dev_priv->hw_mutex);
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	rwlock_init(&dev_priv->resource_lock);
	idr_init(&dev_priv->context_idr);
	idr_init(&dev_priv->surface_idr);
	idr_init(&dev_priv->stream_idr);
	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	atomic_set(&dev_priv->fence_queue_waiters, 0);
	atomic_set(&dev_priv->fifo_queue_waiters, 0);

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		mutex_unlock(&dev_priv->hw_mutex);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		dev_priv->max_gmr_descriptors =
			vmw_read(dev_priv,
				 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	mutex_unlock(&dev_priv->hw_mutex);

	vmw_print_capabilities(dev_priv->capabilities);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max GMR descriptors is %u\n",
			 (unsigned)dev_priv->max_gmr_descriptors);
	}
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;


	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;


	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_err1;
	}

	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_err2;
	}

	dev_priv->has_gmr = true;
	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
			   dev_priv->max_gmr_ids) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
					   dev_priv->mmio_size, DRM_MTRR_WC);

	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
					 dev_priv->mmio_size);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
	    (dev_priv->mem_global_ref.object, 12);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}
	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);
	if (dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv);
		if (unlikely(ret != 0))
			goto out_no_fifo;
		vmw_kms_save_vga(dev_priv);
		vmw_fb_init(dev_priv);
		DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ?
			 "Detected device 3D availability.\n" :
			 "Detected no device 3D availability.\n");
	} else {
		DRM_INFO("Delayed 3D detection since we're not "
			 "running the device in SVGA mode yet.\n");
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_irq:
	if (dev_priv->enable_fb) {
		vmw_fb_close(dev_priv);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv);
	}
out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	iounmap(dev_priv->mmio_virt);
out_err3:
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
	vmw_ttm_global_release(dev_priv);
out_err0:
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);
	kfree(dev_priv);
	return ret;
}

static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->enable_fb) {
		vmw_fb_close(dev_priv);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv);
	}
	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	iounmap(dev_priv->mmio_virt);
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
	(void)ttm_bo_device_release(&dev_priv->bdev);
	vmw_ttm_global_release(dev_priv);
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);

	kfree(dev_priv);

	return 0;
}

static void vmw_postclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);
	ttm_object_file_release(&vmw_fp->tfile);
	if (vmw_fp->locked_master)
		drm_master_put(&vmw_fp->locked_master);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	if (unlikely(dev_priv->bdev.dev_mapping == NULL))
		dev_priv->bdev.dev_mapping =
			file_priv->filp->f_path.dentry->d_inode->i_mapping;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		struct drm_ioctl_desc *ioctl =
		    &vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (unlikely(ioctl->cmd_drv != cmd)) {
			DRM_ERROR("Invalid command format, ioctl %d\n",
				  nr - DRM_COMMAND_BASE);
			return -EINVAL;
		}
	}

	return drm_ioctl(filp, cmd, arg);
}

static int vmw_firstopen(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	dev_priv->is_opened = true;

	return 0;
}

static void vmw_lastclose(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_crtc *crtc;
	struct drm_mode_set set;
	int ret;

	/**
	 * Do nothing on the lastclose call from drm_unload.
	 */

	if (!dev_priv->is_opened)
		return;

	dev_priv->is_opened = false;
	set.x = 0;
	set.y = 0;
	set.fb = NULL;
	set.mode = NULL;
	set.connectors = NULL;
	set.num_connectors = 0;

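	/* Disable all outputs by setting a NULL framebuffer and mode on each crtc. */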
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		set.crtc = crtc;
		ret = crtc->funcs->set_config(&set);
		WARN_ON(ret != 0);
	}

}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
	INIT_LIST_HEAD(&vmaster->fb_surf);
	mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}


static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (!dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv);
		if (unlikely(ret != 0))
			return ret;
		vmw_kms_save_vga(dev_priv);
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			goto out_no_active_lock;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to clean VRAM on "
				  "master drop.\n");
		}

		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;

out_no_active_lock:
	if (!dev_priv->enable_fb) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv);
	}
	return ret;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/**
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_kms_idle_workqueues(vmaster);

	if (unlikely((ret != 0))) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

	if (!dev_priv->enable_fb) {
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0))
			DRM_ERROR("Unable to clean VRAM on master drop.\n");
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv);
	}

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}


static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);
	struct vmw_master *vmaster = dev_priv->active_master;

	switch (val) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ttm_suspend_lock(&vmaster->lock);

		/**
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		ttm_bo_swapout_all(&dev_priv->bdev);

		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
	case PM_POST_RESTORE:
		ttm_suspend_unlock(&vmaster->lock);

		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}

/**
 * These might not be needed with the virtual SVGA device.
 */

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->num_3d_resources != 0) {
		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");
		return -EBUSY;
	}

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

static int vmw_pm_prepare(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	/**
	 * Release 3d reference held by fbdev and potentially
	 * stop fifo.
	 */
	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_3d_resource_dec(dev_priv);

	if (dev_priv->num_3d_resources != 0) {

		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");

		if (dev_priv->enable_fb)
			vmw_3d_resource_inc(dev_priv);
		dev_priv->suspended = false;
		return -EBUSY;
	}

	return 0;
}

static void vmw_pm_complete(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	/**
	 * Reclaim 3d reference held by fbdev and potentially
	 * start fifo.
	 */
	if (dev_priv->enable_fb)
		vmw_3d_resource_inc(dev_priv);

	dev_priv->suspended = false;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.prepare = vmw_pm_prepare,
	.complete = vmw_pm_complete,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.firstopen = vmw_firstopen,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.reclaim_buffers_locked = NULL,
	.ioctls = vmw_ioctls,
	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
	.dma_quiescent = NULL,	/*vmw_dma_quiescent, */
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,
	.fops = {
		 .owner = THIS_MODULE,
		 .open = drm_open,
		 .release = drm_release,
		 .unlocked_ioctl = vmw_unlocked_ioctl,
		 .mmap = vmw_mmap,
		 .poll = drm_poll,
		 .fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
		 .compat_ioctl = drm_compat_ioctl,
#endif
		 .llseek = noop_llseek,
	},
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;
	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");
v3.5.6
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>

#include "drmP.h"
#include "vmwgfx_drv.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600


/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
		union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
		 struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
		 struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
		 struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
		 struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		 struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
		 struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
		 struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
		 struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
		 struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
		 struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
		 struct drm_vmw_update_layout_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}

/**
 * Ioctl definitions.
 */

static struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT,
		      vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),

	/* These allow direct access to the framebuffers, so mark them master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_MASTER | DRM_UNLOCKED),
};

static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
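/*
 * Exporting the id table lets userspace tooling (udev/modprobe)
 * autoload the module when a matching PCI device is found.
 */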
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
}


/**
 * vmw_dummy_query_bo_prepare - Initialize a query result structure at
 * the start of a buffer object.
 *
 * @dev_priv: The device private structure.
 *
 * This function will idle the buffer using an uninterruptible wait, then
 * map the first page and initialize a pending occlusion query result
 * structure. Finally, it will unmap the buffer.
 *
 * TODO: Since we're only mapping a single page, we should optimize the map
 * to use kmap_atomic / iomap_atomic.
 */
static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
{
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;
	int ret;
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;

	ttm_bo_reserve(bo, false, false, false, 0);
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bdev->fence_lock);
	if (unlikely(ret != 0))
		(void) vmw_fallback_wait(dev_priv, false, true, 0, false,
					 10*HZ);

	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	} else
		DRM_ERROR("Dummy query buffer map failed.\n");
	ttm_bo_unreserve(bo);
}


/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	return ttm_bo_create(&dev_priv->bdev,
			     PAGE_SIZE,
			     ttm_bo_type_device,
			     &vmw_vram_sys_placement,
			     0, 0, false, NULL,
			     &dev_priv->dummy_query_bo);
}


static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;
	vmw_dummy_query_bo_prepare(dev_priv);

	return 0;

out_no_query_bo:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

static void vmw_release_device(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	ttm_bo_unref(&dev_priv->dummy_query_bo);
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Increase the 3d resource refcount.
 * If the count was previously zero, initialize the fifo, switching to svga
 * mode. Note that the master holds a ref as well, and may request an
 * explicit switch to svga mode if fb is not running, using @unhide_svga.
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
			bool unhide_svga)
{
	int ret = 0;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(dev_priv->num_3d_resources++ == 0)) {
		ret = vmw_request_device(dev_priv);
		if (unlikely(ret != 0))
			--dev_priv->num_3d_resources;
	} else if (unhide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) &
			  ~SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	mutex_unlock(&dev_priv->release_mutex);
	return ret;
}

/**
 * Decrease the 3d resource refcount.
 * If the count reaches zero, disable the fifo, switching to vga mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
			 bool hide_svga)
{
	int32_t n3d;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(--dev_priv->num_3d_resources == 0))
		vmw_release_device(dev_priv);
	else if (hide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) |
			  SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	n3d = (int32_t) dev_priv->num_3d_resources;
	mutex_unlock(&dev_priv->release_mutex);

	BUG_ON(n3d < 0);
}

/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}
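
/*
 * Example: a host-reported mode of 640x480 is raised to the 800x600
 * minimum, and a mode larger than fb_max_width x fb_max_height also
 * falls back to 800x600 (VMW_MIN_INITIAL_WIDTH x VMW_MIN_INITIAL_HEIGHT).
 */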

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}
	memset(dev_priv, 0, sizeof(*dev_priv));

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->hw_mutex);
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	rwlock_init(&dev_priv->resource_lock);
	idr_init(&dev_priv->context_idr);
	idr_init(&dev_priv->surface_idr);
	idr_init(&dev_priv->stream_idr);
	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	atomic_set(&dev_priv->fifo_queue_waiters, 0);
	INIT_LIST_HEAD(&dev_priv->surface_lru);
	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		mutex_unlock(&dev_priv->hw_mutex);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		dev_priv->max_gmr_descriptors =
			vmw_read(dev_priv,
				 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
	}
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}

	mutex_unlock(&dev_priv->hw_mutex);

	vmw_print_capabilities(dev_priv->capabilities);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max GMR descriptors is %u\n",
			 (unsigned)dev_priv->max_gmr_descriptors);
	}
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;


	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;


	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_err1;
	}

	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_err2;
	}

	dev_priv->has_gmr = true;
	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
			   dev_priv->max_gmr_ids) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
					   dev_priv->mmio_size, DRM_MTRR_WC);

	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
					 dev_priv->mmio_size);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
	    (dev_priv->mem_global_ref.object, 12);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL))
		goto out_no_fman;

	/* Need to start the fifo to check if we can do screen objects */
	ret = vmw_3d_resource_inc(dev_priv, true);
	if (unlikely(ret != 0))
		goto out_no_fifo;
	vmw_kms_save_vga(dev_priv);

	/* Start kms and overlay systems, needs fifo. */
	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	/* 3D Depends on Screen Objects being used. */
	DRM_INFO("Detected %sdevice 3D availability.\n",
		 vmw_fifo_have_3d(dev_priv) ?
		 "" : "no ");

	/* We might be done with the fifo now */
	if (dev_priv->enable_fb) {
		vmw_fb_init(dev_priv);
	} else {
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_irq:
	if (dev_priv->enable_fb)
		vmw_fb_close(dev_priv);
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	/* We still have a 3D resource reference held */
	if (dev_priv->enable_fb) {
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, false);
	}
out_no_fifo:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	iounmap(dev_priv->mmio_virt);
out_err3:
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
	vmw_ttm_global_release(dev_priv);
out_err0:
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);
	kfree(dev_priv);
	return ret;
}
 694
 695static int vmw_driver_unload(struct drm_device *dev)
 696{
 697	struct vmw_private *dev_priv = vmw_priv(dev);
 698
 699	unregister_pm_notifier(&dev_priv->pm_nb);
 700
 701	if (dev_priv->ctx.cmd_bounce)
 702		vfree(dev_priv->ctx.cmd_bounce);
 703	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
 704		drm_irq_uninstall(dev_priv->dev);
 705	if (dev_priv->enable_fb) {
 706		vmw_fb_close(dev_priv);
 707		vmw_kms_restore_vga(dev_priv);
 708		vmw_3d_resource_dec(dev_priv, false);
 709	}
 710	vmw_kms_close(dev_priv);
 711	vmw_overlay_close(dev_priv);
 712	vmw_fence_manager_takedown(dev_priv->fman);
 713	if (dev_priv->stealth)
 714		pci_release_region(dev->pdev, 2);
 715	else
 716		pci_release_regions(dev->pdev);
 717
 718	ttm_object_device_release(&dev_priv->tdev);
 719	iounmap(dev_priv->mmio_virt);
 720	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
 721		     dev_priv->mmio_size, DRM_MTRR_WC);
 722	if (dev_priv->has_gmr)
 723		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
 724	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 725	(void)ttm_bo_device_release(&dev_priv->bdev);
 726	vmw_ttm_global_release(dev_priv);
 727	idr_destroy(&dev_priv->surface_idr);
 728	idr_destroy(&dev_priv->context_idr);
 729	idr_destroy(&dev_priv->stream_idr);
 730
 731	kfree(dev_priv);
 732
 733	return 0;
 734}
 735
 736static void vmw_preclose(struct drm_device *dev,
 737			 struct drm_file *file_priv)
 738{
 739	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 740	struct vmw_private *dev_priv = vmw_priv(dev);
 741
 742	vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
 743}
 744
 745static void vmw_postclose(struct drm_device *dev,
 746			 struct drm_file *file_priv)
 747{
 748	struct vmw_fpriv *vmw_fp;
 749
 750	vmw_fp = vmw_fpriv(file_priv);
 751	ttm_object_file_release(&vmw_fp->tfile);
 752	if (vmw_fp->locked_master)
 753		drm_master_put(&vmw_fp->locked_master);
 754	kfree(vmw_fp);
 755}
 756
 757static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 758{
 759	struct vmw_private *dev_priv = vmw_priv(dev);
 760	struct vmw_fpriv *vmw_fp;
 761	int ret = -ENOMEM;
 762
 763	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
 764	if (unlikely(vmw_fp == NULL))
 765		return ret;
 766
 767	INIT_LIST_HEAD(&vmw_fp->fence_events);
 768	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
 769	if (unlikely(vmw_fp->tfile == NULL))
 770		goto out_no_tfile;
 771
 772	file_priv->driver_priv = vmw_fp;
 773
 774	if (unlikely(dev_priv->bdev.dev_mapping == NULL))
 775		dev_priv->bdev.dev_mapping =
 776			file_priv->filp->f_path.dentry->d_inode->i_mapping;
 777
 778	return 0;
 779
 780out_no_tfile:
 781	kfree(vmw_fp);
 782	return ret;
 783}
 784
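/**
 * vmw_unlocked_ioctl - Main ioctl entry point.
 *
 * Verifies that driver-private ioctls were built with the expected
 * command encoding (direction and size) before forwarding them to
 * drm_ioctl().
 */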
 785static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
 786			       unsigned long arg)
 787{
 788	struct drm_file *file_priv = filp->private_data;
 789	struct drm_device *dev = file_priv->minor->dev;
 790	unsigned int nr = DRM_IOCTL_NR(cmd);
 791
 792	/*
 793	 * Do extra checking on driver private ioctls.
 794	 */
 795
 796	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
 797	    (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
 798		struct drm_ioctl_desc *ioctl =
 799		    &vmw_ioctls[nr - DRM_COMMAND_BASE];
 800
 801		if (unlikely(ioctl->cmd_drv != cmd)) {
 802			DRM_ERROR("Invalid command format, ioctl %d\n",
 803				  nr - DRM_COMMAND_BASE);
 804			return -EINVAL;
 805		}
 806	}
 807
 808	return drm_ioctl(filp, cmd, arg);
 809}
 810
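/**
 * vmw_firstopen - DRM firstopen callback.
 *
 * Marks the device as opened; vmw_lastclose() uses the flag to ignore
 * the lastclose call issued from driver unload.
 */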
 811static int vmw_firstopen(struct drm_device *dev)
 812{
 813	struct vmw_private *dev_priv = vmw_priv(dev);
 814	dev_priv->is_opened = true;
 815
 816	return 0;
 817}
 818
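/**
 * vmw_lastclose - DRM lastclose callback.
 *
 * Resets the display by applying an empty mode set to every CRTC.
 */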
 819static void vmw_lastclose(struct drm_device *dev)
 820{
 821	struct vmw_private *dev_priv = vmw_priv(dev);
 822	struct drm_crtc *crtc;
 823	struct drm_mode_set set;
 824	int ret;
 825
 826	/*
 827	 * Do nothing on the lastclose call from drm_unload.
 828	 */
 829
 830	if (!dev_priv->is_opened)
 831		return;
 832
 833	dev_priv->is_opened = false;
 834	set.x = 0;
 835	set.y = 0;
 836	set.fb = NULL;
 837	set.mode = NULL;
 838	set.connectors = NULL;
 839	set.num_connectors = 0;
 840
 841	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 842		set.crtc = crtc;
 843		ret = crtc->funcs->set_config(&set);
 844		WARN_ON(ret != 0);
 845	}
 847}
 848
 849static void vmw_master_init(struct vmw_master *vmaster)
 850{
 851	ttm_lock_init(&vmaster->lock);
 852	INIT_LIST_HEAD(&vmaster->fb_surf);
 853	mutex_init(&vmaster->fb_surf_mutex);
 854}
 855
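/**
 * vmw_master_create - Allocate and initialize a new master.
 *
 * The new master's lock starts out in killable mode; vmw_master_set()
 * takes it out of that mode when the master takes over the device.
 */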
 856static int vmw_master_create(struct drm_device *dev,
 857			     struct drm_master *master)
 858{
 859	struct vmw_master *vmaster;
 860
 861	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
 862	if (unlikely(vmaster == NULL))
 863		return -ENOMEM;
 864
 865	vmw_master_init(vmaster);
 866	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
 867	master->driver_priv = vmaster;
 868
 869	return 0;
 870}
 871
 872static void vmw_master_destroy(struct drm_device *dev,
 873			       struct drm_master *master)
 874{
 875	struct vmw_master *vmaster = vmw_master(master);
 876
 877	master->driver_priv = NULL;
 878	kfree(vmaster);
 879}
 880
 881
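/**
 * vmw_master_set - A new master takes control of the device.
 *
 * Without a kernel fbdev, this grabs a 3D resource reference, saves the
 * VGA state and turns off SVGA traces. If the fbdev master was active,
 * it is locked out and VRAM is evicted before control is handed over.
 */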
 882static int vmw_master_set(struct drm_device *dev,
 883			  struct drm_file *file_priv,
 884			  bool from_open)
 885{
 886	struct vmw_private *dev_priv = vmw_priv(dev);
 887	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 888	struct vmw_master *active = dev_priv->active_master;
 889	struct vmw_master *vmaster = vmw_master(file_priv->master);
 890	int ret = 0;
 891
 892	if (!dev_priv->enable_fb) {
 893		ret = vmw_3d_resource_inc(dev_priv, true);
 894		if (unlikely(ret != 0))
 895			return ret;
 896		vmw_kms_save_vga(dev_priv);
 897		mutex_lock(&dev_priv->hw_mutex);
 898		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
 899		mutex_unlock(&dev_priv->hw_mutex);
 900	}
 901
 902	if (active) {
 903		BUG_ON(active != &dev_priv->fbdev_master);
 904		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
 905		if (unlikely(ret != 0))
 906			goto out_no_active_lock;
 907
 908		ttm_lock_set_kill(&active->lock, true, SIGTERM);
 909		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
 910		if (unlikely(ret != 0))
 911			DRM_ERROR("Unable to clean VRAM on master drop.\n");
 914
 915		dev_priv->active_master = NULL;
 916	}
 917
 918	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
 919	if (!from_open) {
 920		ttm_vt_unlock(&vmaster->lock);
 921		BUG_ON(vmw_fp->locked_master != file_priv->master);
 922		drm_master_put(&vmw_fp->locked_master);
 923	}
 924
 925	dev_priv->active_master = vmaster;
 926
 927	return 0;
 928
 929out_no_active_lock:
 930	if (!dev_priv->enable_fb) {
 931		mutex_lock(&dev_priv->hw_mutex);
 932		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
 933		mutex_unlock(&dev_priv->hw_mutex);
 934		vmw_kms_restore_vga(dev_priv);
 935		vmw_3d_resource_dec(dev_priv, true);
 936	}
 937	return ret;
 938}
 939
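/**
 * vmw_master_drop - The current master drops control of the device.
 *
 * Keeps a reference to the dropping master so it cannot disappear while
 * locked, releases the pinned buffer object, and hands control back to
 * the fbdev master. Without a kernel fbdev, VRAM is also evicted, SVGA
 * traces are re-enabled, the VGA state is restored and the 3D resource
 * reference is dropped.
 */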
 940static void vmw_master_drop(struct drm_device *dev,
 941			    struct drm_file *file_priv,
 942			    bool from_release)
 943{
 944	struct vmw_private *dev_priv = vmw_priv(dev);
 945	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 946	struct vmw_master *vmaster = vmw_master(file_priv->master);
 947	int ret;
 948
 949	/*
 950	 * Make sure the master doesn't disappear while we have
 951	 * it locked.
 952	 */
 953
 954	vmw_fp->locked_master = drm_master_get(file_priv->master);
 955	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
 956	vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
 957
 958	if (unlikely(ret != 0)) {
 959		DRM_ERROR("Unable to lock TTM at VT switch.\n");
 960		drm_master_put(&vmw_fp->locked_master);
 961	}
 962
 963	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
 964
 965	if (!dev_priv->enable_fb) {
 966		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
 967		if (unlikely(ret != 0))
 968			DRM_ERROR("Unable to clean VRAM on master drop.\n");
 969		mutex_lock(&dev_priv->hw_mutex);
 970		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
 971		mutex_unlock(&dev_priv->hw_mutex);
 972		vmw_kms_restore_vga(dev_priv);
 973		vmw_3d_resource_dec(dev_priv, true);
 974	}
 975
 976	dev_priv->active_master = &dev_priv->fbdev_master;
 977	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 978	ttm_vt_unlock(&dev_priv->fbdev_master.lock);
 979
 980	if (dev_priv->enable_fb)
 981		vmw_fb_on(dev_priv);
 982}
 983
 984
 985static void vmw_remove(struct pci_dev *pdev)
 986{
 987	struct drm_device *dev = pci_get_drvdata(pdev);
 988
 989	drm_put_dev(dev);
 990}
 991
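/**
 * vmwgfx_pm_notifier - PM notifier callback.
 *
 * On suspend or hibernation prepare, takes the suspend lock and swaps
 * all TTM buffer objects out to swappable memory; the lock is dropped
 * again once the transition has completed or been aborted.
 */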
 992static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 993			      void *ptr)
 994{
 995	struct vmw_private *dev_priv =
 996		container_of(nb, struct vmw_private, pm_nb);
 997	struct vmw_master *vmaster = dev_priv->active_master;
 998
 999	switch (val) {
1000	case PM_HIBERNATION_PREPARE:
1001	case PM_SUSPEND_PREPARE:
1002		ttm_suspend_lock(&vmaster->lock);
1003
1004		/*
1005		 * This empties VRAM and unbinds all GMR bindings.
1006		 * Buffer contents are moved to swappable memory.
1007		 */
1008		vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
1009		ttm_bo_swapout_all(&dev_priv->bdev);
1010
1011		break;
1012	case PM_POST_HIBERNATION:
1013	case PM_POST_SUSPEND:
1014	case PM_POST_RESTORE:
1015		ttm_suspend_unlock(&vmaster->lock);
1016
1017		break;
1018	case PM_RESTORE_PREPARE:
1019		break;
1020	default:
1021		break;
1022	}
1023	return 0;
1024}
1025
1026/**
1027 * These might not be needed with the virtual SVGA device.
1028 */
1029
1030static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1031{
1032	struct drm_device *dev = pci_get_drvdata(pdev);
1033	struct vmw_private *dev_priv = vmw_priv(dev);
1034
1035	if (dev_priv->num_3d_resources != 0) {
1036		DRM_INFO("Can't suspend or hibernate "
1037			 "while 3D resources are active.\n");
1038		return -EBUSY;
1039	}
1040
1041	pci_save_state(pdev);
1042	pci_disable_device(pdev);
1043	pci_set_power_state(pdev, PCI_D3hot);
1044	return 0;
1045}
1046
1047static int vmw_pci_resume(struct pci_dev *pdev)
1048{
1049	pci_set_power_state(pdev, PCI_D0);
1050	pci_restore_state(pdev);
1051	return pci_enable_device(pdev);
1052}
1053
1054static int vmw_pm_suspend(struct device *kdev)
1055{
1056	struct pci_dev *pdev = to_pci_dev(kdev);
1057	struct pm_message dummy;
1058
1059	dummy.event = 0;
1060
1061	return vmw_pci_suspend(pdev, dummy);
1062}
1063
1064static int vmw_pm_resume(struct device *kdev)
1065{
1066	struct pci_dev *pdev = to_pci_dev(kdev);
1067
1068	return vmw_pci_resume(pdev);
1069}
1070
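/**
 * vmw_pm_prepare - Begin a suspend or hibernate transition.
 *
 * Releases the 3D resource reference held by fbdev so the fifo can be
 * stopped, then refuses the transition with -EBUSY if other 3D resources
 * are still active. vmw_pm_complete() undoes this after the transition.
 */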
1071static int vmw_pm_prepare(struct device *kdev)
1072{
1073	struct pci_dev *pdev = to_pci_dev(kdev);
1074	struct drm_device *dev = pci_get_drvdata(pdev);
1075	struct vmw_private *dev_priv = vmw_priv(dev);
1076
1077	/*
1078	 * Release the 3D resource reference held by fbdev and
1079	 * potentially stop the fifo.
1080	 */
1081	dev_priv->suspended = true;
1082	if (dev_priv->enable_fb)
1083		vmw_3d_resource_dec(dev_priv, true);
1084
1085	if (dev_priv->num_3d_resources != 0) {
1087		DRM_INFO("Can't suspend or hibernate "
1088			 "while 3D resources are active.\n");
1090		if (dev_priv->enable_fb)
1091			vmw_3d_resource_inc(dev_priv, true);
1092		dev_priv->suspended = false;
1093		return -EBUSY;
1094	}
1095
1096	return 0;
1097}
1098
1099static void vmw_pm_complete(struct device *kdev)
1100{
1101	struct pci_dev *pdev = to_pci_dev(kdev);
1102	struct drm_device *dev = pci_get_drvdata(pdev);
1103	struct vmw_private *dev_priv = vmw_priv(dev);
1104
1105	/*
1106	 * Reclaim the 3D resource reference held by fbdev and
1107	 * potentially restart the fifo.
1108	 */
1109	if (dev_priv->enable_fb)
1110		vmw_3d_resource_inc(dev_priv, false);
1111
1112	dev_priv->suspended = false;
1113}
1114
1115static const struct dev_pm_ops vmw_pm_ops = {
1116	.prepare = vmw_pm_prepare,
1117	.complete = vmw_pm_complete,
1118	.suspend = vmw_pm_suspend,
1119	.resume = vmw_pm_resume,
1120};
1121
1122static const struct file_operations vmwgfx_driver_fops = {
1123	.owner = THIS_MODULE,
1124	.open = drm_open,
1125	.release = drm_release,
1126	.unlocked_ioctl = vmw_unlocked_ioctl,
1127	.mmap = vmw_mmap,
1128	.poll = vmw_fops_poll,
1129	.read = vmw_fops_read,
1130	.fasync = drm_fasync,
1131#if defined(CONFIG_COMPAT)
1132	.compat_ioctl = drm_compat_ioctl,
1133#endif
1134	.llseek = noop_llseek,
1135};
1136
1137static struct drm_driver driver = {
1138	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
1139			   DRIVER_MODESET,
1140	.load = vmw_driver_load,
1141	.unload = vmw_driver_unload,
1142	.firstopen = vmw_firstopen,
1143	.lastclose = vmw_lastclose,
1144	.irq_preinstall = vmw_irq_preinstall,
1145	.irq_postinstall = vmw_irq_postinstall,
1146	.irq_uninstall = vmw_irq_uninstall,
1147	.irq_handler = vmw_irq_handler,
1148	.get_vblank_counter = vmw_get_vblank_counter,
1149	.enable_vblank = vmw_enable_vblank,
1150	.disable_vblank = vmw_disable_vblank,
1151	.reclaim_buffers_locked = NULL,
1152	.ioctls = vmw_ioctls,
1153	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
1154	.dma_quiescent = NULL,	/*vmw_dma_quiescent, */
1155	.master_create = vmw_master_create,
1156	.master_destroy = vmw_master_destroy,
1157	.master_set = vmw_master_set,
1158	.master_drop = vmw_master_drop,
1159	.open = vmw_driver_open,
1160	.preclose = vmw_preclose,
1161	.postclose = vmw_postclose,
1162
1163	.dumb_create = vmw_dumb_create,
1164	.dumb_map_offset = vmw_dumb_map_offset,
1165	.dumb_destroy = vmw_dumb_destroy,
1166
1167	.fops = &vmwgfx_driver_fops,
1168	.name = VMWGFX_DRIVER_NAME,
1169	.desc = VMWGFX_DRIVER_DESC,
1170	.date = VMWGFX_DRIVER_DATE,
1171	.major = VMWGFX_DRIVER_MAJOR,
1172	.minor = VMWGFX_DRIVER_MINOR,
1173	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
1174};
1175
1176static struct pci_driver vmw_pci_driver = {
1177	.name = VMWGFX_DRIVER_NAME,
1178	.id_table = vmw_pci_id_list,
1179	.probe = vmw_probe,
1180	.remove = vmw_remove,
1181	.driver = {
1182		.pm = &vmw_pm_ops
1183	}
1184};
1185
1186static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1187{
1188	return drm_get_pci_dev(pdev, ent, &driver);
1189}
1190
1191static int __init vmwgfx_init(void)
1192{
1193	int ret;
1194	ret = drm_pci_init(&driver, &vmw_pci_driver);
1195	if (ret)
1196		DRM_ERROR("Failed initializing DRM.\n");
1197	return ret;
1198}
1199
1200static void __exit vmwgfx_exit(void)
1201{
1202	drm_pci_exit(&driver, &vmw_pci_driver);
1203}
1204
1205module_init(vmwgfx_init);
1206module_exit(vmwgfx_exit);
1207
1208MODULE_AUTHOR("VMware Inc. and others");
1209MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
1210MODULE_LICENSE("GPL and additional rights");
1211MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
1212	       __stringify(VMWGFX_DRIVER_MINOR) "."
1213	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
1214	       "0");