/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>
#include <linux/console.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600


/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
		struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
		struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
		struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
		 struct drm_vmw_context_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}

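/*
 * Illustrative expansion (not part of the driver): the entry
 * VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, DRM_AUTH | DRM_RENDER_ALLOW)
 * becomes
 *
 *   [DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE] =
 *           {DRM_IOCTL_VMW_GET_PARAM, DRM_AUTH | DRM_RENDER_ALLOW,
 *            vmw_getparam_ioctl},
 *
 * so vmw_ioctls[] can be indexed directly with nr - DRM_COMMAND_BASE,
 * as done in vmw_generic_ioctl() below.
 */
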
/**
 * Ioctl definitions.
 */

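/*
 * Note on flags (summary; see the core DRM ioctl code for the
 * authoritative rules): DRM_RENDER_ALLOW ioctls are also reachable
 * through render nodes, DRM_MASTER / DRM_CONTROL_ALLOW ones are
 * restricted to the current master or the control node, and DRM_AUTH
 * ones require an authenticated client.
 */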
static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers; mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_dmabuf_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
};

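/* PCI ids below: vendor 0x15ad is VMware, device 0x0405 the SVGA II adapter. */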
static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
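
/*
 * Usage example (illustrative only):
 *   modprobe vmwgfx enable_fbdev=1 restrict_iommu=1
 */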

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO("  DX Features.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_dma_buffer *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
			      &vmw_sys_ne_placement, false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_dmabuf_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
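		/* Pool: 256 pages (1 MiB); default command buffer: 2 pages. */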
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
					       256*4096, 2*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}

static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->has_dx = false;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
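	/*
	 * Selection order (summary): an active Intel IOMMU selects
	 * map_populate; otherwise physical addresses are used unless
	 * coherent pages or IOMMU use are forced via module parameters.
	 */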
#ifdef CONFIG_X86
	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		dev_priv->map_mode = vmw_dma_map_populate;
		goto out_fixup;
	}
#endif

	if (!(vmw_force_iommu || vmw_force_coherent)) {
		dev_priv->map_mode = vmw_dma_phys;
		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
		return 0;
	}

	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
		dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
	if (dev_priv->map_mode == vmw_dma_map_populate &&
	    vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
	/*
	 * No coherent page pool
	 */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;
#endif

#else /* CONFIG_X86 */
	dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (intel_iommu_enabled &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
	}
	return 0;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	return 0;
}
#endif

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

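		/*
		 * SVGA_REG_DEV_CAP is an index/value register pair:
		 * writing a SVGA3D_DEVCAP_* index selects which
		 * capability the following read returns.
		 */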
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;


	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/*
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

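	/*
	 * cap_lock serializes use of the SVGA_REG_DEV_CAP index/value
	 * pair, which is also exercised by the 3D capability ioctl.
	 */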
	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}


	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err3:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}

static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	memunmap(dev_priv->mmio_virt);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);

	return 0;
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

static struct vmw_master *vmw_master_check(struct drm_device *dev,
					   struct drm_file *file_priv,
					   unsigned int flags)
{
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster;

	if (file_priv->minor->type != DRM_MINOR_LEGACY ||
	    !(flags & DRM_AUTH))
		return NULL;

	ret = mutex_lock_interruptible(&dev->master_mutex);
	if (unlikely(ret != 0))
		return ERR_PTR(-ERESTARTSYS);

	if (file_priv->is_master) {
		mutex_unlock(&dev->master_mutex);
		return NULL;
	}

	/*
	 * Check if we were previously master, but now dropped. In that
	 * case, allow at least render node functionality.
	 */
	if (vmw_fp->locked_master) {
		mutex_unlock(&dev->master_mutex);

		if (flags & DRM_RENDER_ALLOW)
			return NULL;

		DRM_ERROR("Dropped master trying to access ioctl that "
			  "requires authentication.\n");
		return ERR_PTR(-EACCES);
	}
	mutex_unlock(&dev->master_mutex);

	/*
	 * Take the TTM lock. Possibly sleep waiting for the authenticating
	 * master to become master again, or for a SIGTERM if the
	 * authenticating master exits.
	 */
	vmaster = vmw_master(file_priv->master);
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		vmaster = ERR_PTR(ret);

	return vmaster;
}

static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	struct vmw_master *vmaster;
	unsigned int flags;
	long ret;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
			if (unlikely(ret != 0))
				return ret;

			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
				goto out_io_encoding;

			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
							_IOC_SIZE(cmd));
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	vmaster = vmw_master_check(dev, file_priv, flags);
	if (IS_ERR(vmaster)) {
		ret = PTR_ERR(vmaster);

		if (ret != -ERESTARTSYS)
			DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
				 nr, ret);
		return ret;
	}

	ret = ioctl_func(filp, cmd, arg);
	if (vmaster)
		ttm_read_unlock(&vmaster->lock);

	return ret;

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_lastclose(struct drm_device *dev)
{
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			return ret;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;
	drm_sysfs_hotplug_event(dev);

	return 0;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);

	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
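		/*
		 * Hide the device rather than fully disabling it, so
		 * the FIFO can keep draining while scanout from VRAM
		 * stops.
		 */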
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	ttm_write_lock(&dev_priv->reservation_sem, false);
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		spin_unlock(&dev_priv->svga_lock);
		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	} else
		spin_unlock(&dev_priv->svga_lock);
	ttm_write_unlock(&dev_priv->reservation_sem);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	pci_disable_device(pdev);
	drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		if (dev_priv->enable_fb)
			vmw_fb_off(dev_priv);
		ttm_suspend_lock(&dev_priv->reservation_sem);

		/*
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents is moved to swappable memory.
		 */
		vmw_execbuf_release_pinned_bo(dev_priv);
		vmw_resource_evict_all(dev_priv);
		vmw_release_device_early(dev_priv);
		ttm_bo_swapout_all(&dev_priv->bdev);
		vmw_fence_fifo_down(dev_priv->fman);
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		vmw_fence_fifo_up(dev_priv->fman);
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);

	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspended = false;
		return -EBUSY;
	}

	if (dev_priv->enable_fb)
		__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);

	return 0;
}

static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	dev_priv->suspended = false;

	return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,
	.set_busid = drm_pci_set_busid,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force())
		return -EINVAL;
#endif

	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "drmP.h"
29#include "vmwgfx_drv.h"
30#include "ttm/ttm_placement.h"
31#include "ttm/ttm_bo_driver.h"
32#include "ttm/ttm_object.h"
33#include "ttm/ttm_module.h"
34
35#define VMWGFX_DRIVER_NAME "vmwgfx"
36#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
37#define VMWGFX_CHIP_SVGAII 0
38#define VMW_FB_RESERVATION 0
39
40/**
41 * Fully encoded drm commands. Might move to vmw_drm.h
42 */
43
44#define DRM_IOCTL_VMW_GET_PARAM \
45 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
46 struct drm_vmw_getparam_arg)
47#define DRM_IOCTL_VMW_ALLOC_DMABUF \
48 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
49 union drm_vmw_alloc_dmabuf_arg)
50#define DRM_IOCTL_VMW_UNREF_DMABUF \
51 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
52 struct drm_vmw_unref_dmabuf_arg)
53#define DRM_IOCTL_VMW_CURSOR_BYPASS \
54 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
55 struct drm_vmw_cursor_bypass_arg)
56
57#define DRM_IOCTL_VMW_CONTROL_STREAM \
58 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
59 struct drm_vmw_control_stream_arg)
60#define DRM_IOCTL_VMW_CLAIM_STREAM \
61 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
62 struct drm_vmw_stream_arg)
63#define DRM_IOCTL_VMW_UNREF_STREAM \
64 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
65 struct drm_vmw_stream_arg)
66
67#define DRM_IOCTL_VMW_CREATE_CONTEXT \
68 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
69 struct drm_vmw_context_arg)
70#define DRM_IOCTL_VMW_UNREF_CONTEXT \
71 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
72 struct drm_vmw_context_arg)
73#define DRM_IOCTL_VMW_CREATE_SURFACE \
74 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
75 union drm_vmw_surface_create_arg)
76#define DRM_IOCTL_VMW_UNREF_SURFACE \
77 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
78 struct drm_vmw_surface_arg)
79#define DRM_IOCTL_VMW_REF_SURFACE \
80 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
81 union drm_vmw_surface_reference_arg)
82#define DRM_IOCTL_VMW_EXECBUF \
83 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
84 struct drm_vmw_execbuf_arg)
85#define DRM_IOCTL_VMW_FIFO_DEBUG \
86 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FIFO_DEBUG, \
87 struct drm_vmw_fifo_debug_arg)
88#define DRM_IOCTL_VMW_FENCE_WAIT \
89 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
90 struct drm_vmw_fence_wait_arg)
91#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
92 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
93 struct drm_vmw_update_layout_arg)
94
95
96/**
97 * The core DRM version of this macro doesn't account for
98 * DRM_COMMAND_BASE.
99 */
100
101#define VMW_IOCTL_DEF(ioctl, func, flags) \
102 [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
103
104/**
105 * Ioctl definitions.
106 */
107
108static struct drm_ioctl_desc vmw_ioctls[] = {
109 VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
110 DRM_AUTH | DRM_UNLOCKED),
111 VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
112 DRM_AUTH | DRM_UNLOCKED),
113 VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
114 DRM_AUTH | DRM_UNLOCKED),
115 VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
116 vmw_kms_cursor_bypass_ioctl,
117 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
118
119 VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
120 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
121 VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
122 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
123 VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
124 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
125
126 VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
127 DRM_AUTH | DRM_UNLOCKED),
128 VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
129 DRM_AUTH | DRM_UNLOCKED),
130 VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
131 DRM_AUTH | DRM_UNLOCKED),
132 VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
133 DRM_AUTH | DRM_UNLOCKED),
134 VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
135 DRM_AUTH | DRM_UNLOCKED),
136 VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
137 DRM_AUTH | DRM_UNLOCKED),
138 VMW_IOCTL_DEF(VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
139 DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
140 VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
141 DRM_AUTH | DRM_UNLOCKED),
142 VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
143 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
144};
145
146static struct pci_device_id vmw_pci_id_list[] = {
147 {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
148 {0, 0, 0}
149};
150
151static int enable_fbdev;
152
153static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
154static void vmw_master_init(struct vmw_master *);
155static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
156 void *ptr);
157
158MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
159module_param_named(enable_fbdev, enable_fbdev, int, 0600);
160
161static void vmw_print_capabilities(uint32_t capabilities)
162{
163 DRM_INFO("Capabilities:\n");
164 if (capabilities & SVGA_CAP_RECT_COPY)
165 DRM_INFO(" Rect copy.\n");
166 if (capabilities & SVGA_CAP_CURSOR)
167 DRM_INFO(" Cursor.\n");
168 if (capabilities & SVGA_CAP_CURSOR_BYPASS)
169 DRM_INFO(" Cursor bypass.\n");
170 if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
171 DRM_INFO(" Cursor bypass 2.\n");
172 if (capabilities & SVGA_CAP_8BIT_EMULATION)
173 DRM_INFO(" 8bit emulation.\n");
174 if (capabilities & SVGA_CAP_ALPHA_CURSOR)
175 DRM_INFO(" Alpha cursor.\n");
176 if (capabilities & SVGA_CAP_3D)
177 DRM_INFO(" 3D.\n");
178 if (capabilities & SVGA_CAP_EXTENDED_FIFO)
179 DRM_INFO(" Extended Fifo.\n");
180 if (capabilities & SVGA_CAP_MULTIMON)
181 DRM_INFO(" Multimon.\n");
182 if (capabilities & SVGA_CAP_PITCHLOCK)
183 DRM_INFO(" Pitchlock.\n");
184 if (capabilities & SVGA_CAP_IRQMASK)
185 DRM_INFO(" Irq mask.\n");
186 if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
187 DRM_INFO(" Display Topology.\n");
188 if (capabilities & SVGA_CAP_GMR)
189 DRM_INFO(" GMR.\n");
190 if (capabilities & SVGA_CAP_TRACES)
191 DRM_INFO(" Traces.\n");
192}
193
194static int vmw_request_device(struct vmw_private *dev_priv)
195{
196 int ret;
197
198 ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
199 if (unlikely(ret != 0)) {
200 DRM_ERROR("Unable to initialize FIFO.\n");
201 return ret;
202 }
203
204 return 0;
205}
206
207static void vmw_release_device(struct vmw_private *dev_priv)
208{
209 vmw_fifo_release(dev_priv, &dev_priv->fifo);
210}
211
212int vmw_3d_resource_inc(struct vmw_private *dev_priv)
213{
214 int ret = 0;
215
216 mutex_lock(&dev_priv->release_mutex);
217 if (unlikely(dev_priv->num_3d_resources++ == 0)) {
218 ret = vmw_request_device(dev_priv);
219 if (unlikely(ret != 0))
220 --dev_priv->num_3d_resources;
221 }
222 mutex_unlock(&dev_priv->release_mutex);
223 return ret;
224}
225
226
void vmw_3d_resource_dec(struct vmw_private *dev_priv)
{
	int32_t n3d;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(--dev_priv->num_3d_resources == 0))
		vmw_release_device(dev_priv);
	n3d = (int32_t) dev_priv->num_3d_resources;
	mutex_unlock(&dev_priv->release_mutex);

	BUG_ON(n3d < 0);
}

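/*
 * vmw_driver_load - Main per-device initialization.
 *
 * Verifies the SVGA device ID, reads the capability and memory-layout
 * registers, then sets up TTM, MMIO, KMS, overlays, optional fbdev and
 * the IRQ and PM hooks. Errors unwind in reverse order through the
 * out_* labels at the bottom.
 */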
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_sequence = (uint32_t) -100;
	mutex_init(&dev_priv->hw_mutex);
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	rwlock_init(&dev_priv->resource_lock);
	idr_init(&dev_priv->context_idr);
	idr_init(&dev_priv->surface_idr);
	idr_init(&dev_priv->stream_idr);
	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	atomic_set(&dev_priv->fence_queue_waiters, 0);
	atomic_set(&dev_priv->fifo_queue_waiters, 0);

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		mutex_unlock(&dev_priv->hw_mutex);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		dev_priv->max_gmr_descriptors =
			vmw_read(dev_priv,
				 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	mutex_unlock(&dev_priv->hw_mutex);

	vmw_print_capabilities(dev_priv->capabilities);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max GMR descriptors is %u\n",
			 (unsigned)dev_priv->max_gmr_descriptors);
	}
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_err1;
	}

	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_err2;
	}

	dev_priv->has_gmr = true;
	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
			   dev_priv->max_gmr_ids) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
					   dev_priv->mmio_size, DRM_MTRR_WC);

	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
					 dev_priv->mmio_size);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/* Request at least the mmio PCI resource. */
		DRM_INFO("It appears that vesafb is loaded. "
			 "Ignore the error above, if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}
	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);
	if (dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv);
		if (unlikely(ret != 0))
			goto out_no_fifo;
		vmw_kms_save_vga(dev_priv);
		vmw_fb_init(dev_priv);
		DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ?
			 "Detected device 3D availability.\n" :
			 "Detected no device 3D availability.\n");
	} else {
		DRM_INFO("Delayed 3D detection since we're not "
			 "running the device in SVGA mode yet.\n");
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_irq:
	if (dev_priv->enable_fb) {
		vmw_fb_close(dev_priv);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv);
	}
out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	iounmap(dev_priv->mmio_virt);
out_err3:
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
	vmw_ttm_global_release(dev_priv);
out_err0:
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);
	kfree(dev_priv);
	return ret;
}

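/*
 * vmw_driver_unload - Per-device teardown; undoes vmw_driver_load() in
 * reverse order.
 */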
static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->enable_fb) {
		vmw_fb_close(dev_priv);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv);
	}
	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	iounmap(dev_priv->mmio_virt);
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
	(void)ttm_bo_device_release(&dev_priv->bdev);
	vmw_ttm_global_release(dev_priv);
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);

	kfree(dev_priv);

	return 0;
}

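/*
 * vmw_postclose - Per-file cleanup; releases the TTM object file and any
 * master reference still held from a VT switch.
 */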
static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);
	ttm_object_file_release(&vmw_fp->tfile);
	if (vmw_fp->locked_master)
		drm_master_put(&vmw_fp->locked_master);
	kfree(vmw_fp);
}

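/*
 * vmw_driver_open - Per-file setup; allocates the driver-private file
 * struct and its TTM object file, and lazily records the device mapping
 * used for buffer-object mmaps.
 */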
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	if (unlikely(dev_priv->bdev.dev_mapping == NULL))
		dev_priv->bdev.dev_mapping =
			file_priv->filp->f_path.dentry->d_inode->i_mapping;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

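/*
 * vmw_unlocked_ioctl - ioctl entry point. Rejects driver-range ioctls
 * whose encoded size or direction doesn't match the table in vmw_ioctls
 * before handing off to drm_ioctl().
 */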
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (unlikely(ioctl->cmd_drv != cmd)) {
			DRM_ERROR("Invalid command format, ioctl %d\n",
				  nr - DRM_COMMAND_BASE);
			return -EINVAL;
		}
	}

	return drm_ioctl(filp, cmd, arg);
}

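/*
 * vmw_firstopen - Called on the first open of the device node; just
 * marks the device opened so vmw_lastclose() knows it has work to do.
 */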
static int vmw_firstopen(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->is_opened = true;

	return 0;
}

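/*
 * vmw_lastclose - Called when the last file handle is closed; disables
 * all CRTCs with an empty mode set so the device is left scanning out
 * nothing.
 */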
static void vmw_lastclose(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_crtc *crtc;
	struct drm_mode_set set;
	int ret;

	/*
	 * Do nothing on the lastclose call from drm_unload.
	 */

	if (!dev_priv->is_opened)
		return;

	dev_priv->is_opened = false;
	set.x = 0;
	set.y = 0;
	set.fb = NULL;
	set.mode = NULL;
	set.connectors = NULL;
	set.num_connectors = 0;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		set.crtc = crtc;
		ret = crtc->funcs->set_config(&set);
		WARN_ON(ret != 0);
	}
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
	INIT_LIST_HEAD(&vmaster->fb_surf);
	mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

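/*
 * vmw_master_set - Called when a client becomes DRM master. If fbdev is
 * disabled this also brings up SVGA mode; the previous (fbdev) master,
 * if any, is locked out and its VRAM is evicted first.
 */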
static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (!dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv);
		if (unlikely(ret != 0))
			return ret;
		vmw_kms_save_vga(dev_priv);
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			goto out_no_active_lock;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to clean VRAM on "
				  "master drop.\n");
		}

		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;

out_no_active_lock:
	if (!dev_priv->enable_fb) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv);
	}
	return ret;
}

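/*
 * vmw_master_drop - Called when a client drops DRM master, e.g. on a VT
 * switch. The dropping master stays locked (and is kept alive via
 * locked_master) until vmw_master_set() or vmw_postclose() releases it,
 * and the built-in fbdev master takes over.
 */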
static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_kms_idle_workqueues(vmaster);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

	if (!dev_priv->enable_fb) {
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0))
			DRM_ERROR("Unable to clean VRAM on master drop.\n");
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv);
	}

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

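/*
 * vmwgfx_pm_notifier - PM notifier callback. Before suspend or
 * hibernation all TTM activity is locked out and buffer objects are
 * swapped out of VRAM/GMRs; the lock is dropped again once the
 * transition completes.
 */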
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);
	struct vmw_master *vmaster = dev_priv->active_master;

	switch (val) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ttm_suspend_lock(&vmaster->lock);

		/*
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		ttm_bo_swapout_all(&dev_priv->bdev);

		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
	case PM_POST_RESTORE:
		ttm_suspend_unlock(&vmaster->lock);

		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}

781/**
782 * These might not be needed with the virtual SVGA device.
783 */
784
785static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
786{
787 struct drm_device *dev = pci_get_drvdata(pdev);
788 struct vmw_private *dev_priv = vmw_priv(dev);
789
790 if (dev_priv->num_3d_resources != 0) {
791 DRM_INFO("Can't suspend or hibernate "
792 "while 3D resources are active.\n");
793 return -EBUSY;
794 }
795
796 pci_save_state(pdev);
797 pci_disable_device(pdev);
798 pci_set_power_state(pdev, PCI_D3hot);
799 return 0;
800}
801
802static int vmw_pci_resume(struct pci_dev *pdev)
803{
804 pci_set_power_state(pdev, PCI_D0);
805 pci_restore_state(pdev);
806 return pci_enable_device(pdev);
807}
808
static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

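/*
 * vmw_pm_prepare - PM prepare hook. Drops the fbdev 3D reference so the
 * FIFO can stop, then refuses the transition (-EBUSY) if other 3D
 * resources are still active.
 */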
static int vmw_pm_prepare(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	/*
	 * Release 3d reference held by fbdev and potentially
	 * stop fifo.
	 */
	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_3d_resource_dec(dev_priv);

	if (dev_priv->num_3d_resources != 0) {
		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");

		if (dev_priv->enable_fb)
			vmw_3d_resource_inc(dev_priv);
		dev_priv->suspended = false;
		return -EBUSY;
	}

	return 0;
}

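/*
 * vmw_pm_complete - PM complete hook; reclaims the fbdev 3D reference
 * dropped in vmw_pm_prepare() and clears the suspended flag.
 */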
static void vmw_pm_complete(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	/*
	 * Reclaim 3d reference held by fbdev and potentially
	 * start fifo.
	 */
	if (dev_priv->enable_fb)
		vmw_3d_resource_inc(dev_priv);

	dev_priv->suspended = false;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.prepare = vmw_pm_prepare,
	.complete = vmw_pm_complete,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

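/*
 * DRM and PCI driver glue: "driver" wires the vmwgfx entry points into
 * the DRM core, and vmw_pci_driver binds them to the SVGA II PCI device.
 */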
static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.firstopen = vmw_firstopen,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.reclaim_buffers_locked = NULL,
	.ioctls = vmw_ioctls,
	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
	.dma_quiescent = NULL, /*vmw_dma_quiescent, */
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,
	.fops = {
		.owner = THIS_MODULE,
		.open = drm_open,
		.release = drm_release,
		.unlocked_ioctl = vmw_unlocked_ioctl,
		.mmap = vmw_mmap,
		.poll = drm_poll,
		.fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
		.compat_ioctl = drm_compat_ioctl,
#endif
		.llseek = noop_llseek,
	},
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;

	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");