// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/console.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>

#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_pci.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_placement.h>

#include "ttm_object.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"

#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

#ifndef VMWGFX_GIT_VERSION
#define VMWGFX_GIT_VERSION "Unknown"
#endif

#define VMWGFX_REPO "In Tree"

#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)


/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
		struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
		struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
		struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
		 struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT, \
		 union drm_vmw_gb_surface_create_ext_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \
		 union drm_vmw_gb_surface_reference_ext_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}

/**
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, DRM_AUTH |
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers; mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	/*
	 * The permissions of the below ioctl are overridden in
	 * vmw_generic_ioctl(). We require either
	 * DRM_MASTER or capable(CAP_SYS_ADMIN).
	 */
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_bo_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
		      vmw_gb_surface_define_ext_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
		      vmw_gb_surface_reference_ext_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
};

static const struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);


static void vmw_print_capabilities2(uint32_t capabilities2)
{
	DRM_INFO("Capabilities2:\n");
	if (capabilities2 & SVGA_CAP2_GROW_OTABLE)
		DRM_INFO("  Grow oTable.\n");
	if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
		DRM_INFO("  IntraSurface copy.\n");
}

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO("  DX Features.\n");
	if (capabilities & SVGA_CAP_HP_CMD_QUEUE)
		DRM_INFO("  HP Command Queue.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_buffer_object *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
			  &vmw_sys_ne_placement, false,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_bo_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
					       256*4096, 2*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}

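/**
 * vmw_request_device - Perform device bringup.
 *
 * @dev_priv: Pointer to device private.
 *
 * Initializes the FIFO, the fence manager and, when available, the
 * command buffer manager, then performs the late setup and creates the
 * dummy query buffer object. On failure, everything is taken down again.
 */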
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->has_dx = false;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

472
473/**
474 * vmw_release_device_early - Early part of fifo takedown.
475 *
476 * @dev_priv: Pointer to device private struct.
477 *
478 * This is the first part of command submission takedown, to be called before
479 * buffer management is taken down.
480 */
481static void vmw_release_device_early(struct vmw_private *dev_priv)
482{
483 /*
484 * Previous destructions should've released
485 * the pinned bo.
486 */
487
488 BUG_ON(dev_priv->pinned_bo != NULL);
489
490 vmw_bo_unreference(&dev_priv->dummy_query_bo);
491 if (dev_priv->cman)
492 vmw_cmdbuf_remove_pool(dev_priv->cman);
493
494 if (dev_priv->has_mob) {
495 ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
496 vmw_otables_takedown(dev_priv);
497 }
498}
499
500/**
501 * vmw_release_device_late - Late part of fifo takedown.
502 *
503 * @dev_priv: Pointer to device private struct.
504 *
505 * This is the last part of the command submission takedown, to be called when
506 * command submission is no longer needed. It may wait on pending fences.
507 */
508static void vmw_release_device_late(struct vmw_private *dev_priv)
509{
510 vmw_fence_fifo_down(dev_priv->fman);
511 if (dev_priv->cman)
512 vmw_cmdbuf_man_destroy(dev_priv->cman);
513
514 vmw_fifo_release(dev_priv, &dev_priv->fifo);
515}
516
/**
 * vmw_get_initial_size - Set the initial_[width|height] fields on the
 * given vmw_private.
 *
 * @dev_priv: Pointer to a struct vmw_private.
 *
 * Reads the SVGA_REG_[WIDTH|HEIGHT] registers, clamping the values up
 * to at least VMW_MIN_INITIAL_[WIDTH|HEIGHT]. If either value exceeds
 * the fb_max_[width|height] fields, the values are considered invalid
 * and both are reset to VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine what actions need to be taken by the
 * driver to make system pages visible to the device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Caching DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
	else if (vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;
	else
		dev_priv->map_mode = vmw_dma_map_populate;

	/* No TTM coherent page pool? FIXME: Ask TTM instead! */
	if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
	    (dev_priv->map_mode == vmw_dma_alloc_coherent))
		return -EINVAL;

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
	return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret = 0;

	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
	if (dev_priv->map_mode != vmw_dma_phys &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
	}

	return ret;
}

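/**
 * vmw_driver_load - Driver initialization entry point.
 *
 * @dev: Pointer to the drm device.
 * @chipset: Chip id from the device id table.
 *
 * Allocates and sets up the device private structure, reads the device
 * capability and memory size registers, initializes the TTM memory
 * managers, KMS, overlays and optionally fbdev, and registers a PM
 * notifier for hibernation.
 */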
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;
	char host_log[100] = {0};

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(!dev_priv)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	mutex_init(&dev_priv->global_kms_state_mutex);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->resource_lock);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);
	spin_lock_init(&dev_priv->cursor_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) {
		dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
	}


	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 3;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);
	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
		vmw_print_capabilities2(dev_priv->capabilities2);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	dma_set_max_seg_size(dev->dev, min_t(unsigned int, U32_MAX & PAGE_MASK,
					     SCATTERLIST_MAX_SEGMENT));

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err0;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
						&vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/*
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = vmw_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}

	vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	if (dev_priv->has_dx) {
		/*
		 * SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1
		 * support
		 */
		if ((dev_priv->capabilities2 & SVGA_CAP2_DX2) != 0) {
			vmw_write(dev_priv, SVGA_REG_DEV_CAP,
				  SVGA3D_DEVCAP_SM41);
			dev_priv->has_sm4_1 = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		}
	}

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
	DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
		 ? "yes." : "no.");
	DRM_INFO("SM4_1: %s\n", dev_priv->has_sm4_1 ? "yes." : "no.");

	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
		 VMWGFX_REPO, VMWGFX_GIT_VERSION);
	vmw_host_log(host_log);

	memset(host_log, 0, sizeof(host_log));
	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
		 VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
		 VMWGFX_DRIVER_PATCHLEVEL);
	vmw_host_log(host_log);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}

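/**
 * vmw_driver_unload - Driver teardown entry point.
 *
 * @dev: Pointer to the drm device.
 *
 * Takes down everything set up by vmw_driver_load(), in roughly the
 * reverse order.
 */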
static void vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	memunmap(dev_priv->mmio_virt);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);
}

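/**
 * vmw_postclose - Release per-file driver state on file close.
 *
 * @dev: Pointer to the drm device.
 * @file_priv: Pointer to the drm file private being closed.
 */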
static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

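/**
 * vmw_driver_open - Set up per-file driver state on file open.
 *
 * @dev: Pointer to the drm device.
 * @file_priv: Pointer to the new drm file private.
 *
 * Allocates a struct vmw_fpriv and its TTM object file, which tracks
 * the objects opened through this file handle.
 */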
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(!vmw_fp))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

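/**
 * vmw_generic_ioctl - Common ioctl entry point with driver-private checks.
 *
 * @filp: Pointer to the file struct.
 * @cmd: The ioctl command number.
 * @arg: The ioctl argument.
 * @ioctl_func: The core handler to hand off to, drm_ioctl() or
 * drm_compat_ioctl().
 *
 * Verifies the command encoding of driver-private ioctls before handing
 * off to the core handler. VMW_EXECBUF is dispatched directly without
 * the encoding check, and VMW_UPDATE_LAYOUT additionally requires
 * DRM_MASTER or CAP_SYS_ADMIN.
 */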
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	unsigned int flags;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			return ioctl_func(filp, cmd, arg);
		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
			if (!drm_is_current_master(file_priv) &&
			    !capable(CAP_SYS_ADMIN))
				return -EACCES;
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	return ioctl_func(filp, cmd, arg);

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

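/**
 * vmw_master_set - Notify the driver of a new master.
 *
 * @dev: Pointer to the drm device.
 * @file_priv: Pointer to the drm file private of the new master.
 * @from_open: Whether the master is set as part of a file open.
 */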
static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	/*
	 * Inform a new master that the layout may have changed while
	 * it was gone.
	 */
	if (!from_open)
		drm_sysfs_hotplug_event(dev);

	return 0;
}

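/**
 * vmw_master_drop - Notify the driver that the master is being dropped.
 *
 * @dev: Pointer to the drm device.
 * @file_priv: Pointer to the drm file private of the dropped master.
 *
 * Clears legacy cursor hotspots and, if fbdev is not enabled, disables
 * SVGA mode.
 */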
static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	/*
	 * Disabling SVGA will turn off device modesetting capabilities, so
	 * notify KMS about that so that it doesn't cache atomic state that
	 * isn't valid anymore, for example crtcs turned on.
	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
	 * end up with lock order reversal. Thus, a master may actually perform
	 * a new modeset just after we call vmw_kms_lost_device() and race with
	 * vmw_svga_disable(), but that should at worst cause atomic KMS state
	 * to be inconsistent with the device, causing modesetting problems.
	 */
	vmw_kms_lost_device(dev_priv->dev);
	ttm_write_lock(&dev_priv->reservation_sem, false);
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		spin_unlock(&dev_priv->svga_lock);
		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	} else
		spin_unlock(&dev_priv->svga_lock);
	ttm_write_unlock(&dev_priv->reservation_sem);
}

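/**
 * vmw_remove - PCI remove callback.
 *
 * @pdev: Pointer to the PCI device being removed.
 *
 * Disables the device and unloads the driver instance bound to it.
 */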
static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	pci_disable_device(pdev);
	drm_put_dev(dev);
}

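/**
 * vmwgfx_pm_notifier - PM notifier callback used around hibernation.
 *
 * @nb: The notifier block embedded in struct vmw_private.
 * @val: The PM event.
 * @ptr: Unused.
 *
 * Takes the reservation sem in suspend mode when hibernation is being
 * prepared, and releases it again once the system has resumed.
 */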
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		/*
		 * Take the reservation sem in write mode, which will make sure
		 * there are no other processes holding a buffer object
		 * reservation, meaning we should be able to evict all buffer
		 * objects if needed.
		 * Once user-space processes have been frozen, we can release
		 * the lock again.
		 */
		ttm_suspend_lock(&dev_priv->reservation_sem);
		dev_priv->suspend_locked = true;
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		if (READ_ONCE(dev_priv->suspend_locked)) {
			dev_priv->suspend_locked = false;
			ttm_suspend_unlock(&dev_priv->reservation_sem);
		}
		break;
	default:
		break;
	}
	return 0;
}

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

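/**
 * vmw_pm_freeze - Driver hibernation freeze callback.
 *
 * @kdev: Pointer to the struct device.
 *
 * Suspends modesetting, evicts all resources and releases the device.
 * Fails with -EBUSY if 3D resources are still active.
 */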
static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	/*
	 * Unlock for vmw_kms_suspend.
	 * No user-space processes should be running now.
	 */
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	ret = vmw_kms_suspend(dev_priv->dev);
	if (ret) {
		ttm_suspend_lock(&dev_priv->reservation_sem);
		DRM_ERROR("Failed to freeze modesetting.\n");
		return ret;
	}
	if (dev_priv->enable_fb)
		vmw_fb_off(dev_priv);

	ttm_suspend_lock(&dev_priv->reservation_sem);
	vmw_execbuf_release_pinned_bo(dev_priv);
	vmw_resource_evict_all(dev_priv);
	vmw_release_device_early(dev_priv);
	ttm_bo_swapout_all(&dev_priv->bdev);
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);
	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspend_locked = false;
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->suspend_state)
			vmw_kms_resume(dev);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		return -EBUSY;
	}

	vmw_fence_fifo_down(dev_priv->fman);
	__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);
	return 0;
}

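/**
 * vmw_pm_restore - Driver hibernation thaw and restore callback.
 *
 * @kdev: Pointer to the struct device.
 *
 * Re-enables the SVGA device and brings back the resources released by
 * vmw_pm_freeze(), then resumes modesetting and fbdev if enabled.
 */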
static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->suspend_locked = false;
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	if (dev_priv->suspend_state)
		vmw_kms_resume(dev_priv->dev);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);

	return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	.driver_features =
	    DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;

	if (vgacon_text_force())
		return -EINVAL;

	ret = pci_register_driver(&vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	pci_unregister_driver(&vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27#include <linux/module.h>
28
29#include <drm/drmP.h>
30#include "vmwgfx_drv.h"
31#include <drm/ttm/ttm_placement.h>
32#include <drm/ttm/ttm_bo_driver.h>
33#include <drm/ttm/ttm_object.h>
34#include <drm/ttm/ttm_module.h>
35#include <linux/dma_remapping.h>
36
37#define VMWGFX_DRIVER_NAME "vmwgfx"
38#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
39#define VMWGFX_CHIP_SVGAII 0
40#define VMW_FB_RESERVATION 0
41
42#define VMW_MIN_INITIAL_WIDTH 800
43#define VMW_MIN_INITIAL_HEIGHT 600
44
45
46/**
47 * Fully encoded drm commands. Might move to vmw_drm.h
48 */
49
50#define DRM_IOCTL_VMW_GET_PARAM \
51 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
52 struct drm_vmw_getparam_arg)
53#define DRM_IOCTL_VMW_ALLOC_DMABUF \
54 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
55 union drm_vmw_alloc_dmabuf_arg)
56#define DRM_IOCTL_VMW_UNREF_DMABUF \
57 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
58 struct drm_vmw_unref_dmabuf_arg)
59#define DRM_IOCTL_VMW_CURSOR_BYPASS \
60 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
61 struct drm_vmw_cursor_bypass_arg)
62
63#define DRM_IOCTL_VMW_CONTROL_STREAM \
64 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
65 struct drm_vmw_control_stream_arg)
66#define DRM_IOCTL_VMW_CLAIM_STREAM \
67 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
68 struct drm_vmw_stream_arg)
69#define DRM_IOCTL_VMW_UNREF_STREAM \
70 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
71 struct drm_vmw_stream_arg)
72
73#define DRM_IOCTL_VMW_CREATE_CONTEXT \
74 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
75 struct drm_vmw_context_arg)
76#define DRM_IOCTL_VMW_UNREF_CONTEXT \
77 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
78 struct drm_vmw_context_arg)
79#define DRM_IOCTL_VMW_CREATE_SURFACE \
80 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
81 union drm_vmw_surface_create_arg)
82#define DRM_IOCTL_VMW_UNREF_SURFACE \
83 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
84 struct drm_vmw_surface_arg)
85#define DRM_IOCTL_VMW_REF_SURFACE \
86 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
87 union drm_vmw_surface_reference_arg)
88#define DRM_IOCTL_VMW_EXECBUF \
89 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
90 struct drm_vmw_execbuf_arg)
91#define DRM_IOCTL_VMW_GET_3D_CAP \
92 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
93 struct drm_vmw_get_3d_cap_arg)
94#define DRM_IOCTL_VMW_FENCE_WAIT \
95 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
96 struct drm_vmw_fence_wait_arg)
97#define DRM_IOCTL_VMW_FENCE_SIGNALED \
98 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
99 struct drm_vmw_fence_signaled_arg)
100#define DRM_IOCTL_VMW_FENCE_UNREF \
101 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
102 struct drm_vmw_fence_arg)
103#define DRM_IOCTL_VMW_FENCE_EVENT \
104 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
105 struct drm_vmw_fence_event_arg)
106#define DRM_IOCTL_VMW_PRESENT \
107 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
108 struct drm_vmw_present_arg)
109#define DRM_IOCTL_VMW_PRESENT_READBACK \
110 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
111 struct drm_vmw_present_readback_arg)
112#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
113 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
114 struct drm_vmw_update_layout_arg)
115#define DRM_IOCTL_VMW_CREATE_SHADER \
116 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
117 struct drm_vmw_shader_create_arg)
118#define DRM_IOCTL_VMW_UNREF_SHADER \
119 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
120 struct drm_vmw_shader_arg)
121#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
122 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
123 union drm_vmw_gb_surface_create_arg)
124#define DRM_IOCTL_VMW_GB_SURFACE_REF \
125 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
126 union drm_vmw_gb_surface_reference_arg)
127#define DRM_IOCTL_VMW_SYNCCPU \
128 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
129 struct drm_vmw_synccpu_arg)
130
131/**
132 * The core DRM version of this macro doesn't account for
133 * DRM_COMMAND_BASE.
134 */
135
136#define VMW_IOCTL_DEF(ioctl, func, flags) \
137 [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
138
139/**
140 * Ioctl definitions.
141 */
142
143static const struct drm_ioctl_desc vmw_ioctls[] = {
144 VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
145 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
146 VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
147 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
148 VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
149 DRM_UNLOCKED | DRM_RENDER_ALLOW),
150 VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
151 vmw_kms_cursor_bypass_ioctl,
152 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
153
154 VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
155 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
156 VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
157 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
158 VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
159 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
160
161 VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
162 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
163 VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
164 DRM_UNLOCKED | DRM_RENDER_ALLOW),
165 VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
166 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
167 VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
168 DRM_UNLOCKED | DRM_RENDER_ALLOW),
169 VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
170 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
171 VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
172 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
173 VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
174 DRM_UNLOCKED | DRM_RENDER_ALLOW),
175 VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
176 vmw_fence_obj_signaled_ioctl,
177 DRM_UNLOCKED | DRM_RENDER_ALLOW),
178 VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
179 DRM_UNLOCKED | DRM_RENDER_ALLOW),
180 VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
181 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
182 VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
183 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
184
185 /* these allow direct access to the framebuffers mark as master only */
186 VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
187 DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
188 VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
189 vmw_present_readback_ioctl,
190 DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
191 VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
192 vmw_kms_update_layout_ioctl,
193 DRM_MASTER | DRM_UNLOCKED),
194 VMW_IOCTL_DEF(VMW_CREATE_SHADER,
195 vmw_shader_define_ioctl,
196 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
197 VMW_IOCTL_DEF(VMW_UNREF_SHADER,
198 vmw_shader_destroy_ioctl,
199 DRM_UNLOCKED | DRM_RENDER_ALLOW),
200 VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
201 vmw_gb_surface_define_ioctl,
202 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
203 VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
204 vmw_gb_surface_reference_ioctl,
205 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
206 VMW_IOCTL_DEF(VMW_SYNCCPU,
207 vmw_user_dmabuf_synccpu_ioctl,
208 DRM_UNLOCKED | DRM_RENDER_ALLOW),
209};
210
211static struct pci_device_id vmw_pci_id_list[] = {
212 {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
213 {0, 0, 0}
214};
215MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
216
217static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
218static int vmw_force_iommu;
219static int vmw_restrict_iommu;
220static int vmw_force_coherent;
221static int vmw_restrict_dma_mask;
222
223static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
224static void vmw_master_init(struct vmw_master *);
225static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
226 void *ptr);
227
228MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
229module_param_named(enable_fbdev, enable_fbdev, int, 0600);
230MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
231module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
232MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
233module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
234MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
235module_param_named(force_coherent, vmw_force_coherent, int, 0600);
236MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
237module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct ttm_buffer_object *bo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the bo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	ret = ttm_bo_create(&dev_priv->bdev,
			    PAGE_SIZE,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, NULL,
			    &bo);

	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, true, false, 0);
	BUG_ON(ret != 0);

	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin(bo, false);
	ttm_bo_unreserve(bo);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		ttm_bo_unref(&bo);
	} else
		dev_priv->dummy_query_bo = bo;

	return ret;
}
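
/*
 * Descriptive note: the buffer created above is kept in
 * dev_priv->dummy_query_bo for the lifetime of the device and is finally
 * released by the ttm_bo_unref() call in vmw_release_device() below.
 */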

static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			goto out_no_mob;
		}
	}
	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->has_mob)
		vmw_otables_takedown(dev_priv);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

static void vmw_release_device(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */
	BUG_ON(dev_priv->pinned_bo != NULL);

	ttm_bo_unref(&dev_priv->dummy_query_bo);
	if (dev_priv->has_mob)
		vmw_otables_takedown(dev_priv);
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Increase the 3d resource refcount.
 * If the count was previously zero, initialize the fifo, switching to svga
 * mode. Note that the master holds a ref as well, and may request an
 * explicit switch to svga mode if fb is not running, using @unhide_svga.
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
			bool unhide_svga)
{
	int ret = 0;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(dev_priv->num_3d_resources++ == 0)) {
		ret = vmw_request_device(dev_priv);
		if (unlikely(ret != 0))
			--dev_priv->num_3d_resources;
	} else if (unhide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) &
			  ~SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	mutex_unlock(&dev_priv->release_mutex);
	return ret;
}

/**
 * Decrease the 3d resource refcount.
 * If the count reaches zero, disable the fifo, switching to vga mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
			 bool hide_svga)
{
	int32_t n3d;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(--dev_priv->num_3d_resources == 0))
		vmw_release_device(dev_priv);
	else if (hide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) |
			  SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	n3d = (int32_t) dev_priv->num_3d_resources;
	mutex_unlock(&dev_priv->release_mutex);

	BUG_ON(n3d < 0);
}
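
/*
 * A minimal usage sketch (illustrative only): paths that need the device
 * in SVGA mode bracket their work with the two helpers above, mirroring
 * the calls made from vmw_driver_load() and vmw_master_set() in this
 * file:
 *
 *	ret = vmw_3d_resource_inc(dev_priv, true);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... submit fifo commands ...
 *	vmw_3d_resource_dec(dev_priv, true);
 */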

/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading the SVGA_REG_[WIDTH|HEIGHT] registers and then
 * clamping the values against the fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT] minimums.
 * If the values appear to be invalid, they are set to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}
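
/*
 * Worked example (illustrative): a host-reported 640x480 is raised to
 * the 800x600 minimum, while a reported size exceeding fb_max_width or
 * fb_max_height, which indicates a host error, falls back to 800x600.
 */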

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
#ifdef CONFIG_X86
	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		dev_priv->map_mode = vmw_dma_map_populate;
		goto out_fixup;
	}
#endif

	if (!(vmw_force_iommu || vmw_force_coherent)) {
		dev_priv->map_mode = vmw_dma_phys;
		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
		return 0;
	}

	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
		dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
	if (dev_priv->map_mode == vmw_dma_map_populate &&
	    vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
	/*
	 * No coherent page pool
	 */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;
#endif

#else /* CONFIG_X86 */
	dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}
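
/*
 * Summary of the selection above (derived from the code; the last two
 * fixups also apply on the intel_iommu path):
 *
 *	intel_iommu_enabled			-> vmw_dma_map_populate
 *	neither force flag set			-> vmw_dma_phys (early return)
 *	dma_ops provide sync_single_for_cpu	-> vmw_dma_alloc_coherent
 *	swiotlb not configured			-> vmw_dma_map_populate
 *	map_populate and restrict_iommu set	-> vmw_dma_map_bind
 *	force_coherent set			-> vmw_dma_alloc_coherent
 */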

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (intel_iommu_enabled &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
	}
	return 0;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	return 0;
}
#endif
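
/*
 * Descriptive note: DMA_BIT_MASK(44) expands to a mask with the low 44
 * bits set; dma_set_mask() returns a negative errno if the platform
 * cannot honor the requested mask, and that error is propagated to the
 * caller in vmw_driver_load().
 */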

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->hw_mutex);
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	atomic_set(&dev_priv->fifo_queue_waiters, 0);

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		mutex_unlock(&dev_priv->hw_mutex);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
	} else
		dev_priv->prim_bb_mem = dev_priv->vram_size;

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0)) {
		mutex_unlock(&dev_priv->hw_mutex);
		goto out_err0;
	}

	if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size))
		dev_priv->prim_bb_mem = dev_priv->vram_size;

	mutex_unlock(&dev_priv->hw_mutex);

	vmw_print_capabilities(dev_priv->capabilities);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_err1;
	}

	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_err2;
	}

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
					       dev_priv->mmio_size);

	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
					 dev_priv->mmio_size);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/*
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It looks like vesafb is loaded. "
			 "Ignore the above error, if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	vmw_kms_save_vga(dev_priv);

	/* Start kms and overlay systems, needs fifo. */
	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	if (dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv, true);
		if (unlikely(ret != 0))
			goto out_no_fifo;
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	vmw_kms_restore_vga(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	iounmap(dev_priv->mmio_virt);
out_err3:
	arch_phys_wc_del(dev_priv->mmio_mtrr);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);
	return ret;
}

static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	if (dev_priv->ctx.cmd_bounce)
		vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_close(dev_priv);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, false);
	}
	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	iounmap(dev_priv->mmio_virt);
	arch_phys_wc_del(dev_priv->mmio_mtrr);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
	(void)ttm_bo_device_release(&dev_priv->bdev);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);

	return 0;
}

static void vmw_preclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	vmw_compat_shader_man_destroy(vmw_fp->shman);
	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	INIT_LIST_HEAD(&vmw_fp->fence_events);
	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
	if (IS_ERR(vmw_fp->shman))
		goto out_no_shman;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_shman:
	ttm_object_file_release(&vmw_fp->tfile);
out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

static struct vmw_master *vmw_master_check(struct drm_device *dev,
					   struct drm_file *file_priv,
					   unsigned int flags)
{
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster;

	if (file_priv->minor->type != DRM_MINOR_LEGACY ||
	    !(flags & DRM_AUTH))
		return NULL;

	ret = mutex_lock_interruptible(&dev->master_mutex);
	if (unlikely(ret != 0))
		return ERR_PTR(-ERESTARTSYS);

	if (file_priv->is_master) {
		mutex_unlock(&dev->master_mutex);
		return NULL;
	}

	/*
	 * Check if we were previously master, but now dropped.
	 */
	if (vmw_fp->locked_master) {
		mutex_unlock(&dev->master_mutex);
		DRM_ERROR("Dropped master trying to access ioctl that "
			  "requires authentication.\n");
		return ERR_PTR(-EACCES);
	}
	mutex_unlock(&dev->master_mutex);

	/*
	 * Taking the drm_global_mutex after the TTM lock might deadlock
	 */
	if (!(flags & DRM_UNLOCKED)) {
		DRM_ERROR("Refusing locked ioctl access.\n");
		return ERR_PTR(-EDEADLK);
	}

	/*
	 * Take the TTM lock. Possibly sleep waiting for the authenticating
	 * master to become master again, or for a SIGTERM if the
	 * authenticating master exits.
	 */
	vmaster = vmw_master(file_priv->master);
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		vmaster = ERR_PTR(ret);

	return vmaster;
}

static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	struct vmw_master *vmaster;
	unsigned int flags;
	long ret;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (unlikely(ioctl->cmd_drv != cmd)) {
			DRM_ERROR("Invalid command format, ioctl %d\n",
				  nr - DRM_COMMAND_BASE);
			return -EINVAL;
		}
		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	vmaster = vmw_master_check(dev, file_priv, flags);
	if (unlikely(IS_ERR(vmaster))) {
		DRM_INFO("IOCTL ERROR %d\n", nr);
		return PTR_ERR(vmaster);
	}

	ret = ioctl_func(filp, cmd, arg);
	if (vmaster)
		ttm_read_unlock(&vmaster->lock);

	return ret;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif
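
/*
 * Descriptive note: on 64-bit kernels, 32-bit user space enters through
 * vmw_compat_ioctl() above. Both entry points funnel into
 * vmw_generic_ioctl(), so the master and flag checks are applied
 * uniformly, with drm_compat_ioctl() handling argument translation.
 */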

static void vmw_lastclose(struct drm_device *dev)
{
	struct drm_crtc *crtc;
	struct drm_mode_set set;
	int ret;

	set.x = 0;
	set.y = 0;
	set.fb = NULL;
	set.mode = NULL;
	set.connectors = NULL;
	set.num_connectors = 0;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		set.crtc = crtc;
		ret = drm_mode_set_config_internal(&set);
		WARN_ON(ret != 0);
	}
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
	INIT_LIST_HEAD(&vmaster->fb_surf);
	mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (!dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv, true);
		if (unlikely(ret != 0))
			return ret;
		vmw_kms_save_vga(dev_priv);
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			goto out_no_active_lock;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to clean VRAM on "
				  "master drop.\n");
		}

		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;

out_no_active_lock:
	if (!dev_priv->enable_fb) {
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
	}
	return ret;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */
	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	vmw_execbuf_release_pinned_bo(dev_priv);

	if (!dev_priv->enable_fb) {
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0))
			DRM_ERROR("Unable to clean VRAM on master drop.\n");
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ttm_suspend_lock(&dev_priv->reservation_sem);

		/*
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		vmw_execbuf_release_pinned_bo(dev_priv);
		vmw_resource_evict_all(dev_priv);
		ttm_bo_swapout_all(&dev_priv->bdev);

		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
	case PM_POST_RESTORE:
		ttm_suspend_unlock(&dev_priv->reservation_sem);

		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}

/*
 * These might not be needed with the virtual SVGA device.
 */
static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->num_3d_resources != 0) {
		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");
		return -EBUSY;
	}

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

static int vmw_pm_prepare(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	/*
	 * Release the 3d reference held by fbdev and potentially
	 * stop the fifo.
	 */
	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_3d_resource_dec(dev_priv, true);

	if (dev_priv->num_3d_resources != 0) {
		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");

		if (dev_priv->enable_fb)
			vmw_3d_resource_inc(dev_priv, true);
		dev_priv->suspended = false;
		return -EBUSY;
	}

	return 0;
}

static void vmw_pm_complete(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);
	mutex_unlock(&dev_priv->hw_mutex);

	/*
	 * Reclaim the 3d reference held by fbdev and potentially
	 * start the fifo.
	 */
	if (dev_priv->enable_fb)
		vmw_3d_resource_inc(dev_priv, false);

	dev_priv->suspended = false;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.prepare = vmw_pm_prepare,
	.complete = vmw_pm_complete,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	    DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.preclose = vmw_preclose,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;

	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");