/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>
#include <linux/console.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600


/*
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
		struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
		struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
		struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
		 struct drm_vmw_context_arg)

/*
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
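
/*
 * Illustrative expansion (a sketch, not compiled code): for the
 * VMW_GET_PARAM entry in the table below, the macro resolves to roughly
 *
 *	[DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE] =
 *		{DRM_IOCTL_VMW_GET_PARAM, DRM_AUTH | DRM_RENDER_ALLOW,
 *		 vmw_getparam_ioctl},
 *
 * i.e. vmw_ioctls[] is indexed by the driver-private ioctl number with
 * DRM_COMMAND_BASE (0x40 in drm.h) subtracted out.
 */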

/*
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers; mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_dmabuf_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
};
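
/*
 * Userspace sketch (illustration only; drmCommandWriteRead() is
 * libdrm's helper, not part of this file): a driver-private ioctl such
 * as VMW_GET_PARAM is typically reached through
 *
 *	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };
 *	int err = drmCommandWriteRead(fd, DRM_VMW_GET_PARAM,
 *				      &arg, sizeof(arg));
 *
 * which encodes DRM_COMMAND_BASE + DRM_VMW_GET_PARAM into the ioctl
 * number and round-trips the argument struct through vmw_getparam_ioctl().
 */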

static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
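
/*
 * Example usage (hypothetical shell session, not part of the driver):
 * the parameters above can be given at module load time,
 *
 *	modprobe vmwgfx enable_fbdev=1 force_coherent=1
 *
 * or, since they are registered with mode 0600, changed by root at
 * runtime through sysfs:
 *
 *	echo 1 > /sys/module/vmwgfx/parameters/restrict_iommu
 */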


static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO("  DX Features.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_dma_buffer *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
			      &vmw_sys_ne_placement, false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_dmabuf_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
					       256*4096, 2*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}

static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->has_dx = false;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * vmw_get_initial_size - Set the initial_[width|height] fields on the
 * given vmw_private.
 *
 * It does so by reading the SVGA_REG_[WIDTH|HEIGHT] registers and then
 * clamping the values to the fb_max_[width|height] fields and to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}
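
/*
 * Worked example of the clamping above: a host-reported 640x480 is
 * raised to 800x600 (VMW_MIN_INITIAL_*), and a reported size exceeding
 * fb_max_width x fb_max_height likewise falls back to 800x600.
 */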

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
#ifdef CONFIG_X86
	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		dev_priv->map_mode = vmw_dma_map_populate;
		goto out_fixup;
	}
#endif

	if (!(vmw_force_iommu || vmw_force_coherent)) {
		dev_priv->map_mode = vmw_dma_phys;
		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
		return 0;
	}

	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
		dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
	if (dev_priv->map_mode == vmw_dma_map_populate &&
	    vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
	/*
	 * No coherent page pool
	 */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;
#endif

#else /* CONFIG_X86 */
	dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}
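
/*
 * Rough summary of the selection above (a sketch; the precise outcome
 * depends on the kernel configuration and module parameters):
 *
 *	intel_iommu_enabled		-> vmw_dma_map_populate
 *	neither force flag set		-> vmw_dma_phys
 *	coherent dma_ops detected	-> vmw_dma_alloc_coherent
 *	restrict_iommu while populating	-> vmw_dma_map_bind
 *	force_coherent			-> vmw_dma_alloc_coherent
 *	!CONFIG_X86			-> vmw_dma_map_populate
 */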

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * With 32-bit we can only handle 32-bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (intel_iommu_enabled &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
	}
	return 0;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	return 0;
}
#endif
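
/*
 * For reference: DMA_BIT_MASK(44) is ((1ULL << 44) - 1), so the
 * restriction above limits device-visible addresses to 16 TiB, which
 * matches the 32-bit PFN assumption (2^32 pages * 4 KiB = 2^44 bytes).
 */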

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;


	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/*
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}


	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err3:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}
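
/*
 * A note on the error path above (summary of existing behavior): the
 * out_* labels unwind in exact reverse order of initialization, so a
 * failure at any step releases only what was already set up before the
 * device private struct is freed.
 */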

static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	memunmap(dev_priv->mmio_virt);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);

	return 0;
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

static struct vmw_master *vmw_master_check(struct drm_device *dev,
					   struct drm_file *file_priv,
					   unsigned int flags)
{
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster;

	if (file_priv->minor->type != DRM_MINOR_LEGACY ||
	    !(flags & DRM_AUTH))
		return NULL;

	ret = mutex_lock_interruptible(&dev->master_mutex);
	if (unlikely(ret != 0))
		return ERR_PTR(-ERESTARTSYS);

	if (file_priv->is_master) {
		mutex_unlock(&dev->master_mutex);
		return NULL;
	}

	/*
	 * Check if we were previously master, but now dropped. In that
	 * case, allow at least render node functionality.
	 */
	if (vmw_fp->locked_master) {
		mutex_unlock(&dev->master_mutex);

		if (flags & DRM_RENDER_ALLOW)
			return NULL;

		DRM_ERROR("Dropped master trying to access ioctl that "
			  "requires authentication.\n");
		return ERR_PTR(-EACCES);
	}
	mutex_unlock(&dev->master_mutex);

	/*
	 * Take the TTM lock. Possibly sleep waiting for the authenticating
	 * master to become master again, or for a SIGTERM if the
	 * authenticating master exits.
	 */
	vmaster = vmw_master(file_priv->master);
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		vmaster = ERR_PTR(ret);

	return vmaster;
}

static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	struct vmw_master *vmaster;
	unsigned int flags;
	long ret;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
			if (unlikely(ret != 0))
				return ret;

			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
				goto out_io_encoding;

			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
							_IOC_SIZE(cmd));
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	vmaster = vmw_master_check(dev, file_priv, flags);
	if (IS_ERR(vmaster)) {
		ret = PTR_ERR(vmaster);

		if (ret != -ERESTARTSYS)
			DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
				 nr, ret);
		return ret;
	}

	ret = ioctl_func(filp, cmd, arg);
	if (vmaster)
		ttm_read_unlock(&vmaster->lock);

	return ret;

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}
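
/*
 * A note on the DRM_VMW_EXECBUF special case above (summary of existing
 * behavior): unlike other ioctls, its encoded size is not required to
 * match the current struct drm_vmw_execbuf_arg; _IOC_SIZE(cmd) is
 * forwarded so that vmw_execbuf_ioctl() can accept older, shorter
 * argument layouts.
 */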

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_lastclose(struct drm_device *dev)
{
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			return ret;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;
	drm_sysfs_hotplug_event(dev);

	return 0;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);

	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	ttm_write_lock(&dev_priv->reservation_sem, false);
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		spin_unlock(&dev_priv->svga_lock);
		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	} else
		spin_unlock(&dev_priv->svga_lock);
	ttm_write_unlock(&dev_priv->reservation_sem);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	pci_disable_device(pdev);
	drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		if (dev_priv->enable_fb)
			vmw_fb_off(dev_priv);
		ttm_suspend_lock(&dev_priv->reservation_sem);

		/*
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		vmw_execbuf_release_pinned_bo(dev_priv);
		vmw_resource_evict_all(dev_priv);
		vmw_release_device_early(dev_priv);
		ttm_bo_swapout_all(&dev_priv->bdev);
		vmw_fence_fifo_down(dev_priv->fman);
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		vmw_fence_fifo_up(dev_priv->fman);
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);

	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspended = false;
		return -EBUSY;
	}

	if (dev_priv->enable_fb)
		__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);

	return 0;
}

static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	dev_priv->suspended = false;

	return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,
	.set_busid = drm_pci_set_busid,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force())
		return -EINVAL;
#endif

	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");
1// SPDX-License-Identifier: GPL-2.0 OR MIT
2/**************************************************************************
3 *
4 * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include <linux/console.h>
29#include <linux/dma-mapping.h>
30#include <linux/module.h>
31#include <linux/pci.h>
32#include <linux/mem_encrypt.h>
33
34#include <drm/drm_aperture.h>
35#include <drm/drm_drv.h>
36#include <drm/drm_ioctl.h>
37#include <drm/drm_sysfs.h>
38#include <drm/ttm/ttm_bo_driver.h>
39#include <drm/ttm/ttm_range_manager.h>
40#include <drm/ttm/ttm_placement.h>
41#include <generated/utsrelease.h>
42
43#include "ttm_object.h"
44#include "vmwgfx_binding.h"
45#include "vmwgfx_drv.h"
46
47#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
48
49#define VMW_MIN_INITIAL_WIDTH 800
50#define VMW_MIN_INITIAL_HEIGHT 600
51
52#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
53
54
55/*
56 * Fully encoded drm commands. Might move to vmw_drm.h
57 */
58
59#define DRM_IOCTL_VMW_GET_PARAM \
60 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
61 struct drm_vmw_getparam_arg)
62#define DRM_IOCTL_VMW_ALLOC_DMABUF \
63 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
64 union drm_vmw_alloc_dmabuf_arg)
65#define DRM_IOCTL_VMW_UNREF_DMABUF \
66 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
67 struct drm_vmw_unref_dmabuf_arg)
68#define DRM_IOCTL_VMW_CURSOR_BYPASS \
69 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
70 struct drm_vmw_cursor_bypass_arg)
71
72#define DRM_IOCTL_VMW_CONTROL_STREAM \
73 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
74 struct drm_vmw_control_stream_arg)
75#define DRM_IOCTL_VMW_CLAIM_STREAM \
76 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
77 struct drm_vmw_stream_arg)
78#define DRM_IOCTL_VMW_UNREF_STREAM \
79 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
80 struct drm_vmw_stream_arg)
81
82#define DRM_IOCTL_VMW_CREATE_CONTEXT \
83 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
84 struct drm_vmw_context_arg)
85#define DRM_IOCTL_VMW_UNREF_CONTEXT \
86 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
87 struct drm_vmw_context_arg)
88#define DRM_IOCTL_VMW_CREATE_SURFACE \
89 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
90 union drm_vmw_surface_create_arg)
91#define DRM_IOCTL_VMW_UNREF_SURFACE \
92 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
93 struct drm_vmw_surface_arg)
94#define DRM_IOCTL_VMW_REF_SURFACE \
95 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
96 union drm_vmw_surface_reference_arg)
97#define DRM_IOCTL_VMW_EXECBUF \
98 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
99 struct drm_vmw_execbuf_arg)
100#define DRM_IOCTL_VMW_GET_3D_CAP \
101 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
102 struct drm_vmw_get_3d_cap_arg)
103#define DRM_IOCTL_VMW_FENCE_WAIT \
104 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
105 struct drm_vmw_fence_wait_arg)
106#define DRM_IOCTL_VMW_FENCE_SIGNALED \
107 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
108 struct drm_vmw_fence_signaled_arg)
109#define DRM_IOCTL_VMW_FENCE_UNREF \
110 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
111 struct drm_vmw_fence_arg)
112#define DRM_IOCTL_VMW_FENCE_EVENT \
113 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
114 struct drm_vmw_fence_event_arg)
115#define DRM_IOCTL_VMW_PRESENT \
116 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
117 struct drm_vmw_present_arg)
118#define DRM_IOCTL_VMW_PRESENT_READBACK \
119 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
120 struct drm_vmw_present_readback_arg)
121#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
122 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
123 struct drm_vmw_update_layout_arg)
124#define DRM_IOCTL_VMW_CREATE_SHADER \
125 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
126 struct drm_vmw_shader_create_arg)
127#define DRM_IOCTL_VMW_UNREF_SHADER \
128 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
129 struct drm_vmw_shader_arg)
130#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
131 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
132 union drm_vmw_gb_surface_create_arg)
133#define DRM_IOCTL_VMW_GB_SURFACE_REF \
134 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
135 union drm_vmw_gb_surface_reference_arg)
136#define DRM_IOCTL_VMW_SYNCCPU \
137 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
138 struct drm_vmw_synccpu_arg)
139#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
140 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
141 struct drm_vmw_context_arg)
142#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT \
143 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT, \
144 union drm_vmw_gb_surface_create_ext_arg)
145#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \
146 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \
147 union drm_vmw_gb_surface_reference_ext_arg)
148#define DRM_IOCTL_VMW_MSG \
149 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG, \
150 struct drm_vmw_msg_arg)
151
152/*
153 * The core DRM version of this macro doesn't account for
154 * DRM_COMMAND_BASE.
155 */
156
157#define VMW_IOCTL_DEF(ioctl, func, flags) \
158 [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
159
/*
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers; mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	/*
	 * The permissions of the below ioctl are overridden in
	 * vmw_generic_ioctl(). We require either
	 * DRM_MASTER or capable(CAP_SYS_ADMIN).
	 */
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_bo_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
		      vmw_gb_surface_define_ext_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
		      vmw_gb_surface_reference_ext_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_MSG,
		      vmw_msg_ioctl,
		      DRM_RENDER_ALLOW),
};

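/*
 * Illustrative sketch only (not part of the driver): user space reaches
 * the entry points above through the DRM ioctl interface, for example
 * via the libdrm helpers:
 *
 *	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };
 *	int ret = drmCommandWriteRead(fd, DRM_VMW_GET_PARAM,
 *				      &arg, sizeof(arg));
 *
 * which ends up in vmw_getparam_ioctl() via vmw_generic_ioctl() below.
 */
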
static const struct pci_device_id vmw_pci_id_list[] = {
	{ PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA2) },
	{ PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA3) },
	{ }
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);

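/*
 * Example (illustrative): the parameters above can be set at module load
 * time, e.g. "modprobe vmwgfx enable_fbdev=1 assume_16bpp=1", or changed
 * at runtime by root through /sys/module/vmwgfx/parameters/ (mode 0600).
 */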

static void vmw_print_capabilities2(uint32_t capabilities2)
{
	DRM_INFO("Capabilities2:\n");
	if (capabilities2 & SVGA_CAP2_GROW_OTABLE)
		DRM_INFO(" Grow oTable.\n");
	if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
		DRM_INFO(" IntraSurface copy.\n");
	if (capabilities2 & SVGA_CAP2_DX3)
		DRM_INFO(" DX3.\n");
}

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO(" Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO(" Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO(" Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO(" Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO(" 8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO(" Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO(" 3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO(" Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO(" Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO(" Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO(" Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO(" Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO(" GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO(" Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO(" GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO(" Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO(" Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO(" Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO(" Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO(" DX Features.\n");
	if (capabilities & SVGA_CAP_HP_CMD_QUEUE)
		DRM_INFO(" HP Command Queue.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_buffer_object *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
			  &vmw_sys_placement, false, true,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_bo_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

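/*
 * vmw_device_init - Enable the device and set up basic command submission.
 *
 * @dev_priv: Pointer to device private.
 *
 * Saves the register state needed to restore the device on takedown,
 * enables the SVGA device in hidden mode, configures framebuffer traces
 * for devices without command queue support, and creates the FIFO.
 */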
static int vmw_device_init(struct vmw_private *dev_priv)
{
	bool uses_fb_traces = false;

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);

	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
		  SVGA_REG_ENABLE_HIDE);

	uses_fb_traces = !vmw_cmd_supported(dev_priv) &&
			 (dev_priv->capabilities & SVGA_CAP_TRACES) != 0;

	vmw_write(dev_priv, SVGA_REG_TRACES, uses_fb_traces);
	dev_priv->fifo = vmw_fifo_create(dev_priv);
	if (IS_ERR(dev_priv->fifo)) {
		int err = PTR_ERR(dev_priv->fifo);

		dev_priv->fifo = NULL;
		return err;
	} else if (!dev_priv->fifo) {
		vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
	}

	dev_priv->last_read_seqno = vmw_fence_read(dev_priv);
	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	return 0;
}

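/*
 * vmw_device_fini - Restore the device state saved by vmw_device_init.
 *
 * @vmw: Pointer to device private.
 *
 * Syncs with the device through the legacy SYNC/BUSY register pair,
 * restores the saved config-done, enable and traces register state,
 * and destroys the FIFO.
 */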
static void vmw_device_fini(struct vmw_private *vmw)
{
	/*
	 * Legacy sync
	 */
	vmw_write(vmw, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	while (vmw_read(vmw, SVGA_REG_BUSY) != 0)
		;

	vmw->last_read_seqno = vmw_fence_read(vmw);

	vmw_write(vmw, SVGA_REG_CONFIG_DONE,
		  vmw->config_done_state);
	vmw_write(vmw, SVGA_REG_ENABLE,
		  vmw->enable_state);
	vmw_write(vmw, SVGA_REG_TRACES,
		  vmw->traces_state);

	vmw_fifo_destroy(vmw);
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}

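/*
 * vmw_request_device - Set up the resources needed for command submission.
 *
 * @dev_priv: Pointer to device private.
 *
 * Initializes the device, brings up the fence FIFO and creates the
 * command buffer manager, falling back to legacy submission (VMW_SM_LEGACY)
 * if that fails. Then performs late setup and creates the dummy query bo.
 */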
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_device_init(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize the device.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->sm_type = VMW_SM_LEGACY;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_device_fini(dev_priv);
	return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_bo_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_device_fini(dev_priv);
}

/*
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the values to the fb_max_[width|height] fields and to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, they are set to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine what actions need to be taken by the
 * driver to make system pages visible to the device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Caching DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};

	/* TTM currently doesn't fully support SEV encryption. */
	if (mem_encrypt_active())
		return -EINVAL;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
	else if (vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;
	else
		dev_priv->map_mode = vmw_dma_map_populate;

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
	return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to struct vmw_private
 *
 * With 32-bit we can only handle 32-bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int ret = 0;

	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
	if (dev_priv->map_mode != vmw_dma_phys &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
	}

	return ret;
}

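/*
 * Set up the TTM resource manager for VRAM. With transparent huge page
 * support enabled, the THP-aware manager is used; otherwise a plain range
 * manager covering the whole VRAM aperture. The manager is created unused
 * and is only switched on once SVGA is enabled.
 */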
static int vmw_vram_manager_init(struct vmw_private *dev_priv)
{
	int ret;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	ret = vmw_thp_init(dev_priv);
#else
	ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false,
				 dev_priv->vram_size >> PAGE_SHIFT);
#endif
	ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false);
	return ret;
}

static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	vmw_thp_fini(dev_priv);
#else
	ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM);
#endif
}

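/*
 * vmw_setup_pci_resources - Request and map the device's PCI resources.
 *
 * @dev: Pointer to device private.
 * @pci_id: PCI device id, selecting the register layout.
 *
 * SVGA3 devices expose register MMIO in BAR 0 and VRAM in BAR 2, while
 * SVGA2 devices expose an I/O port region in BAR 0, VRAM in BAR 1 and
 * FIFO memory in BAR 2.
 */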
static int vmw_setup_pci_resources(struct vmw_private *dev,
				   unsigned long pci_id)
{
	resource_size_t rmmio_start;
	resource_size_t rmmio_size;
	resource_size_t fifo_start;
	resource_size_t fifo_size;
	int ret;
	struct pci_dev *pdev = to_pci_dev(dev->drm.dev);

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, "vmwgfx probe");
	if (ret)
		return ret;

	dev->pci_id = pci_id;
	if (pci_id == VMWGFX_PCI_ID_SVGA3) {
		rmmio_start = pci_resource_start(pdev, 0);
		rmmio_size = pci_resource_len(pdev, 0);
		dev->vram_start = pci_resource_start(pdev, 2);
		dev->vram_size = pci_resource_len(pdev, 2);

		DRM_INFO("Register MMIO at 0x%pa size is %llu kiB\n",
			 &rmmio_start, (uint64_t)rmmio_size / 1024);
		dev->rmmio = devm_ioremap(dev->drm.dev,
					  rmmio_start,
					  rmmio_size);
		if (!dev->rmmio) {
			DRM_ERROR("Failed mapping registers mmio memory.\n");
			pci_release_regions(pdev);
			return -ENOMEM;
		}
	} else if (pci_id == VMWGFX_PCI_ID_SVGA2) {
		dev->io_start = pci_resource_start(pdev, 0);
		dev->vram_start = pci_resource_start(pdev, 1);
		dev->vram_size = pci_resource_len(pdev, 1);
		fifo_start = pci_resource_start(pdev, 2);
		fifo_size = pci_resource_len(pdev, 2);

		DRM_INFO("FIFO at %pa size is %llu kiB\n",
			 &fifo_start, (uint64_t)fifo_size / 1024);
		dev->fifo_mem = devm_memremap(dev->drm.dev,
					      fifo_start,
					      fifo_size,
					      MEMREMAP_WB);

		if (IS_ERR(dev->fifo_mem)) {
			DRM_ERROR("Failed mapping FIFO memory.\n");
			pci_release_regions(pdev);
			return PTR_ERR(dev->fifo_mem);
		}
	} else {
		pci_release_regions(pdev);
		return -EINVAL;
	}

	/*
	 * This is the approximate size of the VRAM; the exact size will only
	 * be known after we read SVGA_REG_VRAM_SIZE. The PCI resource
	 * size will be equal to or bigger than the size reported by
	 * SVGA_REG_VRAM_SIZE.
	 */
	DRM_INFO("VRAM at %pa size is %llu kiB\n",
		 &dev->vram_start, (uint64_t)dev->vram_size / 1024);

	return 0;
}

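/*
 * vmw_detect_version - Negotiate the SVGA device version.
 *
 * @dev: Pointer to device private.
 *
 * Writes SVGA_ID_3 for SVGA3 devices and SVGA_ID_2 otherwise to
 * SVGA_REG_ID, then reads the register back to verify that the device
 * accepts a supported version. Fails with -ENOSYS if it does not.
 */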
static int vmw_detect_version(struct vmw_private *dev)
{
	uint32_t svga_id;

	vmw_write(dev, SVGA_REG_ID, vmw_is_svga_v3(dev) ?
		  SVGA_ID_3 : SVGA_ID_2);
	svga_id = vmw_read(dev, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2 && svga_id != SVGA_ID_3) {
		DRM_ERROR("Unsupported SVGA ID 0x%x on chipset 0x%x\n",
			  svga_id, dev->vmw_chipset);
		return -ENOSYS;
	}
	BUG_ON(vmw_is_svga_v3(dev) && (svga_id != SVGA_ID_3));
	DRM_INFO("Running on SVGA version %d.\n", (svga_id & 0xff));
	return 0;
}

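/*
 * vmw_driver_load - Main driver initialization.
 *
 * @dev_priv: Pointer to device private.
 * @pci_id: PCI device id.
 *
 * Maps PCI resources, negotiates the device version, reads capabilities
 * and limits, selects the DMA mode, and sets up TTM, IRQs, fencing, the
 * memory managers and KMS before bringing up the device and the optional
 * fbdev emulation.
 */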
static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
{
	int ret;
	enum vmw_res_type i;
	bool refuse_dma = false;
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);

	dev_priv->vmw_chipset = pci_id;
	dev_priv->drm.dev_private = dev_priv;

	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->binding_mutex);
	spin_lock_init(&dev_priv->resource_lock);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->cursor_lock);

	ret = vmw_setup_pci_resources(dev_priv, pci_id);
	if (ret)
		return ret;
	ret = vmw_detect_version(dev_priv);
	if (ret)
		goto out_no_pci_or_version;

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init_base(&dev_priv->res_idr[i], 1);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
		dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);

	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities since DMA not available.\n");
		refuse_dma = true;
		if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
			DRM_INFO("Disabling 3D acceleration.\n");
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->fifo_mem_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size;

		if (dev_priv->capabilities2 & SVGA_CAP2_GB_MEMSIZE_2)
			mem_size = vmw_read(dev_priv,
					    SVGA_REG_GBOBJECT_MEM_SIZE_KB);
		else
			mem_size =
				vmw_read(dev_priv,
					 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low-memory 2D VMs to compensate for the
		 * allocation taken by fbdev.
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 3;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);
	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
		vmw_print_capabilities2(dev_priv->capabilities2);
	DRM_INFO("Supports command queues = %d\n",
		 vmw_cmd_supported(dev_priv));

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %llu kiB\n",
		 (uint64_t)dev_priv->prim_bb_mem / 1024);

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err0;
	}

	dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
						&vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err0;
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = vmw_irq_install(&dev_priv->drm, pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	drm_vma_offset_manager_init(&dev_priv->vma_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);
	ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
			      dev_priv->drm.dev,
			      dev_priv->drm.anon_inode->i_mapping,
			      &dev_priv->vma_manager,
			      dev_priv->map_mode == vmw_dma_alloc_coherent,
			      false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */

	ret = vmw_vram_manager_init(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}

	/*
	 * "Guest Memory Regions" is an aperture-like feature with
	 * one slot per bo. There is an upper limit on the number of
	 * slots as well as on the bo size.
	 */
	dev_priv->has_gmr = true;
	/* TODO: This is most likely not correct */
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma ||
	    vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
		dev_priv->has_mob = true;

		if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
		if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
			dev_priv->sm_type = VMW_SM_4;
		spin_unlock(&dev_priv->cap_lock);
	}

	vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);

	/* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
	if (has_sm4_context(dev_priv) &&
	    (dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM41);

		if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
			dev_priv->sm_type = VMW_SM_4_1;

		if (has_sm4_1_context(dev_priv) &&
		    (dev_priv->capabilities2 & SVGA_CAP2_DX3)) {
			vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM5);
			if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
				dev_priv->sm_type = VMW_SM_5;
		}
	}

	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	if (dev_priv->sm_type == VMW_SM_5)
		DRM_INFO("SM5 support available.\n");
	if (dev_priv->sm_type == VMW_SM_4_1)
		DRM_INFO("SM4_1 support available.\n");
	if (dev_priv->sm_type == VMW_SM_4)
		DRM_INFO("SM4 support available.\n");
	DRM_INFO("Running without reservation semaphore\n");

	vmw_host_printf("vmwgfx: Module Version: %d.%d.%d (kernel: %s)",
			VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
			VMWGFX_DRIVER_PATCHLEVEL, UTS_RELEASE);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
	vmw_vram_manager_fini(dev_priv);
out_no_vram:
	ttm_device_fini(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(&dev_priv->drm);
out_no_irq:
	ttm_object_device_release(&dev_priv->tdev);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
out_no_pci_or_version:
	pci_release_regions(pdev);
	return ret;
}

static void vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
	vmw_vram_manager_fini(dev_priv);
	ttm_device_fini(&dev_priv->bdev);
	drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(&dev_priv->drm);

	ttm_object_device_release(&dev_priv->tdev);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	pci_release_regions(pdev);
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(!vmw_fp))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

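/*
 * vmw_generic_ioctl - Common dispatch for the native and compat ioctl paths.
 *
 * Performs extra permission and command-encoding checks on the
 * driver-private ioctls before handing off to the core DRM ioctl helper
 * passed in through @ioctl_func.
 */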
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	unsigned int flags;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			return ioctl_func(filp, cmd, arg);
		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
			if (!drm_is_current_master(file_priv) &&
			    !capable(CAP_SYS_ADMIN))
				return -EACCES;
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	return ioctl_func(filp, cmd, arg);

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_master_set(struct drm_device *dev,
			   struct drm_file *file_priv,
			   bool from_open)
{
	/*
	 * Inform a new master that the layout may have changed while
	 * it was gone.
	 */
	if (!from_open)
		drm_sysfs_hotplug_event(dev);
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);

	if (!ttm_resource_manager_used(man)) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE);
		ttm_resource_manager_set_used(man, true);
	}
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	__vmw_svga_enable(dev_priv);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);

	if (ttm_resource_manager_used(man)) {
		ttm_resource_manager_set_used(man, false);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
}

/**
 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
	/*
	 * Disabling SVGA will turn off device modesetting capabilities, so
	 * notify KMS about that so that it doesn't cache atomic state that
	 * isn't valid anymore, for example crtcs turned on.
	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
	 * end up with lock order reversal. Thus, a master may actually perform
	 * a new modeset just after we call vmw_kms_lost_device() and race with
	 * vmw_svga_disable(), but that should at worst cause atomic KMS state
	 * to be inconsistent with the device, causing modesetting problems.
	 */
	vmw_kms_lost_device(&dev_priv->drm);
	if (ttm_resource_manager_used(man)) {
		if (ttm_resource_manager_evict_all(&dev_priv->bdev, man))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		ttm_resource_manager_set_used(man, false);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	ttm_mem_global_release(&ttm_mem_glob);
	drm_dev_unregister(dev);
	vmw_driver_unload(dev);
}

static unsigned long
vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
		      unsigned long len, unsigned long pgoff,
		      unsigned long flags)
{
	struct drm_file *file_priv = file->private_data;
	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);

	return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
				     &dev_priv->vma_manager);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		/*
		 * Take the reservation sem in write mode, which will make sure
		 * there are no other processes holding a buffer object
		 * reservation, meaning we should be able to evict all buffer
		 * objects if needed.
		 * Once user-space processes have been frozen, we can release
		 * the lock again.
		 */
		dev_priv->suspend_locked = true;
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		if (READ_ONCE(dev_priv->suspend_locked))
			dev_priv->suspend_locked = false;
		break;
	default:
		break;
	}
	return 0;
}

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

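/*
 * vmw_pm_freeze - Prepare the device for hibernation.
 *
 * Suspends modesetting, evicts all resources and swaps out buffer
 * objects. Fails with -EBUSY, rolling back to the previous state, if 3D
 * resources are still active at that point.
 */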
static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/*
	 * No user-space processes should be running now.
	 */
	ret = vmw_kms_suspend(&dev_priv->drm);
	if (ret) {
		DRM_ERROR("Failed to freeze modesetting.\n");
		return ret;
	}
	if (dev_priv->enable_fb)
		vmw_fb_off(dev_priv);

	vmw_execbuf_release_pinned_bo(dev_priv);
	vmw_resource_evict_all(dev_priv);
	vmw_release_device_early(dev_priv);
	while (ttm_device_swapout(&dev_priv->bdev, &ctx, GFP_KERNEL) > 0);
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);
	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspend_locked = false;
		if (dev_priv->suspend_state)
			vmw_kms_resume(dev);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		return -EBUSY;
	}

	vmw_fence_fifo_down(dev_priv->fman);
	__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);
	return 0;
}

static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_detect_version(dev_priv);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->suspend_locked = false;
	if (dev_priv->suspend_state)
		vmw_kms_resume(&dev_priv->drm);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);

	return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = drm_poll,
	.read = drm_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
	.get_unmapped_area = vmw_get_unmapped_area,
};

static const struct drm_driver driver = {
	.driver_features =
	    DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

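/*
 * vmw_probe - PCI probe callback.
 *
 * Removes conflicting framebuffer drivers, enables the PCI device,
 * allocates the DRM device, initializes TTM global memory accounting,
 * loads the driver and registers the DRM device.
 */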
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct vmw_private *vmw;
	int ret;

	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "svgadrmfb");
	if (ret)
		return ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	vmw = devm_drm_dev_alloc(&pdev->dev, &driver,
				 struct vmw_private, drm);
	if (IS_ERR(vmw))
		return PTR_ERR(vmw);

	pci_set_drvdata(pdev, &vmw->drm);

	ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev);
	if (ret)
		return ret;

	ret = vmw_driver_load(vmw, ent->device);
	if (ret)
		return ret;

	ret = drm_dev_register(&vmw->drm, 0);
	if (ret) {
		vmw_driver_unload(&vmw->drm);
		return ret;
	}

	return 0;
}

static int __init vmwgfx_init(void)
{
	int ret;

	if (vgacon_text_force())
		return -EINVAL;

	ret = pci_register_driver(&vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	pci_unregister_driver(&vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");