// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/console.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>

#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_pci.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_placement.h>

#include "ttm_object.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"

#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

#ifndef VMWGFX_GIT_VERSION
#define VMWGFX_GIT_VERSION "Unknown"
#endif

#define VMWGFX_REPO "In Tree"

#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)


/*
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
		struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
		struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
		struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
		 struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT, \
		 union drm_vmw_gb_surface_create_ext_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \
		 union drm_vmw_gb_surface_reference_ext_arg)

/*
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}

/*
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, DRM_AUTH |
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers; mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	/*
	 * The permissions of the below ioctl are overridden in
	 * vmw_generic_ioctl(). We require either
	 * DRM_MASTER or capable(CAP_SYS_ADMIN).
	 */
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_bo_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
		      vmw_gb_surface_define_ext_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
		      vmw_gb_surface_reference_ext_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
};

static const struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);


static void vmw_print_capabilities2(uint32_t capabilities2)
{
	DRM_INFO("Capabilities2:\n");
	if (capabilities2 & SVGA_CAP2_GROW_OTABLE)
		DRM_INFO("  Grow oTable.\n");
	if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
		DRM_INFO("  IntraSurface copy.\n");
}

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO("  DX Features.\n");
	if (capabilities & SVGA_CAP_HP_CMD_QUEUE)
		DRM_INFO("  HP Command Queue.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_buffer_object *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
			  &vmw_sys_ne_placement, false,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_bo_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
					       256*4096, 2*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}

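/**
 * vmw_request_device - Bring up the device and its command submission paths
 *
 * @dev_priv: Pointer to device private.
 *
 * Initializes the FIFO, brings up fence processing and, if available, the
 * command buffer manager, then performs the late setup (otables, large
 * command buffer pool) and creates the dummy query buffer object. On
 * failure, the parts already set up are torn down in reverse order.
 */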
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->has_dx = false;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_bo_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * vmw_get_initial_size - Set the initial_[width|height] fields on the
 * given vmw_private
 *
 * @dev_priv: Pointer to device private struct.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine what actions need to be taken by the
 * driver to make system pages visible to the device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Caching DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
	else if (vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;
	else
		dev_priv->map_mode = vmw_dma_map_populate;

	/* No TTM coherent page pool? FIXME: Ask TTM instead! */
	if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
	    (dev_priv->map_mode == vmw_dma_alloc_coherent))
		return -EINVAL;

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
	return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to device private struct.
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret = 0;

	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
	if (dev_priv->map_mode != vmw_dma_phys &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
	}

	return ret;
}

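/**
 * vmw_driver_load - Main device probe and initialization entry point
 *
 * @dev: Pointer to the drm device.
 * @chipset: Chipset id from the pci id table.
 *
 * Reads device capabilities and memory sizes, maps MMIO, and sets up the
 * TTM memory managers, fence handling, KMS and (optionally) the fbdev
 * emulation. The error paths unwind the setup in reverse order.
 */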
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;
	char host_log[100] = {0};

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(!dev_priv)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	mutex_init(&dev_priv->global_kms_state_mutex);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->resource_lock);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);
	spin_lock_init(&dev_priv->cursor_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) {
		dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
	}


	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 3;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);
	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
		vmw_print_capabilities2(dev_priv->capabilities2);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	dma_set_max_seg_size(dev->dev, min_t(unsigned int, U32_MAX & PAGE_MASK,
					     SCATTERLIST_MAX_SEGMENT));

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err0;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
						&vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/*
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = vmw_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}

	vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	if (dev_priv->has_dx) {
		/*
		 * SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1
		 * support
		 */
		if ((dev_priv->capabilities2 & SVGA_CAP2_DX2) != 0) {
			vmw_write(dev_priv, SVGA_REG_DEV_CAP,
				  SVGA3D_DEVCAP_SM41);
			dev_priv->has_sm4_1 = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		}
	}

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
	DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
		 ? "yes." : "no.");
	DRM_INFO("SM4_1: %s\n", dev_priv->has_sm4_1 ? "yes." : "no.");

	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
		 VMWGFX_REPO, VMWGFX_GIT_VERSION);
	vmw_host_log(host_log);

	memset(host_log, 0, sizeof(host_log));
	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
		 VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
		 VMWGFX_DRIVER_PATCHLEVEL);
	vmw_host_log(host_log);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}

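/**
 * vmw_driver_unload - Tear down the device, in roughly the reverse order of
 * vmw_driver_load()
 *
 * @dev: Pointer to the drm device.
 */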
static void vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	memunmap(dev_priv->mmio_virt);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);
}

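/**
 * vmw_postclose - Release per-file driver state when a file handle is closed
 *
 * @dev: Pointer to the drm device.
 * @file_priv: The file being closed.
 */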
static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

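/**
 * vmw_driver_open - Set up per-file driver state on open
 *
 * @dev: Pointer to the drm device.
 * @file_priv: The file being opened.
 *
 * Allocates a struct vmw_fpriv and its TTM object file, which tracks the
 * caller's references to base objects such as surfaces and contexts.
 */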
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(!vmw_fp))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

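/**
 * vmw_generic_ioctl - Common ioctl dispatch for the native and compat paths
 *
 * @filp: The file.
 * @cmd: The ioctl command.
 * @arg: The ioctl argument.
 * @ioctl_func: drm_ioctl() or drm_compat_ioctl(), chosen by the caller.
 *
 * Performs extra permission and command-encoding checks on driver-private
 * ioctls before handing off to the core DRM dispatcher.
 */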
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	unsigned int flags;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			return ioctl_func(filp, cmd, arg);
		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
			if (!drm_is_current_master(file_priv) &&
			    !capable(CAP_SYS_ADMIN))
				return -EACCES;
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	return ioctl_func(filp, cmd, arg);

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	/*
	 * Inform a new master that the layout may have changed while
	 * it was gone.
	 */
	if (!from_open)
		drm_sysfs_hotplug_event(dev);

	return 0;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	/*
	 * Disabling SVGA will turn off device modesetting capabilities, so
	 * notify KMS about that so that it doesn't cache atomic state that
	 * isn't valid anymore, for example crtcs turned on.
	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
	 * end up with lock order reversal. Thus, a master may actually perform
	 * a new modeset just after we call vmw_kms_lost_device() and race with
	 * vmw_svga_disable(), but that should at worst cause atomic KMS state
	 * to be inconsistent with the device, causing modesetting problems.
	 */
	vmw_kms_lost_device(dev_priv->dev);
	ttm_write_lock(&dev_priv->reservation_sem, false);
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		spin_unlock(&dev_priv->svga_lock);
		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	} else
		spin_unlock(&dev_priv->svga_lock);
	ttm_write_unlock(&dev_priv->reservation_sem);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	pci_disable_device(pdev);
	drm_put_dev(dev);
}

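/**
 * vmwgfx_pm_notifier - PM notifier taking and releasing the reservation
 * semaphore around hibernation
 *
 * @nb: The notifier block.
 * @val: The PM event.
 * @ptr: Unused.
 */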
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		/*
		 * Take the reservation sem in write mode, which will make sure
		 * there are no other processes holding a buffer object
		 * reservation, meaning we should be able to evict all buffer
		 * objects if needed.
		 * Once user-space processes have been frozen, we can release
		 * the lock again.
		 */
		ttm_suspend_lock(&dev_priv->reservation_sem);
		dev_priv->suspend_locked = true;
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		if (READ_ONCE(dev_priv->suspend_locked)) {
			dev_priv->suspend_locked = false;
			ttm_suspend_unlock(&dev_priv->reservation_sem);
		}
		break;
	default:
		break;
	}
	return 0;
}

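/*
 * Put the PCI device into the D3hot power state, refusing to suspend if
 * hibernation has been ruled out (dev_priv->refuse_hibernation).
 */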
static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

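/**
 * vmw_pm_freeze - Driver hibernation freeze callback
 *
 * @kdev: The device.
 *
 * Suspends modesetting, evicts all resources and buffer objects and takes
 * down command submission. Fails with -EBUSY, undoing the above, if 3D
 * resources are still active, since their state cannot be saved.
 */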
static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	/*
	 * Unlock for vmw_kms_suspend.
	 * No user-space processes should be running now.
	 */
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	ret = vmw_kms_suspend(dev_priv->dev);
	if (ret) {
		ttm_suspend_lock(&dev_priv->reservation_sem);
		DRM_ERROR("Failed to freeze modesetting.\n");
		return ret;
	}
	if (dev_priv->enable_fb)
		vmw_fb_off(dev_priv);

	ttm_suspend_lock(&dev_priv->reservation_sem);
	vmw_execbuf_release_pinned_bo(dev_priv);
	vmw_resource_evict_all(dev_priv);
	vmw_release_device_early(dev_priv);
	ttm_bo_swapout_all(&dev_priv->bdev);
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);
	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspend_locked = false;
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->suspend_state)
			vmw_kms_resume(dev);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		return -EBUSY;
	}

	vmw_fence_fifo_down(dev_priv->fman);
	__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);
	return 0;
}

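/**
 * vmw_pm_restore - Driver thaw / restore callback
 *
 * @kdev: The device.
 *
 * Re-registers with the SVGA device, brings command submission back up and
 * resumes modesetting and, if enabled, the fbdev emulation.
 */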
static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->suspend_locked = false;
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	if (dev_priv->suspend_state)
		vmw_kms_resume(dev_priv->dev);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);

	return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	.driver_features =
	    DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

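/*
 * PCI probe callback. Hands device setup over to the DRM core, which in
 * turn calls vmw_driver_load().
 */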
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;

	if (vgacon_text_force())
		return -EINVAL;

	ret = pci_register_driver(&vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	pci_unregister_driver(&vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");
1// SPDX-License-Identifier: GPL-2.0 OR MIT
2/**************************************************************************
3 *
4 * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include <linux/console.h>
29#include <linux/dma-mapping.h>
30#include <linux/module.h>
31#include <linux/pci.h>
32#include <linux/mem_encrypt.h>
33
34#include <drm/drm_aperture.h>
35#include <drm/drm_drv.h>
36#include <drm/drm_ioctl.h>
37#include <drm/drm_sysfs.h>
38#include <drm/ttm/ttm_bo_driver.h>
39#include <drm/ttm/ttm_range_manager.h>
40#include <drm/ttm/ttm_placement.h>
41#include <generated/utsrelease.h>
42
43#include "ttm_object.h"
44#include "vmwgfx_binding.h"
45#include "vmwgfx_drv.h"
46
47#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
48
49#define VMW_MIN_INITIAL_WIDTH 800
50#define VMW_MIN_INITIAL_HEIGHT 600
51
52#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
53
54
55/*
56 * Fully encoded drm commands. Might move to vmw_drm.h
57 */
58
59#define DRM_IOCTL_VMW_GET_PARAM \
60 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
61 struct drm_vmw_getparam_arg)
62#define DRM_IOCTL_VMW_ALLOC_DMABUF \
63 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
64 union drm_vmw_alloc_dmabuf_arg)
65#define DRM_IOCTL_VMW_UNREF_DMABUF \
66 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
67 struct drm_vmw_unref_dmabuf_arg)
68#define DRM_IOCTL_VMW_CURSOR_BYPASS \
69 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
70 struct drm_vmw_cursor_bypass_arg)
71
72#define DRM_IOCTL_VMW_CONTROL_STREAM \
73 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
74 struct drm_vmw_control_stream_arg)
75#define DRM_IOCTL_VMW_CLAIM_STREAM \
76 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
77 struct drm_vmw_stream_arg)
78#define DRM_IOCTL_VMW_UNREF_STREAM \
79 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
80 struct drm_vmw_stream_arg)
81
82#define DRM_IOCTL_VMW_CREATE_CONTEXT \
83 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
84 struct drm_vmw_context_arg)
85#define DRM_IOCTL_VMW_UNREF_CONTEXT \
86 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
87 struct drm_vmw_context_arg)
88#define DRM_IOCTL_VMW_CREATE_SURFACE \
89 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
90 union drm_vmw_surface_create_arg)
91#define DRM_IOCTL_VMW_UNREF_SURFACE \
92 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
93 struct drm_vmw_surface_arg)
94#define DRM_IOCTL_VMW_REF_SURFACE \
95 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
96 union drm_vmw_surface_reference_arg)
97#define DRM_IOCTL_VMW_EXECBUF \
98 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
99 struct drm_vmw_execbuf_arg)
100#define DRM_IOCTL_VMW_GET_3D_CAP \
101 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
102 struct drm_vmw_get_3d_cap_arg)
103#define DRM_IOCTL_VMW_FENCE_WAIT \
104 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
105 struct drm_vmw_fence_wait_arg)
106#define DRM_IOCTL_VMW_FENCE_SIGNALED \
107 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
108 struct drm_vmw_fence_signaled_arg)
109#define DRM_IOCTL_VMW_FENCE_UNREF \
110 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
111 struct drm_vmw_fence_arg)
112#define DRM_IOCTL_VMW_FENCE_EVENT \
113 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
114 struct drm_vmw_fence_event_arg)
115#define DRM_IOCTL_VMW_PRESENT \
116 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
117 struct drm_vmw_present_arg)
118#define DRM_IOCTL_VMW_PRESENT_READBACK \
119 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
120 struct drm_vmw_present_readback_arg)
121#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
122 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
123 struct drm_vmw_update_layout_arg)
124#define DRM_IOCTL_VMW_CREATE_SHADER \
125 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
126 struct drm_vmw_shader_create_arg)
127#define DRM_IOCTL_VMW_UNREF_SHADER \
128 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
129 struct drm_vmw_shader_arg)
130#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
131 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
132 union drm_vmw_gb_surface_create_arg)
133#define DRM_IOCTL_VMW_GB_SURFACE_REF \
134 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
135 union drm_vmw_gb_surface_reference_arg)
136#define DRM_IOCTL_VMW_SYNCCPU \
137 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
138 struct drm_vmw_synccpu_arg)
139#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
140 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
141 struct drm_vmw_context_arg)
142#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT \
143 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT, \
144 union drm_vmw_gb_surface_create_ext_arg)
145#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \
146 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \
147 union drm_vmw_gb_surface_reference_ext_arg)
148#define DRM_IOCTL_VMW_MSG \
149 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG, \
150 struct drm_vmw_msg_arg)
151
152/*
153 * The core DRM version of this macro doesn't account for
154 * DRM_COMMAND_BASE.
155 */
156
157#define VMW_IOCTL_DEF(ioctl, func, flags) \
158 [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
159
160/*
161 * Ioctl definitions.
162 */
163
164static const struct drm_ioctl_desc vmw_ioctls[] = {
165 VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
166 DRM_RENDER_ALLOW),
167 VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
168 DRM_RENDER_ALLOW),
169 VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
170 DRM_RENDER_ALLOW),
171 VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
172 vmw_kms_cursor_bypass_ioctl,
173 DRM_MASTER),
174
175 VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
176 DRM_MASTER),
177 VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
178 DRM_MASTER),
179 VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
180 DRM_MASTER),
181
182 VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
183 DRM_RENDER_ALLOW),
184 VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
185 DRM_RENDER_ALLOW),
186 VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
187 DRM_RENDER_ALLOW),
188 VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
189 DRM_RENDER_ALLOW),
190 VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
191 DRM_RENDER_ALLOW),
192 VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
193 DRM_RENDER_ALLOW),
194 VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
195 DRM_RENDER_ALLOW),
196 VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
197 vmw_fence_obj_signaled_ioctl,
198 DRM_RENDER_ALLOW),
199 VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
200 DRM_RENDER_ALLOW),
201 VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
202 DRM_RENDER_ALLOW),
203 VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
204 DRM_RENDER_ALLOW),
205
206 /* these allow direct access to the framebuffers mark as master only */
207 VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
208 DRM_MASTER | DRM_AUTH),
209 VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
210 vmw_present_readback_ioctl,
211 DRM_MASTER | DRM_AUTH),
212 /*
213 * The permissions of the below ioctl are overridden in
214 * vmw_generic_ioctl(). We require either
215 * DRM_MASTER or capable(CAP_SYS_ADMIN).
216 */
217 VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
218 vmw_kms_update_layout_ioctl,
219 DRM_RENDER_ALLOW),
220 VMW_IOCTL_DEF(VMW_CREATE_SHADER,
221 vmw_shader_define_ioctl,
222 DRM_RENDER_ALLOW),
223 VMW_IOCTL_DEF(VMW_UNREF_SHADER,
224 vmw_shader_destroy_ioctl,
225 DRM_RENDER_ALLOW),
226 VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
227 vmw_gb_surface_define_ioctl,
228 DRM_RENDER_ALLOW),
229 VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
230 vmw_gb_surface_reference_ioctl,
231 DRM_RENDER_ALLOW),
232 VMW_IOCTL_DEF(VMW_SYNCCPU,
233 vmw_user_bo_synccpu_ioctl,
234 DRM_RENDER_ALLOW),
235 VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
236 vmw_extended_context_define_ioctl,
237 DRM_RENDER_ALLOW),
238 VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
239 vmw_gb_surface_define_ext_ioctl,
240 DRM_RENDER_ALLOW),
241 VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
242 vmw_gb_surface_reference_ext_ioctl,
243 DRM_RENDER_ALLOW),
244 VMW_IOCTL_DEF(VMW_MSG,
245 vmw_msg_ioctl,
246 DRM_RENDER_ALLOW),
247};

static const struct pci_device_id vmw_pci_id_list[] = {
	{ PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA2) },
	{ PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA3) },
	{ }
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
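
/*
 * Illustrative usage note (not part of the original source): the
 * module_param_named() calls above place these knobs in the "vmwgfx"
 * module namespace, so they can be set at load time, e.g.
 *
 *   modprobe vmwgfx enable_fbdev=1 force_coherent=1
 *
 * or on the kernel command line as vmwgfx.enable_fbdev=1. The 0600
 * permission bits additionally expose them read/write to root under
 * /sys/module/vmwgfx/parameters/.
 */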

static void vmw_print_capabilities2(uint32_t capabilities2)
{
	DRM_INFO("Capabilities2:\n");
	if (capabilities2 & SVGA_CAP2_GROW_OTABLE)
		DRM_INFO(" Grow oTable.\n");
	if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
		DRM_INFO(" IntraSurface copy.\n");
	if (capabilities2 & SVGA_CAP2_DX3)
		DRM_INFO(" DX3.\n");
}

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO(" Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO(" Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO(" Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO(" Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO(" 8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO(" Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO(" 3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO(" Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO(" Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO(" Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO(" Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO(" Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO(" GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO(" Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO(" GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO(" Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO(" Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO(" Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO(" Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO(" DX Features.\n");
	if (capabilities & SVGA_CAP_HP_CMD_QUEUE)
		DRM_INFO(" HP Command Queue.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_buffer_object *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
			  &vmw_sys_placement, false, true,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_bo_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

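/**
 * vmw_device_init - Bring up the SVGA device and create the FIFO.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * Saves the current enable, config-done and traces register state so
 * that vmw_device_fini() can restore it, enables the device in hidden
 * mode, enables framebuffer traces only when no command queues are
 * supported, and creates the FIFO. Returns 0 on success or a negative
 * error code if FIFO creation fails.
 */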
static int vmw_device_init(struct vmw_private *dev_priv)
{
	bool uses_fb_traces = false;

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);

	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
		  SVGA_REG_ENABLE_HIDE);

	uses_fb_traces = !vmw_cmd_supported(dev_priv) &&
			 (dev_priv->capabilities & SVGA_CAP_TRACES) != 0;

	vmw_write(dev_priv, SVGA_REG_TRACES, uses_fb_traces);
	dev_priv->fifo = vmw_fifo_create(dev_priv);
	if (IS_ERR(dev_priv->fifo)) {
		int err = PTR_ERR(dev_priv->fifo);

		dev_priv->fifo = NULL;
		return err;
	} else if (!dev_priv->fifo) {
		vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
	}

	dev_priv->last_read_seqno = vmw_fence_read(dev_priv);
	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	return 0;
}

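/**
 * vmw_device_fini - Tear down the SVGA device.
 *
 * @vmw: Pointer to device private struct.
 *
 * Performs a legacy sync to drain outstanding device work, samples the
 * last read fence sequence number, restores the config-done, enable and
 * traces register state saved by vmw_device_init(), and destroys the
 * FIFO.
 */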
static void vmw_device_fini(struct vmw_private *vmw)
{
	/*
	 * Legacy sync
	 */
	vmw_write(vmw, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	while (vmw_read(vmw, SVGA_REG_BUSY) != 0)
		;

	vmw->last_read_seqno = vmw_fence_read(vmw);

	vmw_write(vmw, SVGA_REG_CONFIG_DONE,
		  vmw->config_done_state);
	vmw_write(vmw, SVGA_REG_ENABLE,
		  vmw->enable_state);
	vmw_write(vmw, SVGA_REG_TRACES,
		  vmw->traces_state);

	vmw_fifo_destroy(vmw);
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}

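/**
 * vmw_request_device - Perform full device setup.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * Initializes the device, brings up the fence FIFO, tries to create a
 * command buffer manager (falling back to the legacy SM type if that
 * fails), performs the late setup via vmw_request_device_late() and
 * creates the dummy query buffer object. On failure, already acquired
 * resources are torn down in reverse order.
 */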
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_device_init(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize the device.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->sm_type = VMW_SM_LEGACY;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_device_fini(dev_priv);
	return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */
	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_bo_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_device_fini(dev_priv);
}

/*
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading the SVGA_REG_[WIDTH|HEIGHT] registers and then
 * clamping the values to the fb_max_[width|height] fields and to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, they are set to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */
		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

615/**
616 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
617 * system.
618 *
619 * @dev_priv: Pointer to a struct vmw_private
620 *
621 * This functions tries to determine what actions need to be taken by the
622 * driver to make system pages visible to the device.
623 * If this function decides that DMA is not possible, it returns -EINVAL.
624 * The driver may then try to disable features of the device that require
625 * DMA.
626 */
627static int vmw_dma_select_mode(struct vmw_private *dev_priv)
628{
629 static const char *names[vmw_dma_map_max] = {
630 [vmw_dma_phys] = "Using physical TTM page addresses.",
631 [vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
632 [vmw_dma_map_populate] = "Caching DMA mappings.",
633 [vmw_dma_map_bind] = "Giving up DMA mappings early."};
634
635 /* TTM currently doesn't fully support SEV encryption. */
636 if (mem_encrypt_active())
637 return -EINVAL;
638
639 if (vmw_force_coherent)
640 dev_priv->map_mode = vmw_dma_alloc_coherent;
641 else if (vmw_restrict_iommu)
642 dev_priv->map_mode = vmw_dma_map_bind;
643 else
644 dev_priv->map_mode = vmw_dma_map_populate;
645
646 DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
647 return 0;
648}
649
650/**
651 * vmw_dma_masks - set required page- and dma masks
652 *
653 * @dev_priv: Pointer to struct drm-device
654 *
655 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
656 * restriction also for 64-bit systems.
657 */
658static int vmw_dma_masks(struct vmw_private *dev_priv)
659{
660 struct drm_device *dev = &dev_priv->drm;
661 int ret = 0;
662
663 ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
664 if (dev_priv->map_mode != vmw_dma_phys &&
665 (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
666 DRM_INFO("Restricting DMA addresses to 44 bits.\n");
667 return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
668 }
669
670 return ret;
671}
672
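/**
 * vmw_vram_manager_init - Initialize the TTM manager for VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * Uses the transparent-hugepage-aware manager when
 * CONFIG_TRANSPARENT_HUGEPAGE is enabled and a plain range manager
 * otherwise. The manager starts out unused; it is switched on later by
 * __vmw_svga_enable() once SVGA mode has been enabled.
 */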
static int vmw_vram_manager_init(struct vmw_private *dev_priv)
{
	int ret;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	ret = vmw_thp_init(dev_priv);
#else
	ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false,
				 dev_priv->vram_size >> PAGE_SHIFT);
#endif
	ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false);
	return ret;
}

static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	vmw_thp_fini(dev_priv);
#else
	ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM);
#endif
}

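/**
 * vmw_setup_pci_resources - Claim and map the PCI resources.
 *
 * @dev: Pointer to device private struct.
 * @pci_id: The PCI device id, either VMWGFX_PCI_ID_SVGA2 or
 * VMWGFX_PCI_ID_SVGA3.
 *
 * For SVGA3 devices, BAR 0 holds the register MMIO area and BAR 2 the
 * VRAM aperture. For SVGA2 devices, BAR 0 is the I/O port range, BAR 1
 * the VRAM aperture and BAR 2 the FIFO memory. Returns 0 on success or
 * a negative error code, releasing the PCI regions on failure.
 */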
static int vmw_setup_pci_resources(struct vmw_private *dev,
				   unsigned long pci_id)
{
	resource_size_t rmmio_start;
	resource_size_t rmmio_size;
	resource_size_t fifo_start;
	resource_size_t fifo_size;
	int ret;
	struct pci_dev *pdev = to_pci_dev(dev->drm.dev);

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, "vmwgfx probe");
	if (ret)
		return ret;

	dev->pci_id = pci_id;
	if (pci_id == VMWGFX_PCI_ID_SVGA3) {
		rmmio_start = pci_resource_start(pdev, 0);
		rmmio_size = pci_resource_len(pdev, 0);
		dev->vram_start = pci_resource_start(pdev, 2);
		dev->vram_size = pci_resource_len(pdev, 2);

		/* %pa already prints a 0x prefix, so don't add another one. */
		DRM_INFO("Register MMIO at %pa size is %llu kiB\n",
			 &rmmio_start, (uint64_t)rmmio_size / 1024);
		dev->rmmio = devm_ioremap(dev->drm.dev,
					  rmmio_start,
					  rmmio_size);
		if (!dev->rmmio) {
			DRM_ERROR("Failed mapping registers mmio memory.\n");
			pci_release_regions(pdev);
			return -ENOMEM;
		}
	} else if (pci_id == VMWGFX_PCI_ID_SVGA2) {
		dev->io_start = pci_resource_start(pdev, 0);
		dev->vram_start = pci_resource_start(pdev, 1);
		dev->vram_size = pci_resource_len(pdev, 1);
		fifo_start = pci_resource_start(pdev, 2);
		fifo_size = pci_resource_len(pdev, 2);

		DRM_INFO("FIFO at %pa size is %llu kiB\n",
			 &fifo_start, (uint64_t)fifo_size / 1024);
		dev->fifo_mem = devm_memremap(dev->drm.dev,
					      fifo_start,
					      fifo_size,
					      MEMREMAP_WB);
		if (IS_ERR(dev->fifo_mem)) {
			DRM_ERROR("Failed mapping FIFO memory.\n");
			pci_release_regions(pdev);
			return PTR_ERR(dev->fifo_mem);
		}
	} else {
		pci_release_regions(pdev);
		return -EINVAL;
	}

	/*
	 * This is the approximate size of the vram; the exact size will
	 * only be known after we read SVGA_REG_VRAM_SIZE. The PCI resource
	 * size will be equal to or bigger than the size reported by
	 * SVGA_REG_VRAM_SIZE.
	 */
	DRM_INFO("VRAM at %pa size is %llu kiB\n",
		 &dev->vram_start, (uint64_t)dev->vram_size / 1024);

	return 0;
}

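/**
 * vmw_detect_version - Negotiate the SVGA device version.
 *
 * @dev: Pointer to device private struct.
 *
 * Writes the highest device version the driver understands (SVGA_ID_3
 * on SVGA v3 hardware, otherwise SVGA_ID_2) to the ID register and
 * reads back what the device accepted. Returns 0 on success or -ENOSYS
 * if the device answers with an unsupported version.
 */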
static int vmw_detect_version(struct vmw_private *dev)
{
	uint32_t svga_id;

	vmw_write(dev, SVGA_REG_ID, vmw_is_svga_v3(dev) ?
		  SVGA_ID_3 : SVGA_ID_2);
	svga_id = vmw_read(dev, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2 && svga_id != SVGA_ID_3) {
		DRM_ERROR("Unsupported SVGA ID 0x%x on chipset 0x%x\n",
			  svga_id, dev->vmw_chipset);
		return -ENOSYS;
	}
	BUG_ON(vmw_is_svga_v3(dev) && (svga_id != SVGA_ID_3));
	DRM_INFO("Running on SVGA version %d.\n", (svga_id & 0xff));
	return 0;
}

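/**
 * vmw_driver_load - Initialize the driver for a newly probed device.
 *
 * @dev_priv: Pointer to device private struct.
 * @pci_id: The PCI device id.
 *
 * Sets up locks and PCI resources, detects the SVGA version, reads the
 * device capabilities and sizes, configures DMA, initializes TTM, the
 * memory managers (VRAM, GMR and MOB), KMS, overlays and the device
 * itself, and finally registers the PM notifier. The error paths unwind
 * in reverse order of initialization.
 */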
static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
{
	int ret;
	enum vmw_res_type i;
	bool refuse_dma = false;
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);

	dev_priv->vmw_chipset = pci_id;
	dev_priv->drm.dev_private = dev_priv;

	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->binding_mutex);
	spin_lock_init(&dev_priv->resource_lock);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->cursor_lock);

	ret = vmw_setup_pci_resources(dev_priv, pci_id);
	if (ret)
		return ret;
	ret = vmw_detect_version(dev_priv);
	if (ret)
		goto out_no_pci_or_version;

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init_base(&dev_priv->res_idr[i], 1);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
		dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);

	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities since DMA not available.\n");
		refuse_dma = true;
		if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
			DRM_INFO("Disabling 3D acceleration.\n");
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->fifo_mem_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size;

		if (dev_priv->capabilities2 & SVGA_CAP2_GB_MEMSIZE_2)
			mem_size = vmw_read(dev_priv,
					    SVGA_REG_GBOBJECT_MEM_SIZE_KB);
		else
			mem_size =
				vmw_read(dev_priv,
					 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 3;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);
	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
		vmw_print_capabilities2(dev_priv->capabilities2);
	DRM_INFO("Supports command queues = %d\n",
		 vmw_cmd_supported(dev_priv));

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %llu kiB\n",
		 (uint64_t)dev_priv->prim_bb_mem / 1024);

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err0;
	}

	dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
						&vmw_prime_dmabuf_ops);
	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err0;
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = vmw_irq_install(&dev_priv->drm, pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	drm_vma_offset_manager_init(&dev_priv->vma_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);
	ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
			      dev_priv->drm.dev,
			      dev_priv->drm.anon_inode->i_mapping,
			      &dev_priv->vma_manager,
			      dev_priv->map_mode == vmw_dma_alloc_coherent,
			      false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = vmw_vram_manager_init(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}

	/*
	 * "Guest Memory Regions" is an aperture-like feature with
	 * one slot per bo. There is an upper limit of the number of
	 * slots as well as the bo size.
	 */
	dev_priv->has_gmr = true;
	/* TODO: This is most likely not correct */
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma ||
	    vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
		dev_priv->has_mob = true;

		if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. 3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
		if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
			dev_priv->sm_type = VMW_SM_4;
		spin_unlock(&dev_priv->cap_lock);
	}

	vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);

	/* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
	if (has_sm4_context(dev_priv) &&
	    (dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM41);
		if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
			dev_priv->sm_type = VMW_SM_4_1;

		if (has_sm4_1_context(dev_priv) &&
		    (dev_priv->capabilities2 & SVGA_CAP2_DX3)) {
			vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM5);
			if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
				dev_priv->sm_type = VMW_SM_5;
		}
	}

	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	if (dev_priv->sm_type == VMW_SM_5)
		DRM_INFO("SM5 support available.\n");
	if (dev_priv->sm_type == VMW_SM_4_1)
		DRM_INFO("SM4_1 support available.\n");
	if (dev_priv->sm_type == VMW_SM_4)
		DRM_INFO("SM4 support available.\n");
	DRM_INFO("Running without reservation semaphore\n");

	vmw_host_printf("vmwgfx: Module Version: %d.%d.%d (kernel: %s)",
			VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
			VMWGFX_DRIVER_PATCHLEVEL, UTS_RELEASE);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
	vmw_vram_manager_fini(dev_priv);
out_no_vram:
	ttm_device_fini(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(&dev_priv->drm);
out_no_irq:
	ttm_object_device_release(&dev_priv->tdev);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
out_no_pci_or_version:
	pci_release_regions(pdev);
	return ret;
}

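/**
 * vmw_driver_unload - Tear down the driver on device removal.
 *
 * @dev: Pointer to the DRM device.
 *
 * Reverses vmw_driver_load(): unregisters the PM notifier, shuts down
 * fbdev if enabled, closes KMS and overlays, and takes down the memory
 * managers, TTM, fences, IRQs and the device itself in reverse order of
 * initialization.
 */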
static void vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
	vmw_vram_manager_fini(dev_priv);
	ttm_device_fini(&dev_priv->bdev);
	drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(&dev_priv->drm);

	ttm_object_device_release(&dev_priv->tdev);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	pci_release_regions(pdev);
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(!vmw_fp))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

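/**
 * vmw_generic_ioctl - Common ioctl dispatch for native and compat paths.
 *
 * @filp: Pointer to the file struct.
 * @cmd: The ioctl command.
 * @arg: The ioctl argument.
 * @ioctl_func: drm_ioctl() or drm_compat_ioctl(), depending on caller.
 *
 * Applies driver-specific permission handling before delegating to the
 * core DRM ioctl handler: VMW_EXECBUF is dispatched directly, skipping
 * the command-encoding check below since it accepts several argument
 * sizes, and VMW_UPDATE_LAYOUT is restricted to the current master or
 * CAP_SYS_ADMIN as noted in the vmw_ioctls table.
 */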
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	unsigned int flags;

	/*
	 * Do extra checking on driver private ioctls.
	 */
	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			return ioctl_func(filp, cmd, arg);
		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
			if (!drm_is_current_master(file_priv) &&
			    !capable(CAP_SYS_ADMIN))
				return -EACCES;
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	return ioctl_func(filp, cmd, arg);

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_master_set(struct drm_device *dev,
			   struct drm_file *file_priv,
			   bool from_open)
{
	/*
	 * Inform a new master that the layout may have changed while
	 * it was gone.
	 */
	if (!from_open)
		drm_sysfs_hotplug_event(dev);
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);

	if (!ttm_resource_manager_used(man)) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE);
		ttm_resource_manager_set_used(man, true);
	}
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	__vmw_svga_enable(dev_priv);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);

	if (ttm_resource_manager_used(man)) {
		ttm_resource_manager_set_used(man, false);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
}

/**
 * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
	/*
	 * Disabling SVGA will turn off device modesetting capabilities, so
	 * notify KMS about that so that it doesn't cache atomic state that
	 * isn't valid anymore, for example crtcs turned on.
	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
	 * end up with lock order reversal. Thus, a master may actually perform
	 * a new modeset just after we call vmw_kms_lost_device() and race with
	 * vmw_svga_disable(), but that should at worst cause atomic KMS state
	 * to be inconsistent with the device, causing modesetting problems.
	 */
	vmw_kms_lost_device(&dev_priv->drm);
	if (ttm_resource_manager_used(man)) {
		if (ttm_resource_manager_evict_all(&dev_priv->bdev, man))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		ttm_resource_manager_set_used(man, false);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	ttm_mem_global_release(&ttm_mem_glob);
	drm_dev_unregister(dev);
	vmw_driver_unload(dev);
}

static unsigned long
vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
		      unsigned long len, unsigned long pgoff,
		      unsigned long flags)
{
	struct drm_file *file_priv = file->private_data;
	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);

	return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
				     &dev_priv->vma_manager);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		/*
		 * Take the reservation sem in write mode, which will make sure
		 * there are no other processes holding a buffer object
		 * reservation, meaning we should be able to evict all buffer
		 * objects if needed.
		 * Once user-space processes have been frozen, we can release
		 * the lock again.
		 */
		dev_priv->suspend_locked = true;
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		if (READ_ONCE(dev_priv->suspend_locked))
			dev_priv->suspend_locked = false;
		break;
	default:
		break;
	}
	return 0;
}

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

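/**
 * vmw_pm_freeze - Prepare the device for hibernation.
 *
 * @kdev: Pointer to the struct device.
 *
 * Suspends modesetting and fbdev, evicts all resources and buffer
 * objects and takes down command submission. Returns -EBUSY, aborting
 * hibernation and restoring the previous device state, if 3D resources
 * are still active.
 */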
static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/*
	 * No user-space processes should be running now.
	 */
	ret = vmw_kms_suspend(&dev_priv->drm);
	if (ret) {
		DRM_ERROR("Failed to freeze modesetting.\n");
		return ret;
	}
	if (dev_priv->enable_fb)
		vmw_fb_off(dev_priv);

	vmw_execbuf_release_pinned_bo(dev_priv);
	vmw_resource_evict_all(dev_priv);
	vmw_release_device_early(dev_priv);
	while (ttm_device_swapout(&dev_priv->bdev, &ctx, GFP_KERNEL) > 0);
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);
	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspend_locked = false;
		if (dev_priv->suspend_state)
			vmw_kms_resume(dev);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		return -EBUSY;
	}

	vmw_fence_fifo_down(dev_priv->fman);
	__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);
	return 0;
}

static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_detect_version(dev_priv);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->suspend_locked = false;
	if (dev_priv->suspend_state)
		vmw_kms_resume(&dev_priv->drm);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);

	return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = drm_poll,
	.read = drm_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
	.get_unmapped_area = vmw_get_unmapped_area,
};

static const struct drm_driver driver = {
	.driver_features =
	DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

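/**
 * vmw_probe - PCI probe callback.
 *
 * @pdev: The probed PCI device.
 * @ent: The matching entry from vmw_pci_id_list.
 *
 * Removes conflicting framebuffer drivers, enables the PCI device,
 * allocates the DRM device, initializes the global TTM memory
 * accounting, loads the driver and registers the DRM device.
 */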
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct vmw_private *vmw;
	int ret;

	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "svgadrmfb");
	if (ret)
		return ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	vmw = devm_drm_dev_alloc(&pdev->dev, &driver,
				 struct vmw_private, drm);
	if (IS_ERR(vmw))
		return PTR_ERR(vmw);

	pci_set_drvdata(pdev, &vmw->drm);

	ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev);
	if (ret)
		return ret;

	ret = vmw_driver_load(vmw, ent->device);
	if (ret)
		return ret;

	ret = drm_dev_register(&vmw->drm, 0);
	if (ret) {
		vmw_driver_unload(&vmw->drm);
		return ret;
	}

	return 0;
}

static int __init vmwgfx_init(void)
{
	int ret;

	if (vgacon_text_force())
		return -EINVAL;

	ret = pci_register_driver(&vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	pci_unregister_driver(&vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");