// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_devcaps.h"
#include "vmwgfx_kms.h"

#include <drm/vmwgfx_drm.h>
#include <linux/pci.h>

/**
 * vmw_getparam_ioctl - Implements the DRM_VMW_GET_PARAM ioctl
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_getparam_arg naming the requested
 * parameter; the result is returned in the same struct.
 * @file_priv: Pointer to the calling file private.
 *
 * Reports a single device or driver capability to user-space, for example
 * 3D support, FIFO capabilities or the maximum MOB memory size.
 */
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_getparam_arg *param =
	    (struct drm_vmw_getparam_arg *)data;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	switch (param->param) {
	case DRM_VMW_PARAM_NUM_STREAMS:
		param->value = vmw_overlay_num_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_NUM_FREE_STREAMS:
		param->value = vmw_overlay_num_free_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_3D:
		param->value = vmw_supports_3d(dev_priv) ? 1 : 0;
		break;
	case DRM_VMW_PARAM_HW_CAPS:
		param->value = dev_priv->capabilities;
		break;
	case DRM_VMW_PARAM_HW_CAPS2:
		param->value = dev_priv->capabilities2;
		break;
	case DRM_VMW_PARAM_FIFO_CAPS:
		param->value = vmw_fifo_caps(dev_priv);
		break;
	case DRM_VMW_PARAM_MAX_FB_SIZE:
		param->value = dev_priv->max_primary_mem;
		break;
	case DRM_VMW_PARAM_FIFO_HW_VERSION:
	{
		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS))
			param->value = SVGA3D_HWVERSION_WS8_B1;
		else
			param->value = vmw_fifo_mem_read(
					       dev_priv,
					       ((vmw_fifo_caps(dev_priv) &
						 SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
							SVGA_FIFO_3D_HWVERSION_REVISED :
							SVGA_FIFO_3D_HWVERSION));
		break;
	}
	case DRM_VMW_PARAM_MAX_SURF_MEMORY:
		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
		    !vmw_fp->gb_aware)
			param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
		else
			param->value = dev_priv->memory_size;
		break;
	case DRM_VMW_PARAM_3D_CAPS_SIZE:
		param->value = vmw_devcaps_size(dev_priv, vmw_fp->gb_aware);
		break;
	case DRM_VMW_PARAM_MAX_MOB_MEMORY:
		vmw_fp->gb_aware = true;
		param->value = dev_priv->max_mob_pages * PAGE_SIZE;
		break;
	case DRM_VMW_PARAM_MAX_MOB_SIZE:
		param->value = dev_priv->max_mob_size;
		break;
	case DRM_VMW_PARAM_SCREEN_TARGET:
		param->value =
			(dev_priv->active_display_unit == vmw_du_screen_target);
		break;
	case DRM_VMW_PARAM_DX:
		param->value = has_sm4_context(dev_priv);
		break;
	case DRM_VMW_PARAM_SM4_1:
		param->value = has_sm4_1_context(dev_priv);
		break;
	case DRM_VMW_PARAM_SM5:
		param->value = has_sm5_context(dev_priv);
		break;
	case DRM_VMW_PARAM_GL43:
		param->value = has_gl43_context(dev_priv);
		break;
	case DRM_VMW_PARAM_DEVICE_ID:
		param->value = to_pci_dev(dev_priv->drm.dev)->device;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
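
/*
 * Usage sketch (illustrative only, not part of this file): querying a
 * parameter from user-space through libdrm's drmCommandWriteRead().
 * DRM_VMW_GET_PARAM and struct drm_vmw_getparam_arg come from the vmwgfx
 * UAPI header; the helper name, the include paths and the minimal error
 * handling are assumptions.
 */
#if 0	/* user-space side, not built with the kernel module */
#include <stdint.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

static int vmw_query_param(int fd, uint32_t param, uint64_t *value)
{
	struct drm_vmw_getparam_arg arg = { .param = param };
	int ret;

	/* The kernel handler above fills arg.value on success. */
	ret = drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg));
	if (ret == 0)
		*value = arg.value;
	return ret;
}
#endif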

/**
 * vmw_get_cap_3d_ioctl - Implements the DRM_VMW_GET_3D_CAP ioctl
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_get_3d_cap_arg holding the user-space
 * buffer address and its maximum size.
 * @file_priv: Pointer to the calling file private.
 *
 * Copies the device capability table into a user-space supplied buffer,
 * truncating it to the caller's maximum size if necessary.
 */
int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vmw_get_3d_cap_arg *arg =
		(struct drm_vmw_get_3d_cap_arg *) data;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t size;
	void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
	void *bounce = NULL;
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
		VMW_DEBUG_USER("Illegal GET_3D_CAP argument.\n");
		return -EINVAL;
	}

	size = vmw_devcaps_size(dev_priv, vmw_fp->gb_aware);
	if (unlikely(size == 0)) {
		DRM_ERROR("Failed to figure out the devcaps size (no 3D).\n");
		return -ENOMEM;
	}

	if (arg->max_size < size)
		size = arg->max_size;

	bounce = vzalloc(size);
	if (unlikely(bounce == NULL)) {
		DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
		return -ENOMEM;
	}

	ret = vmw_devcaps_copy(dev_priv, vmw_fp->gb_aware, bounce, size);
	if (unlikely(ret != 0))
		goto out_err;

	ret = copy_to_user(buffer, bounce, size);
	if (ret)
		ret = -EFAULT;
out_err:
	vfree(bounce);

	if (unlikely(ret != 0))
		DRM_ERROR("Failed to report 3D caps info.\n");

	return ret;
}
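
/*
 * Usage sketch (illustrative only, not part of this file): fetching the
 * capability table from user-space. The required size is queried first via
 * DRM_VMW_PARAM_3D_CAPS_SIZE, then DRM_VMW_GET_3D_CAP fills the buffer.
 * Field names follow the handlers above; the function name, the libdrm
 * calls chosen and the allocation strategy are assumptions.
 */
#if 0	/* user-space side, not built with the kernel module */
#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

static void *vmw_fetch_3d_caps(int fd, uint32_t *size_out)
{
	struct drm_vmw_getparam_arg gp = { .param = DRM_VMW_PARAM_3D_CAPS_SIZE };
	struct drm_vmw_get_3d_cap_arg cap = { 0 };
	void *buf;

	if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &gp, sizeof(gp)) || !gp.value)
		return NULL;

	buf = calloc(1, gp.value);
	if (!buf)
		return NULL;

	cap.buffer = (uintptr_t)buf;	/* user pointer passed as a 64-bit value */
	cap.max_size = gp.value;
	if (drmCommandWrite(fd, DRM_VMW_GET_3D_CAP, &cap, sizeof(cap))) {
		free(buf);
		return NULL;
	}

	*size_out = gp.value;
	return buf;
}
#endif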

/**
 * vmw_present_ioctl - Implements the DRM_VMW_PRESENT ioctl
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_present_arg identifying the
 * framebuffer, the source surface and the clip rectangles.
 * @file_priv: Pointer to the calling file private.
 *
 * Copies the clip rectangles from user-space and presents the given
 * surface onto the given framebuffer.
 */
int vmw_present_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_present_arg *arg =
		(struct drm_vmw_present_arg *)data;
	struct vmw_surface *surface;
	struct drm_vmw_rect __user *clips_ptr;
	struct drm_vmw_rect *clips = NULL;
	struct drm_framebuffer *fb;
	struct vmw_framebuffer *vfb;
	struct vmw_resource *res;
	uint32_t num_clips;
	int ret;

	num_clips = arg->num_clips;
	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

	if (unlikely(num_clips == 0))
		return 0;

	if (clips_ptr == NULL) {
		VMW_DEBUG_USER("Variable clips_ptr must be specified.\n");
		ret = -EINVAL;
		goto out_clips;
	}

	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
	if (clips == NULL) {
		DRM_ERROR("Failed to allocate clip rect list.\n");
		ret = -ENOMEM;
		goto out_clips;
	}

	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
	if (ret) {
		DRM_ERROR("Failed to copy clip rects from userspace.\n");
		ret = -EFAULT;
		goto out_no_copy;
	}

	drm_modeset_lock_all(dev);

	fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
	if (!fb) {
		VMW_DEBUG_USER("Invalid framebuffer id.\n");
		ret = -ENOENT;
		goto out_no_fb;
	}
	vfb = vmw_framebuffer_to_vfb(fb);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
					      user_surface_converter,
					      &res);
	if (ret)
		goto out_no_surface;

	surface = vmw_res_to_srf(res);
	ret = vmw_kms_present(dev_priv, file_priv,
			      vfb, surface, arg->sid,
			      arg->dest_x, arg->dest_y,
			      clips, num_clips);

	/* vmw_user_surface_lookup takes one ref, and so does new_fb */
	vmw_surface_unreference(&surface);

out_no_surface:
	drm_framebuffer_put(fb);
out_no_fb:
	drm_modeset_unlock_all(dev);
out_no_copy:
	kfree(clips);
out_clips:
	return ret;
}
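
/*
 * Usage sketch (illustrative only, not part of this file): presenting a
 * surface onto a framebuffer with a single clip rectangle. The arg field
 * names follow the handler above; struct drm_vmw_rect is assumed to carry
 * x/y/w/h as in the vmwgfx UAPI header, and the helper name is a
 * placeholder.
 */
#if 0	/* user-space side, not built with the kernel module */
#include <stdint.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

static int vmw_present_region(int fd, uint32_t fb_id, uint32_t sid,
			      int32_t x, int32_t y, uint32_t w, uint32_t h)
{
	struct drm_vmw_rect clip = { .x = 0, .y = 0, .w = w, .h = h };
	struct drm_vmw_present_arg arg = {
		.fb_id = fb_id,
		.sid = sid,
		.dest_x = x,
		.dest_y = y,
		.clips_ptr = (uintptr_t)&clip,	/* array of num_clips rects */
		.num_clips = 1,
	};

	return drmCommandWrite(fd, DRM_VMW_PRESENT, &arg, sizeof(arg));
}
#endif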

/**
 * vmw_present_readback_ioctl - Implements the DRM_VMW_PRESENT_READBACK ioctl
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_present_readback_arg identifying the
 * framebuffer, the clip rectangles and an optional fence report.
 * @file_priv: Pointer to the calling file private.
 *
 * Reads the given clip regions of a buffer-backed framebuffer back from
 * the device, optionally returning a fence to user-space.
 */
int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_present_readback_arg *arg =
		(struct drm_vmw_present_readback_arg *)data;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)
		(unsigned long)arg->fence_rep;
	struct drm_vmw_rect __user *clips_ptr;
	struct drm_vmw_rect *clips = NULL;
	struct drm_framebuffer *fb;
	struct vmw_framebuffer *vfb;
	uint32_t num_clips;
	int ret;

	num_clips = arg->num_clips;
	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

	if (unlikely(num_clips == 0))
		return 0;

	if (clips_ptr == NULL) {
		VMW_DEBUG_USER("Argument clips_ptr must be specified.\n");
		ret = -EINVAL;
		goto out_clips;
	}

	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
	if (clips == NULL) {
		DRM_ERROR("Failed to allocate clip rect list.\n");
		ret = -ENOMEM;
		goto out_clips;
	}

	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
	if (ret) {
		DRM_ERROR("Failed to copy clip rects from userspace.\n");
		ret = -EFAULT;
		goto out_no_copy;
	}

	drm_modeset_lock_all(dev);

	fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
	if (!fb) {
		VMW_DEBUG_USER("Invalid framebuffer id.\n");
		ret = -ENOENT;
		goto out_no_fb;
	}

	vfb = vmw_framebuffer_to_vfb(fb);
	if (!vfb->bo) {
		VMW_DEBUG_USER("Framebuffer not buffer backed.\n");
		ret = -EINVAL;
		goto out_no_ttm_lock;
	}

	ret = vmw_kms_readback(dev_priv, file_priv,
			       vfb, user_fence_rep,
			       clips, num_clips);

out_no_ttm_lock:
	drm_framebuffer_put(fb);
out_no_fb:
	drm_modeset_unlock_all(dev);
out_no_copy:
	kfree(clips);
out_clips:
	return ret;
}
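
/*
 * Usage sketch (illustrative only, not part of this file): reading a
 * region of a buffer-backed framebuffer back from the device. The arg
 * field names follow the handler above; passing a zero fence_rep to skip
 * the fence report is an assumption, as is the helper name.
 */
#if 0	/* user-space side, not built with the kernel module */
#include <stdint.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

static int vmw_readback_region(int fd, uint32_t fb_id,
			       int32_t x, int32_t y, uint32_t w, uint32_t h)
{
	struct drm_vmw_rect clip = { .x = x, .y = y, .w = w, .h = h };
	struct drm_vmw_present_readback_arg arg = {
		.fb_id = fb_id,
		.num_clips = 1,
		.clips_ptr = (uintptr_t)&clip,
		.fence_rep = 0,		/* no fence report requested */
	};

	return drmCommandWrite(fd, DRM_VMW_PRESENT_READBACK, &arg, sizeof(arg));
}
#endif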
/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include "vmwgfx_kms.h"
#include "device_include/svga3d_caps.h"

struct svga_3d_compat_cap {
	SVGA3dCapsRecordHeader header;
	SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
};

int vmw_getparam_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_getparam_arg *param =
	    (struct drm_vmw_getparam_arg *)data;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	switch (param->param) {
	case DRM_VMW_PARAM_NUM_STREAMS:
		param->value = vmw_overlay_num_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_NUM_FREE_STREAMS:
		param->value = vmw_overlay_num_free_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_3D:
		param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
		break;
	case DRM_VMW_PARAM_HW_CAPS:
		param->value = dev_priv->capabilities;
		break;
	case DRM_VMW_PARAM_FIFO_CAPS:
		param->value = dev_priv->fifo.capabilities;
		break;
	case DRM_VMW_PARAM_MAX_FB_SIZE:
		param->value = dev_priv->prim_bb_mem;
		break;
	case DRM_VMW_PARAM_FIFO_HW_VERSION:
	{
		u32 *fifo_mem = dev_priv->mmio_virt;
		const struct vmw_fifo_state *fifo = &dev_priv->fifo;

		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
			param->value = SVGA3D_HWVERSION_WS8_B1;
			break;
		}

		param->value =
			vmw_mmio_read(fifo_mem +
				      ((fifo->capabilities &
					SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
				       SVGA_FIFO_3D_HWVERSION_REVISED :
				       SVGA_FIFO_3D_HWVERSION));
		break;
	}
	case DRM_VMW_PARAM_MAX_SURF_MEMORY:
		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
		    !vmw_fp->gb_aware)
			param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
		else
			param->value = dev_priv->memory_size;
		break;
	case DRM_VMW_PARAM_3D_CAPS_SIZE:
		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
		    vmw_fp->gb_aware)
			param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
		else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
			param->value = sizeof(struct svga_3d_compat_cap) +
				sizeof(uint32_t);
		else
			param->value = (SVGA_FIFO_3D_CAPS_LAST -
					SVGA_FIFO_3D_CAPS + 1) *
				sizeof(uint32_t);
		break;
	case DRM_VMW_PARAM_MAX_MOB_MEMORY:
		vmw_fp->gb_aware = true;
		param->value = dev_priv->max_mob_pages * PAGE_SIZE;
		break;
	case DRM_VMW_PARAM_MAX_MOB_SIZE:
		param->value = dev_priv->max_mob_size;
		break;
	case DRM_VMW_PARAM_SCREEN_TARGET:
		param->value =
			(dev_priv->active_display_unit == vmw_du_screen_target);
		break;
	case DRM_VMW_PARAM_DX:
		param->value = dev_priv->has_dx;
		break;
	default:
		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
			  param->param);
		return -EINVAL;
	}

	return 0;
}

static u32 vmw_mask_multisample(unsigned int cap, u32 fmt_value)
{
	/* If the header is updated, update the format test as well! */
	BUILD_BUG_ON(SVGA3D_DEVCAP_DXFMT_BC5_UNORM + 1 != SVGA3D_DEVCAP_MAX);

	if (cap >= SVGA3D_DEVCAP_DXFMT_X8R8G8B8 &&
	    cap <= SVGA3D_DEVCAP_DXFMT_BC5_UNORM)
		fmt_value &= ~(SVGADX_DXFMT_MULTISAMPLE_2 |
			       SVGADX_DXFMT_MULTISAMPLE_4 |
			       SVGADX_DXFMT_MULTISAMPLE_8);
	else if (cap == SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES)
		return 0;

	return fmt_value;
}

static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
			       size_t size)
{
	struct svga_3d_compat_cap *compat_cap =
		(struct svga_3d_compat_cap *) bounce;
	unsigned int i;
	size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
	unsigned int max_size;

	if (size < pair_offset)
		return -EINVAL;

	max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);

	if (max_size > SVGA3D_DEVCAP_MAX)
		max_size = SVGA3D_DEVCAP_MAX;

	compat_cap->header.length =
		(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
	compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;

	spin_lock(&dev_priv->cap_lock);
	for (i = 0; i < max_size; ++i) {
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
		compat_cap->pairs[i][0] = i;
		compat_cap->pairs[i][1] = vmw_mask_multisample
			(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
	}
	spin_unlock(&dev_priv->cap_lock);

	return 0;
}
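
/*
 * Layout sketch (illustrative only, not part of this file): the record
 * built above starts with a SVGA3dCapsRecordHeader whose length counts
 * 32-bit words including the header itself, followed by (index, value)
 * pairs. Assuming a consumer that has the same struct definition
 * available, the pairs could be walked roughly as follows.
 */
#if 0	/* user-space side, not built with the kernel module */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

static void vmw_dump_compat_caps(const struct svga_3d_compat_cap *cap)
{
	size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
	/* Bytes occupied by the pairs, derived from the header length. */
	size_t pair_bytes = cap->header.length * sizeof(uint32_t) - pair_offset;
	unsigned int i, n = pair_bytes / sizeof(cap->pairs[0]);

	for (i = 0; i < n; ++i)
		printf("devcap %u = 0x%x\n",
		       (unsigned int)cap->pairs[i][0],
		       (unsigned int)cap->pairs[i][1]);
}
#endif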

int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vmw_get_3d_cap_arg *arg =
		(struct drm_vmw_get_3d_cap_arg *) data;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t size;
	u32 *fifo_mem;
	void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
	void *bounce;
	int ret;
	bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	if (unlikely(arg->pad64 != 0)) {
		DRM_ERROR("Illegal GET_3D_CAP argument.\n");
		return -EINVAL;
	}

	if (gb_objects && vmw_fp->gb_aware)
		size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
	else if (gb_objects)
		size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
	else
		size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
			sizeof(uint32_t);

	if (arg->max_size < size)
		size = arg->max_size;

	bounce = vzalloc(size);
	if (unlikely(bounce == NULL)) {
		DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
		return -ENOMEM;
	}

	if (gb_objects && vmw_fp->gb_aware) {
		int i, num;
		uint32_t *bounce32 = (uint32_t *) bounce;

		num = size / sizeof(uint32_t);
		if (num > SVGA3D_DEVCAP_MAX)
			num = SVGA3D_DEVCAP_MAX;

		spin_lock(&dev_priv->cap_lock);
		for (i = 0; i < num; ++i) {
			vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
			*bounce32++ = vmw_mask_multisample
				(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
		}
		spin_unlock(&dev_priv->cap_lock);
	} else if (gb_objects) {
		ret = vmw_fill_compat_cap(dev_priv, bounce, size);
		if (unlikely(ret != 0))
			goto out_err;
	} else {
		fifo_mem = dev_priv->mmio_virt;
		memcpy(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
	}

	ret = copy_to_user(buffer, bounce, size);
	if (ret)
		ret = -EFAULT;
out_err:
	vfree(bounce);

	if (unlikely(ret != 0))
		DRM_ERROR("Failed to report 3D caps info.\n");

	return ret;
}

int vmw_present_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_present_arg *arg =
		(struct drm_vmw_present_arg *)data;
	struct vmw_surface *surface;
	struct drm_vmw_rect __user *clips_ptr;
	struct drm_vmw_rect *clips = NULL;
	struct drm_framebuffer *fb;
	struct vmw_framebuffer *vfb;
	struct vmw_resource *res;
	uint32_t num_clips;
	int ret;

	num_clips = arg->num_clips;
	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

	if (unlikely(num_clips == 0))
		return 0;

	if (clips_ptr == NULL) {
		DRM_ERROR("Variable clips_ptr must be specified.\n");
		ret = -EINVAL;
		goto out_clips;
	}

	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
	if (clips == NULL) {
		DRM_ERROR("Failed to allocate clip rect list.\n");
		ret = -ENOMEM;
		goto out_clips;
	}

	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
	if (ret) {
		DRM_ERROR("Failed to copy clip rects from userspace.\n");
		ret = -EFAULT;
		goto out_no_copy;
	}

	drm_modeset_lock_all(dev);

	fb = drm_framebuffer_lookup(dev, arg->fb_id);
	if (!fb) {
		DRM_ERROR("Invalid framebuffer id.\n");
		ret = -ENOENT;
		goto out_no_fb;
	}
	vfb = vmw_framebuffer_to_vfb(fb);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out_no_ttm_lock;

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
					      user_surface_converter,
					      &res);
	if (ret)
		goto out_no_surface;

	surface = vmw_res_to_srf(res);
	ret = vmw_kms_present(dev_priv, file_priv,
			      vfb, surface, arg->sid,
			      arg->dest_x, arg->dest_y,
			      clips, num_clips);

	/* vmw_user_surface_lookup takes one ref so does new_fb */
	vmw_surface_unreference(&surface);

out_no_surface:
	ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
	drm_framebuffer_unreference(fb);
out_no_fb:
	drm_modeset_unlock_all(dev);
out_no_copy:
	kfree(clips);
out_clips:
	return ret;
}

int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_present_readback_arg *arg =
		(struct drm_vmw_present_readback_arg *)data;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)
		(unsigned long)arg->fence_rep;
	struct drm_vmw_rect __user *clips_ptr;
	struct drm_vmw_rect *clips = NULL;
	struct drm_framebuffer *fb;
	struct vmw_framebuffer *vfb;
	uint32_t num_clips;
	int ret;

	num_clips = arg->num_clips;
	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

	if (unlikely(num_clips == 0))
		return 0;

	if (clips_ptr == NULL) {
		DRM_ERROR("Argument clips_ptr must be specified.\n");
		ret = -EINVAL;
		goto out_clips;
	}

	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
	if (clips == NULL) {
		DRM_ERROR("Failed to allocate clip rect list.\n");
		ret = -ENOMEM;
		goto out_clips;
	}

	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
	if (ret) {
		DRM_ERROR("Failed to copy clip rects from userspace.\n");
		ret = -EFAULT;
		goto out_no_copy;
	}

	drm_modeset_lock_all(dev);

	fb = drm_framebuffer_lookup(dev, arg->fb_id);
	if (!fb) {
		DRM_ERROR("Invalid framebuffer id.\n");
		ret = -ENOENT;
		goto out_no_fb;
	}

	vfb = vmw_framebuffer_to_vfb(fb);
	if (!vfb->dmabuf) {
		DRM_ERROR("Framebuffer not dmabuf backed.\n");
		ret = -EINVAL;
		goto out_no_ttm_lock;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out_no_ttm_lock;

	ret = vmw_kms_readback(dev_priv, file_priv,
			       vfb, user_fence_rep,
			       clips, num_clips);

	ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
	drm_framebuffer_unreference(fb);
out_no_fb:
	drm_modeset_unlock_all(dev);
out_no_copy:
	kfree(clips);
out_clips:
	return ret;
}


/**
 * vmw_fops_poll - wrapper around the drm_poll function
 *
 * @filp: See the linux fops poll documentation.
 * @wait: See the linux fops poll documentation.
 *
 * Wrapper around the drm_poll function that makes sure the device is
 * processing the fifo if drm_poll decides to wait.
 */
unsigned int vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct drm_file *file_priv = filp->private_data;
	struct vmw_private *dev_priv =
		vmw_priv(file_priv->minor->dev);

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	return drm_poll(filp, wait);
}
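
/*
 * Usage sketch (illustrative only, not part of this file): the wrapper
 * above backs the poll(2) entry point of the device file, so a client
 * waiting for DRM events can simply poll the fd and then read the packed
 * struct drm_event records. Plain poll(2)/read(2) usage; the helper name
 * and buffer handling are assumptions.
 */
#if 0	/* user-space side, not built with the kernel module */
#include <poll.h>
#include <unistd.h>

static ssize_t vmw_wait_for_event(int fd, void *buf, size_t len)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int ret = poll(&pfd, 1, -1);	/* blocks; the kernel kicks the host */

	if (ret <= 0)
		return -1;
	return read(fd, buf, len);	/* returns packed struct drm_event records */
}
#endif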


/**
 * vmw_fops_read - wrapper around the drm_read function
 *
 * @filp: See the linux fops read documentation.
 * @buffer: See the linux fops read documentation.
 * @count: See the linux fops read documentation.
 * @offset: See the linux fops read documentation.
 *
 * Wrapper around the drm_read function that makes sure the device is
 * processing the fifo if drm_read decides to wait.
 */
ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
		      size_t count, loff_t *offset)
{
	struct drm_file *file_priv = filp->private_data;
	struct vmw_private *dev_priv =
		vmw_priv(file_priv->minor->dev);

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	return drm_read(filp, buffer, count, offset);
}