v5.4
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <linux/pci.h>
#include <linux/uaccess.h>

#include "qxl_drv.h"
#include "qxl_object.h"

/*
 * TODO: allocating a new gem(in qxl_bo) for each request.
 * This is wasteful since bo's are page aligned.
 */
static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct qxl_device *qdev = dev->dev_private;
	struct drm_qxl_alloc *qxl_alloc = data;
	int ret;
	struct qxl_bo *qobj;
	uint32_t handle;
	u32 domain = QXL_GEM_DOMAIN_VRAM;

	if (qxl_alloc->size == 0) {
		DRM_ERROR("invalid size %d\n", qxl_alloc->size);
		return -EINVAL;
	}
	ret = qxl_gem_object_create_with_handle(qdev, file_priv,
						domain,
						qxl_alloc->size,
						NULL,
						&qobj, &handle);
	if (ret) {
		DRM_ERROR("%s: failed to create gem ret=%d\n",
			  __func__, ret);
		return -ENOMEM;
	}
	qxl_alloc->handle = handle;
	return 0;
}

static int qxl_map_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct qxl_device *qdev = dev->dev_private;
	struct drm_qxl_map *qxl_map = data;

	return qxl_mode_dumb_mmap(file_priv, &qdev->ddev, qxl_map->handle,
				  &qxl_map->offset);
}
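
For orientation, here is a minimal user-space sketch of the allocate-and-map path served by the two handlers above. It assumes an already-open qxl DRM file descriptor and the uapi definitions installed from include/uapi/drm/qxl_drm.h (struct drm_qxl_alloc, struct drm_qxl_map and the DRM_IOCTL_QXL_ALLOC / DRM_IOCTL_QXL_MAP request codes); error handling is pared down to the bare minimum.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/qxl_drm.h>	/* assumed install path of the qxl uapi header */

/* Allocate a gem object of `size` bytes and mmap it through the DRM fd. */
static void *qxl_alloc_and_map(int drm_fd, uint32_t size, uint32_t *handle_out)
{
	struct drm_qxl_alloc alloc;
	struct drm_qxl_map map;
	void *ptr;

	memset(&alloc, 0, sizeof(alloc));
	alloc.size = size;		/* must be non-zero, see qxl_alloc_ioctl() */
	if (ioctl(drm_fd, DRM_IOCTL_QXL_ALLOC, &alloc) < 0)
		return NULL;

	memset(&map, 0, sizeof(map));
	map.handle = alloc.handle;	/* handle returned by the alloc ioctl */
	if (ioctl(drm_fd, DRM_IOCTL_QXL_MAP, &map) < 0)
		return NULL;

	/* map.offset is a fake offset into the DRM fd's mmap space */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   drm_fd, map.offset);
	if (ptr == MAP_FAILED)
		return NULL;

	*handle_out = alloc.handle;
	return ptr;
}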

struct qxl_reloc_info {
	int type;
	struct qxl_bo *dst_bo;
	uint32_t dst_offset;
	struct qxl_bo *src_bo;
	int src_offset;
};

/*
 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
 * are on vram).
 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
 */
static void
apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
{
	void *reloc_page;

	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
	*(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
											      info->src_bo,
											      info->src_offset);
	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
}

static void
apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
{
	uint32_t id = 0;
	void *reloc_page;

	if (info->src_bo && !info->src_bo->is_primary)
		id = info->src_bo->surface_id;

	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
	*(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
}
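
Both helpers patch a location inside an already-validated destination bo: qxl_bo_kmap_atomic_page() maps the page containing dst_offset, and the store lands at the offset within that page. Below is a tiny stand-alone illustration of the mask arithmetic, assuming 4 KiB pages purely for the example.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096u			/* illustrative; the kernel uses PAGE_SIZE */
#define EXAMPLE_PAGE_MASK (~(EXAMPLE_PAGE_SIZE - 1))

int main(void)
{
	uint32_t dst_offset = 0x1234;	/* hypothetical reloc destination offset */
	uint32_t page_base = dst_offset & EXAMPLE_PAGE_MASK;	/* 0x1000: page to map */
	uint32_t in_page = dst_offset & ~EXAMPLE_PAGE_MASK;	/* 0x234: offset inside that page */

	printf("page_base=0x%x in_page=0x%x\n", page_base, in_page);
	return 0;
}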

/* return holding the reference to this object */
static int qxlhw_handle_to_bo(struct drm_file *file_priv, uint64_t handle,
			      struct qxl_release *release, struct qxl_bo **qbo_p)
{
	struct drm_gem_object *gobj;
	struct qxl_bo *qobj;
	int ret;

	gobj = drm_gem_object_lookup(file_priv, handle);
	if (!gobj)
		return -EINVAL;

	qobj = gem_to_qxl_bo(gobj);

	ret = qxl_release_list_add(release, qobj);
	drm_gem_object_put_unlocked(gobj);
	if (ret)
		return ret;

	*qbo_p = qobj;
	return 0;
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full QXLDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * QXLReleaseInfo struct (first XXX bytes)
 */
static int qxl_process_single_command(struct qxl_device *qdev,
				      struct drm_qxl_command *cmd,
				      struct drm_file *file_priv)
{
	struct qxl_reloc_info *reloc_info;
	int release_type;
	struct qxl_release *release;
	struct qxl_bo *cmd_bo;
	void *fb_cmd;
	int i, ret, num_relocs;
	int unwritten;

	switch (cmd->type) {
	case QXL_CMD_DRAW:
		release_type = QXL_RELEASE_DRAWABLE;
		break;
	case QXL_CMD_SURFACE:
	case QXL_CMD_CURSOR:
	default:
		DRM_DEBUG("Only draw commands in execbuffers\n");
		return -EINVAL;
		break;
	}

	if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
		return -EINVAL;

	if (!access_ok(u64_to_user_ptr(cmd->command),
		       cmd->command_size))
		return -EFAULT;

	reloc_info = kmalloc_array(cmd->relocs_num,
				   sizeof(struct qxl_reloc_info), GFP_KERNEL);
	if (!reloc_info)
		return -ENOMEM;

	ret = qxl_alloc_release_reserved(qdev,
					 sizeof(union qxl_release_info) +
					 cmd->command_size,
					 release_type,
					 &release,
					 &cmd_bo);
	if (ret)
		goto out_free_reloc;

	/* TODO copy slow path code from i915 */
	fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
	unwritten = __copy_from_user_inatomic_nocache
		(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
		 u64_to_user_ptr(cmd->command), cmd->command_size);

	{
		struct qxl_drawable *draw = fb_cmd;

		draw->mm_time = qdev->rom->mm_clock;
	}

	qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
	if (unwritten) {
		DRM_ERROR("got unwritten %d\n", unwritten);
		ret = -EFAULT;
		goto out_free_release;
	}

	/* fill out reloc info structs */
	num_relocs = 0;
	for (i = 0; i < cmd->relocs_num; ++i) {
		struct drm_qxl_reloc reloc;
		struct drm_qxl_reloc __user *u = u64_to_user_ptr(cmd->relocs);

		if (copy_from_user(&reloc, u + i, sizeof(reloc))) {
			ret = -EFAULT;
			goto out_free_bos;
		}

		/* add the bos to the list of bos to validate -
		   need to validate first then process relocs? */
		if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
			DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);

			ret = -EINVAL;
			goto out_free_bos;
		}
		reloc_info[i].type = reloc.reloc_type;

		if (reloc.dst_handle) {
			ret = qxlhw_handle_to_bo(file_priv, reloc.dst_handle, release,
						 &reloc_info[i].dst_bo);
			if (ret)
				goto out_free_bos;
			reloc_info[i].dst_offset = reloc.dst_offset;
		} else {
			reloc_info[i].dst_bo = cmd_bo;
			reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
		}
		num_relocs++;

		/* reserve and validate the reloc dst bo */
		if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
			ret = qxlhw_handle_to_bo(file_priv, reloc.src_handle, release,
						 &reloc_info[i].src_bo);
			if (ret)
				goto out_free_bos;
			reloc_info[i].src_offset = reloc.src_offset;
		} else {
			reloc_info[i].src_bo = NULL;
			reloc_info[i].src_offset = 0;
		}
	}

	/* validate all buffers */
	ret = qxl_release_reserve_list(release, false);
	if (ret)
		goto out_free_bos;

	for (i = 0; i < cmd->relocs_num; ++i) {
		if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
			apply_reloc(qdev, &reloc_info[i]);
		else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
			apply_surf_reloc(qdev, &reloc_info[i]);
	}

	ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
	if (ret)
		qxl_release_backoff_reserve_list(release);
	else
		qxl_release_fence_buffer_objects(release);

out_free_bos:
out_free_release:
	if (ret)
		qxl_release_free(qdev, release);
out_free_reloc:
	kfree(reloc_info);
	return ret;
}

static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct qxl_device *qdev = dev->dev_private;
	struct drm_qxl_execbuffer *execbuffer = data;
	struct drm_qxl_command user_cmd;
	int cmd_num;
	int ret;

	for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {

		struct drm_qxl_command __user *commands =
			u64_to_user_ptr(execbuffer->commands);

		if (copy_from_user(&user_cmd, commands + cmd_num,
				       sizeof(user_cmd)))
			return -EFAULT;

		ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
		if (ret)
			return ret;
	}
	return 0;
}
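
A hedged user-space sketch of feeding one draw command through this path. It uses only the uapi field names visible above (struct drm_qxl_execbuffer, struct drm_qxl_command, struct drm_qxl_reloc); DRM_IOCTL_QXL_EXECBUFFER and QXL_RELOC_TYPE_BO are assumed to come from include/uapi/drm/qxl_drm.h, the command type value (QXL_CMD_DRAW) from the spice-protocol headers user space normally builds against, and constructing a real QXLDrawable body is out of scope here.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/qxl_drm.h>	/* assumed install path of the qxl uapi header */

/*
 * Submit `cmd_buf` (a drawable body *without* the leading QXLReleaseInfo, as
 * required by the kernel comment above) with a single bo relocation.
 * `cmd_type` is expected to be QXL_CMD_DRAW from the spice-protocol headers;
 * the kernel rejects everything else.
 */
static int qxl_submit_draw(int drm_fd, uint32_t cmd_type,
			   const void *cmd_buf, uint32_t cmd_size,
			   uint32_t src_handle, uint64_t src_offset,
			   uint64_t dst_offset)
{
	struct drm_qxl_reloc reloc;
	struct drm_qxl_command cmd;
	struct drm_qxl_execbuffer eb;

	memset(&reloc, 0, sizeof(reloc));
	reloc.reloc_type = QXL_RELOC_TYPE_BO;
	reloc.dst_handle = 0;			/* 0: patch inside the command bo itself */
	reloc.dst_offset = dst_offset;
	reloc.src_handle = src_handle;		/* gem handle from DRM_IOCTL_QXL_ALLOC */
	reloc.src_offset = src_offset;

	memset(&cmd, 0, sizeof(cmd));
	cmd.type = cmd_type;
	cmd.command = (uintptr_t)cmd_buf;
	cmd.command_size = cmd_size;		/* must fit in a page minus the release info */
	cmd.relocs = (uintptr_t)&reloc;
	cmd.relocs_num = 1;

	memset(&eb, 0, sizeof(eb));
	eb.commands_num = 1;
	eb.commands = (uintptr_t)&cmd;

	return ioctl(drm_fd, DRM_IOCTL_QXL_EXECBUFFER, &eb);
}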

static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct qxl_device *qdev = dev->dev_private;
	struct drm_qxl_update_area *update_area = data;
	struct qxl_rect area = {.left = update_area->left,
				.top = update_area->top,
				.right = update_area->right,
				.bottom = update_area->bottom};
	int ret;
	struct drm_gem_object *gobj = NULL;
	struct qxl_bo *qobj = NULL;
	struct ttm_operation_ctx ctx = { true, false };

	if (update_area->left >= update_area->right ||
	    update_area->top >= update_area->bottom)
		return -EINVAL;

	gobj = drm_gem_object_lookup(file, update_area->handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_qxl_bo(gobj);

	ret = qxl_bo_reserve(qobj, false);
	if (ret)
		goto out;

	if (!qobj->pin_count) {
		qxl_ttm_placement_from_domain(qobj, qobj->type, false);
		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
		if (unlikely(ret))
			goto out;
	}

	ret = qxl_bo_check_id(qdev, qobj);
	if (ret)
		goto out2;
	if (!qobj->surface_id)
		DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
	ret = qxl_io_update_area(qdev, qobj, &area);

out2:
	qxl_bo_unreserve(qobj);

out:
	drm_gem_object_put_unlocked(gobj);
	return ret;
}
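
A minimal sketch of flushing a dirty rectangle on a surface bo through this ioctl, assuming DRM_IOCTL_QXL_UPDATE_AREA and struct drm_qxl_update_area from the qxl uapi header; the field names match the ones dereferenced above, and the rectangle must be non-empty or the handler returns -EINVAL.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/qxl_drm.h>	/* assumed install path of the qxl uapi header */

/* Ask the device to update (flush) a rectangle of the surface bo `handle`. */
static int qxl_update_rect(int drm_fd, uint32_t handle,
			   uint32_t left, uint32_t top,
			   uint32_t right, uint32_t bottom)
{
	struct drm_qxl_update_area ua;

	memset(&ua, 0, sizeof(ua));
	ua.handle = handle;
	ua.left = left;		/* needs left < right and top < bottom */
	ua.top = top;
	ua.right = right;
	ua.bottom = bottom;

	return ioctl(drm_fd, DRM_IOCTL_QXL_UPDATE_AREA, &ua);
}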

static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct qxl_device *qdev = dev->dev_private;
	struct drm_qxl_getparam *param = data;

	switch (param->param) {
	case QXL_PARAM_NUM_SURFACES:
		param->value = qdev->rom->n_surfaces;
		break;
	case QXL_PARAM_MAX_RELOCS:
		param->value = QXL_MAX_RES;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
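
Querying a device parameter is a single read-back ioctl. The sketch below assumes DRM_IOCTL_QXL_GETPARAM and QXL_PARAM_NUM_SURFACES from the qxl uapi header.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/qxl_drm.h>	/* assumed install path of the qxl uapi header */

/* Return the number of surfaces the device supports, or -1 on error. */
static int64_t qxl_num_surfaces(int drm_fd)
{
	struct drm_qxl_getparam gp;

	memset(&gp, 0, sizeof(gp));
	gp.param = QXL_PARAM_NUM_SURFACES;	/* value is filled in by the kernel */
	if (ioctl(drm_fd, DRM_IOCTL_QXL_GETPARAM, &gp) < 0)
		return -1;
	return gp.value;
}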

static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct qxl_device *qdev = dev->dev_private;
	struct drm_qxl_clientcap *param = data;
	int byte, idx;

	byte = param->index / 8;
	idx = param->index % 8;

	if (dev->pdev->revision < 4)
		return -ENOSYS;

	if (byte >= 58)
		return -ENOSYS;

	if (qdev->rom->client_capabilities[byte] & (1 << idx))
		return 0;
	return -ENOSYS;
}
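
The capability check maps a bit index onto the 58-byte client_capabilities array in the device ROM and answers only through the return code. A user-space probe would look roughly like this, assuming DRM_IOCTL_QXL_CLIENTCAP and struct drm_qxl_clientcap from the qxl uapi header.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/qxl_drm.h>	/* assumed install path of the qxl uapi header */

/* Returns 1 if the device advertises capability bit `index`, 0 otherwise. */
static int qxl_has_client_cap(int drm_fd, uint32_t index)
{
	struct drm_qxl_clientcap cap;

	memset(&cap, 0, sizeof(cap));
	cap.index = index;	/* bit number; the kernel splits it into byte/bit */

	/* 0 means the bit is set; -ENOSYS means unset or unsupported revision */
	return ioctl(drm_fd, DRM_IOCTL_QXL_CLIENTCAP, &cap) == 0;
}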

static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct qxl_device *qdev = dev->dev_private;
	struct drm_qxl_alloc_surf *param = data;
	struct qxl_bo *qobj;
	int handle;
	int ret;
	int size, actual_stride;
	struct qxl_surface surf;

	/* work out size allocate bo with handle */
	actual_stride = param->stride < 0 ? -param->stride : param->stride;
	size = actual_stride * param->height + actual_stride;

	surf.format = param->format;
	surf.width = param->width;
	surf.height = param->height;
	surf.stride = param->stride;
	surf.data = 0;

	ret = qxl_gem_object_create_with_handle(qdev, file,
						QXL_GEM_DOMAIN_SURFACE,
						size,
						&surf,
						&qobj, &handle);
	if (ret) {
		DRM_ERROR("%s: failed to create gem ret=%d\n",
			  __func__, ret);
		return -ENOMEM;
	} else
		param->handle = handle;
	return ret;
}
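
Creating an off-screen surface from user space mirrors the fields copied into struct qxl_surface above. The sketch assumes DRM_IOCTL_QXL_ALLOC_SURF and struct drm_qxl_alloc_surf from the qxl uapi header and leaves the (spice-defined) surface format value to the caller.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/qxl_drm.h>	/* assumed install path of the qxl uapi header */

/* Allocate a surface bo; on success returns 0 and stores the gem handle. */
static int qxl_create_surface(int drm_fd, uint32_t format,
			      uint32_t width, uint32_t height, int32_t stride,
			      uint32_t *handle_out)
{
	struct drm_qxl_alloc_surf as;

	memset(&as, 0, sizeof(as));
	as.format = format;	/* spice surface format value chosen by the caller */
	as.width = width;
	as.height = height;
	as.stride = stride;	/* may be negative for bottom-up layouts */

	if (ioctl(drm_fd, DRM_IOCTL_QXL_ALLOC_SURF, &as) < 0)
		return -1;

	*handle_out = as.handle;
	return 0;
}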

const struct drm_ioctl_desc qxl_ioctls[] = {
	DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH),

	DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH),

	DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
							DRM_AUTH),
	DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
							DRM_AUTH),
	DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
							DRM_AUTH),
	DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
							DRM_AUTH),

	DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
			  DRM_AUTH),
};

int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls);
v6.2
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <linux/pci.h>
#include <linux/uaccess.h>

#include "qxl_drv.h"
#include "qxl_object.h"

/*
 * TODO: allocating a new gem(in qxl_bo) for each request.
 * This is wasteful since bo's are page aligned.
 */
int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qxl_device *qdev = to_qxl(dev);
	struct drm_qxl_alloc *qxl_alloc = data;
	int ret;
	struct qxl_bo *qobj;
	uint32_t handle;
	u32 domain = QXL_GEM_DOMAIN_VRAM;

	if (qxl_alloc->size == 0) {
		DRM_ERROR("invalid size %d\n", qxl_alloc->size);
		return -EINVAL;
	}
	ret = qxl_gem_object_create_with_handle(qdev, file_priv,
						domain,
						qxl_alloc->size,
						NULL,
						&qobj, &handle);
	if (ret) {
		DRM_ERROR("%s: failed to create gem ret=%d\n",
			  __func__, ret);
		return -ENOMEM;
	}
	qxl_alloc->handle = handle;
	return 0;
}

int qxl_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qxl_device *qdev = to_qxl(dev);
	struct drm_qxl_map *qxl_map = data;

	return drm_gem_ttm_dumb_map_offset(file_priv, &qdev->ddev, qxl_map->handle,
					   &qxl_map->offset);
}

struct qxl_reloc_info {
	int type;
	struct qxl_bo *dst_bo;
	uint32_t dst_offset;
	struct qxl_bo *src_bo;
	int src_offset;
};

/*
 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
 * are on vram).
 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
 */
static void
apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
{
	void *reloc_page;

	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
	*(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
											      info->src_bo,
											      info->src_offset);
	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
}

static void
apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
{
	uint32_t id = 0;
	void *reloc_page;

	if (info->src_bo && !info->src_bo->is_primary)
		id = info->src_bo->surface_id;

	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
	*(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
}

/* return holding the reference to this object */
static int qxlhw_handle_to_bo(struct drm_file *file_priv, uint64_t handle,
			      struct qxl_release *release, struct qxl_bo **qbo_p)
{
	struct drm_gem_object *gobj;
	struct qxl_bo *qobj;
	int ret;

	gobj = drm_gem_object_lookup(file_priv, handle);
	if (!gobj)
		return -EINVAL;

	qobj = gem_to_qxl_bo(gobj);

	ret = qxl_release_list_add(release, qobj);
	drm_gem_object_put(gobj);
	if (ret)
		return ret;

	*qbo_p = qobj;
	return 0;
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full QXLDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * QXLReleaseInfo struct (first XXX bytes)
 */
static int qxl_process_single_command(struct qxl_device *qdev,
				      struct drm_qxl_command *cmd,
				      struct drm_file *file_priv)
{
	struct qxl_reloc_info *reloc_info;
	int release_type;
	struct qxl_release *release;
	struct qxl_bo *cmd_bo;
	void *fb_cmd;
	int i, ret, num_relocs;
	int unwritten;

	switch (cmd->type) {
	case QXL_CMD_DRAW:
		release_type = QXL_RELEASE_DRAWABLE;
		break;
	case QXL_CMD_SURFACE:
	case QXL_CMD_CURSOR:
	default:
		DRM_DEBUG("Only draw commands in execbuffers\n");
		return -EINVAL;
	}

	if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
		return -EINVAL;

	if (!access_ok(u64_to_user_ptr(cmd->command),
		       cmd->command_size))
		return -EFAULT;

	reloc_info = kmalloc_array(cmd->relocs_num,
				   sizeof(struct qxl_reloc_info), GFP_KERNEL);
	if (!reloc_info)
		return -ENOMEM;

	ret = qxl_alloc_release_reserved(qdev,
					 sizeof(union qxl_release_info) +
					 cmd->command_size,
					 release_type,
					 &release,
					 &cmd_bo);
	if (ret)
		goto out_free_reloc;

	/* TODO copy slow path code from i915 */
	fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
	unwritten = __copy_from_user_inatomic_nocache
		(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
		 u64_to_user_ptr(cmd->command), cmd->command_size);

	{
		struct qxl_drawable *draw = fb_cmd;

		draw->mm_time = qdev->rom->mm_clock;
	}

	qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
	if (unwritten) {
		DRM_ERROR("got unwritten %d\n", unwritten);
		ret = -EFAULT;
		goto out_free_release;
	}

	/* fill out reloc info structs */
	num_relocs = 0;
	for (i = 0; i < cmd->relocs_num; ++i) {
		struct drm_qxl_reloc reloc;
		struct drm_qxl_reloc __user *u = u64_to_user_ptr(cmd->relocs);

		if (copy_from_user(&reloc, u + i, sizeof(reloc))) {
			ret = -EFAULT;
			goto out_free_bos;
		}

		/* add the bos to the list of bos to validate -
		   need to validate first then process relocs? */
		if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
			DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);

			ret = -EINVAL;
			goto out_free_bos;
		}
		reloc_info[i].type = reloc.reloc_type;

		if (reloc.dst_handle) {
			ret = qxlhw_handle_to_bo(file_priv, reloc.dst_handle, release,
						 &reloc_info[i].dst_bo);
			if (ret)
				goto out_free_bos;
			reloc_info[i].dst_offset = reloc.dst_offset;
		} else {
			reloc_info[i].dst_bo = cmd_bo;
			reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
		}
		num_relocs++;

		/* reserve and validate the reloc dst bo */
		if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
			ret = qxlhw_handle_to_bo(file_priv, reloc.src_handle, release,
						 &reloc_info[i].src_bo);
			if (ret)
				goto out_free_bos;
			reloc_info[i].src_offset = reloc.src_offset;
		} else {
			reloc_info[i].src_bo = NULL;
			reloc_info[i].src_offset = 0;
		}
	}

	/* validate all buffers */
	ret = qxl_release_reserve_list(release, false);
	if (ret)
		goto out_free_bos;

	for (i = 0; i < cmd->relocs_num; ++i) {
		if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
			apply_reloc(qdev, &reloc_info[i]);
		else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
			apply_surf_reloc(qdev, &reloc_info[i]);
	}

	qxl_release_fence_buffer_objects(release);
	ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);

out_free_bos:
out_free_release:
	if (ret)
		qxl_release_free(qdev, release);
out_free_reloc:
	kfree(reloc_info);
	return ret;
}

int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qxl_device *qdev = to_qxl(dev);
	struct drm_qxl_execbuffer *execbuffer = data;
	struct drm_qxl_command user_cmd;
	int cmd_num;
	int ret;

	for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {

		struct drm_qxl_command __user *commands =
			u64_to_user_ptr(execbuffer->commands);

		if (copy_from_user(&user_cmd, commands + cmd_num,
				       sizeof(user_cmd)))
			return -EFAULT;

		ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
		if (ret)
			return ret;
	}
	return 0;
}

int qxl_update_area_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct qxl_device *qdev = to_qxl(dev);
	struct drm_qxl_update_area *update_area = data;
	struct qxl_rect area = {.left = update_area->left,
				.top = update_area->top,
				.right = update_area->right,
				.bottom = update_area->bottom};
	int ret;
	struct drm_gem_object *gobj = NULL;
	struct qxl_bo *qobj = NULL;
	struct ttm_operation_ctx ctx = { true, false };

	if (update_area->left >= update_area->right ||
	    update_area->top >= update_area->bottom)
		return -EINVAL;

	gobj = drm_gem_object_lookup(file, update_area->handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_qxl_bo(gobj);

	ret = qxl_bo_reserve(qobj);
	if (ret)
		goto out;

	if (!qobj->tbo.pin_count) {
		qxl_ttm_placement_from_domain(qobj, qobj->type);
		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
		if (unlikely(ret))
			goto out;
	}

	ret = qxl_bo_check_id(qdev, qobj);
	if (ret)
		goto out2;
	if (!qobj->surface_id)
		DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
	ret = qxl_io_update_area(qdev, qobj, &area);

out2:
	qxl_bo_unreserve(qobj);

out:
	drm_gem_object_put(gobj);
	return ret;
}

int qxl_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qxl_device *qdev = to_qxl(dev);
	struct drm_qxl_getparam *param = data;

	switch (param->param) {
	case QXL_PARAM_NUM_SURFACES:
		param->value = qdev->rom->n_surfaces;
		break;
	case QXL_PARAM_MAX_RELOCS:
		param->value = QXL_MAX_RES;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int qxl_clientcap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qxl_device *qdev = to_qxl(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct drm_qxl_clientcap *param = data;
	int byte, idx;

	byte = param->index / 8;
	idx = param->index % 8;

	if (pdev->revision < 4)
		return -ENOSYS;

	if (byte >= 58)
		return -ENOSYS;

	if (qdev->rom->client_capabilities[byte] & (1 << idx))
		return 0;
	return -ENOSYS;
}

int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct qxl_device *qdev = to_qxl(dev);
	struct drm_qxl_alloc_surf *param = data;
	struct qxl_bo *qobj;
	int handle;
	int ret;
	int size, actual_stride;
	struct qxl_surface surf;

	/* work out size allocate bo with handle */
	actual_stride = param->stride < 0 ? -param->stride : param->stride;
	size = actual_stride * param->height + actual_stride;

	surf.format = param->format;
	surf.width = param->width;
	surf.height = param->height;
	surf.stride = param->stride;
	surf.data = 0;

	ret = qxl_gem_object_create_with_handle(qdev, file,
						QXL_GEM_DOMAIN_SURFACE,
						size,
						&surf,
						&qobj, &handle);
	if (ret) {
		DRM_ERROR("%s: failed to create gem ret=%d\n",
			  __func__, ret);
		return -ENOMEM;
	} else
		param->handle = handle;
	return ret;
}