Loading...
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26#include <linux/pci.h>
27#include <linux/uaccess.h>
28
29#include "qxl_drv.h"
30#include "qxl_object.h"
31
32/*
33 * TODO: allocating a new gem(in qxl_bo) for each request.
34 * This is wasteful since bo's are page aligned.
35 */
36int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
37{
38 struct qxl_device *qdev = to_qxl(dev);
39 struct drm_qxl_alloc *qxl_alloc = data;
40 int ret;
41 uint32_t handle;
42 u32 domain = QXL_GEM_DOMAIN_VRAM;
43
44 if (qxl_alloc->size == 0) {
45 DRM_ERROR("invalid size %d\n", qxl_alloc->size);
46 return -EINVAL;
47 }
48 ret = qxl_gem_object_create_with_handle(qdev, file_priv,
49 domain,
50 qxl_alloc->size,
51 NULL,
52 NULL, &handle);
53 if (ret) {
54 DRM_ERROR("%s: failed to create gem ret=%d\n",
55 __func__, ret);
56 return -ENOMEM;
57 }
58 qxl_alloc->handle = handle;
59 return 0;
60}
61
62int qxl_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
63{
64 struct qxl_device *qdev = to_qxl(dev);
65 struct drm_qxl_map *qxl_map = data;
66
67 return drm_gem_ttm_dumb_map_offset(file_priv, &qdev->ddev, qxl_map->handle,
68 &qxl_map->offset);
69}
70
/*
 * One relocation entry gathered from userspace before being applied.
 * apply_reloc()/apply_surf_reloc() patch dst_bo at dst_offset using
 * src_bo/src_offset according to type.
 */
struct qxl_reloc_info {
	int type;		/* QXL_RELOC_TYPE_BO or QXL_RELOC_TYPE_SURF */
	struct qxl_bo *dst_bo;	/* bo being patched (the command bo if no dst handle) */
	uint32_t dst_offset;	/* byte offset of the patched slot inside dst_bo */
	struct qxl_bo *src_bo;	/* bo the slot refers to; may be NULL for surf relocs */
	int src_offset;		/* byte offset inside src_bo */
};
78
79/*
80 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
81 * are on vram).
82 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
83 */
84static void
85apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
86{
87 void *reloc_page;
88
89 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
90 *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
91 info->src_bo,
92 info->src_offset);
93 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
94}
95
96static void
97apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
98{
99 uint32_t id = 0;
100 void *reloc_page;
101
102 if (info->src_bo && !info->src_bo->is_primary)
103 id = info->src_bo->surface_id;
104
105 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
106 *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
107 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
108}
109
110/* return holding the reference to this object */
111static int qxlhw_handle_to_bo(struct drm_file *file_priv, uint64_t handle,
112 struct qxl_release *release, struct qxl_bo **qbo_p)
113{
114 struct drm_gem_object *gobj;
115 struct qxl_bo *qobj;
116 int ret;
117
118 gobj = drm_gem_object_lookup(file_priv, handle);
119 if (!gobj)
120 return -EINVAL;
121
122 qobj = gem_to_qxl_bo(gobj);
123
124 ret = qxl_release_list_add(release, qobj);
125 drm_gem_object_put(gobj);
126 if (ret)
127 return ret;
128
129 *qbo_p = qobj;
130 return 0;
131}
132
133/*
134 * Usage of execbuffer:
135 * Relocations need to take into account the full QXLDrawable size.
136 * However, the command as passed from user space must *not* contain the initial
137 * QXLReleaseInfo struct (first XXX bytes)
138 */
/*
 * Copy one userspace drm_qxl_command into a freshly reserved release bo,
 * resolve and apply its relocations, then push the release onto the
 * device command ring.  Only QXL_CMD_DRAW is accepted; the command body
 * plus the leading qxl_release_info must fit inside a single page.
 * Returns 0 on success or a negative errno.
 */
static int qxl_process_single_command(struct qxl_device *qdev,
				      struct drm_qxl_command *cmd,
				      struct drm_file *file_priv)
{
	struct qxl_reloc_info *reloc_info;
	int release_type;
	struct qxl_release *release;
	struct qxl_bo *cmd_bo;	/* bo holding release info + command body */
	void *fb_cmd;
	int i, ret;
	int unwritten;		/* bytes the atomic user copy failed to fetch */

	switch (cmd->type) {
	case QXL_CMD_DRAW:
		release_type = QXL_RELEASE_DRAWABLE;
		break;
	case QXL_CMD_SURFACE:
	case QXL_CMD_CURSOR:
	default:
		DRM_DEBUG("Only draw commands in execbuffers\n");
		return -EINVAL;
	}

	/* release info + command must fit in the single page kmapped below */
	if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
		return -EINVAL;

	/* pre-flight range check so the no-fault atomic copy below is legal */
	if (!access_ok(u64_to_user_ptr(cmd->command),
		       cmd->command_size))
		return -EFAULT;

	reloc_info = kmalloc_array(cmd->relocs_num,
				   sizeof(struct qxl_reloc_info), GFP_KERNEL);
	if (!reloc_info)
		return -ENOMEM;

	ret = qxl_alloc_release_reserved(qdev,
					 sizeof(union qxl_release_info) +
					 cmd->command_size,
					 release_type,
					 &release,
					 &cmd_bo);
	if (ret)
		goto out_free_reloc;

	/* TODO copy slow path code from i915 */
	fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
	unwritten = __copy_from_user_inatomic_nocache
		(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
		 u64_to_user_ptr(cmd->command), cmd->command_size);

	{
		struct qxl_drawable *draw = fb_cmd;

		/* stamp the drawable with the device's current mm clock */
		draw->mm_time = qdev->rom->mm_clock;
	}

	/* unmap before checking, so the error path needs no extra unmap */
	qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
	if (unwritten) {
		DRM_ERROR("got unwritten %d\n", unwritten);
		ret = -EFAULT;
		goto out_free_release;
	}

	/* fill out reloc info structs */
	for (i = 0; i < cmd->relocs_num; ++i) {
		struct drm_qxl_reloc reloc;
		struct drm_qxl_reloc __user *u = u64_to_user_ptr(cmd->relocs);

		if (copy_from_user(&reloc, u + i, sizeof(reloc))) {
			ret = -EFAULT;
			goto out_free_bos;
		}

		/* add the bos to the list of bos to validate -
		   need to validate first then process relocs? */
		if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
			DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);

			ret = -EINVAL;
			goto out_free_bos;
		}
		reloc_info[i].type = reloc.reloc_type;

		if (reloc.dst_handle) {
			ret = qxlhw_handle_to_bo(file_priv, reloc.dst_handle, release,
						 &reloc_info[i].dst_bo);
			if (ret)
				goto out_free_bos;
			reloc_info[i].dst_offset = reloc.dst_offset;
		} else {
			/* no dst handle: the command bo itself gets patched */
			reloc_info[i].dst_bo = cmd_bo;
			reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
		}

		/* reserve and validate the reloc dst bo */
		if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
			ret = qxlhw_handle_to_bo(file_priv, reloc.src_handle, release,
						 &reloc_info[i].src_bo);
			if (ret)
				goto out_free_bos;
			reloc_info[i].src_offset = reloc.src_offset;
		} else {
			/* surf reloc without a source resolves to surface id 0 */
			reloc_info[i].src_bo = NULL;
			reloc_info[i].src_offset = 0;
		}
	}

	/* validate all buffers */
	ret = qxl_release_reserve_list(release, false);
	if (ret)
		goto out_free_bos;

	for (i = 0; i < cmd->relocs_num; ++i) {
		if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
			apply_reloc(qdev, &reloc_info[i]);
		else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
			apply_surf_reloc(qdev, &reloc_info[i]);
	}

	/* fence the bos, then hand the release to the hardware ring */
	qxl_release_fence_buffer_objects(release);
	ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);

out_free_bos:
out_free_release:
	if (ret)
		qxl_release_free(qdev, release);
out_free_reloc:
	kfree(reloc_info);
	return ret;
}
269
270int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
271{
272 struct qxl_device *qdev = to_qxl(dev);
273 struct drm_qxl_execbuffer *execbuffer = data;
274 struct drm_qxl_command user_cmd;
275 int cmd_num;
276 int ret;
277
278 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
279
280 struct drm_qxl_command __user *commands =
281 u64_to_user_ptr(execbuffer->commands);
282
283 if (copy_from_user(&user_cmd, commands + cmd_num,
284 sizeof(user_cmd)))
285 return -EFAULT;
286
287 ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
288 if (ret)
289 return ret;
290 }
291 return 0;
292}
293
/*
 * QXL_UPDATE_AREA ioctl: ask the device to update (flush) a rectangle of
 * a surface bo.  The bo is reserved, validated back into placement if it
 * is not pinned, checked for a surface id, and then the device io is
 * issued via qxl_io_update_area().
 */
int qxl_update_area_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct qxl_device *qdev = to_qxl(dev);
	struct drm_qxl_update_area *update_area = data;
	struct qxl_rect area = {.left = update_area->left,
				.top = update_area->top,
				.right = update_area->right,
				.bottom = update_area->bottom};
	int ret;
	struct drm_gem_object *gobj = NULL;
	struct qxl_bo *qobj = NULL;
	/* ttm_operation_ctx: interruptible = true, no_wait_gpu = false */
	struct ttm_operation_ctx ctx = { true, false };

	/* reject empty or inverted rectangles */
	if (update_area->left >= update_area->right ||
	    update_area->top >= update_area->bottom)
		return -EINVAL;

	gobj = drm_gem_object_lookup(file, update_area->handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_qxl_bo(gobj);

	ret = qxl_bo_reserve(qobj);
	if (ret)
		goto out;

	/* an unpinned bo may have been evicted; move it back into place */
	if (!qobj->tbo.pin_count) {
		qxl_ttm_placement_from_domain(qobj, qobj->type);
		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
		if (unlikely(ret))
			goto out;
	}

	/* make sure the bo has a hardware surface id before the io */
	ret = qxl_bo_check_id(qdev, qobj);
	if (ret)
		goto out2;
	if (!qobj->surface_id)
		DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
	ret = qxl_io_update_area(qdev, qobj, &area);

out2:
	qxl_bo_unreserve(qobj);

out:
	drm_gem_object_put(gobj);
	return ret;
}
342
343int qxl_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
344{
345 struct qxl_device *qdev = to_qxl(dev);
346 struct drm_qxl_getparam *param = data;
347
348 switch (param->param) {
349 case QXL_PARAM_NUM_SURFACES:
350 param->value = qdev->rom->n_surfaces;
351 break;
352 case QXL_PARAM_MAX_RELOCS:
353 param->value = QXL_MAX_RES;
354 break;
355 default:
356 return -EINVAL;
357 }
358 return 0;
359}
360
361int qxl_clientcap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
362{
363 struct qxl_device *qdev = to_qxl(dev);
364 struct pci_dev *pdev = to_pci_dev(dev->dev);
365 struct drm_qxl_clientcap *param = data;
366 int byte, idx;
367
368 byte = param->index / 8;
369 idx = param->index % 8;
370
371 if (pdev->revision < 4)
372 return -ENOSYS;
373
374 if (byte >= 58)
375 return -ENOSYS;
376
377 if (qdev->rom->client_capabilities[byte] & (1 << idx))
378 return 0;
379 return -ENOSYS;
380}
381
382int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
383{
384 struct qxl_device *qdev = to_qxl(dev);
385 struct drm_qxl_alloc_surf *param = data;
386 int handle;
387 int ret;
388 int size, actual_stride;
389 struct qxl_surface surf;
390
391 /* work out size allocate bo with handle */
392 actual_stride = param->stride < 0 ? -param->stride : param->stride;
393 size = actual_stride * param->height + actual_stride;
394
395 surf.format = param->format;
396 surf.width = param->width;
397 surf.height = param->height;
398 surf.stride = param->stride;
399 surf.data = 0;
400
401 ret = qxl_gem_object_create_with_handle(qdev, file,
402 QXL_GEM_DOMAIN_SURFACE,
403 size,
404 &surf,
405 NULL, &handle);
406 if (ret) {
407 DRM_ERROR("%s: failed to create gem ret=%d\n",
408 __func__, ret);
409 return -ENOMEM;
410 } else
411 param->handle = handle;
412 return ret;
413}
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26#include <linux/pci.h>
27#include <linux/uaccess.h>
28
29#include "qxl_drv.h"
30#include "qxl_object.h"
31
32/*
33 * TODO: allocating a new gem(in qxl_bo) for each request.
34 * This is wasteful since bo's are page aligned.
35 */
/*
 * QXL_ALLOC ioctl: create a GEM bo in VRAM and hand its handle back to
 * userspace via qxl_alloc->handle.
 *
 * NOTE(review): qobj is filled in by the helper but never used, and the
 * handle holds the only long-lived reference; upstream later switched
 * this call to pass NULL for the bo pointer to close a use-after-free
 * window (commit c611589b4259 "drm/qxl: fix UAF on handle creation").
 * Verify the helper's contract before relying on qobj here.
 */
static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct qxl_device *qdev = to_qxl(dev);
	struct drm_qxl_alloc *qxl_alloc = data;
	int ret;
	struct qxl_bo *qobj;	/* written by the helper, unused below */
	uint32_t handle;
	u32 domain = QXL_GEM_DOMAIN_VRAM;

	/* zero-sized allocations are rejected outright */
	if (qxl_alloc->size == 0) {
		DRM_ERROR("invalid size %d\n", qxl_alloc->size);
		return -EINVAL;
	}
	ret = qxl_gem_object_create_with_handle(qdev, file_priv,
						domain,
						qxl_alloc->size,
						NULL,
						&qobj, &handle);
	if (ret) {
		DRM_ERROR("%s: failed to create gem ret=%d\n",
			  __func__, ret);
		/* note: the helper's error code is masked to -ENOMEM */
		return -ENOMEM;
	}
	qxl_alloc->handle = handle;
	return 0;
}
63
64static int qxl_map_ioctl(struct drm_device *dev, void *data,
65 struct drm_file *file_priv)
66{
67 struct qxl_device *qdev = to_qxl(dev);
68 struct drm_qxl_map *qxl_map = data;
69
70 return drm_gem_ttm_dumb_map_offset(file_priv, &qdev->ddev, qxl_map->handle,
71 &qxl_map->offset);
72}
73
/*
 * One relocation entry gathered from userspace before being applied.
 * apply_reloc()/apply_surf_reloc() patch dst_bo at dst_offset using
 * src_bo/src_offset according to type.
 */
struct qxl_reloc_info {
	int type;		/* QXL_RELOC_TYPE_BO or QXL_RELOC_TYPE_SURF */
	struct qxl_bo *dst_bo;	/* bo being patched (the command bo if no dst handle) */
	uint32_t dst_offset;	/* byte offset of the patched slot inside dst_bo */
	struct qxl_bo *src_bo;	/* bo the slot refers to; may be NULL for surf relocs */
	int src_offset;		/* byte offset inside src_bo */
};
81
82/*
83 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
84 * are on vram).
85 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
86 */
87static void
88apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
89{
90 void *reloc_page;
91
92 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
93 *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
94 info->src_bo,
95 info->src_offset);
96 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
97}
98
99static void
100apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
101{
102 uint32_t id = 0;
103 void *reloc_page;
104
105 if (info->src_bo && !info->src_bo->is_primary)
106 id = info->src_bo->surface_id;
107
108 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
109 *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
110 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
111}
112
113/* return holding the reference to this object */
114static int qxlhw_handle_to_bo(struct drm_file *file_priv, uint64_t handle,
115 struct qxl_release *release, struct qxl_bo **qbo_p)
116{
117 struct drm_gem_object *gobj;
118 struct qxl_bo *qobj;
119 int ret;
120
121 gobj = drm_gem_object_lookup(file_priv, handle);
122 if (!gobj)
123 return -EINVAL;
124
125 qobj = gem_to_qxl_bo(gobj);
126
127 ret = qxl_release_list_add(release, qobj);
128 drm_gem_object_put(gobj);
129 if (ret)
130 return ret;
131
132 *qbo_p = qobj;
133 return 0;
134}
135
136/*
137 * Usage of execbuffer:
138 * Relocations need to take into account the full QXLDrawable size.
139 * However, the command as passed from user space must *not* contain the initial
140 * QXLReleaseInfo struct (first XXX bytes)
141 */
142static int qxl_process_single_command(struct qxl_device *qdev,
143 struct drm_qxl_command *cmd,
144 struct drm_file *file_priv)
145{
146 struct qxl_reloc_info *reloc_info;
147 int release_type;
148 struct qxl_release *release;
149 struct qxl_bo *cmd_bo;
150 void *fb_cmd;
151 int i, ret, num_relocs;
152 int unwritten;
153
154 switch (cmd->type) {
155 case QXL_CMD_DRAW:
156 release_type = QXL_RELEASE_DRAWABLE;
157 break;
158 case QXL_CMD_SURFACE:
159 case QXL_CMD_CURSOR:
160 default:
161 DRM_DEBUG("Only draw commands in execbuffers\n");
162 return -EINVAL;
163 }
164
165 if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
166 return -EINVAL;
167
168 if (!access_ok(u64_to_user_ptr(cmd->command),
169 cmd->command_size))
170 return -EFAULT;
171
172 reloc_info = kmalloc_array(cmd->relocs_num,
173 sizeof(struct qxl_reloc_info), GFP_KERNEL);
174 if (!reloc_info)
175 return -ENOMEM;
176
177 ret = qxl_alloc_release_reserved(qdev,
178 sizeof(union qxl_release_info) +
179 cmd->command_size,
180 release_type,
181 &release,
182 &cmd_bo);
183 if (ret)
184 goto out_free_reloc;
185
186 /* TODO copy slow path code from i915 */
187 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
188 unwritten = __copy_from_user_inatomic_nocache
189 (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
190 u64_to_user_ptr(cmd->command), cmd->command_size);
191
192 {
193 struct qxl_drawable *draw = fb_cmd;
194
195 draw->mm_time = qdev->rom->mm_clock;
196 }
197
198 qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
199 if (unwritten) {
200 DRM_ERROR("got unwritten %d\n", unwritten);
201 ret = -EFAULT;
202 goto out_free_release;
203 }
204
205 /* fill out reloc info structs */
206 num_relocs = 0;
207 for (i = 0; i < cmd->relocs_num; ++i) {
208 struct drm_qxl_reloc reloc;
209 struct drm_qxl_reloc __user *u = u64_to_user_ptr(cmd->relocs);
210
211 if (copy_from_user(&reloc, u + i, sizeof(reloc))) {
212 ret = -EFAULT;
213 goto out_free_bos;
214 }
215
216 /* add the bos to the list of bos to validate -
217 need to validate first then process relocs? */
218 if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
219 DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);
220
221 ret = -EINVAL;
222 goto out_free_bos;
223 }
224 reloc_info[i].type = reloc.reloc_type;
225
226 if (reloc.dst_handle) {
227 ret = qxlhw_handle_to_bo(file_priv, reloc.dst_handle, release,
228 &reloc_info[i].dst_bo);
229 if (ret)
230 goto out_free_bos;
231 reloc_info[i].dst_offset = reloc.dst_offset;
232 } else {
233 reloc_info[i].dst_bo = cmd_bo;
234 reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
235 }
236 num_relocs++;
237
238 /* reserve and validate the reloc dst bo */
239 if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
240 ret = qxlhw_handle_to_bo(file_priv, reloc.src_handle, release,
241 &reloc_info[i].src_bo);
242 if (ret)
243 goto out_free_bos;
244 reloc_info[i].src_offset = reloc.src_offset;
245 } else {
246 reloc_info[i].src_bo = NULL;
247 reloc_info[i].src_offset = 0;
248 }
249 }
250
251 /* validate all buffers */
252 ret = qxl_release_reserve_list(release, false);
253 if (ret)
254 goto out_free_bos;
255
256 for (i = 0; i < cmd->relocs_num; ++i) {
257 if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
258 apply_reloc(qdev, &reloc_info[i]);
259 else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
260 apply_surf_reloc(qdev, &reloc_info[i]);
261 }
262
263 qxl_release_fence_buffer_objects(release);
264 ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
265
266out_free_bos:
267out_free_release:
268 if (ret)
269 qxl_release_free(qdev, release);
270out_free_reloc:
271 kfree(reloc_info);
272 return ret;
273}
274
275static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
276 struct drm_file *file_priv)
277{
278 struct qxl_device *qdev = to_qxl(dev);
279 struct drm_qxl_execbuffer *execbuffer = data;
280 struct drm_qxl_command user_cmd;
281 int cmd_num;
282 int ret;
283
284 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
285
286 struct drm_qxl_command __user *commands =
287 u64_to_user_ptr(execbuffer->commands);
288
289 if (copy_from_user(&user_cmd, commands + cmd_num,
290 sizeof(user_cmd)))
291 return -EFAULT;
292
293 ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
294 if (ret)
295 return ret;
296 }
297 return 0;
298}
299
/*
 * QXL_UPDATE_AREA ioctl: ask the device to update (flush) a rectangle of
 * a surface bo.  The bo is reserved, validated back into placement if it
 * is not pinned, checked for a surface id, and then the device io is
 * issued via qxl_io_update_area().
 */
static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct qxl_device *qdev = to_qxl(dev);
	struct drm_qxl_update_area *update_area = data;
	struct qxl_rect area = {.left = update_area->left,
				.top = update_area->top,
				.right = update_area->right,
				.bottom = update_area->bottom};
	int ret;
	struct drm_gem_object *gobj = NULL;
	struct qxl_bo *qobj = NULL;
	/* ttm_operation_ctx: interruptible = true, no_wait_gpu = false */
	struct ttm_operation_ctx ctx = { true, false };

	/* reject empty or inverted rectangles */
	if (update_area->left >= update_area->right ||
	    update_area->top >= update_area->bottom)
		return -EINVAL;

	gobj = drm_gem_object_lookup(file, update_area->handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_qxl_bo(gobj);

	ret = qxl_bo_reserve(qobj);
	if (ret)
		goto out;

	/* an unpinned bo may have been evicted; move it back into place */
	if (!qobj->tbo.pin_count) {
		qxl_ttm_placement_from_domain(qobj, qobj->type);
		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
		if (unlikely(ret))
			goto out;
	}

	/* make sure the bo has a hardware surface id before the io */
	ret = qxl_bo_check_id(qdev, qobj);
	if (ret)
		goto out2;
	if (!qobj->surface_id)
		DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
	ret = qxl_io_update_area(qdev, qobj, &area);

out2:
	qxl_bo_unreserve(qobj);

out:
	drm_gem_object_put(gobj);
	return ret;
}
349
350static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
351 struct drm_file *file_priv)
352{
353 struct qxl_device *qdev = to_qxl(dev);
354 struct drm_qxl_getparam *param = data;
355
356 switch (param->param) {
357 case QXL_PARAM_NUM_SURFACES:
358 param->value = qdev->rom->n_surfaces;
359 break;
360 case QXL_PARAM_MAX_RELOCS:
361 param->value = QXL_MAX_RES;
362 break;
363 default:
364 return -EINVAL;
365 }
366 return 0;
367}
368
369static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
370 struct drm_file *file_priv)
371{
372 struct qxl_device *qdev = to_qxl(dev);
373 struct pci_dev *pdev = to_pci_dev(dev->dev);
374 struct drm_qxl_clientcap *param = data;
375 int byte, idx;
376
377 byte = param->index / 8;
378 idx = param->index % 8;
379
380 if (pdev->revision < 4)
381 return -ENOSYS;
382
383 if (byte >= 58)
384 return -ENOSYS;
385
386 if (qdev->rom->client_capabilities[byte] & (1 << idx))
387 return 0;
388 return -ENOSYS;
389}
390
391static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
392 struct drm_file *file)
393{
394 struct qxl_device *qdev = to_qxl(dev);
395 struct drm_qxl_alloc_surf *param = data;
396 struct qxl_bo *qobj;
397 int handle;
398 int ret;
399 int size, actual_stride;
400 struct qxl_surface surf;
401
402 /* work out size allocate bo with handle */
403 actual_stride = param->stride < 0 ? -param->stride : param->stride;
404 size = actual_stride * param->height + actual_stride;
405
406 surf.format = param->format;
407 surf.width = param->width;
408 surf.height = param->height;
409 surf.stride = param->stride;
410 surf.data = 0;
411
412 ret = qxl_gem_object_create_with_handle(qdev, file,
413 QXL_GEM_DOMAIN_SURFACE,
414 size,
415 &surf,
416 &qobj, &handle);
417 if (ret) {
418 DRM_ERROR("%s: failed to create gem ret=%d\n",
419 __func__, ret);
420 return -ENOMEM;
421 } else
422 param->handle = handle;
423 return ret;
424}
425
/*
 * Ioctl dispatch table for the qxl driver.  Every entry requires
 * DRM_AUTH (an authenticated DRM client).
 */
const struct drm_ioctl_desc qxl_ioctls[] = {
	DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH),

	DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH),

	DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
			  DRM_AUTH),
	DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
			  DRM_AUTH),
	DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
			  DRM_AUTH),
	DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
			  DRM_AUTH),

	DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
			  DRM_AUTH),
};

/* entry count of the table above — presumably consumed by the driver's
 * drm_driver registration; verify against the qxl_drv setup code */
int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls);