Loading...
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26#include <linux/pci.h>
27#include <linux/uaccess.h>
28
29#include "qxl_drv.h"
30#include "qxl_object.h"
31
32/*
33 * TODO: allocating a new gem(in qxl_bo) for each request.
34 * This is wasteful since bo's are page aligned.
35 */
36static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
37 struct drm_file *file_priv)
38{
39 struct qxl_device *qdev = dev->dev_private;
40 struct drm_qxl_alloc *qxl_alloc = data;
41 int ret;
42 struct qxl_bo *qobj;
43 uint32_t handle;
44 u32 domain = QXL_GEM_DOMAIN_VRAM;
45
46 if (qxl_alloc->size == 0) {
47 DRM_ERROR("invalid size %d\n", qxl_alloc->size);
48 return -EINVAL;
49 }
50 ret = qxl_gem_object_create_with_handle(qdev, file_priv,
51 domain,
52 qxl_alloc->size,
53 NULL,
54 &qobj, &handle);
55 if (ret) {
56 DRM_ERROR("%s: failed to create gem ret=%d\n",
57 __func__, ret);
58 return -ENOMEM;
59 }
60 qxl_alloc->handle = handle;
61 return 0;
62}
63
64static int qxl_map_ioctl(struct drm_device *dev, void *data,
65 struct drm_file *file_priv)
66{
67 struct qxl_device *qdev = dev->dev_private;
68 struct drm_qxl_map *qxl_map = data;
69
70 return qxl_mode_dumb_mmap(file_priv, &qdev->ddev, qxl_map->handle,
71 &qxl_map->offset);
72}
73
/* Bookkeeping for one relocation entry copied in from user space. */
struct qxl_reloc_info {
	int type;		/* QXL_RELOC_TYPE_BO or QXL_RELOC_TYPE_SURF */
	struct qxl_bo *dst_bo;	/* bo that receives the patched value */
	uint32_t dst_offset;	/* byte offset of the slot inside dst_bo */
	struct qxl_bo *src_bo;	/* bo the reloc points at (NULL for none) */
	int src_offset;		/* byte offset inside src_bo */
};
81
82/*
83 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
84 * are on vram).
85 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
86 */
87static void
88apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
89{
90 void *reloc_page;
91
92 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
93 *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
94 info->src_bo,
95 info->src_offset);
96 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
97}
98
99static void
100apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
101{
102 uint32_t id = 0;
103 void *reloc_page;
104
105 if (info->src_bo && !info->src_bo->is_primary)
106 id = info->src_bo->surface_id;
107
108 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
109 *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
110 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
111}
112
113/* return holding the reference to this object */
114static int qxlhw_handle_to_bo(struct drm_file *file_priv, uint64_t handle,
115 struct qxl_release *release, struct qxl_bo **qbo_p)
116{
117 struct drm_gem_object *gobj;
118 struct qxl_bo *qobj;
119 int ret;
120
121 gobj = drm_gem_object_lookup(file_priv, handle);
122 if (!gobj)
123 return -EINVAL;
124
125 qobj = gem_to_qxl_bo(gobj);
126
127 ret = qxl_release_list_add(release, qobj);
128 drm_gem_object_put_unlocked(gobj);
129 if (ret)
130 return ret;
131
132 *qbo_p = qobj;
133 return 0;
134}
135
136/*
137 * Usage of execbuffer:
138 * Relocations need to take into account the full QXLDrawable size.
139 * However, the command as passed from user space must *not* contain the initial
140 * QXLReleaseInfo struct (first XXX bytes)
141 */
142static int qxl_process_single_command(struct qxl_device *qdev,
143 struct drm_qxl_command *cmd,
144 struct drm_file *file_priv)
145{
146 struct qxl_reloc_info *reloc_info;
147 int release_type;
148 struct qxl_release *release;
149 struct qxl_bo *cmd_bo;
150 void *fb_cmd;
151 int i, ret, num_relocs;
152 int unwritten;
153
154 switch (cmd->type) {
155 case QXL_CMD_DRAW:
156 release_type = QXL_RELEASE_DRAWABLE;
157 break;
158 case QXL_CMD_SURFACE:
159 case QXL_CMD_CURSOR:
160 default:
161 DRM_DEBUG("Only draw commands in execbuffers\n");
162 return -EINVAL;
163 break;
164 }
165
166 if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
167 return -EINVAL;
168
169 if (!access_ok(u64_to_user_ptr(cmd->command),
170 cmd->command_size))
171 return -EFAULT;
172
173 reloc_info = kmalloc_array(cmd->relocs_num,
174 sizeof(struct qxl_reloc_info), GFP_KERNEL);
175 if (!reloc_info)
176 return -ENOMEM;
177
178 ret = qxl_alloc_release_reserved(qdev,
179 sizeof(union qxl_release_info) +
180 cmd->command_size,
181 release_type,
182 &release,
183 &cmd_bo);
184 if (ret)
185 goto out_free_reloc;
186
187 /* TODO copy slow path code from i915 */
188 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
189 unwritten = __copy_from_user_inatomic_nocache
190 (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
191 u64_to_user_ptr(cmd->command), cmd->command_size);
192
193 {
194 struct qxl_drawable *draw = fb_cmd;
195
196 draw->mm_time = qdev->rom->mm_clock;
197 }
198
199 qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
200 if (unwritten) {
201 DRM_ERROR("got unwritten %d\n", unwritten);
202 ret = -EFAULT;
203 goto out_free_release;
204 }
205
206 /* fill out reloc info structs */
207 num_relocs = 0;
208 for (i = 0; i < cmd->relocs_num; ++i) {
209 struct drm_qxl_reloc reloc;
210 struct drm_qxl_reloc __user *u = u64_to_user_ptr(cmd->relocs);
211
212 if (copy_from_user(&reloc, u + i, sizeof(reloc))) {
213 ret = -EFAULT;
214 goto out_free_bos;
215 }
216
217 /* add the bos to the list of bos to validate -
218 need to validate first then process relocs? */
219 if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
220 DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);
221
222 ret = -EINVAL;
223 goto out_free_bos;
224 }
225 reloc_info[i].type = reloc.reloc_type;
226
227 if (reloc.dst_handle) {
228 ret = qxlhw_handle_to_bo(file_priv, reloc.dst_handle, release,
229 &reloc_info[i].dst_bo);
230 if (ret)
231 goto out_free_bos;
232 reloc_info[i].dst_offset = reloc.dst_offset;
233 } else {
234 reloc_info[i].dst_bo = cmd_bo;
235 reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
236 }
237 num_relocs++;
238
239 /* reserve and validate the reloc dst bo */
240 if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
241 ret = qxlhw_handle_to_bo(file_priv, reloc.src_handle, release,
242 &reloc_info[i].src_bo);
243 if (ret)
244 goto out_free_bos;
245 reloc_info[i].src_offset = reloc.src_offset;
246 } else {
247 reloc_info[i].src_bo = NULL;
248 reloc_info[i].src_offset = 0;
249 }
250 }
251
252 /* validate all buffers */
253 ret = qxl_release_reserve_list(release, false);
254 if (ret)
255 goto out_free_bos;
256
257 for (i = 0; i < cmd->relocs_num; ++i) {
258 if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
259 apply_reloc(qdev, &reloc_info[i]);
260 else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
261 apply_surf_reloc(qdev, &reloc_info[i]);
262 }
263
264 ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
265 if (ret)
266 qxl_release_backoff_reserve_list(release);
267 else
268 qxl_release_fence_buffer_objects(release);
269
270out_free_bos:
271out_free_release:
272 if (ret)
273 qxl_release_free(qdev, release);
274out_free_reloc:
275 kfree(reloc_info);
276 return ret;
277}
278
279static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
280 struct drm_file *file_priv)
281{
282 struct qxl_device *qdev = dev->dev_private;
283 struct drm_qxl_execbuffer *execbuffer = data;
284 struct drm_qxl_command user_cmd;
285 int cmd_num;
286 int ret;
287
288 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
289
290 struct drm_qxl_command __user *commands =
291 u64_to_user_ptr(execbuffer->commands);
292
293 if (copy_from_user(&user_cmd, commands + cmd_num,
294 sizeof(user_cmd)))
295 return -EFAULT;
296
297 ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
298 if (ret)
299 return ret;
300 }
301 return 0;
302}
303
304static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
305 struct drm_file *file)
306{
307 struct qxl_device *qdev = dev->dev_private;
308 struct drm_qxl_update_area *update_area = data;
309 struct qxl_rect area = {.left = update_area->left,
310 .top = update_area->top,
311 .right = update_area->right,
312 .bottom = update_area->bottom};
313 int ret;
314 struct drm_gem_object *gobj = NULL;
315 struct qxl_bo *qobj = NULL;
316 struct ttm_operation_ctx ctx = { true, false };
317
318 if (update_area->left >= update_area->right ||
319 update_area->top >= update_area->bottom)
320 return -EINVAL;
321
322 gobj = drm_gem_object_lookup(file, update_area->handle);
323 if (gobj == NULL)
324 return -ENOENT;
325
326 qobj = gem_to_qxl_bo(gobj);
327
328 ret = qxl_bo_reserve(qobj, false);
329 if (ret)
330 goto out;
331
332 if (!qobj->pin_count) {
333 qxl_ttm_placement_from_domain(qobj, qobj->type, false);
334 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
335 if (unlikely(ret))
336 goto out;
337 }
338
339 ret = qxl_bo_check_id(qdev, qobj);
340 if (ret)
341 goto out2;
342 if (!qobj->surface_id)
343 DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
344 ret = qxl_io_update_area(qdev, qobj, &area);
345
346out2:
347 qxl_bo_unreserve(qobj);
348
349out:
350 drm_gem_object_put_unlocked(gobj);
351 return ret;
352}
353
354static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
355 struct drm_file *file_priv)
356{
357 struct qxl_device *qdev = dev->dev_private;
358 struct drm_qxl_getparam *param = data;
359
360 switch (param->param) {
361 case QXL_PARAM_NUM_SURFACES:
362 param->value = qdev->rom->n_surfaces;
363 break;
364 case QXL_PARAM_MAX_RELOCS:
365 param->value = QXL_MAX_RES;
366 break;
367 default:
368 return -EINVAL;
369 }
370 return 0;
371}
372
373static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
374 struct drm_file *file_priv)
375{
376 struct qxl_device *qdev = dev->dev_private;
377 struct drm_qxl_clientcap *param = data;
378 int byte, idx;
379
380 byte = param->index / 8;
381 idx = param->index % 8;
382
383 if (dev->pdev->revision < 4)
384 return -ENOSYS;
385
386 if (byte >= 58)
387 return -ENOSYS;
388
389 if (qdev->rom->client_capabilities[byte] & (1 << idx))
390 return 0;
391 return -ENOSYS;
392}
393
394static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
395 struct drm_file *file)
396{
397 struct qxl_device *qdev = dev->dev_private;
398 struct drm_qxl_alloc_surf *param = data;
399 struct qxl_bo *qobj;
400 int handle;
401 int ret;
402 int size, actual_stride;
403 struct qxl_surface surf;
404
405 /* work out size allocate bo with handle */
406 actual_stride = param->stride < 0 ? -param->stride : param->stride;
407 size = actual_stride * param->height + actual_stride;
408
409 surf.format = param->format;
410 surf.width = param->width;
411 surf.height = param->height;
412 surf.stride = param->stride;
413 surf.data = 0;
414
415 ret = qxl_gem_object_create_with_handle(qdev, file,
416 QXL_GEM_DOMAIN_SURFACE,
417 size,
418 &surf,
419 &qobj, &handle);
420 if (ret) {
421 DRM_ERROR("%s: failed to create gem ret=%d\n",
422 __func__, ret);
423 return -ENOMEM;
424 } else
425 param->handle = handle;
426 return ret;
427}
428
429const struct drm_ioctl_desc qxl_ioctls[] = {
430 DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH),
431
432 DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH),
433
434 DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
435 DRM_AUTH),
436 DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
437 DRM_AUTH),
438 DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
439 DRM_AUTH),
440 DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
441 DRM_AUTH),
442
443 DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
444 DRM_AUTH),
445};
446
447int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls);
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26#include "qxl_drv.h"
27#include "qxl_object.h"
28
29/*
30 * TODO: allocating a new gem(in qxl_bo) for each request.
31 * This is wasteful since bo's are page aligned.
32 */
33static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
34 struct drm_file *file_priv)
35{
36 struct qxl_device *qdev = dev->dev_private;
37 struct drm_qxl_alloc *qxl_alloc = data;
38 int ret;
39 struct qxl_bo *qobj;
40 uint32_t handle;
41 u32 domain = QXL_GEM_DOMAIN_VRAM;
42
43 if (qxl_alloc->size == 0) {
44 DRM_ERROR("invalid size %d\n", qxl_alloc->size);
45 return -EINVAL;
46 }
47 ret = qxl_gem_object_create_with_handle(qdev, file_priv,
48 domain,
49 qxl_alloc->size,
50 NULL,
51 &qobj, &handle);
52 if (ret) {
53 DRM_ERROR("%s: failed to create gem ret=%d\n",
54 __func__, ret);
55 return -ENOMEM;
56 }
57 qxl_alloc->handle = handle;
58 return 0;
59}
60
61static int qxl_map_ioctl(struct drm_device *dev, void *data,
62 struct drm_file *file_priv)
63{
64 struct qxl_device *qdev = dev->dev_private;
65 struct drm_qxl_map *qxl_map = data;
66
67 return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle,
68 &qxl_map->offset);
69}
70
/* Bookkeeping for one relocation entry copied in from user space. */
struct qxl_reloc_info {
	int type;		/* QXL_RELOC_TYPE_BO or QXL_RELOC_TYPE_SURF */
	struct qxl_bo *dst_bo;	/* bo that receives the patched value */
	uint32_t dst_offset;	/* byte offset of the slot inside dst_bo */
	struct qxl_bo *src_bo;	/* bo the reloc points at (NULL for none) */
	int src_offset;		/* byte offset inside src_bo */
};
78
79/*
80 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
81 * are on vram).
82 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
83 */
84static void
85apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
86{
87 void *reloc_page;
88 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
89 *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
90 info->src_bo,
91 info->src_offset);
92 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
93}
94
95static void
96apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
97{
98 uint32_t id = 0;
99 void *reloc_page;
100
101 if (info->src_bo && !info->src_bo->is_primary)
102 id = info->src_bo->surface_id;
103
104 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
105 *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
106 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
107}
108
109/* return holding the reference to this object */
110static int qxlhw_handle_to_bo(struct drm_file *file_priv, uint64_t handle,
111 struct qxl_release *release, struct qxl_bo **qbo_p)
112{
113 struct drm_gem_object *gobj;
114 struct qxl_bo *qobj;
115 int ret;
116
117 gobj = drm_gem_object_lookup(file_priv, handle);
118 if (!gobj)
119 return -EINVAL;
120
121 qobj = gem_to_qxl_bo(gobj);
122
123 ret = qxl_release_list_add(release, qobj);
124 drm_gem_object_unreference_unlocked(gobj);
125 if (ret)
126 return ret;
127
128 *qbo_p = qobj;
129 return 0;
130}
131
132/*
133 * Usage of execbuffer:
134 * Relocations need to take into account the full QXLDrawable size.
135 * However, the command as passed from user space must *not* contain the initial
136 * QXLReleaseInfo struct (first XXX bytes)
137 */
138static int qxl_process_single_command(struct qxl_device *qdev,
139 struct drm_qxl_command *cmd,
140 struct drm_file *file_priv)
141{
142 struct qxl_reloc_info *reloc_info;
143 int release_type;
144 struct qxl_release *release;
145 struct qxl_bo *cmd_bo;
146 void *fb_cmd;
147 int i, ret, num_relocs;
148 int unwritten;
149
150 switch (cmd->type) {
151 case QXL_CMD_DRAW:
152 release_type = QXL_RELEASE_DRAWABLE;
153 break;
154 case QXL_CMD_SURFACE:
155 case QXL_CMD_CURSOR:
156 default:
157 DRM_DEBUG("Only draw commands in execbuffers\n");
158 return -EINVAL;
159 break;
160 }
161
162 if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
163 return -EINVAL;
164
165 if (!access_ok(VERIFY_READ,
166 (void *)(unsigned long)cmd->command,
167 cmd->command_size))
168 return -EFAULT;
169
170 reloc_info = kmalloc_array(cmd->relocs_num,
171 sizeof(struct qxl_reloc_info), GFP_KERNEL);
172 if (!reloc_info)
173 return -ENOMEM;
174
175 ret = qxl_alloc_release_reserved(qdev,
176 sizeof(union qxl_release_info) +
177 cmd->command_size,
178 release_type,
179 &release,
180 &cmd_bo);
181 if (ret)
182 goto out_free_reloc;
183
184 /* TODO copy slow path code from i915 */
185 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
186 unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
187
188 {
189 struct qxl_drawable *draw = fb_cmd;
190 draw->mm_time = qdev->rom->mm_clock;
191 }
192
193 qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
194 if (unwritten) {
195 DRM_ERROR("got unwritten %d\n", unwritten);
196 ret = -EFAULT;
197 goto out_free_release;
198 }
199
200 /* fill out reloc info structs */
201 num_relocs = 0;
202 for (i = 0; i < cmd->relocs_num; ++i) {
203 struct drm_qxl_reloc reloc;
204
205 if (copy_from_user(&reloc,
206 &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
207 sizeof(reloc))) {
208 ret = -EFAULT;
209 goto out_free_bos;
210 }
211
212 /* add the bos to the list of bos to validate -
213 need to validate first then process relocs? */
214 if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
215 DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);
216
217 ret = -EINVAL;
218 goto out_free_bos;
219 }
220 reloc_info[i].type = reloc.reloc_type;
221
222 if (reloc.dst_handle) {
223 ret = qxlhw_handle_to_bo(file_priv, reloc.dst_handle, release,
224 &reloc_info[i].dst_bo);
225 if (ret)
226 goto out_free_bos;
227 reloc_info[i].dst_offset = reloc.dst_offset;
228 } else {
229 reloc_info[i].dst_bo = cmd_bo;
230 reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
231 }
232 num_relocs++;
233
234 /* reserve and validate the reloc dst bo */
235 if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
236 ret = qxlhw_handle_to_bo(file_priv, reloc.src_handle, release,
237 &reloc_info[i].src_bo);
238 if (ret)
239 goto out_free_bos;
240 reloc_info[i].src_offset = reloc.src_offset;
241 } else {
242 reloc_info[i].src_bo = NULL;
243 reloc_info[i].src_offset = 0;
244 }
245 }
246
247 /* validate all buffers */
248 ret = qxl_release_reserve_list(release, false);
249 if (ret)
250 goto out_free_bos;
251
252 for (i = 0; i < cmd->relocs_num; ++i) {
253 if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
254 apply_reloc(qdev, &reloc_info[i]);
255 else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
256 apply_surf_reloc(qdev, &reloc_info[i]);
257 }
258
259 ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
260 if (ret)
261 qxl_release_backoff_reserve_list(release);
262 else
263 qxl_release_fence_buffer_objects(release);
264
265out_free_bos:
266out_free_release:
267 if (ret)
268 qxl_release_free(qdev, release);
269out_free_reloc:
270 kfree(reloc_info);
271 return ret;
272}
273
274static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
275 struct drm_file *file_priv)
276{
277 struct qxl_device *qdev = dev->dev_private;
278 struct drm_qxl_execbuffer *execbuffer = data;
279 struct drm_qxl_command user_cmd;
280 int cmd_num;
281 int ret;
282
283 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
284
285 struct drm_qxl_command *commands =
286 (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
287
288 if (copy_from_user(&user_cmd, &commands[cmd_num],
289 sizeof(user_cmd)))
290 return -EFAULT;
291
292 ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
293 if (ret)
294 return ret;
295 }
296 return 0;
297}
298
299static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
300 struct drm_file *file)
301{
302 struct qxl_device *qdev = dev->dev_private;
303 struct drm_qxl_update_area *update_area = data;
304 struct qxl_rect area = {.left = update_area->left,
305 .top = update_area->top,
306 .right = update_area->right,
307 .bottom = update_area->bottom};
308 int ret;
309 struct drm_gem_object *gobj = NULL;
310 struct qxl_bo *qobj = NULL;
311
312 if (update_area->left >= update_area->right ||
313 update_area->top >= update_area->bottom)
314 return -EINVAL;
315
316 gobj = drm_gem_object_lookup(file, update_area->handle);
317 if (gobj == NULL)
318 return -ENOENT;
319
320 qobj = gem_to_qxl_bo(gobj);
321
322 ret = qxl_bo_reserve(qobj, false);
323 if (ret)
324 goto out;
325
326 if (!qobj->pin_count) {
327 qxl_ttm_placement_from_domain(qobj, qobj->type, false);
328 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
329 true, false);
330 if (unlikely(ret))
331 goto out;
332 }
333
334 ret = qxl_bo_check_id(qdev, qobj);
335 if (ret)
336 goto out2;
337 if (!qobj->surface_id)
338 DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
339 ret = qxl_io_update_area(qdev, qobj, &area);
340
341out2:
342 qxl_bo_unreserve(qobj);
343
344out:
345 drm_gem_object_unreference_unlocked(gobj);
346 return ret;
347}
348
349static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
350 struct drm_file *file_priv)
351{
352 struct qxl_device *qdev = dev->dev_private;
353 struct drm_qxl_getparam *param = data;
354
355 switch (param->param) {
356 case QXL_PARAM_NUM_SURFACES:
357 param->value = qdev->rom->n_surfaces;
358 break;
359 case QXL_PARAM_MAX_RELOCS:
360 param->value = QXL_MAX_RES;
361 break;
362 default:
363 return -EINVAL;
364 }
365 return 0;
366}
367
368static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
369 struct drm_file *file_priv)
370{
371 struct qxl_device *qdev = dev->dev_private;
372 struct drm_qxl_clientcap *param = data;
373 int byte, idx;
374
375 byte = param->index / 8;
376 idx = param->index % 8;
377
378 if (qdev->pdev->revision < 4)
379 return -ENOSYS;
380
381 if (byte >= 58)
382 return -ENOSYS;
383
384 if (qdev->rom->client_capabilities[byte] & (1 << idx))
385 return 0;
386 return -ENOSYS;
387}
388
389static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
390 struct drm_file *file)
391{
392 struct qxl_device *qdev = dev->dev_private;
393 struct drm_qxl_alloc_surf *param = data;
394 struct qxl_bo *qobj;
395 int handle;
396 int ret;
397 int size, actual_stride;
398 struct qxl_surface surf;
399
400 /* work out size allocate bo with handle */
401 actual_stride = param->stride < 0 ? -param->stride : param->stride;
402 size = actual_stride * param->height + actual_stride;
403
404 surf.format = param->format;
405 surf.width = param->width;
406 surf.height = param->height;
407 surf.stride = param->stride;
408 surf.data = 0;
409
410 ret = qxl_gem_object_create_with_handle(qdev, file,
411 QXL_GEM_DOMAIN_SURFACE,
412 size,
413 &surf,
414 &qobj, &handle);
415 if (ret) {
416 DRM_ERROR("%s: failed to create gem ret=%d\n",
417 __func__, ret);
418 return -ENOMEM;
419 } else
420 param->handle = handle;
421 return ret;
422}
423
424const struct drm_ioctl_desc qxl_ioctls[] = {
425 DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH),
426
427 DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH),
428
429 DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
430 DRM_AUTH),
431 DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
432 DRM_AUTH),
433 DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
434 DRM_AUTH),
435 DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
436 DRM_AUTH),
437
438 DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
439 DRM_AUTH),
440};
441
442int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls);