/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-fence-array.h>
#include <linux/reservation.h>
#include <linux/sync_file.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_perfmon.h"
#include "etnaviv_sched.h"

/*
 * Cmdstream submission:
 */

#define BO_INVALID_FLAGS ~(ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE)
/* make sure these don't conflict w/ ETNAVIV_SUBMIT_BO_x */
#define BO_LOCKED   0x4000
#define BO_PINNED   0x2000

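/* allocate a submit object with space for nr_bos BOs and nr_pmrs perfmon requests */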
static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
		struct etnaviv_gpu *gpu, size_t nr_bos, size_t nr_pmrs)
{
	struct etnaviv_gem_submit *submit;
	size_t sz = size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit));

	submit = kzalloc(sz, GFP_KERNEL);
	if (!submit)
		return NULL;

	submit->pmrs = kcalloc(nr_pmrs, sizeof(struct etnaviv_perfmon_request),
			       GFP_KERNEL);
	if (!submit->pmrs) {
		kfree(submit);
		return NULL;
	}
	submit->nr_pmrs = nr_pmrs;

	submit->gpu = gpu;
	kref_init(&submit->refcount);

	return submit;
}

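/* resolve the userspace BO handles to GEM objects, taking a reference on each */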
static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
	struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos,
	unsigned nr_bos)
{
	struct drm_etnaviv_gem_submit_bo *bo;
	unsigned i;
	int ret = 0;

	spin_lock(&file->table_lock);

	for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) {
		struct drm_gem_object *obj;

		if (bo->flags & BO_INVALID_FLAGS) {
			DRM_ERROR("invalid flags: %x\n", bo->flags);
			ret = -EINVAL;
			goto out_unlock;
		}

		submit->bos[i].flags = bo->flags;

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, bo->handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n",
				  bo->handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		/*
		 * Take a refcount on the object. The file table lock
		 * prevents the object_idr's refcount on this being dropped.
		 */
		drm_gem_object_get(obj);

		submit->bos[i].obj = to_etnaviv_bo(obj);
	}

out_unlock:
	submit->nr_bos = i;
	spin_unlock(&file->table_lock);

	return ret;
}

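/* drop the reservation lock of a single BO, if this submit still holds it */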
static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
{
	if (submit->bos[i].flags & BO_LOCKED) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		ww_mutex_unlock(&etnaviv_obj->resv->lock);
		submit->bos[i].flags &= ~BO_LOCKED;
	}
}

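/* lock all BOs in the submit, backing off and retrying via the ww_mutex slow
 * path when a deadlock against a concurrent submit is detected
 */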
static int submit_lock_objects(struct etnaviv_gem_submit *submit,
		struct ww_acquire_ctx *ticket)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = ww_mutex_lock_interruptible(&etnaviv_obj->resv->lock,
							  ticket);
			if (ret == -EALREADY)
				DRM_ERROR("BO at index %u already on submit list\n",
					  i);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(ticket);

	return 0;

fail:
	for (; i >= 0; i--)
		submit_unlock_object(submit, i);

	if (slow_locked > 0)
		submit_unlock_object(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct etnaviv_gem_object *etnaviv_obj;

		etnaviv_obj = submit->bos[contended].obj;

		/* we lost out in a seqno race, lock and retry.. */
		ret = ww_mutex_lock_slow_interruptible(&etnaviv_obj->resv->lock,
						       ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}

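/* reserve fence slots and collect the fences this submit needs to wait for,
 * unless implicit synchronisation was disabled by userspace
 */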
static int submit_fence_sync(struct etnaviv_gem_submit *submit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
		struct reservation_object *robj = bo->obj->resv;

		if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
			ret = reservation_object_reserve_shared(robj);
			if (ret)
				return ret;
		}

		if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
			continue;

		if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
			ret = reservation_object_get_fences_rcu(robj, &bo->excl,
								&bo->nr_shared,
								&bo->shared);
			if (ret)
				return ret;
		} else {
			bo->excl = reservation_object_get_excl_rcu(robj);
		}

	}

	return ret;
}

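/* attach the out-fence to all BOs and drop their reservation locks */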
static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
			reservation_object_add_excl_fence(etnaviv_obj->resv,
							  submit->out_fence);
		else
			reservation_object_add_shared_fence(etnaviv_obj->resv,
							    submit->out_fence);

		submit_unlock_object(submit, i);
	}
}

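/* pin all BOs into GPU address space and record the resulting mappings */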
static int submit_pin_objects(struct etnaviv_gem_submit *submit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
		struct etnaviv_vram_mapping *mapping;

		mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base,
						  submit->gpu);
		if (IS_ERR(mapping)) {
			ret = PTR_ERR(mapping);
			break;
		}
		atomic_inc(&etnaviv_obj->gpu_active);

		submit->bos[i].flags |= BO_PINNED;
		submit->bos[i].mapping = mapping;
	}

	return ret;
}

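/* look up a BO in the submit by index, with bounds checking */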
static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
	struct etnaviv_gem_submit_bo **bo)
{
	if (idx >= submit->nr_bos) {
		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
			  idx, submit->nr_bos);
		return -EINVAL;
	}

	*bo = &submit->bos[idx];

	return 0;
}

/* process the reloc's and patch up the cmdstream as needed: */
static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
		u32 size, const struct drm_etnaviv_gem_submit_reloc *relocs,
		u32 nr_relocs)
{
	u32 i, last_offset = 0;
	u32 *ptr = stream;
	int ret;

	for (i = 0; i < nr_relocs; i++) {
		const struct drm_etnaviv_gem_submit_reloc *r = relocs + i;
		struct etnaviv_gem_submit_bo *bo;
		u32 off;

		if (unlikely(r->flags)) {
			DRM_ERROR("invalid reloc flags\n");
			return -EINVAL;
		}

		if (r->submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
				  r->submit_offset);
			return -EINVAL;
		}

		/* offset in dwords: */
		off = r->submit_offset / 4;

		if ((off >= size) ||
		    (off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			return -EINVAL;
		}

		ret = submit_bo(submit, r->reloc_idx, &bo);
		if (ret)
			return ret;

		if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
			DRM_ERROR("relocation %u outside object\n", i);
			return -EINVAL;
		}

		ptr[off] = bo->mapping->iova + r->reloc_offset;

		last_offset = off;
	}

	return 0;
}

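/* validate the perfmon requests and copy them into the submit, resolving the
 * read BO to a kernel virtual mapping
 */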
static int submit_perfmon_validate(struct etnaviv_gem_submit *submit,
		u32 exec_state, const struct drm_etnaviv_gem_submit_pmr *pmrs)
{
	u32 i;

	for (i = 0; i < submit->nr_pmrs; i++) {
		const struct drm_etnaviv_gem_submit_pmr *r = pmrs + i;
		struct etnaviv_gem_submit_bo *bo;
		int ret;

		ret = submit_bo(submit, r->read_idx, &bo);
		if (ret)
			return ret;

		/* at offset 0 a sequence number is stored, used for userspace sync */
		if (r->read_offset == 0) {
			DRM_ERROR("perfmon request: offset is 0");
			return -EINVAL;
		}

		if (r->read_offset >= bo->obj->base.size - sizeof(u32)) {
			DRM_ERROR("perfmon request: offset %u outside object", i);
			return -EINVAL;
		}

		if (r->flags & ~(ETNA_PM_PROCESS_PRE | ETNA_PM_PROCESS_POST)) {
			DRM_ERROR("perfmon request: flags are not valid");
			return -EINVAL;
		}

		if (etnaviv_pm_req_validate(r, exec_state)) {
			DRM_ERROR("perfmon request: domain or signal not valid");
			return -EINVAL;
		}

		submit->pmrs[i].flags = r->flags;
		submit->pmrs[i].domain = r->domain;
		submit->pmrs[i].signal = r->signal;
		submit->pmrs[i].sequence = r->sequence;
		submit->pmrs[i].offset = r->read_offset;
		submit->pmrs[i].bo_vma = etnaviv_gem_vmap(&bo->obj->base);
	}

	return 0;
}

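/* final kref release: unpin and unlock all BOs, drop the fences and the
 * command buffer, then free the submit
 */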
static void submit_cleanup(struct kref *kref)
{
	struct etnaviv_gem_submit *submit =
			container_of(kref, struct etnaviv_gem_submit, refcount);
	unsigned i;

	if (submit->runtime_resumed)
		pm_runtime_put_autosuspend(submit->gpu->dev);

	if (submit->cmdbuf.suballoc)
		etnaviv_cmdbuf_free(&submit->cmdbuf);

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		/* unpin all objects */
		if (submit->bos[i].flags & BO_PINNED) {
			etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
			atomic_dec(&etnaviv_obj->gpu_active);
			submit->bos[i].mapping = NULL;
			submit->bos[i].flags &= ~BO_PINNED;
		}

		/* if the GPU submit failed, objects might still be locked */
		submit_unlock_object(submit, i);
		drm_gem_object_put_unlocked(&etnaviv_obj->base);
	}

	wake_up_all(&submit->gpu->fence_event);

	if (submit->in_fence)
		dma_fence_put(submit->in_fence);
	if (submit->out_fence) {
		/* first remove from IDR, so fence can not be found anymore */
		mutex_lock(&submit->gpu->fence_idr_lock);
		idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
		mutex_unlock(&submit->gpu->fence_idr_lock);
		dma_fence_put(submit->out_fence);
	}
	kfree(submit->pmrs);
	kfree(submit);
}

void etnaviv_submit_put(struct etnaviv_gem_submit *submit)
{
	kref_put(&submit->refcount, submit_cleanup);
}

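/* main GEM_SUBMIT ioctl handler: validate the arguments, build the submit and
 * hand it over to the GPU scheduler
 */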
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_file_private *ctx = file->driver_priv;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_submit *args = data;
	struct drm_etnaviv_gem_submit_reloc *relocs;
	struct drm_etnaviv_gem_submit_pmr *pmrs;
	struct drm_etnaviv_gem_submit_bo *bos;
	struct etnaviv_gem_submit *submit;
	struct etnaviv_gpu *gpu;
	struct sync_file *sync_file = NULL;
	struct ww_acquire_ctx ticket;
	int out_fence_fd = -1;
	void *stream;
	int ret;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->stream_size % 4) {
		DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
			  args->stream_size);
		return -EINVAL;
	}

	if (args->exec_state != ETNA_PIPE_3D &&
	    args->exec_state != ETNA_PIPE_2D &&
	    args->exec_state != ETNA_PIPE_VG) {
		DRM_ERROR("invalid exec_state: 0x%x\n", args->exec_state);
		return -EINVAL;
	}

	if (args->flags & ~ETNA_SUBMIT_FLAGS) {
		DRM_ERROR("invalid flags: 0x%x\n", args->flags);
		return -EINVAL;
	}

	/*
	 * Copy the command submission and bo array to kernel space in
	 * one go, and do this outside of any locks.
	 */
	bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL);
	relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
	pmrs = kvmalloc_array(args->nr_pmrs, sizeof(*pmrs), GFP_KERNEL);
	stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
	if (!bos || !relocs || !pmrs || !stream) {
		ret = -ENOMEM;
		goto err_submit_cmds;
	}

	ret = copy_from_user(bos, u64_to_user_ptr(args->bos),
			     args->nr_bos * sizeof(*bos));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(relocs, u64_to_user_ptr(args->relocs),
			     args->nr_relocs * sizeof(*relocs));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(pmrs, u64_to_user_ptr(args->pmrs),
			     args->nr_pmrs * sizeof(*pmrs));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
			     args->stream_size);
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto err_submit_cmds;
		}
	}

	ww_acquire_init(&ticket, &reservation_ww_class);

	submit = submit_create(dev, gpu, args->nr_bos, args->nr_pmrs);
	if (!submit) {
		ret = -ENOMEM;
		goto err_submit_ww_acquire;
	}

	ret = etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &submit->cmdbuf,
				  ALIGN(args->stream_size, 8) + 8);
	if (ret)
		goto err_submit_objects;

	submit->cmdbuf.ctx = file->driver_priv;
	submit->exec_state = args->exec_state;
	submit->flags = args->flags;

	ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
	if (ret)
		goto err_submit_objects;

	if (!etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
				      relocs, args->nr_relocs)) {
		ret = -EINVAL;
		goto err_submit_objects;
	}

	if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
		submit->in_fence = sync_file_get_fence(args->fence_fd);
		if (!submit->in_fence) {
			ret = -EINVAL;
			goto err_submit_objects;
		}
	}

	ret = submit_pin_objects(submit);
	if (ret)
		goto err_submit_objects;

	ret = submit_reloc(submit, stream, args->stream_size / 4,
			   relocs, args->nr_relocs);
	if (ret)
		goto err_submit_objects;

	ret = submit_perfmon_validate(submit, args->exec_state, pmrs);
	if (ret)
		goto err_submit_objects;

	memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);

	ret = submit_lock_objects(submit, &ticket);
	if (ret)
		goto err_submit_objects;

	ret = submit_fence_sync(submit);
	if (ret)
		goto err_submit_objects;

	ret = etnaviv_sched_push_job(&ctx->sched_entity[args->pipe], submit);
	if (ret)
		goto err_submit_objects;

	submit_attach_object_fences(submit);

	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
		/*
		 * This can be improved: ideally we want to allocate the sync
		 * file before kicking off the GPU job and just attach the
		 * fence to the sync file here, eliminating the ENOMEM
		 * possibility at this stage.
		 */
		sync_file = sync_file_create(submit->out_fence);
		if (!sync_file) {
			ret = -ENOMEM;
			goto err_submit_objects;
		}
		fd_install(out_fence_fd, sync_file->file);
	}

	args->fence_fd = out_fence_fd;
	args->fence = submit->out_fence_id;

err_submit_objects:
	etnaviv_submit_put(submit);

err_submit_ww_acquire:
	ww_acquire_fini(&ticket);

err_submit_cmds:
	if (ret && (out_fence_fd >= 0))
		put_unused_fd(out_fence_fd);
	if (stream)
		kvfree(stream);
	if (bos)
		kvfree(bos);
	if (relocs)
		kvfree(relocs);
	if (pmrs)
		kvfree(pmrs);

	return ret;
}