// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#include <linux/mm.h>
#include <linux/sync_file.h>
#include <linux/pfn_t.h>

#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include <drm/lima_drm.h>

#include "lima_drv.h"
#include "lima_gem.h"
#include "lima_gem_prime.h"
#include "lima_vm.h"
#include "lima_object.h"

int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
			   u32 size, u32 flags, u32 *handle)
{
	int err;
	struct lima_bo *bo;
	struct lima_device *ldev = to_lima_dev(dev);

	bo = lima_bo_create(ldev, size, flags, NULL);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	err = drm_gem_handle_create(file, &bo->gem, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&bo->gem);

	return err;
}

void lima_gem_free_object(struct drm_gem_object *obj)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (!list_empty(&bo->va))
		dev_err(obj->dev->dev, "lima gem free bo still has va\n");

	lima_bo_destroy(bo);
}

int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
{
	struct lima_bo *bo = to_lima_bo(obj);
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;

	return lima_vm_bo_add(vm, bo, true);
}

void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
{
	struct lima_bo *bo = to_lima_bo(obj);
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;

	lima_vm_bo_del(vm, bo);
}

int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
{
	struct drm_gem_object *obj;
	struct lima_bo *bo;
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;
	int err;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	bo = to_lima_bo(obj);

	*va = lima_vm_get_va(vm, bo);

	err = drm_gem_create_mmap_offset(obj);
	if (!err)
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	drm_gem_object_put_unlocked(obj);
	return err;
}

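/*
 * CPU mappings are set up lazily: lima_gem_mmap() only installs the VMA
 * flags and write-combined page protection, and the BO's backing pages
 * are inserted on first access by the fault handler below. VM_MIXEDMAP
 * lets vmf_insert_mixed() map the pages one at a time.
 */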
static vm_fault_t lima_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct lima_bo *bo = to_lima_bo(obj);
	pfn_t pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);

	return vmf_insert_mixed(vma, vmf->address, pfn);
}

const struct vm_operations_struct lima_gem_vm_ops = {
	.fault = lima_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

void lima_set_vma_flags(struct vm_area_struct *vma)
{
	pgprot_t prot = vm_get_page_prot(vma->vm_flags);

	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_page_prot = pgprot_writecombine(prot);
}

int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	lima_set_vma_flags(vma);
	return 0;
}

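/*
 * Per-BO synchronization for a submit: readers reserve a shared fence
 * slot for the fence that will be added once the task is queued. With
 * implicit sync the fences already attached to the BO's reservation
 * object become task dependencies; with explicit sync only the
 * user-supplied syncobj fences (see lima_gem_add_deps()) are used.
 */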
static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
			    bool write, bool explicit)
{
	int err = 0;

	if (!write) {
		err = dma_resv_reserve_shared(bo->gem.resv, 1);
		if (err)
			return err;
	}

	/* explicit sync uses the user-passed dependency fences */
	if (explicit)
		return 0;

	return drm_gem_fence_array_add_implicit(&task->deps, &bo->gem, write);
}

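/*
 * Take the reservation locks of all BOs in the submit under one
 * ww_acquire context. On -EDEADLK, drop every lock already held,
 * acquire the contended lock with the slow path and retry the loop,
 * skipping the already-held slow-locked entry.
 */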
static int lima_gem_lock_bos(struct lima_bo **bos, u32 nr_bos,
			     struct ww_acquire_ctx *ctx)
{
	int i, ret = 0, contended, slow_locked = -1;

	ww_acquire_init(ctx, &reservation_ww_class);

retry:
	for (i = 0; i < nr_bos; i++) {
		if (i == slow_locked) {
			slow_locked = -1;
			continue;
		}

		ret = ww_mutex_lock_interruptible(&bos[i]->gem.resv->lock, ctx);
		if (ret < 0) {
			contended = i;
			goto err;
		}
	}

	ww_acquire_done(ctx);
	return 0;

err:
	for (i--; i >= 0; i--)
		ww_mutex_unlock(&bos[i]->gem.resv->lock);

	if (slow_locked >= 0)
		ww_mutex_unlock(&bos[slow_locked]->gem.resv->lock);

	if (ret == -EDEADLK) {
		/* we lost out in a seqno race, lock and retry.. */
		ret = ww_mutex_lock_slow_interruptible(
			&bos[contended]->gem.resv->lock, ctx);
		if (!ret) {
			slow_locked = contended;
			goto retry;
		}
	}
	ww_acquire_fini(ctx);

	return ret;
}

static void lima_gem_unlock_bos(struct lima_bo **bos, u32 nr_bos,
				struct ww_acquire_ctx *ctx)
{
	int i;

	for (i = 0; i < nr_bos; i++)
		ww_mutex_unlock(&bos[i]->gem.resv->lock);
	ww_acquire_fini(ctx);
}

static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) {
		struct dma_fence *fence = NULL;

		if (!submit->in_sync[i])
			continue;

		err = drm_syncobj_find_fence(file, submit->in_sync[i],
					     0, 0, &fence);
		if (err)
			return err;

		err = drm_gem_fence_array_add(&submit->task->deps, fence);
		if (err) {
			dma_fence_put(fence);
			return err;
		}
	}

	return 0;
}

int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
{
	int i, err = 0;
	struct ww_acquire_ctx ctx;
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;
	struct drm_syncobj *out_sync = NULL;
	struct dma_fence *fence;
	struct lima_bo **bos = submit->lbos;

	if (submit->out_sync) {
		out_sync = drm_syncobj_find(file, submit->out_sync);
		if (!out_sync)
			return -ENOENT;
	}

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj;
		struct lima_bo *bo;

		obj = drm_gem_object_lookup(file, submit->bos[i].handle);
		if (!obj) {
			err = -ENOENT;
			goto err_out0;
		}

		bo = to_lima_bo(obj);

		/* take a GPU VA map reference to prevent the BO from being
		 * unmapped while the task is executing; dropped when the
		 * task is done
		 */
		err = lima_vm_bo_add(vm, bo, false);
		if (err) {
			drm_gem_object_put_unlocked(obj);
			goto err_out0;
		}

		bos[i] = bo;
	}

	err = lima_gem_lock_bos(bos, submit->nr_bos, &ctx);
	if (err)
		goto err_out0;

	err = lima_sched_task_init(
		submit->task, submit->ctx->context + submit->pipe,
		bos, submit->nr_bos, vm);
	if (err)
		goto err_out1;

	err = lima_gem_add_deps(file, submit);
	if (err)
		goto err_out2;

	for (i = 0; i < submit->nr_bos; i++) {
		err = lima_gem_sync_bo(
			submit->task, bos[i],
			submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE,
			submit->flags & LIMA_SUBMIT_FLAG_EXPLICIT_FENCE);
		if (err)
			goto err_out2;
	}

	fence = lima_sched_context_queue_task(
		submit->ctx->context + submit->pipe, submit->task);

	for (i = 0; i < submit->nr_bos; i++) {
		if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
			dma_resv_add_excl_fence(bos[i]->gem.resv, fence);
		else
			dma_resv_add_shared_fence(bos[i]->gem.resv, fence);
	}

	lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);

	for (i = 0; i < submit->nr_bos; i++)
		drm_gem_object_put_unlocked(&bos[i]->gem);

	if (out_sync) {
		drm_syncobj_replace_fence(out_sync, fence);
		drm_syncobj_put(out_sync);
	}

	dma_fence_put(fence);

	return 0;

err_out2:
	lima_sched_task_fini(submit->task);
err_out1:
	lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
err_out0:
	for (i = 0; i < submit->nr_bos; i++) {
		if (!bos[i])
			break;
		lima_vm_bo_del(vm, bos[i]);
		drm_gem_object_put_unlocked(&bos[i]->gem);
	}
	if (out_sync)
		drm_syncobj_put(out_sync);
	return err;
}

int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
{
	bool write = op & LIMA_GEM_WAIT_WRITE;
	long ret, timeout;

	if (!op)
		return 0;

	timeout = drm_timeout_abs_to_jiffies(timeout_ns);

	ret = drm_gem_dma_resv_wait(file, handle, write, timeout);
	if (ret == -ETIME)
		ret = timeout ? -ETIMEDOUT : -EBUSY;

	return ret;
}

// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#include <linux/mm.h>
#include <linux/sync_file.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-mapping.h>

#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include <drm/lima_drm.h>

#include "lima_drv.h"
#include "lima_gem.h"
#include "lima_vm.h"

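/*
 * Grow a heap BO: the backing store starts at lima_heap_init_nr_pages
 * pages and doubles on every call, capped at the BO size. Newly read
 * shmem pages are appended to the page array, the sg table is rebuilt
 * and remapped for DMA, and, if a VM is given, the new pages are mapped
 * into the GPU VM starting at the old end of the heap.
 */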
int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
{
	struct page **pages;
	struct address_space *mapping = bo->base.base.filp->f_mapping;
	struct device *dev = bo->base.base.dev->dev;
	size_t old_size = bo->heap_size;
	size_t new_size = bo->heap_size ? bo->heap_size * 2 :
		(lima_heap_init_nr_pages << PAGE_SHIFT);
	struct sg_table sgt;
	int i, ret;

	if (bo->heap_size >= bo->base.base.size)
		return -ENOSPC;

	new_size = min(new_size, bo->base.base.size);

	mutex_lock(&bo->base.pages_lock);

	if (bo->base.pages) {
		pages = bo->base.pages;
	} else {
		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
				       sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
		if (!pages) {
			mutex_unlock(&bo->base.pages_lock);
			return -ENOMEM;
		}

		bo->base.pages = pages;
		bo->base.pages_use_count = 1;

		mapping_set_unevictable(mapping);
	}

	for (i = old_size >> PAGE_SHIFT; i < new_size >> PAGE_SHIFT; i++) {
		struct page *page = shmem_read_mapping_page(mapping, i);

		if (IS_ERR(page)) {
			mutex_unlock(&bo->base.pages_lock);
			return PTR_ERR(page);
		}
		pages[i] = page;
	}

	mutex_unlock(&bo->base.pages_lock);

	ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
					new_size, GFP_KERNEL);
	if (ret)
		return ret;

	if (bo->base.sgt) {
		dma_unmap_sgtable(dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
		sg_free_table(bo->base.sgt);
	} else {
		bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
		if (!bo->base.sgt) {
			sg_free_table(&sgt);
			return -ENOMEM;
		}
	}

	ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		sg_free_table(&sgt);
		kfree(bo->base.sgt);
		bo->base.sgt = NULL;
		return ret;
	}

	*bo->base.sgt = sgt;

	if (vm) {
		ret = lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT);
		if (ret)
			return ret;
	}

	bo->heap_size = new_size;
	return 0;
}

int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
			   u32 size, u32 flags, u32 *handle)
{
	int err;
	gfp_t mask;
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	struct lima_bo *bo;
	bool is_heap = flags & LIMA_BO_FLAG_HEAP;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	obj = &shmem->base;

	/* The Mali Utgard GPU only supports a 32-bit address space */
	mask = mapping_gfp_mask(obj->filp->f_mapping);
	mask &= ~__GFP_HIGHMEM;
	mask |= __GFP_DMA32;
	mapping_set_gfp_mask(obj->filp->f_mapping, mask);

	if (is_heap) {
		bo = to_lima_bo(obj);
		err = lima_heap_alloc(bo, NULL);
		if (err)
			goto out;
	} else {
		struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(obj);

		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto out;
		}
	}

	err = drm_gem_handle_create(file, obj, handle);

out:
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return err;
}

static void lima_gem_free_object(struct drm_gem_object *obj)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (!list_empty(&bo->va))
		dev_err(obj->dev->dev, "lima gem free bo still has va\n");

	drm_gem_shmem_free_object(obj);
}

static int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
{
	struct lima_bo *bo = to_lima_bo(obj);
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;

	return lima_vm_bo_add(vm, bo, true);
}

static void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
{
	struct lima_bo *bo = to_lima_bo(obj);
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;

	lima_vm_bo_del(vm, bo);
}

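/*
 * Heap BOs are populated incrementally by lima_heap_alloc(), so pinning,
 * kernel vmap and CPU mmap are rejected for them; everything else goes
 * through the generic shmem helpers.
 */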
static int lima_gem_pin(struct drm_gem_object *obj)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (bo->heap_size)
		return -EINVAL;

	return drm_gem_shmem_pin(obj);
}

static int lima_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (bo->heap_size)
		return -EINVAL;

	return drm_gem_shmem_vmap(obj, map);
}

static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (bo->heap_size)
		return -EINVAL;

	return drm_gem_shmem_mmap(obj, vma);
}

static const struct drm_gem_object_funcs lima_gem_funcs = {
	.free = lima_gem_free_object,
	.open = lima_gem_object_open,
	.close = lima_gem_object_close,
	.print_info = drm_gem_shmem_print_info,
	.pin = lima_gem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = lima_gem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = lima_gem_mmap,
};

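/*
 * Allocate the lima BO wrapping a drm_gem_shmem_object; CPU mappings
 * are write-combined (map_wc) and all GEM object callbacks are routed
 * through lima_gem_funcs above.
 */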
struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size)
{
	struct lima_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return NULL;

	mutex_init(&bo->lock);
	INIT_LIST_HEAD(&bo->va);
	bo->base.map_wc = true;
	bo->base.base.funcs = &lima_gem_funcs;

	return &bo->base.base;
}

int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
{
	struct drm_gem_object *obj;
	struct lima_bo *bo;
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	bo = to_lima_bo(obj);

	*va = lima_vm_get_va(vm, bo);

	*offset = drm_vma_node_offset_addr(&obj->vma_node);

	drm_gem_object_put(obj);
	return 0;
}

static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
			    bool write, bool explicit)
{
	int err = 0;

	if (!write) {
		err = dma_resv_reserve_shared(lima_bo_resv(bo), 1);
		if (err)
			return err;
	}

	/* explicit sync uses the user-passed dependency fences */
	if (explicit)
		return 0;

	return drm_gem_fence_array_add_implicit(&task->deps, &bo->base.base, write);
}

static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) {
		struct dma_fence *fence = NULL;

		if (!submit->in_sync[i])
			continue;

		err = drm_syncobj_find_fence(file, submit->in_sync[i],
					     0, 0, &fence);
		if (err)
			return err;

		err = drm_gem_fence_array_add(&submit->task->deps, fence);
		if (err) {
			dma_fence_put(fence);
			return err;
		}
	}

	return 0;
}

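/*
 * Submit flow: look up every BO handle and take a GPU VA map reference,
 * lock all reservation objects, initialise the scheduler task, gather
 * explicit (syncobj) and implicit (reservation) dependencies, queue the
 * task, attach the returned fence to each BO as an exclusive or shared
 * fence depending on LIMA_SUBMIT_BO_WRITE, then unlock and drop the
 * handle references. On error the VA map references taken above are
 * dropped again.
 */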
int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
{
	int i, err = 0;
	struct ww_acquire_ctx ctx;
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;
	struct drm_syncobj *out_sync = NULL;
	struct dma_fence *fence;
	struct lima_bo **bos = submit->lbos;

	if (submit->out_sync) {
		out_sync = drm_syncobj_find(file, submit->out_sync);
		if (!out_sync)
			return -ENOENT;
	}

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj;
		struct lima_bo *bo;

		obj = drm_gem_object_lookup(file, submit->bos[i].handle);
		if (!obj) {
			err = -ENOENT;
			goto err_out0;
		}

		bo = to_lima_bo(obj);

		/* take a GPU VA map reference to prevent the BO from being
		 * unmapped while the task is executing; dropped when the
		 * task is done
		 */
		err = lima_vm_bo_add(vm, bo, false);
		if (err) {
			drm_gem_object_put(obj);
			goto err_out0;
		}

		bos[i] = bo;
	}

	err = drm_gem_lock_reservations((struct drm_gem_object **)bos,
					submit->nr_bos, &ctx);
	if (err)
		goto err_out0;

	err = lima_sched_task_init(
		submit->task, submit->ctx->context + submit->pipe,
		bos, submit->nr_bos, vm);
	if (err)
		goto err_out1;

	err = lima_gem_add_deps(file, submit);
	if (err)
		goto err_out2;

	for (i = 0; i < submit->nr_bos; i++) {
		err = lima_gem_sync_bo(
			submit->task, bos[i],
			submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE,
			submit->flags & LIMA_SUBMIT_FLAG_EXPLICIT_FENCE);
		if (err)
			goto err_out2;
	}

	fence = lima_sched_context_queue_task(
		submit->ctx->context + submit->pipe, submit->task);

	for (i = 0; i < submit->nr_bos; i++) {
		if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
			dma_resv_add_excl_fence(lima_bo_resv(bos[i]), fence);
		else
			dma_resv_add_shared_fence(lima_bo_resv(bos[i]), fence);
	}

	drm_gem_unlock_reservations((struct drm_gem_object **)bos,
				    submit->nr_bos, &ctx);

	for (i = 0; i < submit->nr_bos; i++)
		drm_gem_object_put(&bos[i]->base.base);

	if (out_sync) {
		drm_syncobj_replace_fence(out_sync, fence);
		drm_syncobj_put(out_sync);
	}

	dma_fence_put(fence);

	return 0;

err_out2:
	lima_sched_task_fini(submit->task);
err_out1:
	drm_gem_unlock_reservations((struct drm_gem_object **)bos,
				    submit->nr_bos, &ctx);
err_out0:
	for (i = 0; i < submit->nr_bos; i++) {
		if (!bos[i])
			break;
		lima_vm_bo_del(vm, bos[i]);
		drm_gem_object_put(&bos[i]->base.base);
	}
	if (out_sync)
		drm_syncobj_put(out_sync);
	return err;
}

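/*
 * Wait for a BO to become idle. LIMA_GEM_WAIT_WRITE waits for all fences
 * on the reservation object, otherwise only the exclusive fence is
 * waited for. -ETIME from the wait is translated to -ETIMEDOUT, or to
 * -EBUSY for a zero-timeout poll.
 */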
int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
{
	bool write = op & LIMA_GEM_WAIT_WRITE;
	long ret, timeout;

	if (!op)
		return 0;

	timeout = drm_timeout_abs_to_jiffies(timeout_ns);

	ret = drm_gem_dma_resv_wait(file, handle, write, timeout);
	if (ret == -ETIME)
		ret = timeout ? -ETIMEDOUT : -EBUSY;

	return ret;
}