// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

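/*
 * Unmap a page-aligned IOVA range from the context, one 4K page at a time,
 * stopping early if the backend fails to unmap a page.
 */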
static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
				  unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	size_t pgsize = SZ_4K;

	if (!IS_ALIGNED(iova | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
		       iova, size, pgsize);
		return;
	}

	while (unmapped < size) {
		unmapped_page = context->global->ops->unmap(context, iova,
							    pgsize);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped += unmapped_page;
	}
}

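/*
 * Map a physically contiguous range into the context in 4K steps. If any
 * page fails to map, the partially established mapping is unrolled again,
 * so the operation is all-or-nothing.
 */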
static int etnaviv_context_map(struct etnaviv_iommu_context *context,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	unsigned long orig_iova = iova;
	size_t pgsize = SZ_4K;
	size_t orig_size = size;
	int ret = 0;

	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
		       iova, &paddr, size, pgsize);
		return -EINVAL;
	}

	while (size) {
		ret = context->global->ops->map(context, iova, paddr, pgsize,
						prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		etnaviv_context_unmap(context, orig_iova, orig_size - size);

	return ret;
}

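/*
 * Map a scatter-gather table at the given IOVA, clamping each entry to the
 * remaining length of the virtual range. On failure everything mapped so
 * far is torn down before returning the error.
 */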
static int etnaviv_iommu_map(struct etnaviv_iommu_context *context,
			     u32 iova, unsigned int va_len,
			     struct sg_table *sgt, int prot)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i;
	int ret;

	if (!context || !sgt)
		return -EINVAL;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		phys_addr_t pa = sg_dma_address(sg) - sg->offset;
		unsigned int da_len = sg_dma_len(sg) + sg->offset;
		unsigned int bytes = min_t(unsigned int, da_len, va_len);

		VERB("map[%d]: %08x %pap(%x)", i, iova, &pa, bytes);

		ret = etnaviv_context_map(context, da, pa, bytes, prot);
		if (ret)
			goto fail;

		va_len -= bytes;
		da += bytes;
	}

	context->flush_seq++;

	return 0;

fail:
	etnaviv_context_unmap(context, iova, da - iova);
	return ret;
}

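/*
 * Remove a GEM object's mapping from the pagetables and bump the context's
 * flush sequence number so users of the context can pick up the change.
 */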
static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
				struct sg_table *sgt, unsigned len)
{
	etnaviv_context_unmap(context, iova, len);

	context->flush_seq++;
}

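/*
 * Unmap a GEM object and release its address space node. The context lock
 * must be held by the caller.
 */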
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
					 struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	lockdep_assert_held(&context->lock);

	etnaviv_iommu_unmap(context, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->size);
	drm_mm_remove_node(&mapping->vram_node);
}

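/*
 * Reap an idle mapping: unmap it, drop the context reference it holds and
 * unlink it from the context so the address range can be reused. Called
 * with the context lock held and only for mappings that are not in use.
 */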
void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_iommu_context *context = mapping->context;

	lockdep_assert_held(&context->lock);
	WARN_ON(mapping->use);

	etnaviv_iommu_remove_mapping(context, mapping);
	etnaviv_iommu_context_put(mapping->context);
	mapping->context = NULL;
	list_del_init(&mapping->mmu_node);
}

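/*
 * Find a free IOVA range of the requested size. If the address space is
 * exhausted, use the drm_mm eviction scanner to select idle mappings, reap
 * them and retry the allocation in eviction mode.
 */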
static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
	int ret;

	lockdep_assert_held(&context->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct drm_mm_scan scan;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&context->mm, node,
						  size, 0, 0, 0, U64_MAX, mode);
		if (ret != -ENOSPC)
			break;

		/* Try to retire some entries */
		drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);

		found = false;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &context->mappings, mmu_node) {
			/* If this vram node has not been used, skip it. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent mapping_get from finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_reap_mapping(m);
			list_del_init(&m->scan_node);
		}

		mode = DRM_MM_INSERT_EVICT;

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed, retry the allocation one more time.
		 */
	}

	return ret;
}

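/*
 * Insert a mapping at a fixed, caller-supplied address. If the range is
 * blocked only by idle mappings of buffers that are not yet destroyed,
 * reap them and retry; if any blocking mapping is still in use, return
 * -ENOSPC.
 */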
static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
				      struct drm_mm_node *node, size_t size, u64 va)
{
	struct etnaviv_vram_mapping *m, *n;
	struct drm_mm_node *scan_node;
	LIST_HEAD(scan_list);
	int ret;

	lockdep_assert_held(&context->lock);

	ret = drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					  va + size, DRM_MM_INSERT_LOWEST);
	if (ret != -ENOSPC)
		return ret;

	/*
	 * When we can't insert the node, due to an existing mapping blocking
	 * the address space, there are two possible reasons:
	 * 1. Userspace genuinely messed up and tried to reuse address space
	 * before the last job using this VMA has finished executing.
	 * 2. The existing buffer mappings are idle, but the buffers are not
	 * destroyed yet (likely due to being referenced by another context),
	 * in which case the mappings will not be cleaned up and we must reap
	 * them here to make space for the new mapping.
	 */

	drm_mm_for_each_node_in_range(scan_node, &context->mm, va, va + size) {
		m = container_of(scan_node, struct etnaviv_vram_mapping,
				 vram_node);

		if (m->use)
			return -ENOSPC;

		list_add(&m->scan_node, &scan_list);
	}

	list_for_each_entry_safe(m, n, &scan_list, scan_node) {
		etnaviv_iommu_reap_mapping(m);
		list_del_init(&m->scan_node);
	}

	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					   va + size, DRM_MM_INSERT_LOWEST);
}

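/*
 * Create a GPU mapping for a GEM object, either at a caller-requested
 * address or at one picked by the allocator. Contiguous buffers on MMUv1
 * can bypass the pagetables and are addressed through the linear window
 * instead.
 */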
int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
			  struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
			  struct etnaviv_vram_mapping *mapping, u64 va)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&context->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (context->global->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			mapping->context = etnaviv_iommu_context_get(context);
			list_add_tail(&mapping->mmu_node, &context->mappings);
			ret = 0;
			goto unlock;
		}
	}

	node = &mapping->vram_node;

	if (va)
		ret = etnaviv_iommu_insert_exact(context, node,
						 etnaviv_obj->size, va);
	else
		ret = etnaviv_iommu_find_iova(context, node,
					      etnaviv_obj->size);
	if (ret < 0)
		goto unlock;

	mapping->iova = node->start;
	ret = etnaviv_iommu_map(context, node->start, etnaviv_obj->size, sgt,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		goto unlock;
	}

	mapping->context = etnaviv_iommu_context_get(context);
	list_add_tail(&mapping->mmu_node, &context->mappings);
unlock:
	mutex_unlock(&context->lock);

	return ret;
}

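/*
 * Drop a GEM object's mapping from the context. Bails out early if the
 * mapping has already been reaped by another thread; otherwise the node is
 * unmapped and removed and the context reference is dropped.
 */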
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
			     struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&context->lock);

	/* Bail if the mapping has been reaped by another thread */
	if (!mapping->context) {
		mutex_unlock(&context->lock);
		return;
	}

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &context->mm)
		etnaviv_iommu_remove_mapping(context, mapping);

	list_del(&mapping->mmu_node);
	mutex_unlock(&context->lock);
	etnaviv_iommu_context_put(context);
}

static void etnaviv_iommu_context_free(struct kref *kref)
{
	struct etnaviv_iommu_context *context =
		container_of(kref, struct etnaviv_iommu_context, refcount);

	etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);
	mutex_destroy(&context->lock);
	context->global->ops->free(context);
}

void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
{
	kref_put(&context->refcount, etnaviv_iommu_context_free);
}

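/*
 * Allocate a new IOMMU context for the matching MMU version and map the
 * shared command buffer suballocator into it. On MMUv1 the command buffers
 * must stay within the 2 GiB linear window.
 */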
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
			   struct etnaviv_cmdbuf_suballoc *suballoc)
{
	struct etnaviv_iommu_context *ctx;
	int ret;

	if (global->version == ETNAVIV_IOMMU_V1)
		ctx = etnaviv_iommuv1_context_alloc(global);
	else
		ctx = etnaviv_iommuv2_context_alloc(global);

	if (!ctx)
		return NULL;

	ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
					  global->memory_base);
	if (ret)
		goto out_free;

	if (global->version == ETNAVIV_IOMMU_V1 &&
	    ctx->cmdbuf_mapping.iova > 0x80000000) {
		dev_err(global->dev,
			"command buffer outside valid memory window\n");
		goto out_unmap;
	}

	return ctx;

out_unmap:
	etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
out_free:
	global->ops->free(ctx);
	return NULL;
}

void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
			   struct etnaviv_iommu_context *context)
{
	context->global->ops->restore(gpu, context);
}

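/*
 * Map the command buffer suballocator region into the context, or just
 * take another reference if it is already mapped. MMUv1 only fabricates an
 * IOVA relative to the linear window; MMUv2 maps the region read-only
 * through the pagetables.
 */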
int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
				  struct etnaviv_vram_mapping *mapping,
				  u32 memory_base, dma_addr_t paddr,
				  size_t size)
{
	mutex_lock(&context->lock);

	if (mapping->use > 0) {
		mapping->use++;
		mutex_unlock(&context->lock);
		return 0;
	}

	/*
	 * For MMUv1 we don't add the suballoc region to the pagetables, as
	 * those GPUs can only work with cmdbufs accessed through the linear
	 * window. Instead we manufacture a mapping to make it look uniform
	 * to the upper layers.
	 */
	if (context->global->version == ETNAVIV_IOMMU_V1) {
		mapping->iova = paddr - memory_base;
	} else {
		struct drm_mm_node *node = &mapping->vram_node;
		int ret;

		ret = etnaviv_iommu_find_iova(context, node, size);
		if (ret < 0) {
			mutex_unlock(&context->lock);
			return ret;
		}

		mapping->iova = node->start;
		ret = etnaviv_context_map(context, node->start, paddr, size,
					  ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(node);
			mutex_unlock(&context->lock);
			return ret;
		}

		context->flush_seq++;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	mapping->use = 1;

	mutex_unlock(&context->lock);

	return 0;
}

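/*
 * Drop a reference on the suballocator mapping; the mapping is only torn
 * down once the last user is gone. On MMUv1 there is nothing to unmap.
 */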
void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
				   struct etnaviv_vram_mapping *mapping)
{
	struct drm_mm_node *node = &mapping->vram_node;

	mutex_lock(&context->lock);
	mapping->use--;

	if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
		mutex_unlock(&context->lock);
		return;
	}

	etnaviv_context_unmap(context, node->start, node->size);
	drm_mm_remove_node(node);
	mutex_unlock(&context->lock);
}

size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
{
	return context->global->ops->dump_size(context);
}

void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
{
	context->global->ops->dump(context, buf);
}

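/*
 * Set up the device-global MMU state shared by all GPU cores: detect the
 * MMU version, allocate the bad-page buffer used as the target for invalid
 * GPU accesses and, for MMUv2, the PTA buffer. Additional cores just take
 * a reference after checking that their MMU version matches.
 */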
int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global;
	struct device *dev = gpu->drm->dev;

	if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
		version = ETNAVIV_IOMMU_V2;

	if (priv->mmu_global) {
		if (priv->mmu_global->version != version) {
			dev_err(gpu->dev,
				"MMU version doesn't match global version\n");
			return -ENXIO;
		}

		priv->mmu_global->use++;
		return 0;
	}

	global = kzalloc(sizeof(*global), GFP_KERNEL);
	if (!global)
		return -ENOMEM;

	global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
					    GFP_KERNEL);
	if (!global->bad_page_cpu)
		goto free_global;

	memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));

	if (version == ETNAVIV_IOMMU_V2) {
		global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
						  &global->v2.pta_dma, GFP_KERNEL);
		if (!global->v2.pta_cpu)
			goto free_bad_page;
	}

	global->dev = dev;
	global->version = version;
	global->use = 1;
	mutex_init(&global->lock);

	if (version == ETNAVIV_IOMMU_V1)
		global->ops = &etnaviv_iommuv1_ops;
	else
		global->ops = &etnaviv_iommuv2_ops;

	priv->mmu_global = global;

	return 0;

free_bad_page:
	dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
free_global:
	kfree(global);

	return -ENOMEM;
}

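/*
 * Drop a core's reference on the global MMU state and free the shared DMA
 * buffers once the last reference is gone.
 */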
void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global = priv->mmu_global;

	if (!global)
		return;

	if (--global->use > 0)
		return;

	if (global->v2.pta_cpu)
		dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
			    global->v2.pta_cpu, global->v2.pta_dma);

	if (global->bad_page_cpu)
		dma_free_wc(global->dev, SZ_4K,
			    global->bad_page_cpu, global->bad_page_dma);

	mutex_destroy(&global->lock);
	kfree(global);

	priv->mmu_global = NULL;
}