v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2015-2018 Etnaviv Project
  4 */
  5
  6#include <linux/dma-mapping.h>
  7#include <linux/scatterlist.h>
  8
  9#include "common.xml.h"
 10#include "etnaviv_cmdbuf.h"
 11#include "etnaviv_drv.h"
 12#include "etnaviv_gem.h"
 13#include "etnaviv_gpu.h"
 14#include "etnaviv_mmu.h"
 15
 16static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
 17				 unsigned long iova, size_t size)
 18{
 19	size_t unmapped_page, unmapped = 0;
 20	size_t pgsize = SZ_4K;
 21
 22	if (!IS_ALIGNED(iova | size, pgsize)) {
 23		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
 24		       iova, size, pgsize);
 25		return;
 26	}
 27
 28	while (unmapped < size) {
 29		unmapped_page = context->global->ops->unmap(context, iova,
 30							    pgsize);
 31		if (!unmapped_page)
 32			break;
 33
 34		iova += unmapped_page;
 35		unmapped += unmapped_page;
 36	}
 37}
 38
 39static int etnaviv_context_map(struct etnaviv_iommu_context *context,
 40			      unsigned long iova, phys_addr_t paddr,
 41			      size_t size, int prot)
 42{
 43	unsigned long orig_iova = iova;
 44	size_t pgsize = SZ_4K;
 45	size_t orig_size = size;
 46	int ret = 0;
 47
 48	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
 49		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
 50		       iova, &paddr, size, pgsize);
 51		return -EINVAL;
 52	}
 53
 54	while (size) {
 55		ret = context->global->ops->map(context, iova, paddr, pgsize,
 56						prot);
 57		if (ret)
 58			break;
 59
 60		iova += pgsize;
 61		paddr += pgsize;
 62		size -= pgsize;
 63	}
 64
 65	/* unroll mapping in case something went wrong */
 66	if (ret)
 67		etnaviv_context_unmap(context, orig_iova, orig_size - size);
 68
 69	return ret;
 70}
 71
 72static int etnaviv_iommu_map(struct etnaviv_iommu_context *context,
 73			     u32 iova, unsigned int va_len,
 74			     struct sg_table *sgt, int prot)
 75{
 76	struct scatterlist *sg;
 77	unsigned int da = iova;
 78	unsigned int i;
 79	int ret;
 80
 81	if (!context || !sgt)
 82		return -EINVAL;
 83
 84	for_each_sgtable_dma_sg(sgt, sg, i) {
 85		phys_addr_t pa = sg_dma_address(sg) - sg->offset;
 86		unsigned int da_len = sg_dma_len(sg) + sg->offset;
 87		unsigned int bytes = min_t(unsigned int, da_len, va_len);
 88
 89		VERB("map[%d]: %08x %pap(%x)", i, iova, &pa, bytes);
 90
 91		ret = etnaviv_context_map(context, da, pa, bytes, prot);
 92		if (ret)
 93			goto fail;
 94
 95		va_len -= bytes;
 96		da += bytes;
 97	}
 98
 99	context->flush_seq++;
100
101	return 0;
102
103fail:
104	etnaviv_context_unmap(context, iova, da - iova);
105	return ret;
106}
107
108static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
109				struct sg_table *sgt, unsigned len)
110{
111	etnaviv_context_unmap(context, iova, len);
112
113	context->flush_seq++;
114}
115
116static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
117	struct etnaviv_vram_mapping *mapping)
118{
119	struct etnaviv_gem_object *etnaviv_obj = mapping->object;
120
121	lockdep_assert_held(&context->lock);
122
123	etnaviv_iommu_unmap(context, mapping->vram_node.start,
124			    etnaviv_obj->sgt, etnaviv_obj->size);
125	drm_mm_remove_node(&mapping->vram_node);
126}
127
128void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping)
129{
130	struct etnaviv_iommu_context *context = mapping->context;
131
132	lockdep_assert_held(&context->lock);
133	WARN_ON(mapping->use);
134
135	etnaviv_iommu_remove_mapping(context, mapping);
136	etnaviv_iommu_context_put(mapping->context);
137	mapping->context = NULL;
138	list_del_init(&mapping->mmu_node);
139}
140
141static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
142				   struct drm_mm_node *node, size_t size)
143{
144	struct etnaviv_vram_mapping *free = NULL;
145	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
146	int ret;
147
148	lockdep_assert_held(&context->lock);
149
150	while (1) {
151		struct etnaviv_vram_mapping *m, *n;
152		struct drm_mm_scan scan;
153		struct list_head list;
154		bool found;
155
156		ret = drm_mm_insert_node_in_range(&context->mm, node,
157						  size, 0, 0, 0, U64_MAX, mode);
158		if (ret != -ENOSPC)
159			break;
160
161		/* Try to retire some entries */
162		drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);
163
164		found = 0;
165		INIT_LIST_HEAD(&list);
166		list_for_each_entry(free, &context->mappings, mmu_node) {
167			/* If this vram node has not been used, skip this. */
168			if (!free->vram_node.mm)
169				continue;
170
171			/*
172			 * If the iova is pinned, then it's in-use,
173			 * so we must keep its mapping.
174			 */
175			if (free->use)
176				continue;
177
178			list_add(&free->scan_node, &list);
179			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
180				found = true;
181				break;
182			}
183		}
184
185		if (!found) {
186			/* Nothing found, clean up and fail */
187			list_for_each_entry_safe(m, n, &list, scan_node)
188				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
189			break;
190		}
191
192		/*
193		 * drm_mm does not allow any other operations while
194		 * scanning, so we have to remove all blocks first.
195		 * If drm_mm_scan_remove_block() returns false, we
196		 * can leave the block pinned.
197		 */
198		list_for_each_entry_safe(m, n, &list, scan_node)
199			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
200				list_del_init(&m->scan_node);
201
202		/*
203		 * Unmap the blocks which need to be reaped from the MMU.
204	 * Clear the mmu pointer to prevent mapping_get from finding
205		 * this mapping.
206		 */
207		list_for_each_entry_safe(m, n, &list, scan_node) {
208			etnaviv_iommu_reap_mapping(m);
209			list_del_init(&m->scan_node);
210		}
211
212		mode = DRM_MM_INSERT_EVICT;
213
214		/*
215		 * We removed enough mappings so that the new allocation will
216		 * succeed, retry the allocation one more time.
217		 */
218	}
219
220	return ret;
221}
222
223static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
224		   struct drm_mm_node *node, size_t size, u64 va)
225{
226	struct etnaviv_vram_mapping *m, *n;
227	struct drm_mm_node *scan_node;
228	LIST_HEAD(scan_list);
229	int ret;
230
231	lockdep_assert_held(&context->lock);
232
233	ret = drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
234					  va + size, DRM_MM_INSERT_LOWEST);
235	if (ret != -ENOSPC)
236		return ret;
237
238	/*
239	 * When we can't insert the node due to an existing mapping blocking
240	 * the address space, there are two possible reasons:
241	 * 1. Userspace genuinely messed up and tried to reuse address space
242	 * before the last job using this VMA has finished executing.
243	 * 2. The existing buffer mappings are idle, but the buffers are not
244	 * destroyed yet (likely due to being referenced by another context) in
245	 * which case the mappings will not be cleaned up and we must reap them
246	 * here to make space for the new mapping.
247	 */
248
249	drm_mm_for_each_node_in_range(scan_node, &context->mm, va, va + size) {
250		m = container_of(scan_node, struct etnaviv_vram_mapping,
251				 vram_node);
252
253		if (m->use)
254			return -ENOSPC;
255
256		list_add(&m->scan_node, &scan_list);
257	}
258
259	list_for_each_entry_safe(m, n, &scan_list, scan_node) {
260		etnaviv_iommu_reap_mapping(m);
261		list_del_init(&m->scan_node);
262	}
263
264	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
265					   va + size, DRM_MM_INSERT_LOWEST);
266}
267
268int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
269	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
270	struct etnaviv_vram_mapping *mapping, u64 va)
271{
272	struct sg_table *sgt = etnaviv_obj->sgt;
273	struct drm_mm_node *node;
274	int ret;
275
276	lockdep_assert_held(&etnaviv_obj->lock);
277
278	mutex_lock(&context->lock);
279
280	/* v1 MMU can optimize single entry (contiguous) scatterlists */
281	if (context->global->version == ETNAVIV_IOMMU_V1 &&
282	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
283		u32 iova;
284
285		iova = sg_dma_address(sgt->sgl) - memory_base;
286		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
287			mapping->iova = iova;
288			mapping->context = etnaviv_iommu_context_get(context);
289			list_add_tail(&mapping->mmu_node, &context->mappings);
290			ret = 0;
291			goto unlock;
292		}
293	}
294
295	node = &mapping->vram_node;
296
297	if (va)
298		ret = etnaviv_iommu_insert_exact(context, node, etnaviv_obj->size, va);
299	else
300		ret = etnaviv_iommu_find_iova(context, node, etnaviv_obj->size);
301	if (ret < 0)
302		goto unlock;
303
304	mapping->iova = node->start;
305	ret = etnaviv_iommu_map(context, node->start, etnaviv_obj->size, sgt,
306				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
307
308	if (ret < 0) {
309		drm_mm_remove_node(node);
310		goto unlock;
311	}
312
313	mapping->context = etnaviv_iommu_context_get(context);
314	list_add_tail(&mapping->mmu_node, &context->mappings);
315unlock:
316	mutex_unlock(&context->lock);
317
318	return ret;
319}
320
321void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
322	struct etnaviv_vram_mapping *mapping)
323{
324	WARN_ON(mapping->use);
325
326	mutex_lock(&context->lock);
327
328	/* Bail if the mapping has been reaped by another thread */
329	if (!mapping->context) {
330		mutex_unlock(&context->lock);
331		return;
332	}
333
334	/* If the vram node is on the mm, unmap and remove the node */
335	if (mapping->vram_node.mm == &context->mm)
336		etnaviv_iommu_remove_mapping(context, mapping);
337
338	list_del(&mapping->mmu_node);
339	mutex_unlock(&context->lock);
340	etnaviv_iommu_context_put(context);
341}
342
343static void etnaviv_iommu_context_free(struct kref *kref)
344{
345	struct etnaviv_iommu_context *context =
346		container_of(kref, struct etnaviv_iommu_context, refcount);
347
348	etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);
349	mutex_destroy(&context->lock);
350	context->global->ops->free(context);
351}
352void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
353{
354	kref_put(&context->refcount, etnaviv_iommu_context_free);
355}
356
357struct etnaviv_iommu_context *
358etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
359			   struct etnaviv_cmdbuf_suballoc *suballoc)
360{
361	struct etnaviv_iommu_context *ctx;
362	int ret;
363
364	if (global->version == ETNAVIV_IOMMU_V1)
365		ctx = etnaviv_iommuv1_context_alloc(global);
366	else
367		ctx = etnaviv_iommuv2_context_alloc(global);
368
369	if (!ctx)
370		return NULL;
371
372	ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
373					  global->memory_base);
374	if (ret)
375		goto out_free;
376
377	if (global->version == ETNAVIV_IOMMU_V1 &&
378	    ctx->cmdbuf_mapping.iova > 0x80000000) {
379		dev_err(global->dev,
380		        "command buffer outside valid memory window\n");
381		goto out_unmap;
382	}
383
384	return ctx;
385
386out_unmap:
387	etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
388out_free:
389	global->ops->free(ctx);
390	return NULL;
391}
392
393void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
394			   struct etnaviv_iommu_context *context)
395{
396	context->global->ops->restore(gpu, context);
397}
398
399int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
400				  struct etnaviv_vram_mapping *mapping,
401				  u32 memory_base, dma_addr_t paddr,
402				  size_t size)
403{
404	mutex_lock(&context->lock);
405
406	if (mapping->use > 0) {
407		mapping->use++;
408		mutex_unlock(&context->lock);
409		return 0;
410	}
411
412	/*
413	 * For MMUv1 we don't add the suballoc region to the pagetables, as
414	 * those GPUs can only work with cmdbufs accessed through the linear
415	 * window. Instead we manufacture a mapping to make it look uniform
416	 * to the upper layers.
417	 */
418	if (context->global->version == ETNAVIV_IOMMU_V1) {
419		mapping->iova = paddr - memory_base;
420	} else {
421		struct drm_mm_node *node = &mapping->vram_node;
422		int ret;
423
424		ret = etnaviv_iommu_find_iova(context, node, size);
425		if (ret < 0) {
426			mutex_unlock(&context->lock);
427			return ret;
428		}
429
430		mapping->iova = node->start;
431		ret = etnaviv_context_map(context, node->start, paddr, size,
432					  ETNAVIV_PROT_READ);
433		if (ret < 0) {
434			drm_mm_remove_node(node);
435			mutex_unlock(&context->lock);
436			return ret;
437		}
438
439		context->flush_seq++;
440	}
441
442	list_add_tail(&mapping->mmu_node, &context->mappings);
443	mapping->use = 1;
444
445	mutex_unlock(&context->lock);
446
447	return 0;
448}
449
450void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
451		  struct etnaviv_vram_mapping *mapping)
452{
453	struct drm_mm_node *node = &mapping->vram_node;
454
455	mutex_lock(&context->lock);
456	mapping->use--;
457
458	if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
459		mutex_unlock(&context->lock);
460		return;
461	}
462
463	etnaviv_context_unmap(context, node->start, node->size);
464	drm_mm_remove_node(node);
465	mutex_unlock(&context->lock);
466}
467
468size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
469{
470	return context->global->ops->dump_size(context);
471}
472
473void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
474{
475	context->global->ops->dump(context, buf);
476}
477
478int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
479{
480	enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
481	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
482	struct etnaviv_iommu_global *global;
483	struct device *dev = gpu->drm->dev;
484
485	if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
486		version = ETNAVIV_IOMMU_V2;
487
488	if (priv->mmu_global) {
489		if (priv->mmu_global->version != version) {
490			dev_err(gpu->dev,
491				"MMU version doesn't match global version\n");
492			return -ENXIO;
493		}
494
495		priv->mmu_global->use++;
496		return 0;
497	}
498
499	global = kzalloc(sizeof(*global), GFP_KERNEL);
500	if (!global)
501		return -ENOMEM;
502
503	global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
504					    GFP_KERNEL);
505	if (!global->bad_page_cpu)
506		goto free_global;
507
508	memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));
509
510	if (version == ETNAVIV_IOMMU_V2) {
511		global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
512					       &global->v2.pta_dma, GFP_KERNEL);
513		if (!global->v2.pta_cpu)
514			goto free_bad_page;
515	}
516
517	global->dev = dev;
518	global->version = version;
519	global->use = 1;
520	mutex_init(&global->lock);
521
522	if (version == ETNAVIV_IOMMU_V1)
523		global->ops = &etnaviv_iommuv1_ops;
524	else
525		global->ops = &etnaviv_iommuv2_ops;
526
527	priv->mmu_global = global;
528
529	return 0;
530
531free_bad_page:
532	dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
533free_global:
534	kfree(global);
535
536	return -ENOMEM;
537}
538
539void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
540{
541	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
542	struct etnaviv_iommu_global *global = priv->mmu_global;
543
544	if (!global)
545		return;
546
547	if (--global->use > 0)
548		return;
549
550	if (global->v2.pta_cpu)
551		dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
552			    global->v2.pta_cpu, global->v2.pta_dma);
553
554	if (global->bad_page_cpu)
555		dma_free_wc(global->dev, SZ_4K,
556			    global->bad_page_cpu, global->bad_page_dma);
557
558	mutex_destroy(&global->lock);
559	kfree(global);
560
561	priv->mmu_global = NULL;
562}
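
The v6.13.7 listing above maps and unmaps GPU address space in fixed 4 KiB steps and unrolls everything already mapped when one step fails (etnaviv_context_map()/etnaviv_context_unmap()). What follows is a minimal, self-contained userspace sketch of that map-with-rollback pattern, not the driver code itself: the pte[] array and the pt_map_page()/pt_unmap_page() helpers are hypothetical stand-ins for context->global->ops->map()/->unmap().

/* Toy illustration only: a flat "page table" and single-page primitives. */
#include <stdint.h>
#include <stdio.h>

#define PGSIZE	4096u
#define NR_PTES	1024u			/* a 4 MiB toy address space */

static uint64_t pte[NR_PTES];		/* 0 means "not mapped" */

static int pt_map_page(uint32_t iova, uint64_t paddr)
{
	uint32_t idx = iova / PGSIZE;

	if (idx >= NR_PTES || pte[idx])
		return -1;		/* out of range or already mapped */
	pte[idx] = paddr | 1;		/* fake "present" bit */
	return 0;
}

static void pt_unmap_page(uint32_t iova)
{
	uint32_t idx = iova / PGSIZE;

	if (idx < NR_PTES)
		pte[idx] = 0;
}

/* Same shape as etnaviv_context_map(): page-sized loop plus rollback on error. */
static int toy_context_map(uint32_t iova, uint64_t paddr, uint32_t size)
{
	uint32_t mapped;

	if ((iova | paddr | size) & (PGSIZE - 1))
		return -1;		/* reject unaligned requests */

	for (mapped = 0; mapped < size; mapped += PGSIZE) {
		if (pt_map_page(iova + mapped, paddr + mapped)) {
			/* unroll the partial mapping, as the driver does */
			while (mapped) {
				mapped -= PGSIZE;
				pt_unmap_page(iova + mapped);
			}
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	/* A succeeds; B overlaps A on its third page, fails and is fully unwound. */
	printf("map A: %d\n", toy_context_map(0x10000, 0x80000000ull, 4 * PGSIZE));
	printf("map B: %d\n", toy_context_map(0x0e000, 0x90000000ull, 4 * PGSIZE));
	printf("B rolled back: %d, A intact: %d\n",
	       pte[0x0e000 / PGSIZE] == 0, pte[0x10000 / PGSIZE] != 0);
	return 0;
}

The same file as of v6.2 follows for comparison; the main differences are in etnaviv_iommu_map()/etnaviv_iommu_unmap(), which still walk the scatterlist per entry.
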
v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2015-2018 Etnaviv Project
  4 */
  5
  6#include <linux/dma-mapping.h>
  7#include <linux/scatterlist.h>
  8
  9#include "common.xml.h"
 10#include "etnaviv_cmdbuf.h"
 11#include "etnaviv_drv.h"
 12#include "etnaviv_gem.h"
 13#include "etnaviv_gpu.h"
 14#include "etnaviv_mmu.h"
 15
 16static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
 17				 unsigned long iova, size_t size)
 18{
 19	size_t unmapped_page, unmapped = 0;
 20	size_t pgsize = SZ_4K;
 21
 22	if (!IS_ALIGNED(iova | size, pgsize)) {
 23		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
 24		       iova, size, pgsize);
 25		return;
 26	}
 27
 28	while (unmapped < size) {
 29		unmapped_page = context->global->ops->unmap(context, iova,
 30							    pgsize);
 31		if (!unmapped_page)
 32			break;
 33
 34		iova += unmapped_page;
 35		unmapped += unmapped_page;
 36	}
 37}
 38
 39static int etnaviv_context_map(struct etnaviv_iommu_context *context,
 40			      unsigned long iova, phys_addr_t paddr,
 41			      size_t size, int prot)
 42{
 43	unsigned long orig_iova = iova;
 44	size_t pgsize = SZ_4K;
 45	size_t orig_size = size;
 46	int ret = 0;
 47
 48	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
 49		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
 50		       iova, &paddr, size, pgsize);
 51		return -EINVAL;
 52	}
 53
 54	while (size) {
 55		ret = context->global->ops->map(context, iova, paddr, pgsize,
 56						prot);
 57		if (ret)
 58			break;
 59
 60		iova += pgsize;
 61		paddr += pgsize;
 62		size -= pgsize;
 63	}
 64
 65	/* unroll mapping in case something went wrong */
 66	if (ret)
 67		etnaviv_context_unmap(context, orig_iova, orig_size - size);
 68
 69	return ret;
 70}
 71
 72static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
 73			     struct sg_table *sgt, unsigned len, int prot)
 74{	struct scatterlist *sg;
 75	unsigned int da = iova;
 76	unsigned int i;
 77	int ret;
 78
 79	if (!context || !sgt)
 80		return -EINVAL;
 81
 82	for_each_sgtable_dma_sg(sgt, sg, i) {
 83		phys_addr_t pa = sg_dma_address(sg) - sg->offset;
 84		size_t bytes = sg_dma_len(sg) + sg->offset;
 85
 86		VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes);
 87
 88		ret = etnaviv_context_map(context, da, pa, bytes, prot);
 89		if (ret)
 90			goto fail;
 91
 92		da += bytes;
 93	}
 94
 95	context->flush_seq++;
 96
 97	return 0;
 98
 99fail:
100	etnaviv_context_unmap(context, iova, da - iova);
101	return ret;
102}
103
104static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
105				struct sg_table *sgt, unsigned len)
106{
107	struct scatterlist *sg;
108	unsigned int da = iova;
109	int i;
110
111	for_each_sgtable_dma_sg(sgt, sg, i) {
112		size_t bytes = sg_dma_len(sg) + sg->offset;
113
114		etnaviv_context_unmap(context, da, bytes);
115
116		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
117
118		BUG_ON(!PAGE_ALIGNED(bytes));
119
120		da += bytes;
121	}
122
123	context->flush_seq++;
124}
125
126static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
127	struct etnaviv_vram_mapping *mapping)
128{
129	struct etnaviv_gem_object *etnaviv_obj = mapping->object;
130
131	lockdep_assert_held(&context->lock);
132
133	etnaviv_iommu_unmap(context, mapping->vram_node.start,
134			    etnaviv_obj->sgt, etnaviv_obj->base.size);
135	drm_mm_remove_node(&mapping->vram_node);
136}
137
138void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping)
139{
140	struct etnaviv_iommu_context *context = mapping->context;
141
142	lockdep_assert_held(&context->lock);
143	WARN_ON(mapping->use);
144
145	etnaviv_iommu_remove_mapping(context, mapping);
146	etnaviv_iommu_context_put(mapping->context);
147	mapping->context = NULL;
148	list_del_init(&mapping->mmu_node);
149}
150
151static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
152				   struct drm_mm_node *node, size_t size)
153{
154	struct etnaviv_vram_mapping *free = NULL;
155	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
156	int ret;
157
158	lockdep_assert_held(&context->lock);
159
160	while (1) {
161		struct etnaviv_vram_mapping *m, *n;
162		struct drm_mm_scan scan;
163		struct list_head list;
164		bool found;
165
166		ret = drm_mm_insert_node_in_range(&context->mm, node,
167						  size, 0, 0, 0, U64_MAX, mode);
168		if (ret != -ENOSPC)
169			break;
170
171		/* Try to retire some entries */
172		drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);
173
174		found = 0;
175		INIT_LIST_HEAD(&list);
176		list_for_each_entry(free, &context->mappings, mmu_node) {
177			/* If this vram node has not been used, skip this. */
178			if (!free->vram_node.mm)
179				continue;
180
181			/*
182			 * If the iova is pinned, then it's in-use,
183			 * so we must keep its mapping.
184			 */
185			if (free->use)
186				continue;
187
188			list_add(&free->scan_node, &list);
189			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
190				found = true;
191				break;
192			}
193		}
194
195		if (!found) {
196			/* Nothing found, clean up and fail */
197			list_for_each_entry_safe(m, n, &list, scan_node)
198				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
199			break;
200		}
201
202		/*
203		 * drm_mm does not allow any other operations while
204		 * scanning, so we have to remove all blocks first.
205		 * If drm_mm_scan_remove_block() returns false, we
206		 * can leave the block pinned.
207		 */
208		list_for_each_entry_safe(m, n, &list, scan_node)
209			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
210				list_del_init(&m->scan_node);
211
212		/*
213		 * Unmap the blocks which need to be reaped from the MMU.
214	 * Clear the mmu pointer to prevent mapping_get from finding
215		 * this mapping.
216		 */
217		list_for_each_entry_safe(m, n, &list, scan_node) {
218			etnaviv_iommu_reap_mapping(m);
219			list_del_init(&m->scan_node);
220		}
221
222		mode = DRM_MM_INSERT_EVICT;
223
224		/*
225		 * We removed enough mappings so that the new allocation will
226		 * succeed, retry the allocation one more time.
227		 */
228	}
229
230	return ret;
231}
232
233static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
234		   struct drm_mm_node *node, size_t size, u64 va)
235{
236	struct etnaviv_vram_mapping *m, *n;
237	struct drm_mm_node *scan_node;
238	LIST_HEAD(scan_list);
239	int ret;
240
241	lockdep_assert_held(&context->lock);
242
243	ret = drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
244					  va + size, DRM_MM_INSERT_LOWEST);
245	if (ret != -ENOSPC)
246		return ret;
247
248	/*
249	 * When we can't insert the node due to an existing mapping blocking
250	 * the address space, there are two possible reasons:
251	 * 1. Userspace genuinely messed up and tried to reuse address space
252	 * before the last job using this VMA has finished executing.
253	 * 2. The existing buffer mappings are idle, but the buffers are not
254	 * destroyed yet (likely due to being referenced by another context) in
255	 * which case the mappings will not be cleaned up and we must reap them
256	 * here to make space for the new mapping.
257	 */
258
259	drm_mm_for_each_node_in_range(scan_node, &context->mm, va, va + size) {
260		m = container_of(scan_node, struct etnaviv_vram_mapping,
261				 vram_node);
262
263		if (m->use)
264			return -ENOSPC;
265
266		list_add(&m->scan_node, &scan_list);
267	}
268
269	list_for_each_entry_safe(m, n, &scan_list, scan_node) {
270		etnaviv_iommu_reap_mapping(m);
271		list_del_init(&m->scan_node);
272	}
273
274	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
275					   va + size, DRM_MM_INSERT_LOWEST);
276}
277
278int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
279	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
280	struct etnaviv_vram_mapping *mapping, u64 va)
281{
282	struct sg_table *sgt = etnaviv_obj->sgt;
283	struct drm_mm_node *node;
284	int ret;
285
286	lockdep_assert_held(&etnaviv_obj->lock);
287
288	mutex_lock(&context->lock);
289
290	/* v1 MMU can optimize single entry (contiguous) scatterlists */
291	if (context->global->version == ETNAVIV_IOMMU_V1 &&
292	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
293		u32 iova;
294
295		iova = sg_dma_address(sgt->sgl) - memory_base;
296		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
297			mapping->iova = iova;
298			mapping->context = etnaviv_iommu_context_get(context);
299			list_add_tail(&mapping->mmu_node, &context->mappings);
300			ret = 0;
301			goto unlock;
302		}
303	}
304
305	node = &mapping->vram_node;
306
307	if (va)
308		ret = etnaviv_iommu_insert_exact(context, node,
309						 etnaviv_obj->base.size, va);
310	else
311		ret = etnaviv_iommu_find_iova(context, node,
312					      etnaviv_obj->base.size);
313	if (ret < 0)
314		goto unlock;
315
316	mapping->iova = node->start;
317	ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
318				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
319
320	if (ret < 0) {
321		drm_mm_remove_node(node);
322		goto unlock;
323	}
324
325	mapping->context = etnaviv_iommu_context_get(context);
326	list_add_tail(&mapping->mmu_node, &context->mappings);
327unlock:
328	mutex_unlock(&context->lock);
329
330	return ret;
331}
332
333void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
334	struct etnaviv_vram_mapping *mapping)
335{
336	WARN_ON(mapping->use);
337
338	mutex_lock(&context->lock);
339
340	/* Bail if the mapping has been reaped by another thread */
341	if (!mapping->context) {
342		mutex_unlock(&context->lock);
343		return;
344	}
345
346	/* If the vram node is on the mm, unmap and remove the node */
347	if (mapping->vram_node.mm == &context->mm)
348		etnaviv_iommu_remove_mapping(context, mapping);
349
350	list_del(&mapping->mmu_node);
351	mutex_unlock(&context->lock);
352	etnaviv_iommu_context_put(context);
353}
354
355static void etnaviv_iommu_context_free(struct kref *kref)
356{
357	struct etnaviv_iommu_context *context =
358		container_of(kref, struct etnaviv_iommu_context, refcount);
359
360	etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);
361
362	context->global->ops->free(context);
363}
364void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
365{
366	kref_put(&context->refcount, etnaviv_iommu_context_free);
367}
368
369struct etnaviv_iommu_context *
370etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
371			   struct etnaviv_cmdbuf_suballoc *suballoc)
372{
373	struct etnaviv_iommu_context *ctx;
374	int ret;
375
376	if (global->version == ETNAVIV_IOMMU_V1)
377		ctx = etnaviv_iommuv1_context_alloc(global);
378	else
379		ctx = etnaviv_iommuv2_context_alloc(global);
380
381	if (!ctx)
382		return NULL;
383
384	ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
385					  global->memory_base);
386	if (ret)
387		goto out_free;
388
389	if (global->version == ETNAVIV_IOMMU_V1 &&
390	    ctx->cmdbuf_mapping.iova > 0x80000000) {
391		dev_err(global->dev,
392		        "command buffer outside valid memory window\n");
393		goto out_unmap;
394	}
395
396	return ctx;
397
398out_unmap:
399	etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
400out_free:
401	global->ops->free(ctx);
402	return NULL;
403}
404
405void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
406			   struct etnaviv_iommu_context *context)
407{
408	context->global->ops->restore(gpu, context);
409}
410
411int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
412				  struct etnaviv_vram_mapping *mapping,
413				  u32 memory_base, dma_addr_t paddr,
414				  size_t size)
415{
416	mutex_lock(&context->lock);
417
418	if (mapping->use > 0) {
419		mapping->use++;
420		mutex_unlock(&context->lock);
421		return 0;
422	}
423
424	/*
425	 * For MMUv1 we don't add the suballoc region to the pagetables, as
426	 * those GPUs can only work with cmdbufs accessed through the linear
427	 * window. Instead we manufacture a mapping to make it look uniform
428	 * to the upper layers.
429	 */
430	if (context->global->version == ETNAVIV_IOMMU_V1) {
431		mapping->iova = paddr - memory_base;
432	} else {
433		struct drm_mm_node *node = &mapping->vram_node;
434		int ret;
435
436		ret = etnaviv_iommu_find_iova(context, node, size);
437		if (ret < 0) {
438			mutex_unlock(&context->lock);
439			return ret;
440		}
441
442		mapping->iova = node->start;
443		ret = etnaviv_context_map(context, node->start, paddr, size,
444					  ETNAVIV_PROT_READ);
445		if (ret < 0) {
446			drm_mm_remove_node(node);
447			mutex_unlock(&context->lock);
448			return ret;
449		}
450
451		context->flush_seq++;
452	}
453
454	list_add_tail(&mapping->mmu_node, &context->mappings);
455	mapping->use = 1;
456
457	mutex_unlock(&context->lock);
458
459	return 0;
460}
461
462void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
463		  struct etnaviv_vram_mapping *mapping)
464{
465	struct drm_mm_node *node = &mapping->vram_node;
466
467	mutex_lock(&context->lock);
468	mapping->use--;
469
470	if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
471		mutex_unlock(&context->lock);
472		return;
473	}
474
475	etnaviv_context_unmap(context, node->start, node->size);
476	drm_mm_remove_node(node);
477	mutex_unlock(&context->lock);
478}
479
480size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
481{
482	return context->global->ops->dump_size(context);
483}
484
485void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
486{
487	context->global->ops->dump(context, buf);
488}
489
490int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
491{
492	enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
493	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
494	struct etnaviv_iommu_global *global;
495	struct device *dev = gpu->drm->dev;
496
497	if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
498		version = ETNAVIV_IOMMU_V2;
499
500	if (priv->mmu_global) {
501		if (priv->mmu_global->version != version) {
502			dev_err(gpu->dev,
503				"MMU version doesn't match global version\n");
504			return -ENXIO;
505		}
506
507		priv->mmu_global->use++;
508		return 0;
509	}
510
511	global = kzalloc(sizeof(*global), GFP_KERNEL);
512	if (!global)
513		return -ENOMEM;
514
515	global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
516					    GFP_KERNEL);
517	if (!global->bad_page_cpu)
518		goto free_global;
519
520	memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));
521
522	if (version == ETNAVIV_IOMMU_V2) {
523		global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
524					       &global->v2.pta_dma, GFP_KERNEL);
525		if (!global->v2.pta_cpu)
526			goto free_bad_page;
527	}
528
529	global->dev = dev;
530	global->version = version;
531	global->use = 1;
532	mutex_init(&global->lock);
533
534	if (version == ETNAVIV_IOMMU_V1)
535		global->ops = &etnaviv_iommuv1_ops;
536	else
537		global->ops = &etnaviv_iommuv2_ops;
538
539	priv->mmu_global = global;
540
541	return 0;
542
543free_bad_page:
544	dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
545free_global:
546	kfree(global);
547
548	return -ENOMEM;
549}
550
551void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
552{
553	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
554	struct etnaviv_iommu_global *global = priv->mmu_global;
555
556	if (--global->use > 0)
557		return;
558
559	if (global->v2.pta_cpu)
560		dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
561			    global->v2.pta_cpu, global->v2.pta_dma);
562
563	if (global->bad_page_cpu)
564		dma_free_wc(global->dev, SZ_4K,
565			    global->bad_page_cpu, global->bad_page_dma);
566
567	mutex_destroy(&global->lock);
568	kfree(global);
569
570	priv->mmu_global = NULL;
571}
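
In both versions the command-buffer suballocation mapping is managed with a simple use count under the context mutex: the first etnaviv_iommu_get_suballoc_va() call establishes the mapping, later calls only bump the count, and the final etnaviv_iommu_put_suballoc_va() tears it down. Below is a stripped-down pthread sketch of that get/put pattern; the toy_mapping structure and the map_range()/unmap_range() helpers are invented for illustration and are not part of the driver.

#include <pthread.h>
#include <stdio.h>

struct toy_mapping {
	pthread_mutex_t lock;
	int use;			/* number of current users */
	unsigned int iova;		/* fake address handed out on first use */
};

static unsigned int map_range(void)
{
	return 0x1000;			/* pretend allocator */
}

static void unmap_range(unsigned int iova)
{
	(void)iova;			/* nothing to tear down in the toy */
}

static void toy_get(struct toy_mapping *m)
{
	pthread_mutex_lock(&m->lock);
	if (m->use > 0) {
		m->use++;		/* already mapped: just take a reference */
		pthread_mutex_unlock(&m->lock);
		return;
	}
	m->iova = map_range();		/* first user sets up the real mapping */
	m->use = 1;
	pthread_mutex_unlock(&m->lock);
}

static void toy_put(struct toy_mapping *m)
{
	pthread_mutex_lock(&m->lock);
	if (--m->use == 0) {		/* last user tears the mapping down */
		unmap_range(m->iova);
		m->iova = 0;
	}
	pthread_mutex_unlock(&m->lock);
}

int main(void)
{
	struct toy_mapping m = { .lock = PTHREAD_MUTEX_INITIALIZER };

	toy_get(&m);			/* creates the mapping */
	toy_get(&m);			/* second user only bumps the count */
	toy_put(&m);			/* mapping stays alive */
	toy_put(&m);			/* last put unmaps */
	printf("use=%d iova=%#x\n", m.use, m.iova);
	return 0;
}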