/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"

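/*
 * Tear down the page table entries for [iova, iova + size) in SZ_4K steps,
 * stopping early if the backend's unmap callback makes no progress.
 */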
static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	size_t pgsize = SZ_4K;

	if (!IS_ALIGNED(iova | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
		       iova, size, pgsize);
		return;
	}

	while (unmapped < size) {
		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped += unmapped_page;
	}
}

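/*
 * Map a physically contiguous range one 4K page at a time.  Both iova and
 * paddr must be page aligned; on failure, any pages mapped so far are
 * unmapped again before the error is returned.
 */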
static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
			      unsigned long iova, phys_addr_t paddr,
			      size_t size, int prot)
{
	unsigned long orig_iova = iova;
	size_t pgsize = SZ_4K;
	size_t orig_size = size;
	int ret = 0;

	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
		       iova, &paddr, size, pgsize);
		return -EINVAL;
	}

	while (size) {
		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		etnaviv_domain_unmap(domain, orig_iova, orig_size - size);

	return ret;
}

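/*
 * Map each entry of a scatterlist at consecutive device addresses starting
 * at iova.  If any entry fails to map, the entries mapped so far are
 * unmapped again and the error is propagated.
 */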
static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
			     struct sg_table *sgt, unsigned len, int prot)
{
	struct etnaviv_iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

		ret = etnaviv_domain_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_domain_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}

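/*
 * Unmap a previously mapped scatterlist, walking its entries in the same
 * order in which they were mapped.
 */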
static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
				struct sg_table *sgt, unsigned len)
{
	struct etnaviv_iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_domain_unmap(domain, da, bytes);

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}
}

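/*
 * Unmap a GEM object's pages and return its address range to the drm_mm
 * range manager.  Called with mmu->lock held.
 */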
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}

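/*
 * Find a free IOVA range of the requested size.  First try to allocate
 * above last_iova, then retry from the start of the address space, and
 * finally evict unpinned mappings via the drm_mm scan API until the
 * allocation succeeds or nothing more can be reclaimed.
 */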
static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
	int ret;

	lockdep_assert_held(&mmu->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct drm_mm_scan scan;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
						  size, 0, 0,
						  mmu->last_iova, U64_MAX,
						  mode);
		if (ret != -ENOSPC)
			break;

		/*
		 * If we did not search from the start of the MMU region,
		 * try again in case there are free slots.
		 */
		if (mmu->last_iova) {
			mmu->last_iova = 0;
			mmu->need_flush = true;
			continue;
		}

		/* Try to retire some entries */
		drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);

		found = false;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &mmu->mappings, mmu_node) {
			/* If this vram node has not been used, skip it. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent mapping_get from finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(mmu, m);
			m->mmu = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		mode = DRM_MM_INSERT_EVICT;

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed, retry the allocation one more time.
		 */
	}

	return ret;
}

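/*
 * Set up a GPU mapping for a GEM object.  On MMUv1 a contiguous buffer
 * that lies within the first 2 GiB above memory_base is used at its
 * physical offset directly; otherwise an IOVA range is allocated and the
 * scatterlist is mapped through the page tables.
 */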
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&mmu->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (mmu->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &mmu->mappings);
			ret = 0;
			goto unlock;
		}
	}

	node = &mapping->vram_node;

	ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
	if (ret < 0)
		goto unlock;

	mmu->last_iova = node->start + etnaviv_obj->base.size;
	mapping->iova = node->start;
	ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		goto unlock;
	}

	list_add_tail(&mapping->mmu_node, &mmu->mappings);
	mmu->need_flush = true;
unlock:
	mutex_unlock(&mmu->lock);

	return ret;
}

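/*
 * Drop a GEM object's GPU mapping.  The pages are only unmapped and the
 * IOVA range released if the mapping actually owns a node in this MMU's
 * address space.
 */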
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&mmu->lock);

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &mmu->mm)
		etnaviv_iommu_remove_mapping(mmu, mapping);

	list_del(&mapping->mmu_node);
	mmu->need_flush = true;
	mutex_unlock(&mmu->lock);
}

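/* Tear down the address space manager and free the backing IOMMU domain. */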
void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
	drm_mm_takedown(&mmu->mm);
	mmu->domain->ops->free(mmu->domain);
	kfree(mmu);
}

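/*
 * Allocate and initialize the MMU context for a GPU, selecting the v1 or
 * v2 page table backend based on the core's feature bits.
 */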
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version;
	struct etnaviv_iommu *mmu;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
		mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V1;
	} else {
		mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V2;
	}

	if (!mmu->domain) {
		dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
		kfree(mmu);
		return ERR_PTR(-ENOMEM);
	}

	mmu->gpu = gpu;
	mmu->version = version;
	mutex_init(&mmu->lock);
	INIT_LIST_HEAD(&mmu->mappings);

	drm_mm_init(&mmu->mm, mmu->domain->base, mmu->domain->size);

	return mmu;
}

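/* Program the page table configuration into the GPU via the v1 or v2 path. */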
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
{
	if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
		etnaviv_iommuv1_restore(gpu);
	else
		etnaviv_iommuv2_restore(gpu);
}

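/*
 * Obtain a GPU address for a suballocated buffer such as a command buffer.
 * On MMUv1 this is simply the physical offset from memory_base; on MMUv2 an
 * IOVA range is allocated and mapped read-only for the GPU.
 */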
int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
				  struct drm_mm_node *vram_node, size_t size,
				  u32 *iova)
{
	struct etnaviv_iommu *mmu = gpu->mmu;

	if (mmu->version == ETNAVIV_IOMMU_V1) {
		*iova = paddr - gpu->memory_base;
		return 0;
	} else {
		int ret;

		mutex_lock(&mmu->lock);
		ret = etnaviv_iommu_find_iova(mmu, vram_node, size);
		if (ret < 0) {
			mutex_unlock(&mmu->lock);
			return ret;
		}
		ret = etnaviv_domain_map(mmu->domain, vram_node->start, paddr,
					 size, ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(vram_node);
			mutex_unlock(&mmu->lock);
			return ret;
		}
		mmu->last_iova = vram_node->start + size;
		gpu->mmu->need_flush = true;
		mutex_unlock(&mmu->lock);

		*iova = (u32)vram_node->start;
		return 0;
	}
}

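/*
 * Release a suballocation mapping again.  Only MMUv2 holds page table
 * entries and an address space node that need to be torn down.
 */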
void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
				   struct drm_mm_node *vram_node, size_t size,
				   u32 iova)
{
	struct etnaviv_iommu *mmu = gpu->mmu;

	if (mmu->version == ETNAVIV_IOMMU_V2) {
		mutex_lock(&mmu->lock);
		etnaviv_domain_unmap(mmu->domain, iova, size);
		drm_mm_remove_node(vram_node);
		mutex_unlock(&mmu->lock);
	}
}
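
/* Size of the buffer needed to hold a dump of the MMU state. */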
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
	return iommu->domain->ops->dump_size(iommu->domain);
}

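/* Write a dump of the MMU state into a caller-provided buffer. */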
void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
	iommu->domain->ops->dump(iommu->domain, buf);
}