/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>

#include <plat/iopgtable.h>

static struct kmem_cache *iovm_area_cachep;

/* return the offset of the first scatterlist entry in a sg table */
static unsigned int sgtable_offset(const struct sg_table *sgt)
{
	if (!sgt || !sgt->nents)
		return 0;

	return sgt->sgl->offset;
}

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%zx %x)\n",
			       __func__, i, bytes, sg->offset);
			return 0;
		}

		if (i && sg->offset) {
			pr_err("%s: sg[%d] offset not allowed in internal entries\n",
			       __func__, i);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))

static unsigned max_alignment(u32 addr)
{
	int i;
	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
	for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
		;
	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}
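
/*
 * Worked example (illustrative values, not from the driver): for
 * addr = 0x00141000 the loop skips SZ_16M, SZ_1M and SZ_64K because
 * low bits are set at each of those boundaries, and stops at SZ_4K,
 * so max_alignment() returns SZ_4K.  A 16MB-aligned address such as
 * 0x01000000 returns SZ_16M, and an address aligned to none of the
 * supported page sizes (e.g. 0x00000100) falls off the table and
 * returns 0.
 */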

/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
	unsigned nr_entries = 0, ent_sz;

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08zx\n", __func__, bytes);
		return 0;
	}

	while (bytes) {
		ent_sz = max_alignment(da | pa);
		ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
		nr_entries++;
		da += ent_sz;
		pa += ent_sz;
		bytes -= ent_sz;
	}

	return nr_entries;
}
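
/*
 * Worked example (illustrative values only): for da = pa = 0 and
 * bytes = SZ_1M + SZ_64K, the first pass picks a 1MB entry (the
 * addresses would allow up to 16MB, but iopgsz_max() caps the entry
 * at the largest iommu page size not exceeding the remaining bytes),
 * and the second pass picks a 64KB entry, so sgtable_nents()
 * returns 2.
 */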

/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
							u32 da, u32 pa)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	if (flags & IOVMF_LINEAR) {
		nr_entries = sgtable_nents(bytes, da, pa);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else {
		nr_entries = bytes / PAGE_SIZE;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}

/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg) - sg->offset;
		bytes = sg->length + sg->offset;

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
				(unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
							const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%zx) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}

/**
 * omap_find_iovm_area  -  find iovma which includes @da
 * @dev:	client device
 * @da:		iommu device virtual address
 *
 * Find the existing iovma that includes @da
 */
struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(omap_find_iovm_area);
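
/*
 * Usage sketch (hypothetical caller; assumes 'dev' is a client device
 * already attached to an omap iommu):
 *
 *	struct iovm_struct *area = omap_find_iovm_area(dev, da);
 *
 *	if (area)
 *		pr_info("da %08x lives in iovma %08x-%08x\n",
 *			da, area->da_start, area->da_end);
 */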

/*
 * This finds the hole (area) which fits the requested address and length
 * in the iovma list, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (~flags & IOVMF_DA_FIXED) {
		/* Don't map address 0 */
		start = obj->da_start ? obj->da_start : alignment;

		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	} else if (start < obj->da_start || start > obj->da_end ||
					obj->da_end - start < bytes) {
		return ERR_PTR(-EINVAL);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {
		if (prev_end > start)
			break;

		if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
			goto found;

		if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
			start = roundup(tmp->da_end + 1, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (obj->da_end - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%zx) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%zx) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}
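
/*
 * Illustration of the first-fit scan above (hypothetical numbers):
 * with existing iovmas at 0x1000-0x3000 and 0x5000-0x6000 and a
 * 0x2000-byte dynamic (non-IOVMF_DA_FIXED) request, 'start' is bumped
 * past the first area to 0x4000, the 0x4000-0x5000 gap is too small,
 * so 'start' is bumped past the second area and the new iovma is
 * placed at 0x7000.
 */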

static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%zx) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}

/**
 * omap_da_to_va - convert (d) to (v)
 * @dev:	client device
 * @da:		iommu device virtual address
 *
 * Returns mpu virtual addr which corresponds to a given device virtual addr
 */
void *omap_da_to_va(struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(omap_da_to_va);
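
/*
 * Usage sketch (hypothetical caller): translate a device virtual
 * address set up with omap_iommu_vmalloc() back into an mpu virtual
 * address ('buf' and 'len' are assumed caller-provided):
 *
 *	void *va = omap_da_to_va(dev, da);
 *
 *	if (va)
 *		memcpy(buf, va, len);
 */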

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * This is not actually necessary; it exists only for
	 * consistency and code readability.
	 */
	BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
			const struct sg_table *sgt, u32 flags)
{
	int err;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;

	if (!domain || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		size_t bytes;

		pa = sg_phys(sg) - sg->offset;
		bytes = sg->length + sg->offset;

		flags &= ~IOVMF_PGSZ_MASK;

		if (bytes_to_iopgsz(bytes) < 0) {
			err = -EINVAL;
			goto err_out;
		}

		pr_debug("%s: [%d] %08x %08x(%zx)\n", __func__,
			 i, da, pa, bytes);

		err = iommu_map(domain, da, pa, bytes, flags);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		/* ignore failures.. we're already handling one */
		iommu_unmap(domain, da, bytes);

		da += bytes;
	}
	return err;
}
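
/*
 * Example of the mapping loop above (illustrative): an sgt built via
 * sgtable_nents() for a 1MB + 64KB linear region has two entries, so
 * map_iovm_area() issues two iommu_map() calls (one 1MB section, then
 * one 64KB large page); if the second call fails, the err_out loop
 * unwinds the 1MB entry that was already mapped.
 */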

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
						struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;
	const struct sg_table *sgt = area->sgt;
	struct scatterlist *sg;
	int i;
	size_t unmapped;

	BUG_ON(!sgtable_ok(sgt));
	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		unmapped = iommu_unmap(domain, start, bytes);
		if (unmapped < bytes)
			break;

		dev_dbg(obj->dev, "%s: unmap %08x(%zx) %08x\n",
				__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
				      struct omap_iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(domain, obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}

static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
				u32 da, const struct sg_table *sgt, void *va,
				size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	err = map_iovm_area(domain, new, sgt, new->flags);
	if (err)
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%zx) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}

static inline u32
__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
				u32 da, const struct sg_table *sgt,
				void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
}

/**
 * omap_iommu_vmap  -  (d)-(p)-(v) address mapper
 * @domain:	iommu domain
 * @dev:	client device
 * @da:		iommu device virtual address
 * @sgt:	address of scatter gather table
 * @flags:	iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da,
		const struct sg_table *sgt, u32 flags)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	size_t bytes;
	void *va = NULL;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da)) {
		vunmap_sg(va);
		return da;
	}

	return da + sgtable_offset(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vmap);

/**
 * omap_iommu_vunmap  -  release virtual mapping obtained by 'omap_iommu_vmap()'
 * @domain:	iommu domain
 * @dev:	client device
 * @da:		iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'omap_iommu_vmap()'.
 */
struct sg_table *
omap_iommu_vunmap(struct iommu_domain *domain, struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct sg_table *sgt;
	/*
	 * 'sgt' is allocated before 'omap_iommu_vmap()' is called.
	 * Just return 'sgt' to the caller to free.
	 */
	da &= PAGE_MASK;
	sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
					IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
}
EXPORT_SYMBOL_GPL(omap_iommu_vunmap);
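
/*
 * Usage sketch (hypothetical caller; 'dev' is assumed attached to
 * 'domain' and 'sgt' is a pre-built, io-page-size aligned scatter
 * gather table describing the physical buffer):
 *
 *	u32 da = omap_iommu_vmap(domain, dev, 0, sgt, IOVMF_MMIO);
 *
 *	if (IS_ERR_VALUE(da))
 *		return da;
 *	...	(program the device with 'da')
 *	sgt = omap_iommu_vunmap(domain, dev, da);
 */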

/**
 * omap_iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @domain:	iommu domain
 * @dev:	client device
 * @da:		contiguous iommu virtual memory
 * @bytes:	allocation size
 * @flags:	iovma and page property
 *
 * Allocates @bytes linearly and creates a 1-n-1 mapping, returning
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32
omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev, u32 da,
						size_t bytes, u32 flags)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;

	sgt = sgtable_alloc(bytes, flags, da, 0);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
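
/*
 * Usage sketch (hypothetical caller; 'dev' is assumed attached to
 * 'domain'):
 *
 *	u32 da = omap_iommu_vmalloc(domain, dev, 0, SZ_1M, 0);
 *
 *	if (IS_ERR_VALUE(da))
 *		return da;
 *	...	(let the device DMA through the iommu at 'da')
 *	omap_iommu_vfree(domain, dev, da);
 */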

/**
 * omap_iommu_vfree  -  release memory allocated by 'omap_iommu_vmalloc()'
 * @domain:	iommu domain
 * @dev:	client device
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'omap_iommu_vmalloc()'.
 */
void omap_iommu_vfree(struct iommu_domain *domain, struct device *dev,
								const u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct sg_table *sgt;

	sgt = unmap_vm_area(domain, obj, da, vfree,
						IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vfree);

static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");