v4.6
  1/*
  2 * Copyright © 2006-2009, Intel Corporation.
  3 *
  4 * This program is free software; you can redistribute it and/or modify it
  5 * under the terms and conditions of the GNU General Public License,
  6 * version 2, as published by the Free Software Foundation.
  7 *
  8 * This program is distributed in the hope it will be useful, but WITHOUT
  9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 10 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 11 * more details.
 12 *
 13 * You should have received a copy of the GNU General Public License along with
 14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 15 * Place - Suite 330, Boston, MA 02111-1307 USA.
 16 *
 17 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 18 */
 19
 20#include <linux/iova.h>
 21#include <linux/module.h>
 22#include <linux/slab.h>
 23
 24void
 25init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 26	unsigned long start_pfn, unsigned long pfn_32bit)
 27{
 28	/*
 29	 * IOVA granularity will normally be equal to the smallest
 30	 * supported IOMMU page size; both *must* be capable of
 31	 * representing individual CPU pages exactly.
 32	 */
 33	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));
 34
 35	spin_lock_init(&iovad->iova_rbtree_lock);
 36	iovad->rbroot = RB_ROOT;
 37	iovad->cached32_node = NULL;
 38	iovad->granule = granule;
 39	iovad->start_pfn = start_pfn;
 40	iovad->dma_32bit_pfn = pfn_32bit;
 41}
 42EXPORT_SYMBOL_GPL(init_iova_domain);
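/*
 * Illustrative sketch (not part of iova.c): how a driver might set up
 * an iova_domain with this v4.6 interface.  my_domain_init is a
 * hypothetical name and <linux/dma-mapping.h> is assumed to be
 * included for DMA_BIT_MASK().
 */
static int my_domain_init(struct iova_domain *iovad)
{
	int ret;

	/* The shared iova kmem_cache must exist before any allocation. */
	ret = iova_cache_get();
	if (ret)
		return ret;

	/*
	 * Granule of one CPU page, allocations starting at pfn 1 so that
	 * IOVA 0 is never handed out, and the 32-bit boundary expressed
	 * as a pfn for the cached32_node optimisation.
	 */
	init_iova_domain(iovad, PAGE_SIZE, 1, DMA_BIT_MASK(32) >> PAGE_SHIFT);
	return 0;
}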
 43
 44static struct rb_node *
 45__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
 46{
 47	if ((*limit_pfn != iovad->dma_32bit_pfn) ||
 48		(iovad->cached32_node == NULL))
 49		return rb_last(&iovad->rbroot);
 50	else {
 51		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
 52		struct iova *curr_iova =
 53			container_of(iovad->cached32_node, struct iova, node);
 54		*limit_pfn = curr_iova->pfn_lo - 1;
 55		return prev_node;
 56	}
 57}
 58
 59static void
 60__cached_rbnode_insert_update(struct iova_domain *iovad,
 61	unsigned long limit_pfn, struct iova *new)
 62{
 63	if (limit_pfn != iovad->dma_32bit_pfn)
 64		return;
 65	iovad->cached32_node = &new->node;
 66}
 67
 68static void
 69__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 70{
 71	struct iova *cached_iova;
 72	struct rb_node *curr;
 73
 74	if (!iovad->cached32_node)
 75		return;
 76	curr = iovad->cached32_node;
 77	cached_iova = container_of(curr, struct iova, node);
 78
 79	if (free->pfn_lo >= cached_iova->pfn_lo) {
 80		struct rb_node *node = rb_next(&free->node);
 81		struct iova *iova = container_of(node, struct iova, node);
 82
 83		/* only cache if it's below 32bit pfn */
 84		if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
 85			iovad->cached32_node = node;
 86		else
 87			iovad->cached32_node = NULL;
 88	}
 89}
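/*
 * Note on cached32_node: the allocator below always walks the rbtree
 * top-down from limit_pfn, so requests that are capped at the 32-bit
 * boundary would repeatedly re-walk the same upper part of the tree.
 * cached32_node remembers the most recently inserted 32-bit-limited
 * iova so the next search can start just below it; the delete hook
 * above re-points the cache at the following node (or clears it) when
 * an iova at or above the cached one is freed.
 */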
 90
 91/*
 92 * Computes the padding size required, to make the start address
 93 * naturally aligned on the power-of-two order of its size
 94 */
 95static unsigned int
 96iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
 97{
 98	return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1);
 99}
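/*
 * Worked example for the formula above: with size = 5 pfns and
 * limit_pfn = 0xfffff, __roundup_pow_of_two(5) = 8, so
 *   pad_size = (0xfffff + 1 - 5) & 7 = 0xffffb & 7 = 3.
 * The caller then places the allocation at
 *   pfn_lo = limit_pfn - (size + pad_size) + 1 = 0xffff8,
 * which is aligned on the power-of-two order (8) of the size.
 */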
100
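/*
 * Top-down allocator: starting from the node returned by
 * __get_cached_rbnode(), walk left until the gap between two
 * neighbouring iovas (or between start_pfn and the leftmost iova) can
 * hold size + pad_size below limit_pfn.  The rbtree insertion then
 * descends from 'prev', the last node visited, instead of from the
 * root, so the tree is not searched a second time.
 */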
101static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
102		unsigned long size, unsigned long limit_pfn,
103			struct iova *new, bool size_aligned)
104{
105	struct rb_node *prev, *curr = NULL;
106	unsigned long flags;
107	unsigned long saved_pfn;
108	unsigned int pad_size = 0;
109
110	/* Walk the tree backwards */
111	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
112	saved_pfn = limit_pfn;
113	curr = __get_cached_rbnode(iovad, &limit_pfn);
114	prev = curr;
115	while (curr) {
116		struct iova *curr_iova = container_of(curr, struct iova, node);
117
118		if (limit_pfn < curr_iova->pfn_lo)
119			goto move_left;
120		else if (limit_pfn < curr_iova->pfn_hi)
121			goto adjust_limit_pfn;
122		else {
123			if (size_aligned)
124				pad_size = iova_get_pad_size(size, limit_pfn);
125			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
126				break;	/* found a free slot */
127		}
128adjust_limit_pfn:
129		limit_pfn = curr_iova->pfn_lo - 1;
130move_left:
131		prev = curr;
132		curr = rb_prev(curr);
133	}
134
135	if (!curr) {
136		if (size_aligned)
137			pad_size = iova_get_pad_size(size, limit_pfn);
138		if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
139			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
140			return -ENOMEM;
141		}
142	}
143
144	/* pfn_lo will point to size aligned address if size_aligned is set */
145	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
146	new->pfn_hi = new->pfn_lo + size - 1;
147
148	/* Insert the new_iova into domain rbtree by holding writer lock */
149	/* Add new node and rebalance tree. */
150	{
151		struct rb_node **entry, *parent = NULL;
152
153		/* If we have 'prev', it's a valid place to start the
154		   insertion. Otherwise, start from the root. */
155		if (prev)
156			entry = &prev;
157		else
158			entry = &iovad->rbroot.rb_node;
159
160		/* Figure out where to put new node */
161		while (*entry) {
162			struct iova *this = container_of(*entry,
163							struct iova, node);
164			parent = *entry;
165
166			if (new->pfn_lo < this->pfn_lo)
167				entry = &((*entry)->rb_left);
168			else if (new->pfn_lo > this->pfn_lo)
169				entry = &((*entry)->rb_right);
170			else
171				BUG(); /* this should not happen */
172		}
173
174		/* Add new node and rebalance tree. */
175		rb_link_node(&new->node, parent, entry);
176		rb_insert_color(&new->node, &iovad->rbroot);
177	}
178	__cached_rbnode_insert_update(iovad, saved_pfn, new);
179
180	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
181
182
183	return 0;
184}
185
186static void
187iova_insert_rbtree(struct rb_root *root, struct iova *iova)
188{
189	struct rb_node **new = &(root->rb_node), *parent = NULL;
190	/* Figure out where to put new node */
191	while (*new) {
192		struct iova *this = container_of(*new, struct iova, node);
193
194		parent = *new;
195
196		if (iova->pfn_lo < this->pfn_lo)
197			new = &((*new)->rb_left);
198		else if (iova->pfn_lo > this->pfn_lo)
199			new = &((*new)->rb_right);
200		else
201			BUG(); /* this should not happen */
202	}
203	/* Add new node and rebalance tree. */
204	rb_link_node(&iova->node, parent, new);
205	rb_insert_color(&iova->node, root);
206}
207
208static struct kmem_cache *iova_cache;
209static unsigned int iova_cache_users;
210static DEFINE_MUTEX(iova_cache_mutex);
211
212struct iova *alloc_iova_mem(void)
213{
214	return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
215}
216EXPORT_SYMBOL(alloc_iova_mem);
217
218void free_iova_mem(struct iova *iova)
219{
220	kmem_cache_free(iova_cache, iova);
221}
222EXPORT_SYMBOL(free_iova_mem);
223
224int iova_cache_get(void)
225{
226	mutex_lock(&iova_cache_mutex);
227	if (!iova_cache_users) {
228		iova_cache = kmem_cache_create(
229			"iommu_iova", sizeof(struct iova), 0,
230			SLAB_HWCACHE_ALIGN, NULL);
231		if (!iova_cache) {
232			mutex_unlock(&iova_cache_mutex);
233			printk(KERN_ERR "Couldn't create iova cache\n");
234			return -ENOMEM;
235		}
236	}
237
238	iova_cache_users++;
239	mutex_unlock(&iova_cache_mutex);
240
241	return 0;
242}
243EXPORT_SYMBOL_GPL(iova_cache_get);
244
245void iova_cache_put(void)
246{
247	mutex_lock(&iova_cache_mutex);
248	if (WARN_ON(!iova_cache_users)) {
249		mutex_unlock(&iova_cache_mutex);
250		return;
251	}
252	iova_cache_users--;
253	if (!iova_cache_users)
254		kmem_cache_destroy(iova_cache);
255	mutex_unlock(&iova_cache_mutex);
256}
257EXPORT_SYMBOL_GPL(iova_cache_put);
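/*
 * Illustrative sketch (not part of iova.c): the iova kmem_cache is
 * shared and reference counted, so every consumer brackets its
 * lifetime with iova_cache_get()/iova_cache_put().  my_driver_probe
 * and my_driver_remove are hypothetical names for the example.
 */
static int my_driver_probe(void)
{
	int ret = iova_cache_get();	/* creates the cache on first use */

	if (ret)
		return ret;
	/* ... init_iova_domain(), alloc_iova(), ... */
	return 0;
}

static void my_driver_remove(void)
{
	/* ... put_iova_domain() ... */
	iova_cache_put();	/* destroyed when the last user drops its ref */
}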
258
259/**
260 * alloc_iova - allocates an iova
261 * @iovad: - iova domain in question
262 * @size: - size of page frames to allocate
263 * @limit_pfn: - max limit address
264 * @size_aligned: - set if size_aligned address range is required
265 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
266 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
267 * flag is set then the allocated address iova->pfn_lo will be naturally
268 * aligned on roundup_power_of_two(size).
269 */
270struct iova *
271alloc_iova(struct iova_domain *iovad, unsigned long size,
272	unsigned long limit_pfn,
273	bool size_aligned)
274{
275	struct iova *new_iova;
276	int ret;
277
278	new_iova = alloc_iova_mem();
279	if (!new_iova)
280		return NULL;
281
282	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
283			new_iova, size_aligned);
284
285	if (ret) {
286		free_iova_mem(new_iova);
287		return NULL;
288	}
289
290	return new_iova;
291}
292EXPORT_SYMBOL_GPL(alloc_iova);
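/*
 * Illustrative sketch (not part of iova.c): allocating a size-aligned
 * IOVA range below the 32-bit boundary and releasing it again.  The
 * helpers are hypothetical and assume the domain was initialised with
 * a granule of PAGE_SIZE.
 */
static dma_addr_t my_alloc_range(struct iova_domain *iovad, unsigned long npages)
{
	struct iova *iova;

	/* Top-down, size-aligned allocation limited to the 32-bit pfn. */
	iova = alloc_iova(iovad, npages, iovad->dma_32bit_pfn, true);
	if (!iova)
		return 0;	/* address space exhausted */

	/* Convert the page frame number back to a bus/DMA address. */
	return (dma_addr_t)iova->pfn_lo << PAGE_SHIFT;
}

static void my_free_range(struct iova_domain *iovad, dma_addr_t dma)
{
	/* find_iova() + __free_iova() in one step */
	free_iova(iovad, dma >> PAGE_SHIFT);
}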
293
294/**
295 * find_iova - finds an iova for a given pfn
296 * @iovad: - iova domain in question.
297 * @pfn: - page frame number
298 * This function finds and returns an iova belonging to the
299 * given domain which matches the given pfn.
300 */
301struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
302{
303	unsigned long flags;
304	struct rb_node *node;
305
306	/* Take the lock so that no other thread is manipulating the rbtree */
307	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
308	node = iovad->rbroot.rb_node;
309	while (node) {
310		struct iova *iova = container_of(node, struct iova, node);
311
312		/* If pfn falls within iova's range, return iova */
313		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
314			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
315			/* We are not holding the lock while this iova
316			 * is referenced by the caller as the same thread
317			 * which called this function also calls __free_iova()
318			 * and it is by design that only one thread can possibly
319			 * reference a particular iova and hence no conflict.
320			 */
321			return iova;
322		}
323
324		if (pfn < iova->pfn_lo)
325			node = node->rb_left;
326		else if (pfn > iova->pfn_lo)
327			node = node->rb_right;
328	}
329
330	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
331	return NULL;
332}
333EXPORT_SYMBOL_GPL(find_iova);
334
335/**
336 * __free_iova - frees the given iova
337 * @iovad: iova domain in question.
338 * @iova: iova in question.
339 * Frees the given iova belonging to the given domain
340 */
341void
342__free_iova(struct iova_domain *iovad, struct iova *iova)
343{
344	unsigned long flags;
345
346	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
347	__cached_rbnode_delete_update(iovad, iova);
348	rb_erase(&iova->node, &iovad->rbroot);
349	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
350	free_iova_mem(iova);
351}
352EXPORT_SYMBOL_GPL(__free_iova);
353
354/**
355 * free_iova - finds and frees the iova for a given pfn
356 * @iovad: - iova domain in question.
357 * @pfn: - pfn that is allocated previously
358 * This function finds an iova for a given pfn and then
359 * frees the iova from that domain.
360 */
361void
362free_iova(struct iova_domain *iovad, unsigned long pfn)
363{
364	struct iova *iova = find_iova(iovad, pfn);
365
366	if (iova)
367		__free_iova(iovad, iova);
368
369}
370EXPORT_SYMBOL_GPL(free_iova);
371
372/**
373 * put_iova_domain - destroys the iova domain
374 * @iovad: - iova domain in question.
375 * All the iova's in that domain are destroyed.
376 */
377void put_iova_domain(struct iova_domain *iovad)
378{
379	struct rb_node *node;
380	unsigned long flags;
381
382	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
383	node = rb_first(&iovad->rbroot);
384	while (node) {
385		struct iova *iova = container_of(node, struct iova, node);
386
387		rb_erase(node, &iovad->rbroot);
388		free_iova_mem(iova);
389		node = rb_first(&iovad->rbroot);
390	}
391	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
392}
393EXPORT_SYMBOL_GPL(put_iova_domain);
394
395static int
396__is_range_overlap(struct rb_node *node,
397	unsigned long pfn_lo, unsigned long pfn_hi)
398{
399	struct iova *iova = container_of(node, struct iova, node);
400
401	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
402		return 1;
403	return 0;
404}
405
406static inline struct iova *
407alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
408{
409	struct iova *iova;
410
411	iova = alloc_iova_mem();
412	if (iova) {
413		iova->pfn_lo = pfn_lo;
414		iova->pfn_hi = pfn_hi;
415	}
416
417	return iova;
418}
419
420static struct iova *
421__insert_new_range(struct iova_domain *iovad,
422	unsigned long pfn_lo, unsigned long pfn_hi)
423{
424	struct iova *iova;
425
426	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
427	if (iova)
428		iova_insert_rbtree(&iovad->rbroot, iova);
429
430	return iova;
431}
432
433static void
434__adjust_overlap_range(struct iova *iova,
435	unsigned long *pfn_lo, unsigned long *pfn_hi)
436{
437	if (*pfn_lo < iova->pfn_lo)
438		iova->pfn_lo = *pfn_lo;
439	if (*pfn_hi > iova->pfn_hi)
440		*pfn_lo = iova->pfn_hi + 1;
441}
442
443/**
444 * reserve_iova - reserves an iova in the given range
445 * @iovad: - iova domain pointer
446 * @pfn_lo: - lower page frame address
447 * @pfn_hi: - higher pfn address
448 * This function reserves the address range from pfn_lo to pfn_hi so
449 * that this address is not dished out as part of alloc_iova.
450 */
451struct iova *
452reserve_iova(struct iova_domain *iovad,
453	unsigned long pfn_lo, unsigned long pfn_hi)
454{
455	struct rb_node *node;
456	unsigned long flags;
457	struct iova *iova;
458	unsigned int overlap = 0;
459
460	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
461	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
462		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
463			iova = container_of(node, struct iova, node);
464			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
465			if ((pfn_lo >= iova->pfn_lo) &&
466				(pfn_hi <= iova->pfn_hi))
467				goto finish;
468			overlap = 1;
469
470		} else if (overlap)
471				break;
472	}
473
474	/* We are here either because this is the first reserved range
475	 * or we need to insert the remaining non-overlapping address range
476	 */
477	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
478finish:
479
480	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
481	return iova;
482}
483EXPORT_SYMBOL_GPL(reserve_iova);
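/*
 * Illustrative sketch (not part of iova.c): carving a hole out of the
 * allocatable space so that alloc_iova() never hands it out, e.g. an
 * address window claimed by the platform.  The 0xfee00000-0xfeefffff
 * range and the PAGE_SHIFT conversion are example assumptions only.
 */
static int my_reserve_window(struct iova_domain *iovad)
{
	struct iova *iova;

	iova = reserve_iova(iovad,
			    0xfee00000UL >> PAGE_SHIFT,		/* pfn_lo */
			    0xfeefffffUL >> PAGE_SHIFT);	/* pfn_hi */
	return iova ? 0 : -ENOMEM;
}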
484
485/**
486 * copy_reserved_iova - copies the reserved iovas between domains
487 * @from: - source domain from where to copy
488 * @to: - destination domain where to copy
489 * This function copies reserved iovas from one domain to
490 * the other.
491 */
492void
493copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
494{
495	unsigned long flags;
496	struct rb_node *node;
497
498	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
499	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
500		struct iova *iova = container_of(node, struct iova, node);
501		struct iova *new_iova;
502
503		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
504		if (!new_iova)
505			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
506				iova->pfn_lo, iova->pfn_hi);
507	}
508	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
509}
510EXPORT_SYMBOL_GPL(copy_reserved_iova);
511
512struct iova *
513split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
514		      unsigned long pfn_lo, unsigned long pfn_hi)
515{
516	unsigned long flags;
517	struct iova *prev = NULL, *next = NULL;
518
519	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
520	if (iova->pfn_lo < pfn_lo) {
521		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
522		if (prev == NULL)
523			goto error;
524	}
525	if (iova->pfn_hi > pfn_hi) {
526		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
527		if (next == NULL)
528			goto error;
529	}
530
531	__cached_rbnode_delete_update(iovad, iova);
532	rb_erase(&iova->node, &iovad->rbroot);
533
534	if (prev) {
535		iova_insert_rbtree(&iovad->rbroot, prev);
536		iova->pfn_lo = pfn_lo;
537	}
538	if (next) {
539		iova_insert_rbtree(&iovad->rbroot, next);
540		iova->pfn_hi = pfn_hi;
541	}
542	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
543
544	return iova;
545
546error:
547	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
548	if (prev)
549		free_iova_mem(prev);
550	return NULL;
551}
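/*
 * Illustrative sketch (not part of iova.c): split_and_remove_iova()
 * detaches the sub-range [pfn_lo, pfn_hi] from an allocated iova; the
 * pieces outside that sub-range stay reserved in the tree, while the
 * returned iova now describes only the removed middle and is no longer
 * linked into the rbtree.  my_punch_hole is a hypothetical caller that
 * simply drops the detached descriptor.
 */
static int my_punch_hole(struct iova_domain *iovad, struct iova *iova,
			 unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *mid = split_and_remove_iova(iovad, iova, pfn_lo, pfn_hi);

	if (!mid)
		return -ENOMEM;	/* allocating a split-off piece failed */

	free_iova_mem(mid);	/* descriptor is not in the tree any more */
	return 0;
}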
552
553MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
554MODULE_LICENSE("GPL");
v3.5.6
  1/*
  2 * Copyright © 2006-2009, Intel Corporation.
  3 *
  4 * This program is free software; you can redistribute it and/or modify it
  5 * under the terms and conditions of the GNU General Public License,
  6 * version 2, as published by the Free Software Foundation.
  7 *
  8 * This program is distributed in the hope it will be useful, but WITHOUT
  9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 10 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 11 * more details.
 12 *
 13 * You should have received a copy of the GNU General Public License along with
 14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 15 * Place - Suite 330, Boston, MA 02111-1307 USA.
 16 *
 17 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 18 */
 19
 20#include <linux/iova.h>
 21
 22void
 23init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
 24{
 25	spin_lock_init(&iovad->iova_rbtree_lock);
 26	iovad->rbroot = RB_ROOT;
 27	iovad->cached32_node = NULL;
 28	iovad->dma_32bit_pfn = pfn_32bit;
 29}
 30
 31static struct rb_node *
 32__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
 33{
 34	if ((*limit_pfn != iovad->dma_32bit_pfn) ||
 35		(iovad->cached32_node == NULL))
 36		return rb_last(&iovad->rbroot);
 37	else {
 38		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
 39		struct iova *curr_iova =
 40			container_of(iovad->cached32_node, struct iova, node);
 41		*limit_pfn = curr_iova->pfn_lo - 1;
 42		return prev_node;
 43	}
 44}
 45
 46static void
 47__cached_rbnode_insert_update(struct iova_domain *iovad,
 48	unsigned long limit_pfn, struct iova *new)
 49{
 50	if (limit_pfn != iovad->dma_32bit_pfn)
 51		return;
 52	iovad->cached32_node = &new->node;
 53}
 54
 55static void
 56__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 57{
 58	struct iova *cached_iova;
 59	struct rb_node *curr;
 60
 61	if (!iovad->cached32_node)
 62		return;
 63	curr = iovad->cached32_node;
 64	cached_iova = container_of(curr, struct iova, node);
 65
 66	if (free->pfn_lo >= cached_iova->pfn_lo) {
 67		struct rb_node *node = rb_next(&free->node);
 68		struct iova *iova = container_of(node, struct iova, node);
 69
 70		/* only cache if it's below 32bit pfn */
 71		if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
 72			iovad->cached32_node = node;
 73		else
 74			iovad->cached32_node = NULL;
 75	}
 76}
 77
 78/* Computes the padding size required, to make the
 79 * start address naturally aligned on its size
 80 */
 81static int
 82iova_get_pad_size(int size, unsigned int limit_pfn)
 83{
 84	unsigned int pad_size = 0;
 85	unsigned int order = ilog2(size);
 86
 87	if (order)
 88		pad_size = (limit_pfn + 1) % (1 << order);
 89
 90	return pad_size;
 91}
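/*
 * Note on this older formula: pad_size = (limit_pfn + 1) % (1 << order)
 * only produces an aligned pfn_lo when 'size' is itself a multiple of
 * 1 << order.  That holds here because alloc_iova() below rounds size
 * up with __roundup_pow_of_two() before calling
 * __alloc_and_insert_iova_range() whenever size_aligned is set.
 */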
 92
 93static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 94		unsigned long size, unsigned long limit_pfn,
 95			struct iova *new, bool size_aligned)
 96{
 97	struct rb_node *prev, *curr = NULL;
 98	unsigned long flags;
 99	unsigned long saved_pfn;
100	unsigned int pad_size = 0;
101
102	/* Walk the tree backwards */
103	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
104	saved_pfn = limit_pfn;
105	curr = __get_cached_rbnode(iovad, &limit_pfn);
106	prev = curr;
107	while (curr) {
108		struct iova *curr_iova = container_of(curr, struct iova, node);
109
110		if (limit_pfn < curr_iova->pfn_lo)
111			goto move_left;
112		else if (limit_pfn < curr_iova->pfn_hi)
113			goto adjust_limit_pfn;
114		else {
115			if (size_aligned)
116				pad_size = iova_get_pad_size(size, limit_pfn);
117			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
118				break;	/* found a free slot */
119		}
120adjust_limit_pfn:
121		limit_pfn = curr_iova->pfn_lo - 1;
122move_left:
123		prev = curr;
124		curr = rb_prev(curr);
125	}
126
127	if (!curr) {
128		if (size_aligned)
129			pad_size = iova_get_pad_size(size, limit_pfn);
130		if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
131			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
132			return -ENOMEM;
133		}
134	}
135
136	/* pfn_lo will point to size aligned address if size_aligned is set */
137	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
138	new->pfn_hi = new->pfn_lo + size - 1;
139
140	/* Insert the new_iova into domain rbtree by holding writer lock */
141	/* Add new node and rebalance tree. */
142	{
143		struct rb_node **entry, *parent = NULL;
144
145		/* If we have 'prev', it's a valid place to start the
146		   insertion. Otherwise, start from the root. */
147		if (prev)
148			entry = &prev;
149		else
150			entry = &iovad->rbroot.rb_node;
151
152		/* Figure out where to put new node */
153		while (*entry) {
154			struct iova *this = container_of(*entry,
155							struct iova, node);
156			parent = *entry;
157
158			if (new->pfn_lo < this->pfn_lo)
159				entry = &((*entry)->rb_left);
160			else if (new->pfn_lo > this->pfn_lo)
161				entry = &((*entry)->rb_right);
162			else
163				BUG(); /* this should not happen */
164		}
165
166		/* Add new node and rebalance tree. */
167		rb_link_node(&new->node, parent, entry);
168		rb_insert_color(&new->node, &iovad->rbroot);
169	}
170	__cached_rbnode_insert_update(iovad, saved_pfn, new);
171
172	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
173
174
175	return 0;
176}
177
178static void
179iova_insert_rbtree(struct rb_root *root, struct iova *iova)
180{
181	struct rb_node **new = &(root->rb_node), *parent = NULL;
182	/* Figure out where to put new node */
183	while (*new) {
184		struct iova *this = container_of(*new, struct iova, node);
185		parent = *new;
186
187		if (iova->pfn_lo < this->pfn_lo)
188			new = &((*new)->rb_left);
189		else if (iova->pfn_lo > this->pfn_lo)
190			new = &((*new)->rb_right);
191		else
192			BUG(); /* this should not happen */
193	}
194	/* Add new node and rebalance tree. */
195	rb_link_node(&iova->node, parent, new);
196	rb_insert_color(&iova->node, root);
197}
198
199/**
200 * alloc_iova - allocates an iova
201 * @iovad - iova domain in question
202 * @size - size of page frames to allocate
203 * @limit_pfn - max limit address
204 * @size_aligned - set if size_aligned address range is required
205 * This function allocates an iova in the range IOVA_START_PFN to limit_pfn,
206 * looking down from limit_pfn instead of up from IOVA_START_PFN. If the size_aligned
207 * flag is set then the allocated address iova->pfn_lo will be naturally
208 * aligned on roundup_power_of_two(size).
209 */
210struct iova *
211alloc_iova(struct iova_domain *iovad, unsigned long size,
212	unsigned long limit_pfn,
213	bool size_aligned)
214{
215	struct iova *new_iova;
216	int ret;
217
218	new_iova = alloc_iova_mem();
219	if (!new_iova)
220		return NULL;
221
222	/* If size aligned is set then round the size to
223	 * the next power of two.
224	 */
225	if (size_aligned)
226		size = __roundup_pow_of_two(size);
227
228	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
229			new_iova, size_aligned);
230
231	if (ret) {
232		free_iova_mem(new_iova);
233		return NULL;
234	}
235
236	return new_iova;
237}
238
239/**
240 * find_iova - finds an iova for a given pfn
241 * @iovad - iova domain in question.
242 * @pfn - page frame number
243 * This function finds and returns an iova belonging to the
244 * given domain which matches the given pfn.
245 */
246struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
247{
248	unsigned long flags;
249	struct rb_node *node;
250
251	/* Take the lock so that no other thread is manipulating the rbtree */
252	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
253	node = iovad->rbroot.rb_node;
254	while (node) {
255		struct iova *iova = container_of(node, struct iova, node);
256
257		/* If pfn falls within iova's range, return iova */
258		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
259			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
260			/* We are not holding the lock while this iova
261			 * is referenced by the caller as the same thread
262			 * which called this function also calls __free_iova()
263			 * and it is by design that only one thread can possibly
264			 * reference a particular iova and hence no conflict.
265			 */
266			return iova;
267		}
268
269		if (pfn < iova->pfn_lo)
270			node = node->rb_left;
271		else if (pfn > iova->pfn_lo)
272			node = node->rb_right;
273	}
274
275	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
276	return NULL;
277}
278
279/**
280 * __free_iova - frees the given iova
281 * @iovad: iova domain in question.
282 * @iova: iova in question.
283 * Frees the given iova belonging to the given domain
284 */
285void
286__free_iova(struct iova_domain *iovad, struct iova *iova)
287{
288	unsigned long flags;
289
290	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
291	__cached_rbnode_delete_update(iovad, iova);
292	rb_erase(&iova->node, &iovad->rbroot);
293	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
294	free_iova_mem(iova);
295}
296
297/**
298 * free_iova - finds and frees the iova for a given pfn
299 * @iovad: - iova domain in question.
300 * @pfn: - pfn that is allocated previously
301 * This function finds an iova for a given pfn and then
302 * frees the iova from that domain.
303 */
304void
305free_iova(struct iova_domain *iovad, unsigned long pfn)
306{
307	struct iova *iova = find_iova(iovad, pfn);
308	if (iova)
309		__free_iova(iovad, iova);
310
311}
312
313/**
314 * put_iova_domain - destroys the iova domain
315 * @iovad: - iova domain in question.
316 * All the iova's in that domain are destroyed.
317 */
318void put_iova_domain(struct iova_domain *iovad)
319{
320	struct rb_node *node;
321	unsigned long flags;
322
323	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
324	node = rb_first(&iovad->rbroot);
325	while (node) {
326		struct iova *iova = container_of(node, struct iova, node);
327		rb_erase(node, &iovad->rbroot);
328		free_iova_mem(iova);
329		node = rb_first(&iovad->rbroot);
330	}
331	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
332}
333
334static int
335__is_range_overlap(struct rb_node *node,
336	unsigned long pfn_lo, unsigned long pfn_hi)
337{
338	struct iova *iova = container_of(node, struct iova, node);
339
340	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
341		return 1;
342	return 0;
343}
344
345static struct iova *
346__insert_new_range(struct iova_domain *iovad,
347	unsigned long pfn_lo, unsigned long pfn_hi)
348{
349	struct iova *iova;
350
351	iova = alloc_iova_mem();
352	if (!iova)
353		return iova;
354
355	iova->pfn_hi = pfn_hi;
356	iova->pfn_lo = pfn_lo;
357	iova_insert_rbtree(&iovad->rbroot, iova);
358	return iova;
359}
360
361static void
362__adjust_overlap_range(struct iova *iova,
363	unsigned long *pfn_lo, unsigned long *pfn_hi)
364{
365	if (*pfn_lo < iova->pfn_lo)
366		iova->pfn_lo = *pfn_lo;
367	if (*pfn_hi > iova->pfn_hi)
368		*pfn_lo = iova->pfn_hi + 1;
369}
370
371/**
372 * reserve_iova - reserves an iova in the given range
373 * @iovad: - iova domain pointer
374 * @pfn_lo: - lower page frame address
375 * @pfn_hi: - higher pfn address
376 * This function reserves the address range from pfn_lo to pfn_hi so
377 * that this address is not dished out as part of alloc_iova.
378 */
379struct iova *
380reserve_iova(struct iova_domain *iovad,
381	unsigned long pfn_lo, unsigned long pfn_hi)
382{
383	struct rb_node *node;
384	unsigned long flags;
385	struct iova *iova;
386	unsigned int overlap = 0;
387
388	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
389	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
390		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
391			iova = container_of(node, struct iova, node);
392			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
393			if ((pfn_lo >= iova->pfn_lo) &&
394				(pfn_hi <= iova->pfn_hi))
395				goto finish;
396			overlap = 1;
397
398		} else if (overlap)
399				break;
400	}
401
402	/* We are here either because this is the first reserved range
403	 * or we need to insert the remaining non-overlapping address range
404	 */
405	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
406finish:
407
408	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
409	return iova;
410}
411
412/**
413 * copy_reserved_iova - copies the reserved iovas between domains
414 * @from: - source domain from where to copy
415 * @to: - destination domain where to copy
416 * This function copies reserved iovas from one domain to
417 * the other.
418 */
419void
420copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
421{
422	unsigned long flags;
423	struct rb_node *node;
424
425	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
426	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
427		struct iova *iova = container_of(node, struct iova, node);
428		struct iova *new_iova;
429		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
430		if (!new_iova)
431			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
432				iova->pfn_lo, iova->pfn_hi);
433	}
434	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
435}