v3.1
  1/*
  2 * Copyright (c) Red Hat Inc.
  3
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice (including the
 12 * next paragraph) shall be included in all copies or substantial portions
 13 * of the Software.
 14 *
 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 21 * DEALINGS IN THE SOFTWARE.
 22 *
 23 * Authors: Dave Airlie <airlied@redhat.com>
 24 *          Jerome Glisse <jglisse@redhat.com>
 25 *          Pauli Nieminen <suokkos@gmail.com>
 26 */
 27
 28/* simple list based uncached page pool
  29 * - Pool collects recently freed pages for reuse
 30 * - Use page->lru to keep a free list
 31 * - doesn't track currently in use pages
 32 */
 33#include <linux/list.h>
 34#include <linux/spinlock.h>
 35#include <linux/highmem.h>
 36#include <linux/mm_types.h>
 37#include <linux/module.h>
 38#include <linux/mm.h>
 39#include <linux/seq_file.h> /* for seq_printf */
 40#include <linux/slab.h>
 41#include <linux/dma-mapping.h>
 42
 43#include <linux/atomic.h>
 44
 45#include "ttm/ttm_bo_driver.h"
 46#include "ttm/ttm_page_alloc.h"
 47
 48#ifdef TTM_HAS_AGP
 49#include <asm/agp.h>
 50#endif
 51
 52#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
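/* with 4 KiB pages and 8-byte struct page pointers this works out to
 * 512 pages (2 MiB of memory) per batch on a typical 64-bit build */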
 53#define SMALL_ALLOCATION		16
 54#define FREE_ALL_PAGES			(~0U)
 55/* times are in msecs */
 56#define PAGE_FREE_INTERVAL		1000
 57
 58/**
 59 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 60 *
  61 * @lock: Protects the shared pool from concurrent access. Must be used with
  62 * irqsave/irqrestore variants because the pool allocator may be called from
 63 * delayed work.
 64 * @fill_lock: Prevent concurrent calls to fill.
 65 * @list: Pool of free uc/wc pages for fast reuse.
 66 * @gfp_flags: Flags to pass for alloc_page.
 67 * @npages: Number of pages in pool.
 68 */
 69struct ttm_page_pool {
 70	spinlock_t		lock;
 71	bool			fill_lock;
 72	struct list_head	list;
 73	gfp_t			gfp_flags;
 74	unsigned		npages;
 75	char			*name;
 76	unsigned long		nfrees;
 77	unsigned long		nrefills;
 78};
 79
 80/**
  81 * Limits for the pool. They are handled without locks because the only place
  82 * where they may change is the sysfs store. They won't have an immediate
  83 * effect anyway, so forcing serialized access to them is pointless.
 84 */
 85
 86struct ttm_pool_opts {
 87	unsigned	alloc_size;
 88	unsigned	max_size;
 89	unsigned	small;
 90};
 91
 92#define NUM_POOLS 4
 93
 94/**
  95 * struct ttm_pool_manager - Holds memory pools for fast allocation
 96 *
 97 * Manager is read only object for pool code so it doesn't need locking.
 98 *
 99 * @free_interval: minimum number of jiffies between freeing pages from pool.
100 * @page_alloc_inited: reference counting for pool allocation.
 101 * @work: Work that is used to shrink the pool. Work is only run when there are
 102 * some pages to free.
 103 * @small_allocation: Limit, in number of pages, for what counts as a small allocation.
104 *
105 * @pools: All pool objects in use.
106 **/
107struct ttm_pool_manager {
108	struct kobject		kobj;
109	struct shrinker		mm_shrink;
110	struct ttm_pool_opts	options;
111
112	union {
113		struct ttm_page_pool	pools[NUM_POOLS];
114		struct {
115			struct ttm_page_pool	wc_pool;
116			struct ttm_page_pool	uc_pool;
117			struct ttm_page_pool	wc_pool_dma32;
118			struct ttm_page_pool	uc_pool_dma32;
119		} ;
120	};
121};
122
123static struct attribute ttm_page_pool_max = {
124	.name = "pool_max_size",
125	.mode = S_IRUGO | S_IWUSR
126};
127static struct attribute ttm_page_pool_small = {
128	.name = "pool_small_allocation",
129	.mode = S_IRUGO | S_IWUSR
130};
131static struct attribute ttm_page_pool_alloc_size = {
132	.name = "pool_allocation_size",
133	.mode = S_IRUGO | S_IWUSR
134};
135
136static struct attribute *ttm_pool_attrs[] = {
137	&ttm_page_pool_max,
138	&ttm_page_pool_small,
139	&ttm_page_pool_alloc_size,
140	NULL
141};
142
143static void ttm_pool_kobj_release(struct kobject *kobj)
144{
145	struct ttm_pool_manager *m =
146		container_of(kobj, struct ttm_pool_manager, kobj);
147	kfree(m);
148}
149
150static ssize_t ttm_pool_store(struct kobject *kobj,
151		struct attribute *attr, const char *buffer, size_t size)
152{
153	struct ttm_pool_manager *m =
154		container_of(kobj, struct ttm_pool_manager, kobj);
155	int chars;
156	unsigned val;
157	chars = sscanf(buffer, "%u", &val);
158	if (chars == 0)
159		return size;
160
161	/* Convert kb to number of pages */
162	val = val / (PAGE_SIZE >> 10);
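	/* e.g. with 4 KiB pages the divisor is 4, so writing 8192 (KiB)
	 * through sysfs stores 2048 pages */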
163
164	if (attr == &ttm_page_pool_max)
165		m->options.max_size = val;
166	else if (attr == &ttm_page_pool_small)
167		m->options.small = val;
168	else if (attr == &ttm_page_pool_alloc_size) {
169		if (val > NUM_PAGES_TO_ALLOC*8) {
170			printk(KERN_ERR TTM_PFX
171			       "Setting allocation size to %lu "
172			       "is not allowed. Recommended size is "
173			       "%lu\n",
174			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
175			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
176			return size;
177		} else if (val > NUM_PAGES_TO_ALLOC) {
178			printk(KERN_WARNING TTM_PFX
179			       "Setting allocation size to "
180			       "larger than %lu is not recommended.\n",
181			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
182		}
183		m->options.alloc_size = val;
184	}
185
186	return size;
187}
188
189static ssize_t ttm_pool_show(struct kobject *kobj,
190		struct attribute *attr, char *buffer)
191{
192	struct ttm_pool_manager *m =
193		container_of(kobj, struct ttm_pool_manager, kobj);
194	unsigned val = 0;
195
196	if (attr == &ttm_page_pool_max)
197		val = m->options.max_size;
198	else if (attr == &ttm_page_pool_small)
199		val = m->options.small;
200	else if (attr == &ttm_page_pool_alloc_size)
201		val = m->options.alloc_size;
202
203	val = val * (PAGE_SIZE >> 10);
204
205	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
206}
207
208static const struct sysfs_ops ttm_pool_sysfs_ops = {
209	.show = &ttm_pool_show,
210	.store = &ttm_pool_store,
211};
212
213static struct kobj_type ttm_pool_kobj_type = {
214	.release = &ttm_pool_kobj_release,
215	.sysfs_ops = &ttm_pool_sysfs_ops,
216	.default_attrs = ttm_pool_attrs,
217};
218
219static struct ttm_pool_manager *_manager;
220
221#ifndef CONFIG_X86
222static int set_pages_array_wb(struct page **pages, int addrinarray)
223{
224#ifdef TTM_HAS_AGP
225	int i;
226
227	for (i = 0; i < addrinarray; i++)
228		unmap_page_from_agp(pages[i]);
229#endif
230	return 0;
231}
232
233static int set_pages_array_wc(struct page **pages, int addrinarray)
234{
235#ifdef TTM_HAS_AGP
236	int i;
237
238	for (i = 0; i < addrinarray; i++)
239		map_page_into_agp(pages[i]);
240#endif
241	return 0;
242}
243
244static int set_pages_array_uc(struct page **pages, int addrinarray)
245{
246#ifdef TTM_HAS_AGP
247	int i;
248
249	for (i = 0; i < addrinarray; i++)
250		map_page_into_agp(pages[i]);
251#endif
252	return 0;
253}
254#endif
255
256/**
 257 * Select the right pool for the requested caching state and ttm flags. */
258static struct ttm_page_pool *ttm_get_pool(int flags,
259		enum ttm_caching_state cstate)
260{
261	int pool_index;
262
263	if (cstate == tt_cached)
264		return NULL;
265
266	if (cstate == tt_wc)
267		pool_index = 0x0;
268	else
269		pool_index = 0x1;
270
271	if (flags & TTM_PAGE_FLAG_DMA32)
272		pool_index |= 0x2;
273
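	/* bit 0 selects uc over wc, bit 1 the dma32 variant; the index
	 * matches the pools[] union order declared above */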
274	return &_manager->pools[pool_index];
275}
276
277/* set memory back to wb and free the pages. */
278static void ttm_pages_put(struct page *pages[], unsigned npages)
279{
280	unsigned i;
281	if (set_pages_array_wb(pages, npages))
282		printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
283				npages);
284	for (i = 0; i < npages; ++i)
285		__free_page(pages[i]);
286}
287
288static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
289		unsigned freed_pages)
290{
291	pool->npages -= freed_pages;
292	pool->nfrees += freed_pages;
293}
294
295/**
296 * Free pages from pool.
297 *
 298 * To prevent hogging the ttm_swap process we free at most NUM_PAGES_TO_ALLOC
 299 * pages in one go.
300 *
 301 * @pool: pool to free the pages from
 302 * @nr_free: number of pages to free; FREE_ALL_PAGES frees every page in the pool
303 **/
304static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
305{
306	unsigned long irq_flags;
307	struct page *p;
308	struct page **pages_to_free;
309	unsigned freed_pages = 0,
310		 npages_to_free = nr_free;
311
312	if (NUM_PAGES_TO_ALLOC < nr_free)
313		npages_to_free = NUM_PAGES_TO_ALLOC;
314
315	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
316			GFP_KERNEL);
317	if (!pages_to_free) {
318		printk(KERN_ERR TTM_PFX
319		       "Failed to allocate memory for pool free operation.\n");
320		return 0;
321	}
322
323restart:
324	spin_lock_irqsave(&pool->lock, irq_flags);
325
326	list_for_each_entry_reverse(p, &pool->list, lru) {
327		if (freed_pages >= npages_to_free)
328			break;
329
330		pages_to_free[freed_pages++] = p;
331		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
332		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
333			/* remove range of pages from the pool */
334			__list_del(p->lru.prev, &pool->list);
335
336			ttm_pool_update_free_locked(pool, freed_pages);
337			/**
338			 * Because changing page caching is costly
339			 * we unlock the pool to prevent stalling.
340			 */
341			spin_unlock_irqrestore(&pool->lock, irq_flags);
342
343			ttm_pages_put(pages_to_free, freed_pages);
344			if (likely(nr_free != FREE_ALL_PAGES))
345				nr_free -= freed_pages;
346
347			if (NUM_PAGES_TO_ALLOC >= nr_free)
348				npages_to_free = nr_free;
349			else
350				npages_to_free = NUM_PAGES_TO_ALLOC;
351
352			freed_pages = 0;
353
354			/* free all so restart the processing */
355			if (nr_free)
356				goto restart;
357
 358			/* Not allowed to fall through or break because
 359			 * the code after the loop runs with the spinlock
 360			 * held, while we have already dropped it here.
 361			 */
362			goto out;
363
364		}
365	}
366
367	/* remove range of pages from the pool */
368	if (freed_pages) {
369		__list_del(&p->lru, &pool->list);
370
371		ttm_pool_update_free_locked(pool, freed_pages);
372		nr_free -= freed_pages;
373	}
374
375	spin_unlock_irqrestore(&pool->lock, irq_flags);
376
377	if (freed_pages)
378		ttm_pages_put(pages_to_free, freed_pages);
379out:
380	kfree(pages_to_free);
381	return nr_free;
382}
383
 384/* Get a good estimate of how many pages are free in the pools */
385static int ttm_pool_get_num_unused_pages(void)
386{
387	unsigned i;
388	int total = 0;
389	for (i = 0; i < NUM_POOLS; ++i)
390		total += _manager->pools[i].npages;
391
392	return total;
393}
394
395/**
 396 * Callback for mm to request that the pool reduce the number of pages held.
397 */
398static int ttm_pool_mm_shrink(struct shrinker *shrink,
399			      struct shrink_control *sc)
400{
401	static atomic_t start_pool = ATOMIC_INIT(0);
402	unsigned i;
403	unsigned pool_offset = atomic_add_return(1, &start_pool);
404	struct ttm_page_pool *pool;
405	int shrink_pages = sc->nr_to_scan;
406
407	pool_offset = pool_offset % NUM_POOLS;
408	/* select start pool in round robin fashion */
409	for (i = 0; i < NUM_POOLS; ++i) {
410		unsigned nr_free = shrink_pages;
411		if (shrink_pages == 0)
412			break;
413		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
414		shrink_pages = ttm_page_pool_free(pool, nr_free);
415	}
416	/* return estimated number of unused pages in pool */
417	return ttm_pool_get_num_unused_pages();
418}
419
420static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
421{
422	manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
423	manager->mm_shrink.seeks = 1;
424	register_shrinker(&manager->mm_shrink);
425}
426
427static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
428{
429	unregister_shrinker(&manager->mm_shrink);
430}
431
432static int ttm_set_pages_caching(struct page **pages,
433		enum ttm_caching_state cstate, unsigned cpages)
434{
435	int r = 0;
436	/* Set page caching */
437	switch (cstate) {
438	case tt_uncached:
439		r = set_pages_array_uc(pages, cpages);
440		if (r)
441			printk(KERN_ERR TTM_PFX
442			       "Failed to set %d pages to uc!\n",
443			       cpages);
444		break;
445	case tt_wc:
446		r = set_pages_array_wc(pages, cpages);
447		if (r)
448			printk(KERN_ERR TTM_PFX
449			       "Failed to set %d pages to wc!\n",
450			       cpages);
451		break;
452	default:
453		break;
454	}
455	return r;
456}
457
458/**
 459 * Free the pages that failed to change their caching state. If any pages
 460 * have already changed their caching state, put them back into the
 461 * pool.
462 */
463static void ttm_handle_caching_state_failure(struct list_head *pages,
464		int ttm_flags, enum ttm_caching_state cstate,
465		struct page **failed_pages, unsigned cpages)
466{
467	unsigned i;
468	/* Failed pages have to be freed */
469	for (i = 0; i < cpages; ++i) {
470		list_del(&failed_pages[i]->lru);
471		__free_page(failed_pages[i]);
472	}
473}
474
475/**
476 * Allocate new pages with correct caching.
477 *
478 * This function is reentrant if caller updates count depending on number of
479 * pages returned in pages array.
480 */
481static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
482		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
483{
484	struct page **caching_array;
485	struct page *p;
486	int r = 0;
487	unsigned i, cpages;
488	unsigned max_cpages = min(count,
489			(unsigned)(PAGE_SIZE/sizeof(struct page *)));
490
491	/* allocate array for page caching change */
492	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
493
494	if (!caching_array) {
495		printk(KERN_ERR TTM_PFX
496		       "Unable to allocate table for new pages.");
497		return -ENOMEM;
498	}
499
500	for (i = 0, cpages = 0; i < count; ++i) {
501		p = alloc_page(gfp_flags);
502
503		if (!p) {
504			printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);
505
506			/* store already allocated pages in the pool after
507			 * setting the caching state */
508			if (cpages) {
509				r = ttm_set_pages_caching(caching_array,
510							  cstate, cpages);
511				if (r)
512					ttm_handle_caching_state_failure(pages,
513						ttm_flags, cstate,
514						caching_array, cpages);
515			}
516			r = -ENOMEM;
517			goto out;
518		}
519
520#ifdef CONFIG_HIGHMEM
 521		/* gfp flags of highmem page should never be dma32 so
 522		 * we should be fine in such a case
523		 */
524		if (!PageHighMem(p))
525#endif
526		{
527			caching_array[cpages++] = p;
528			if (cpages == max_cpages) {
529
530				r = ttm_set_pages_caching(caching_array,
531						cstate, cpages);
532				if (r) {
533					ttm_handle_caching_state_failure(pages,
534						ttm_flags, cstate,
535						caching_array, cpages);
536					goto out;
537				}
538				cpages = 0;
539			}
540		}
541
542		list_add(&p->lru, pages);
543	}
544
545	if (cpages) {
546		r = ttm_set_pages_caching(caching_array, cstate, cpages);
547		if (r)
548			ttm_handle_caching_state_failure(pages,
549					ttm_flags, cstate,
550					caching_array, cpages);
551	}
552out:
553	kfree(caching_array);
554
555	return r;
556}
557
558/**
559 * Fill the given pool if there aren't enough pages and the requested number of
560 * pages is small.
561 */
562static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
563		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
564		unsigned long *irq_flags)
565{
566	struct page *p;
567	int r;
568	unsigned cpages = 0;
569	/**
570	 * Only allow one pool fill operation at a time.
571	 * If pool doesn't have enough pages for the allocation new pages are
572	 * allocated from outside of pool.
573	 */
574	if (pool->fill_lock)
575		return;
576
577	pool->fill_lock = true;
578
579	/* If allocation request is small and there are not enough
580	 * pages in a pool we fill the pool up first. */
581	if (count < _manager->options.small
582		&& count > pool->npages) {
583		struct list_head new_pages;
584		unsigned alloc_size = _manager->options.alloc_size;
585
586		/**
587		 * Can't change page caching if in irqsave context. We have to
588		 * drop the pool->lock.
589		 */
590		spin_unlock_irqrestore(&pool->lock, *irq_flags);
591
592		INIT_LIST_HEAD(&new_pages);
593		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
594				cstate,	alloc_size);
595		spin_lock_irqsave(&pool->lock, *irq_flags);
596
597		if (!r) {
598			list_splice(&new_pages, &pool->list);
599			++pool->nrefills;
600			pool->npages += alloc_size;
601		} else {
602			printk(KERN_ERR TTM_PFX
603			       "Failed to fill pool (%p).", pool);
604			/* If we have any pages left put them to the pool. */
 605			list_for_each_entry(p, &new_pages, lru) {
606				++cpages;
607			}
608			list_splice(&new_pages, &pool->list);
609			pool->npages += cpages;
610		}
611
612	}
613	pool->fill_lock = false;
614}
615
616/**
617 * Cut 'count' number of pages from the pool and put them on the return list.
618 *
619 * @return count of pages still required to fulfill the request.
620 */
621static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
622		struct list_head *pages, int ttm_flags,
623		enum ttm_caching_state cstate, unsigned count)
624{
625	unsigned long irq_flags;
626	struct list_head *p;
627	unsigned i;
628
629	spin_lock_irqsave(&pool->lock, irq_flags);
630	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
631
632	if (count >= pool->npages) {
633		/* take all pages from the pool */
634		list_splice_init(&pool->list, pages);
635		count -= pool->npages;
636		pool->npages = 0;
637		goto out;
638	}
 639	/* Find the last page to include for the requested number of pages. Walk
 640	 * from whichever end of the list is closer to halve the search space. */
641	if (count <= pool->npages/2) {
642		i = 0;
643		list_for_each(p, &pool->list) {
644			if (++i == count)
645				break;
646		}
647	} else {
648		i = pool->npages + 1;
649		list_for_each_prev(p, &pool->list) {
650			if (--i == count)
651				break;
652		}
653	}
654	/* Cut 'count' number of pages from the pool */
655	list_cut_position(pages, &pool->list, p);
656	pool->npages -= count;
657	count = 0;
658out:
659	spin_unlock_irqrestore(&pool->lock, irq_flags);
660	return count;
661}
662
663/*
664 * On success pages list will hold count number of correctly
665 * cached pages.
666 */
667int ttm_get_pages(struct list_head *pages, int flags,
668		  enum ttm_caching_state cstate, unsigned count,
669		  dma_addr_t *dma_address)
670{
671	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
672	struct page *p = NULL;
673	gfp_t gfp_flags = GFP_USER;
674	int r;
675
676	/* set zero flag for page allocation if required */
677	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
678		gfp_flags |= __GFP_ZERO;
679
680	/* No pool for cached pages */
681	if (pool == NULL) {
682		if (flags & TTM_PAGE_FLAG_DMA32)
683			gfp_flags |= GFP_DMA32;
684		else
685			gfp_flags |= GFP_HIGHUSER;
686
687		for (r = 0; r < count; ++r) {
688			p = alloc_page(gfp_flags);
689			if (!p) {
690
691				printk(KERN_ERR TTM_PFX
692				       "Unable to allocate page.");
693				return -ENOMEM;
694			}
695
696			list_add(&p->lru, pages);
697		}
698		return 0;
699	}
700
701
702	/* combine zero flag to pool flags */
703	gfp_flags |= pool->gfp_flags;
704
705	/* First we take pages from the pool */
706	count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
707
708	/* clear the pages coming from the pool if requested */
709	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
710		list_for_each_entry(p, pages, lru) {
711			clear_page(page_address(p));
712		}
713	}
714
715	/* If pool didn't have enough pages allocate new one. */
716	if (count > 0) {
717		/* ttm_alloc_new_pages doesn't reference pool so we can run
718		 * multiple requests in parallel.
719		 **/
720		r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
721		if (r) {
 722			/* If there are any pages in the list put them back to
723			 * the pool. */
724			printk(KERN_ERR TTM_PFX
725			       "Failed to allocate extra pages "
726			       "for large request.");
727			ttm_put_pages(pages, 0, flags, cstate, NULL);
728			return r;
729		}
730	}
731
732
733	return 0;
734}
735
 736/* Put all pages in the pages list into the correct pool to await reuse */
737void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
738		   enum ttm_caching_state cstate, dma_addr_t *dma_address)
739{
740	unsigned long irq_flags;
741	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
742	struct page *p, *tmp;
743
744	if (pool == NULL) {
745		/* No pool for this memory type so free the pages */
746
747		list_for_each_entry_safe(p, tmp, pages, lru) {
748			__free_page(p);
749		}
750		/* Make the pages list empty */
751		INIT_LIST_HEAD(pages);
752		return;
753	}
754	if (page_count == 0) {
755		list_for_each_entry_safe(p, tmp, pages, lru) {
756			++page_count;
757		}
758	}
759
760	spin_lock_irqsave(&pool->lock, irq_flags);
761	list_splice_init(pages, &pool->list);
762	pool->npages += page_count;
763	/* Check that we don't go over the pool limit */
764	page_count = 0;
765	if (pool->npages > _manager->options.max_size) {
766		page_count = pool->npages - _manager->options.max_size;
767		/* free at least NUM_PAGES_TO_ALLOC number of pages
768		 * to reduce calls to set_memory_wb */
769		if (page_count < NUM_PAGES_TO_ALLOC)
770			page_count = NUM_PAGES_TO_ALLOC;
771	}
772	spin_unlock_irqrestore(&pool->lock, irq_flags);
773	if (page_count)
774		ttm_page_pool_free(pool, page_count);
775}
776
777static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
778		char *name)
779{
780	spin_lock_init(&pool->lock);
781	pool->fill_lock = false;
782	INIT_LIST_HEAD(&pool->list);
783	pool->npages = pool->nfrees = 0;
784	pool->gfp_flags = flags;
785	pool->name = name;
786}
787
788int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
789{
790	int ret;
791
792	WARN_ON(_manager);
793
794	printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");
795
 796	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;
797
798	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
799
800	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
801
802	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
803				  GFP_USER | GFP_DMA32, "wc dma");
804
805	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
806				  GFP_USER | GFP_DMA32, "uc dma");
807
808	_manager->options.max_size = max_pages;
809	_manager->options.small = SMALL_ALLOCATION;
810	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
811
812	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
813				   &glob->kobj, "pool");
814	if (unlikely(ret != 0)) {
815		kobject_put(&_manager->kobj);
816		_manager = NULL;
817		return ret;
818	}
819
820	ttm_pool_mm_shrink_init(_manager);
821
822	return 0;
823}
824
825void ttm_page_alloc_fini(void)
826{
827	int i;
828
829	printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
830	ttm_pool_mm_shrink_fini(_manager);
831
832	for (i = 0; i < NUM_POOLS; ++i)
833		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
834
835	kobject_put(&_manager->kobj);
836	_manager = NULL;
837}
838
839int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
840{
841	struct ttm_page_pool *p;
842	unsigned i;
843	char *h[] = {"pool", "refills", "pages freed", "size"};
844	if (!_manager) {
845		seq_printf(m, "No pool allocator running.\n");
846		return 0;
847	}
848	seq_printf(m, "%6s %12s %13s %8s\n",
849			h[0], h[1], h[2], h[3]);
850	for (i = 0; i < NUM_POOLS; ++i) {
851		p = &_manager->pools[i];
852
853		seq_printf(m, "%6s %12ld %13ld %8d\n",
854				p->name, p->nrefills,
855				p->nfrees, p->npages);
856	}
857	return 0;
858}
859EXPORT_SYMBOL(ttm_page_alloc_debugfs);
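
A minimal driver-side sketch of how the v3.1 list-based API pairs up. The zero
flags value, the tt_wc caching state and the example function are
illustration-only assumptions; only ttm_get_pages()/ttm_put_pages() are the
exported calls from the listing above:

#include <linux/list.h>
#include "ttm/ttm_page_alloc.h"

static int example_use_wc_pages(unsigned count)
{
	LIST_HEAD(pages);	/* free-standing list, linked via page->lru */
	int r;

	/* draw 'count' write-combined pages, refilling the wc pool on demand */
	r = ttm_get_pages(&pages, 0, tt_wc, count, NULL);
	if (r)
		return r;

	/* ... use the pages through their page->lru links ... */

	/* hand every page back; page_count == 0 makes the pool recount them */
	ttm_put_pages(&pages, 0, 0, tt_wc, NULL);
	return 0;
}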
v4.17
   1/*
   2 * Copyright (c) Red Hat Inc.
   3
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the
  12 * next paragraph) shall be included in all copies or substantial portions
  13 * of the Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21 * DEALINGS IN THE SOFTWARE.
  22 *
  23 * Authors: Dave Airlie <airlied@redhat.com>
  24 *          Jerome Glisse <jglisse@redhat.com>
  25 *          Pauli Nieminen <suokkos@gmail.com>
  26 */
  27
  28/* simple list based uncached page pool
   29 * - Pool collects recently freed pages for reuse
  30 * - Use page->lru to keep a free list
  31 * - doesn't track currently in use pages
  32 */
  33
  34#define pr_fmt(fmt) "[TTM] " fmt
  35
  36#include <linux/list.h>
  37#include <linux/spinlock.h>
  38#include <linux/highmem.h>
  39#include <linux/mm_types.h>
  40#include <linux/module.h>
  41#include <linux/mm.h>
  42#include <linux/seq_file.h> /* for seq_printf */
  43#include <linux/slab.h>
  44#include <linux/dma-mapping.h>
  45
  46#include <linux/atomic.h>
  47
  48#include <drm/ttm/ttm_bo_driver.h>
  49#include <drm/ttm/ttm_page_alloc.h>
  50
  51#if IS_ENABLED(CONFIG_AGP)
  52#include <asm/agp.h>
  53#endif
  54#ifdef CONFIG_X86
  55#include <asm/set_memory.h>
  56#endif
  57
  58#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
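/* 512 pages per batch with 4 KiB pages on a typical 64-bit build */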
  59#define SMALL_ALLOCATION		16
  60#define FREE_ALL_PAGES			(~0U)
  61/* times are in msecs */
  62#define PAGE_FREE_INTERVAL		1000
  63
  64/**
  65 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
  66 *
   67 * @lock: Protects the shared pool from concurrent access. Must be used with
   68 * irqsave/irqrestore variants because the pool allocator may be called from
  69 * delayed work.
  70 * @fill_lock: Prevent concurrent calls to fill.
  71 * @list: Pool of free uc/wc pages for fast reuse.
  72 * @gfp_flags: Flags to pass for alloc_page.
  73 * @npages: Number of pages in pool.
  74 */
  75struct ttm_page_pool {
  76	spinlock_t		lock;
  77	bool			fill_lock;
  78	struct list_head	list;
  79	gfp_t			gfp_flags;
  80	unsigned		npages;
  81	char			*name;
  82	unsigned long		nfrees;
  83	unsigned long		nrefills;
  84	unsigned int		order;
  85};
  86
  87/**
   88 * Limits for the pool. They are handled without locks because the only place
   89 * where they may change is the sysfs store. They won't have an immediate
   90 * effect anyway, so forcing serialized access to them is pointless.
  91 */
  92
  93struct ttm_pool_opts {
  94	unsigned	alloc_size;
  95	unsigned	max_size;
  96	unsigned	small;
  97};
  98
  99#define NUM_POOLS 6
 100
 101/**
  102 * struct ttm_pool_manager - Holds memory pools for fast allocation
 103 *
 104 * Manager is read only object for pool code so it doesn't need locking.
 105 *
 106 * @free_interval: minimum number of jiffies between freeing pages from pool.
 107 * @page_alloc_inited: reference counting for pool allocation.
  108 * @work: Work that is used to shrink the pool. Work is only run when there are
  109 * some pages to free.
  110 * @small_allocation: Limit, in number of pages, for what counts as a small allocation.
 111 *
 112 * @pools: All pool objects in use.
 113 **/
 114struct ttm_pool_manager {
 115	struct kobject		kobj;
 116	struct shrinker		mm_shrink;
 117	struct ttm_pool_opts	options;
 118
 119	union {
 120		struct ttm_page_pool	pools[NUM_POOLS];
 121		struct {
 122			struct ttm_page_pool	wc_pool;
 123			struct ttm_page_pool	uc_pool;
 124			struct ttm_page_pool	wc_pool_dma32;
 125			struct ttm_page_pool	uc_pool_dma32;
 126			struct ttm_page_pool	wc_pool_huge;
 127			struct ttm_page_pool	uc_pool_huge;
 128		} ;
 129	};
 130};
 131
 132static struct attribute ttm_page_pool_max = {
 133	.name = "pool_max_size",
 134	.mode = S_IRUGO | S_IWUSR
 135};
 136static struct attribute ttm_page_pool_small = {
 137	.name = "pool_small_allocation",
 138	.mode = S_IRUGO | S_IWUSR
 139};
 140static struct attribute ttm_page_pool_alloc_size = {
 141	.name = "pool_allocation_size",
 142	.mode = S_IRUGO | S_IWUSR
 143};
 144
 145static struct attribute *ttm_pool_attrs[] = {
 146	&ttm_page_pool_max,
 147	&ttm_page_pool_small,
 148	&ttm_page_pool_alloc_size,
 149	NULL
 150};
 151
 152static void ttm_pool_kobj_release(struct kobject *kobj)
 153{
 154	struct ttm_pool_manager *m =
 155		container_of(kobj, struct ttm_pool_manager, kobj);
 156	kfree(m);
 157}
 158
 159static ssize_t ttm_pool_store(struct kobject *kobj,
 160		struct attribute *attr, const char *buffer, size_t size)
 161{
 162	struct ttm_pool_manager *m =
 163		container_of(kobj, struct ttm_pool_manager, kobj);
 164	int chars;
 165	unsigned val;
 166	chars = sscanf(buffer, "%u", &val);
 167	if (chars == 0)
 168		return size;
 169
 170	/* Convert kb to number of pages */
 171	val = val / (PAGE_SIZE >> 10);
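	/* with 4 KiB pages the divisor is 4: 8192 KiB -> 2048 pages */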
 172
 173	if (attr == &ttm_page_pool_max)
 174		m->options.max_size = val;
 175	else if (attr == &ttm_page_pool_small)
 176		m->options.small = val;
 177	else if (attr == &ttm_page_pool_alloc_size) {
 178		if (val > NUM_PAGES_TO_ALLOC*8) {
 179			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
 180			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
 181			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
 182			return size;
 183		} else if (val > NUM_PAGES_TO_ALLOC) {
 184			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
 185				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
 186		}
 187		m->options.alloc_size = val;
 188	}
 189
 190	return size;
 191}
 192
 193static ssize_t ttm_pool_show(struct kobject *kobj,
 194		struct attribute *attr, char *buffer)
 195{
 196	struct ttm_pool_manager *m =
 197		container_of(kobj, struct ttm_pool_manager, kobj);
 198	unsigned val = 0;
 199
 200	if (attr == &ttm_page_pool_max)
 201		val = m->options.max_size;
 202	else if (attr == &ttm_page_pool_small)
 203		val = m->options.small;
 204	else if (attr == &ttm_page_pool_alloc_size)
 205		val = m->options.alloc_size;
 206
 207	val = val * (PAGE_SIZE >> 10);
 208
 209	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
 210}
 211
 212static const struct sysfs_ops ttm_pool_sysfs_ops = {
 213	.show = &ttm_pool_show,
 214	.store = &ttm_pool_store,
 215};
 216
 217static struct kobj_type ttm_pool_kobj_type = {
 218	.release = &ttm_pool_kobj_release,
 219	.sysfs_ops = &ttm_pool_sysfs_ops,
 220	.default_attrs = ttm_pool_attrs,
 221};
 222
 223static struct ttm_pool_manager *_manager;
 224
 225#ifndef CONFIG_X86
 226static int set_pages_wb(struct page *page, int numpages)
 227{
 228#if IS_ENABLED(CONFIG_AGP)
 229	int i;
 230
 231	for (i = 0; i < numpages; i++)
 232		unmap_page_from_agp(page++);
 233#endif
 234	return 0;
 235}
 236
 237static int set_pages_array_wb(struct page **pages, int addrinarray)
 238{
 239#if IS_ENABLED(CONFIG_AGP)
 240	int i;
 241
 242	for (i = 0; i < addrinarray; i++)
 243		unmap_page_from_agp(pages[i]);
 244#endif
 245	return 0;
 246}
 247
 248static int set_pages_array_wc(struct page **pages, int addrinarray)
 249{
 250#if IS_ENABLED(CONFIG_AGP)
 251	int i;
 252
 253	for (i = 0; i < addrinarray; i++)
 254		map_page_into_agp(pages[i]);
 255#endif
 256	return 0;
 257}
 258
 259static int set_pages_array_uc(struct page **pages, int addrinarray)
 260{
 261#if IS_ENABLED(CONFIG_AGP)
 262	int i;
 263
 264	for (i = 0; i < addrinarray; i++)
 265		map_page_into_agp(pages[i]);
 266#endif
 267	return 0;
 268}
 269#endif
 270
 271/**
  272 * Select the right pool for the requested caching state and ttm flags. */
 273static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
 274					  enum ttm_caching_state cstate)
 275{
 276	int pool_index;
 277
 278	if (cstate == tt_cached)
 279		return NULL;
 280
 281	if (cstate == tt_wc)
 282		pool_index = 0x0;
 283	else
 284		pool_index = 0x1;
 285
 286	if (flags & TTM_PAGE_FLAG_DMA32) {
 287		if (huge)
 288			return NULL;
 289		pool_index |= 0x2;
 290
 291	} else if (huge) {
 292		pool_index |= 0x4;
 293	}
 294
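	/* bit 0 selects uc over wc, bit 1 dma32, bit 2 the huge pools; the
	 * index matches the pools[] union order declared above */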
 295	return &_manager->pools[pool_index];
 296}
 297
 298/* set memory back to wb and free the pages. */
 299static void ttm_pages_put(struct page *pages[], unsigned npages,
 300		unsigned int order)
 301{
 302	unsigned int i, pages_nr = (1 << order);
 303
 304	if (order == 0) {
 305		if (set_pages_array_wb(pages, npages))
 306			pr_err("Failed to set %d pages to wb!\n", npages);
 307	}
 308
 309	for (i = 0; i < npages; ++i) {
 310		if (order > 0) {
 311			if (set_pages_wb(pages[i], pages_nr))
 312				pr_err("Failed to set %d pages to wb!\n", pages_nr);
 313		}
 314		__free_pages(pages[i], order);
 315	}
 316}
 317
 318static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
 319		unsigned freed_pages)
 320{
 321	pool->npages -= freed_pages;
 322	pool->nfrees += freed_pages;
 323}
 324
 325/**
 326 * Free pages from pool.
 327 *
  328 * To prevent hogging the ttm_swap process we free at most NUM_PAGES_TO_ALLOC
  329 * pages in one go.
 330 *
  331 * @pool: pool to free the pages from
  332 * @nr_free: number of pages to free; FREE_ALL_PAGES frees every page in the pool
 333 * @use_static: Safe to use static buffer
 334 **/
 335static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
 336			      bool use_static)
 337{
 338	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
 339	unsigned long irq_flags;
 340	struct page *p;
 341	struct page **pages_to_free;
 342	unsigned freed_pages = 0,
 343		 npages_to_free = nr_free;
 344
 345	if (NUM_PAGES_TO_ALLOC < nr_free)
 346		npages_to_free = NUM_PAGES_TO_ALLOC;
 347
 348	if (use_static)
 349		pages_to_free = static_buf;
 350	else
 351		pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
 352					GFP_KERNEL);
 353	if (!pages_to_free) {
 354		pr_debug("Failed to allocate memory for pool free operation\n");
 355		return 0;
 356	}
 357
 358restart:
 359	spin_lock_irqsave(&pool->lock, irq_flags);
 360
 361	list_for_each_entry_reverse(p, &pool->list, lru) {
 362		if (freed_pages >= npages_to_free)
 363			break;
 364
 365		pages_to_free[freed_pages++] = p;
 366		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
 367		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
 368			/* remove range of pages from the pool */
 369			__list_del(p->lru.prev, &pool->list);
 370
 371			ttm_pool_update_free_locked(pool, freed_pages);
 372			/**
 373			 * Because changing page caching is costly
 374			 * we unlock the pool to prevent stalling.
 375			 */
 376			spin_unlock_irqrestore(&pool->lock, irq_flags);
 377
 378			ttm_pages_put(pages_to_free, freed_pages, pool->order);
 379			if (likely(nr_free != FREE_ALL_PAGES))
 380				nr_free -= freed_pages;
 381
 382			if (NUM_PAGES_TO_ALLOC >= nr_free)
 383				npages_to_free = nr_free;
 384			else
 385				npages_to_free = NUM_PAGES_TO_ALLOC;
 386
 387			freed_pages = 0;
 388
 389			/* free all so restart the processing */
 390			if (nr_free)
 391				goto restart;
 392
  393			/* Not allowed to fall through or break because
  394			 * the code after the loop runs with the spinlock
  395			 * held, while we have already dropped it here.
  396			 */
 397			goto out;
 398
 399		}
 400	}
 401
 402	/* remove range of pages from the pool */
 403	if (freed_pages) {
 404		__list_del(&p->lru, &pool->list);
 405
 406		ttm_pool_update_free_locked(pool, freed_pages);
 407		nr_free -= freed_pages;
 408	}
 409
 410	spin_unlock_irqrestore(&pool->lock, irq_flags);
 411
 412	if (freed_pages)
 413		ttm_pages_put(pages_to_free, freed_pages, pool->order);
 414out:
 415	if (pages_to_free != static_buf)
 416		kfree(pages_to_free);
 417	return nr_free;
 418}
 419
 420/**
  421 * Callback for mm to request that the pool reduce the number of pages held.
 422 *
 423 * XXX: (dchinner) Deadlock warning!
 424 *
 425 * This code is crying out for a shrinker per pool....
 426 */
 427static unsigned long
 428ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 429{
 430	static DEFINE_MUTEX(lock);
 431	static unsigned start_pool;
 432	unsigned i;
 433	unsigned pool_offset;
 434	struct ttm_page_pool *pool;
 435	int shrink_pages = sc->nr_to_scan;
 436	unsigned long freed = 0;
 437	unsigned int nr_free_pool;
 438
 439	if (!mutex_trylock(&lock))
 440		return SHRINK_STOP;
 441	pool_offset = ++start_pool % NUM_POOLS;
 442	/* select start pool in round robin fashion */
 443	for (i = 0; i < NUM_POOLS; ++i) {
 444		unsigned nr_free = shrink_pages;
 445		unsigned page_nr;
 446
 447		if (shrink_pages == 0)
 448			break;
 449
 450		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
 451		page_nr = (1 << pool->order);
 452		/* OK to use static buffer since global mutex is held. */
 453		nr_free_pool = roundup(nr_free, page_nr) >> pool->order;
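		/* e.g. for the huge pools with 2 MiB THP (order 9 on x86-64),
		 * nr_free = 100 rounds up to a single compound page */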
 454		shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
 455		freed += (nr_free_pool - shrink_pages) << pool->order;
 456		if (freed >= sc->nr_to_scan)
 457			break;
 458		shrink_pages <<= pool->order;
 459	}
 460	mutex_unlock(&lock);
 461	return freed;
 462}
 463
 464
 465static unsigned long
 466ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 467{
 468	unsigned i;
 469	unsigned long count = 0;
 470	struct ttm_page_pool *pool;
 471
 472	for (i = 0; i < NUM_POOLS; ++i) {
 473		pool = &_manager->pools[i];
 474		count += (pool->npages << pool->order);
 475	}
 476
 477	return count;
 478}
 479
 480static int ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
 481{
 482	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
 483	manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
 484	manager->mm_shrink.seeks = 1;
 485	return register_shrinker(&manager->mm_shrink);
 486}
 487
 488static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
 489{
 490	unregister_shrinker(&manager->mm_shrink);
 491}
 492
 493static int ttm_set_pages_caching(struct page **pages,
 494		enum ttm_caching_state cstate, unsigned cpages)
 495{
 496	int r = 0;
 497	/* Set page caching */
 498	switch (cstate) {
 499	case tt_uncached:
 500		r = set_pages_array_uc(pages, cpages);
 501		if (r)
 502			pr_err("Failed to set %d pages to uc!\n", cpages);
 503		break;
 504	case tt_wc:
 505		r = set_pages_array_wc(pages, cpages);
 506		if (r)
 507			pr_err("Failed to set %d pages to wc!\n", cpages);
 508		break;
 509	default:
 510		break;
 511	}
 512	return r;
 513}
 514
 515/**
  516 * Free the pages that failed to change their caching state. If any pages
  517 * have already changed their caching state, put them back into the
  518 * pool.
 519 */
 520static void ttm_handle_caching_state_failure(struct list_head *pages,
 521		int ttm_flags, enum ttm_caching_state cstate,
 522		struct page **failed_pages, unsigned cpages)
 523{
 524	unsigned i;
 525	/* Failed pages have to be freed */
 526	for (i = 0; i < cpages; ++i) {
 527		list_del(&failed_pages[i]->lru);
 528		__free_page(failed_pages[i]);
 529	}
 530}
 531
 532/**
 533 * Allocate new pages with correct caching.
 534 *
 535 * This function is reentrant if caller updates count depending on number of
 536 * pages returned in pages array.
 537 */
 538static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
 539			       int ttm_flags, enum ttm_caching_state cstate,
 540			       unsigned count, unsigned order)
 541{
 542	struct page **caching_array;
 543	struct page *p;
 544	int r = 0;
 545	unsigned i, j, cpages;
 546	unsigned npages = 1 << order;
 547	unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);
 548
 549	/* allocate array for page caching change */
 550	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
 551
 552	if (!caching_array) {
 553		pr_debug("Unable to allocate table for new pages\n");
 554		return -ENOMEM;
 555	}
 556
 557	for (i = 0, cpages = 0; i < count; ++i) {
 558		p = alloc_pages(gfp_flags, order);
 559
 560		if (!p) {
 561			pr_debug("Unable to get page %u\n", i);
 562
 563			/* store already allocated pages in the pool after
 564			 * setting the caching state */
 565			if (cpages) {
 566				r = ttm_set_pages_caching(caching_array,
 567							  cstate, cpages);
 568				if (r)
 569					ttm_handle_caching_state_failure(pages,
 570						ttm_flags, cstate,
 571						caching_array, cpages);
 572			}
 573			r = -ENOMEM;
 574			goto out;
 575		}
 576
 577		list_add(&p->lru, pages);
 578
 579#ifdef CONFIG_HIGHMEM
  580		/* gfp flags of highmem page should never be dma32 so
  581		 * we should be fine in such a case
 582		 */
 583		if (PageHighMem(p))
 584			continue;
 585
 586#endif
 587		for (j = 0; j < npages; ++j) {
 588			caching_array[cpages++] = p++;
 589			if (cpages == max_cpages) {
 590
 591				r = ttm_set_pages_caching(caching_array,
 592						cstate, cpages);
 593				if (r) {
 594					ttm_handle_caching_state_failure(pages,
 595						ttm_flags, cstate,
 596						caching_array, cpages);
 597					goto out;
 598				}
 599				cpages = 0;
 600			}
 601		}
 602	}
 603
 604	if (cpages) {
 605		r = ttm_set_pages_caching(caching_array, cstate, cpages);
 606		if (r)
 607			ttm_handle_caching_state_failure(pages,
 608					ttm_flags, cstate,
 609					caching_array, cpages);
 610	}
 611out:
 612	kfree(caching_array);
 613
 614	return r;
 615}
 616
 617/**
 618 * Fill the given pool if there aren't enough pages and the requested number of
 619 * pages is small.
 620 */
 621static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
 622				      enum ttm_caching_state cstate,
 623				      unsigned count, unsigned long *irq_flags)
 624{
 625	struct page *p;
 626	int r;
 627	unsigned cpages = 0;
 628	/**
 629	 * Only allow one pool fill operation at a time.
 630	 * If pool doesn't have enough pages for the allocation new pages are
 631	 * allocated from outside of pool.
 632	 */
 633	if (pool->fill_lock)
 634		return;
 635
 636	pool->fill_lock = true;
 637
 638	/* If allocation request is small and there are not enough
 639	 * pages in a pool we fill the pool up first. */
 640	if (count < _manager->options.small
 641		&& count > pool->npages) {
 642		struct list_head new_pages;
 643		unsigned alloc_size = _manager->options.alloc_size;
 644
 645		/**
 646		 * Can't change page caching if in irqsave context. We have to
 647		 * drop the pool->lock.
 648		 */
 649		spin_unlock_irqrestore(&pool->lock, *irq_flags);
 650
 651		INIT_LIST_HEAD(&new_pages);
 652		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
 653					cstate, alloc_size, 0);
 654		spin_lock_irqsave(&pool->lock, *irq_flags);
 655
 656		if (!r) {
 657			list_splice(&new_pages, &pool->list);
 658			++pool->nrefills;
 659			pool->npages += alloc_size;
 660		} else {
 661			pr_debug("Failed to fill pool (%p)\n", pool);
 662			/* If we have any pages left put them to the pool. */
 663			list_for_each_entry(p, &new_pages, lru) {
 664				++cpages;
 665			}
 666			list_splice(&new_pages, &pool->list);
 667			pool->npages += cpages;
 668		}
 669
 670	}
 671	pool->fill_lock = false;
 672}
 673
 674/**
 675 * Allocate pages from the pool and put them on the return list.
 676 *
 677 * @return zero for success or negative error code.
 678 */
 679static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 680				   struct list_head *pages,
 681				   int ttm_flags,
 682				   enum ttm_caching_state cstate,
 683				   unsigned count, unsigned order)
 684{
 685	unsigned long irq_flags;
 686	struct list_head *p;
 687	unsigned i;
 688	int r = 0;
 689
 690	spin_lock_irqsave(&pool->lock, irq_flags);
 691	if (!order)
 692		ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count,
 693					  &irq_flags);
 694
 695	if (count >= pool->npages) {
 696		/* take all pages from the pool */
 697		list_splice_init(&pool->list, pages);
 698		count -= pool->npages;
 699		pool->npages = 0;
 700		goto out;
 701	}
  702	/* Find the last page to include for the requested number of pages. Walk
  703	 * from whichever end of the list is closer to halve the search space. */
 704	if (count <= pool->npages/2) {
 705		i = 0;
 706		list_for_each(p, &pool->list) {
 707			if (++i == count)
 708				break;
 709		}
 710	} else {
 711		i = pool->npages + 1;
 712		list_for_each_prev(p, &pool->list) {
 713			if (--i == count)
 714				break;
 715		}
 716	}
 717	/* Cut 'count' number of pages from the pool */
 718	list_cut_position(pages, &pool->list, p);
 719	pool->npages -= count;
 720	count = 0;
 721out:
 722	spin_unlock_irqrestore(&pool->lock, irq_flags);
 723
 724	/* clear the pages coming from the pool if requested */
 725	if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
 726		struct page *page;
 727
 728		list_for_each_entry(page, pages, lru) {
 729			if (PageHighMem(page))
 730				clear_highpage(page);
 731			else
 732				clear_page(page_address(page));
 733		}
 734	}
 735
 736	/* If pool didn't have enough pages allocate new one. */
 737	if (count) {
 738		gfp_t gfp_flags = pool->gfp_flags;
 739
 740		/* set zero flag for page allocation if required */
 741		if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
 742			gfp_flags |= __GFP_ZERO;
 743
 744		if (ttm_flags & TTM_PAGE_FLAG_NO_RETRY)
 745			gfp_flags |= __GFP_RETRY_MAYFAIL;
 746
 747		/* ttm_alloc_new_pages doesn't reference pool so we can run
 748		 * multiple requests in parallel.
 749		 **/
 750		r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate,
 751					count, order);
 752	}
 753
 754	return r;
 755}
 756
  757/* Put all pages in the pages list into the correct pool to await reuse */
 758static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 759			  enum ttm_caching_state cstate)
 760{
 761	struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
 762#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 763	struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
 764#endif
 765	unsigned long irq_flags;
 766	unsigned i;
 767
 768	if (pool == NULL) {
 769		/* No pool for this memory type so free the pages */
 770		i = 0;
 771		while (i < npages) {
 772#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 773			struct page *p = pages[i];
 774#endif
 775			unsigned order = 0, j;
 776
 777			if (!pages[i]) {
 778				++i;
 779				continue;
 780			}
 781
 782#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 783			if (!(flags & TTM_PAGE_FLAG_DMA32)) {
 784				for (j = 0; j < HPAGE_PMD_NR; ++j)
 785					if (p++ != pages[i + j])
 786					    break;
 787
 788				if (j == HPAGE_PMD_NR)
 789					order = HPAGE_PMD_ORDER;
 790			}
 791#endif
 792
 793			if (page_count(pages[i]) != 1)
 794				pr_err("Erroneous page count. Leaking pages.\n");
 795			__free_pages(pages[i], order);
 796
 797			j = 1 << order;
 798			while (j) {
 799				pages[i++] = NULL;
 800				--j;
 801			}
 802		}
 803		return;
 804	}
 805
 806	i = 0;
 807#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 808	if (huge) {
 809		unsigned max_size, n2free;
 810
 811		spin_lock_irqsave(&huge->lock, irq_flags);
 812		while (i < npages) {
 813			struct page *p = pages[i];
 814			unsigned j;
 815
 816			if (!p)
 817				break;
 818
 819			for (j = 0; j < HPAGE_PMD_NR; ++j)
 820				if (p++ != pages[i + j])
 821				    break;
 822
 823			if (j != HPAGE_PMD_NR)
 824				break;
 825
 826			list_add_tail(&pages[i]->lru, &huge->list);
 827
 828			for (j = 0; j < HPAGE_PMD_NR; ++j)
 829				pages[i++] = NULL;
 830			huge->npages++;
 831		}
 832
 833		/* Check that we don't go over the pool limit */
 834		max_size = _manager->options.max_size;
 835		max_size /= HPAGE_PMD_NR;
 836		if (huge->npages > max_size)
 837			n2free = huge->npages - max_size;
 838		else
 839			n2free = 0;
 840		spin_unlock_irqrestore(&huge->lock, irq_flags);
 841		if (n2free)
 842			ttm_page_pool_free(huge, n2free, false);
 843	}
 844#endif
 845
 846	spin_lock_irqsave(&pool->lock, irq_flags);
 847	while (i < npages) {
 848		if (pages[i]) {
 849			if (page_count(pages[i]) != 1)
 850				pr_err("Erroneous page count. Leaking pages.\n");
 851			list_add_tail(&pages[i]->lru, &pool->list);
 852			pages[i] = NULL;
 853			pool->npages++;
 854		}
 855		++i;
 856	}
 857	/* Check that we don't go over the pool limit */
 858	npages = 0;
 859	if (pool->npages > _manager->options.max_size) {
 860		npages = pool->npages - _manager->options.max_size;
 861		/* free at least NUM_PAGES_TO_ALLOC number of pages
 862		 * to reduce calls to set_memory_wb */
 863		if (npages < NUM_PAGES_TO_ALLOC)
 864			npages = NUM_PAGES_TO_ALLOC;
 865	}
 866	spin_unlock_irqrestore(&pool->lock, irq_flags);
 867	if (npages)
 868		ttm_page_pool_free(pool, npages, false);
 869}
 870
 871/*
 872 * On success pages list will hold count number of correctly
 873 * cached pages.
 874 */
 875static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 876			 enum ttm_caching_state cstate)
 877{
 878	struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
 879#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 880	struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
 881#endif
 882	struct list_head plist;
 883	struct page *p = NULL;
 884	unsigned count, first;
 885	int r;
 886
 887	/* No pool for cached pages */
 888	if (pool == NULL) {
 889		gfp_t gfp_flags = GFP_USER;
 890		unsigned i;
 891#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 892		unsigned j;
 893#endif
 894
 895		/* set zero flag for page allocation if required */
 896		if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
 897			gfp_flags |= __GFP_ZERO;
 898
 899		if (flags & TTM_PAGE_FLAG_NO_RETRY)
 900			gfp_flags |= __GFP_RETRY_MAYFAIL;
 901
 902		if (flags & TTM_PAGE_FLAG_DMA32)
 903			gfp_flags |= GFP_DMA32;
 904		else
 905			gfp_flags |= GFP_HIGHUSER;
 906
 907		i = 0;
 908#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 909		if (!(gfp_flags & GFP_DMA32)) {
 910			while (npages >= HPAGE_PMD_NR) {
 911				gfp_t huge_flags = gfp_flags;
 912
 913				huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
 914					__GFP_KSWAPD_RECLAIM;
 915				huge_flags &= ~__GFP_MOVABLE;
 916				huge_flags &= ~__GFP_COMP;
 917				p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
 918				if (!p)
 919					break;
 920
 921				for (j = 0; j < HPAGE_PMD_NR; ++j)
 922					pages[i++] = p++;
 923
 924				npages -= HPAGE_PMD_NR;
 925			}
 926		}
 927#endif
 928
 929		first = i;
 930		while (npages) {
 931			p = alloc_page(gfp_flags);
 932			if (!p) {
 933				pr_debug("Unable to allocate page\n");
 934				return -ENOMEM;
 935			}
 936
 937			/* Swap the pages if we detect consecutive order */
 938			if (i > first && pages[i - 1] == p - 1)
 939				swap(p, pages[i - 1]);
 940
 941			pages[i++] = p;
 942			--npages;
 943		}
 944		return 0;
 945	}
 946
 947	count = 0;
 948
 949#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 950	if (huge && npages >= HPAGE_PMD_NR) {
 951		INIT_LIST_HEAD(&plist);
 952		ttm_page_pool_get_pages(huge, &plist, flags, cstate,
 953					npages / HPAGE_PMD_NR,
 954					HPAGE_PMD_ORDER);
 955
 956		list_for_each_entry(p, &plist, lru) {
 957			unsigned j;
 958
 959			for (j = 0; j < HPAGE_PMD_NR; ++j)
 960				pages[count++] = &p[j];
 961		}
 962	}
 963#endif
 964
 965	INIT_LIST_HEAD(&plist);
 966	r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
 967				    npages - count, 0);
 968
 969	first = count;
 970	list_for_each_entry(p, &plist, lru) {
 971		struct page *tmp = p;
 972
 973		/* Swap the pages if we detect consecutive order */
 974		if (count > first && pages[count - 1] == tmp - 1)
 975			swap(tmp, pages[count - 1]);
 976		pages[count++] = tmp;
 977	}
 978
 979	if (r) {
  980		/* If there are any pages in the list put them back to
 981		 * the pool.
 982		 */
 983		pr_debug("Failed to allocate extra pages for large request\n");
 984		ttm_put_pages(pages, count, flags, cstate);
 985		return r;
 986	}
 987
 988	return 0;
 989}
 990
 991static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
 992		char *name, unsigned int order)
 993{
 994	spin_lock_init(&pool->lock);
 995	pool->fill_lock = false;
 996	INIT_LIST_HEAD(&pool->list);
 997	pool->npages = pool->nfrees = 0;
 998	pool->gfp_flags = flags;
 999	pool->name = name;
1000	pool->order = order;
1001}
1002
1003int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
1004{
1005	int ret;
1006#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1007	unsigned order = HPAGE_PMD_ORDER;
1008#else
1009	unsigned order = 0;
1010#endif
1011
1012	WARN_ON(_manager);
1013
1014	pr_info("Initializing pool allocator\n");
1015
1016	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
1017	if (!_manager)
1018		return -ENOMEM;
1019
1020	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
1021
1022	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);
1023
1024	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
1025				  GFP_USER | GFP_DMA32, "wc dma", 0);
1026
1027	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
1028				  GFP_USER | GFP_DMA32, "uc dma", 0);
1029
1030	ttm_page_pool_init_locked(&_manager->wc_pool_huge,
1031				  (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
1032				   __GFP_KSWAPD_RECLAIM) &
1033				  ~(__GFP_MOVABLE | __GFP_COMP),
1034				  "wc huge", order);
1035
1036	ttm_page_pool_init_locked(&_manager->uc_pool_huge,
1037				  (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
1038				   __GFP_KSWAPD_RECLAIM) &
1039				  ~(__GFP_MOVABLE | __GFP_COMP)
1040				  , "uc huge", order);
1041
1042	_manager->options.max_size = max_pages;
1043	_manager->options.small = SMALL_ALLOCATION;
1044	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
1045
1046	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
1047				   &glob->kobj, "pool");
1048	if (unlikely(ret != 0))
1049		goto error;
1050
1051	ret = ttm_pool_mm_shrink_init(_manager);
1052	if (unlikely(ret != 0))
1053		goto error;
1054	return 0;
1055
1056error:
1057	kobject_put(&_manager->kobj);
1058	_manager = NULL;
1059	return ret;
1060}
1061
1062void ttm_page_alloc_fini(void)
1063{
1064	int i;
1065
1066	pr_info("Finalizing pool allocator\n");
1067	ttm_pool_mm_shrink_fini(_manager);
1068
1069	/* OK to use static buffer since global mutex is no longer used. */
1070	for (i = 0; i < NUM_POOLS; ++i)
1071		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
1072
1073	kobject_put(&_manager->kobj);
1074	_manager = NULL;
1075}
1076
1077static void
1078ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
1079{
1080	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
1081	unsigned i;
1082
1083	if (mem_count_update == 0)
1084		goto put_pages;
1085
1086	for (i = 0; i < mem_count_update; ++i) {
1087		if (!ttm->pages[i])
1088			continue;
1089
1090		ttm_mem_global_free_page(mem_glob, ttm->pages[i], PAGE_SIZE);
1091	}
1092
1093put_pages:
1094	ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
1095		      ttm->caching_state);
1096	ttm->state = tt_unpopulated;
1097}
1098
1099int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
1100{
1101	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
1102	unsigned i;
1103	int ret;
1104
1105	if (ttm->state != tt_unpopulated)
1106		return 0;
1107
1108	if (ttm_check_under_lowerlimit(mem_glob, ttm->num_pages, ctx))
1109		return -ENOMEM;
1110
1111	ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
1112			    ttm->caching_state);
1113	if (unlikely(ret != 0)) {
1114		ttm_pool_unpopulate_helper(ttm, 0);
1115		return ret;
1116	}
1117
1118	for (i = 0; i < ttm->num_pages; ++i) {
1119		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
1120						PAGE_SIZE, ctx);
1121		if (unlikely(ret != 0)) {
1122			ttm_pool_unpopulate_helper(ttm, i);
1123			return -ENOMEM;
1124		}
1125	}
1126
1127	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
1128		ret = ttm_tt_swapin(ttm);
1129		if (unlikely(ret != 0)) {
1130			ttm_pool_unpopulate(ttm);
1131			return ret;
1132		}
1133	}
1134
1135	ttm->state = tt_unbound;
1136	return 0;
1137}
1138EXPORT_SYMBOL(ttm_pool_populate);
1139
1140void ttm_pool_unpopulate(struct ttm_tt *ttm)
1141{
1142	ttm_pool_unpopulate_helper(ttm, ttm->num_pages);
1143}
1144EXPORT_SYMBOL(ttm_pool_unpopulate);
1145
1146int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
1147					struct ttm_operation_ctx *ctx)
1148{
1149	unsigned i, j;
1150	int r;
1151
1152	r = ttm_pool_populate(&tt->ttm, ctx);
1153	if (r)
1154		return r;
1155
1156	for (i = 0; i < tt->ttm.num_pages; ++i) {
1157		struct page *p = tt->ttm.pages[i];
1158		size_t num_pages = 1;
1159
1160		for (j = i + 1; j < tt->ttm.num_pages; ++j) {
1161			if (++p != tt->ttm.pages[j])
1162				break;
1163
1164			++num_pages;
1165		}
1166
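		/* a run of physically consecutive pages was detected above,
		 * so map the whole run with one dma_map_page() call */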
1167		tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
1168						  0, num_pages * PAGE_SIZE,
1169						  DMA_BIDIRECTIONAL);
1170		if (dma_mapping_error(dev, tt->dma_address[i])) {
1171			while (i--) {
1172				dma_unmap_page(dev, tt->dma_address[i],
1173					       PAGE_SIZE, DMA_BIDIRECTIONAL);
1174				tt->dma_address[i] = 0;
1175			}
1176			ttm_pool_unpopulate(&tt->ttm);
1177			return -EFAULT;
1178		}
1179
1180		for (j = 1; j < num_pages; ++j) {
1181			tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE;
1182			++i;
1183		}
1184	}
1185	return 0;
1186}
1187EXPORT_SYMBOL(ttm_populate_and_map_pages);
1188
1189void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
1190{
1191	unsigned i, j;
1192
1193	for (i = 0; i < tt->ttm.num_pages;) {
1194		struct page *p = tt->ttm.pages[i];
1195		size_t num_pages = 1;
1196
1197		if (!tt->dma_address[i] || !tt->ttm.pages[i]) {
1198			++i;
1199			continue;
1200		}
1201
1202		for (j = i + 1; j < tt->ttm.num_pages; ++j) {
1203			if (++p != tt->ttm.pages[j])
1204				break;
1205
1206			++num_pages;
1207		}
1208
1209		dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE,
1210			       DMA_BIDIRECTIONAL);
1211
1212		i += num_pages;
1213	}
1214	ttm_pool_unpopulate(&tt->ttm);
1215}
1216EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
1217
1218int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
1219{
1220	struct ttm_page_pool *p;
1221	unsigned i;
1222	char *h[] = {"pool", "refills", "pages freed", "size"};
1223	if (!_manager) {
1224		seq_printf(m, "No pool allocator running.\n");
1225		return 0;
1226	}
1227	seq_printf(m, "%7s %12s %13s %8s\n",
1228			h[0], h[1], h[2], h[3]);
1229	for (i = 0; i < NUM_POOLS; ++i) {
1230		p = &_manager->pools[i];
1231
1232		seq_printf(m, "%7s %12ld %13ld %8d\n",
1233				p->name, p->nrefills,
1234				p->nfrees, p->npages);
1235	}
1236	return 0;
1237}
1238EXPORT_SYMBOL(ttm_page_alloc_debugfs);
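
In v4.17 the allocator is consumed through the populate helpers rather than
exported get/put calls. A hedged sketch of the driver glue follows; the
my_tt_populate/my_tt_unpopulate names and their wiring into a ttm backend are
assumptions, while the ttm_pool_* and ttm_populate_and_map_pages() symbols are
the ones exported above:

static int my_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	/* fills ttm->pages from the pools and charges the memory global */
	return ttm_pool_populate(ttm, ctx);
}

static void my_tt_unpopulate(struct ttm_tt *ttm)
{
	/* uncharges the pages and returns them to the matching pool */
	ttm_pool_unpopulate(ttm);
}

/* For a ttm_dma_tt the combined helpers populate and DMA-map consecutive
 * page runs in one step:
 *   r = ttm_populate_and_map_pages(dev, &dma_tt, ctx);
 *   ...
 *   ttm_unmap_and_unpopulate_pages(dev, &dma_tt);
 */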