// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

/* Pooling of allocated pages is necessary because changing the caching
 * attributes on x86 of the linear mapping requires a costly cross CPU TLB
 * invalidate for those addresses.
 *
 * In addition to that, allocations from the DMA coherent API are pooled as
 * well because they are rather slow compared to alloc_pages+map.
 */

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_tt.h>

#include "ttm_module.h"

/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr returned for the mapping and order in the lower bits
 */
struct ttm_pool_dma {
	dma_addr_t addr;
	unsigned long vaddr;
};

static unsigned long page_pool_size;

MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[MAX_ORDER];
static struct ttm_pool_type global_uncached[MAX_ORDER];

static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];

static struct mutex shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;

/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
					unsigned int order)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	struct page *p;
	void *vaddr;

	/* Don't set the __GFP_COMP flag for higher order allocations.
	 * Mapping pages directly into a userspace process and calling
	 * put_page() on a TTM allocated page is illegal.
	 */
	if (order)
		gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
			__GFP_KSWAPD_RECLAIM;

	if (!pool->use_dma_alloc) {
		p = alloc_pages(gfp_flags, order);
		if (p)
			p->private = order;
		return p;
	}

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
				&dma->addr, gfp_flags, attr);
	if (!vaddr)
		goto error_free;

	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
	 * TTM page fault handling and extend the DMA API to clean this up.
	 */
	if (is_vmalloc_addr(vaddr))
		p = vmalloc_to_page(vaddr);
	else
		p = virt_to_page(vaddr);

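	/* The buffer is page aligned, so the low PAGE_SHIFT bits of vaddr are
	 * free to stash the allocation order for ttm_pool_page_order().
	 */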
	dma->vaddr = (unsigned long)vaddr | order;
	p->private = (unsigned long)dma;
	return p;

error_free:
	kfree(dma);
	return NULL;
}

/* Reset the caching and free pages of size 1 << order */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct page *p)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	void *vaddr;

#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant then.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif

	if (!pool || !pool->use_dma_alloc) {
		__free_pages(p, order);
		return;
	}

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	dma = (void *)p->private;
	vaddr = (void *)(dma->vaddr & PAGE_MASK);
	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
		       attr);
	kfree(dma);
}

/* Apply a new caching to an array of pages */
static int ttm_pool_apply_caching(struct page **first, struct page **last,
				  enum ttm_caching caching)
{
#ifdef CONFIG_X86
	unsigned int num_pages = last - first;

	if (!num_pages)
		return 0;

	switch (caching) {
	case ttm_cached:
		break;
	case ttm_write_combined:
		return set_pages_array_wc(first, num_pages);
	case ttm_uncached:
		return set_pages_array_uc(first, num_pages);
	}
#endif
	return 0;
}

/* Map pages of 1 << order size and fill the DMA address array */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct page *p, dma_addr_t **dma_addr)
{
	dma_addr_t addr;
	unsigned int i;

	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		addr = dma->addr;
	} else {
		size_t size = (1ULL << order) * PAGE_SIZE;

		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(pool->dev, addr))
			return -EFAULT;
	}

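	/* A higher order allocation is physically contiguous, so one
	 * dma_addr_t per PAGE_SIZE chunk can simply be fanned out.
	 */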
	for (i = 1 << order; i ; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}

	return 0;
}

/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
	/* Unmapped while freeing the page */
	if (pool->use_dma_alloc)
		return;

	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
		       DMA_BIDIRECTIONAL);
}

/* Give pages into a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
	unsigned int i, num_pages = 1 << pt->order;

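	/* Clear the pages on the way in, so that pages taken from the pool
	 * later never hand back data from a previous user.
	 */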
	for (i = 0; i < num_pages; ++i) {
		if (PageHighMem(p))
			clear_highpage(p + i);
		else
			clear_page(page_address(p + i));
	}

	spin_lock(&pt->lock);
	list_add(&p->lru, &pt->pages);
	spin_unlock(&pt->lock);
	atomic_long_add(1 << pt->order, &allocated_pages);
}

/* Take pages from a specific pool_type, return NULL when nothing available */
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&pt->lock);
	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
	if (p) {
		atomic_long_sub(1 << pt->order, &allocated_pages);
		list_del(&p->lru);
	}
	spin_unlock(&pt->lock);

	return p;
}

/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
			       enum ttm_caching caching, unsigned int order)
{
	pt->pool = pool;
	pt->caching = caching;
	pt->order = order;
	spin_lock_init(&pt->lock);
	INIT_LIST_HEAD(&pt->pages);

	mutex_lock(&shrinker_lock);
	list_add_tail(&pt->shrinker_list, &shrinker_list);
	mutex_unlock(&shrinker_lock);
}

/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
	struct page *p;

	mutex_lock(&shrinker_lock);
	list_del(&pt->shrinker_list);
	mutex_unlock(&shrinker_lock);

	while ((p = ttm_pool_type_take(pt)))
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
}

/* Return the pool_type to use for the given caching and order */
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
						  enum ttm_caching caching,
						  unsigned int order)
{
	if (pool->use_dma_alloc)
		return &pool->caching[caching].orders[order];

#ifdef CONFIG_X86
	switch (caching) {
	case ttm_write_combined:
		if (pool->use_dma32)
			return &global_dma32_write_combined[order];

		return &global_write_combined[order];
	case ttm_uncached:
		if (pool->use_dma32)
			return &global_dma32_uncached[order];

		return &global_uncached[order];
	default:
		break;
	}
#endif

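	/* NULL means this combination isn't pooled at all; the caller then
	 * allocates and frees such pages directly.
	 */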
	return NULL;
}

/* Free pages using the global shrinker list */
static unsigned int ttm_pool_shrink(void)
{
	struct ttm_pool_type *pt;
	unsigned int num_freed;
	struct page *p;

	mutex_lock(&shrinker_lock);
	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);

	p = ttm_pool_type_take(pt);
	if (p) {
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
		num_freed = 1 << pt->order;
	} else {
		num_freed = 0;
	}

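	/* Move the pool type to the tail, so that shrinking round-robins
	 * fairly over all registered pool types.
	 */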
	list_move_tail(&pt->shrinker_list, &shrinker_list);
	mutex_unlock(&shrinker_lock);

	return num_freed;
}

/* Return the allocation order of a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		return dma->vaddr & ~PAGE_MASK;
	}

	return p->private;
}

/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
		   struct ttm_operation_ctx *ctx)
{
	unsigned long num_pages = tt->num_pages;
	dma_addr_t *dma_addr = tt->dma_address;
	struct page **caching = tt->pages;
	struct page **pages = tt->pages;
	gfp_t gfp_flags = GFP_USER;
	unsigned int i, order;
	struct page *p;
	int r;

	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
	WARN_ON(dma_addr && !pool->dev);

	if (tt->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (ctx->gfp_retry_mayfail)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	if (pool->use_dma32)
		gfp_flags |= GFP_DMA32;
	else
		gfp_flags |= GFP_HIGHUSER;

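	/* Start with the largest order that still fits num_pages and step
	 * down whenever an allocation fails, so buffers are filled with as
	 * few, as large as possible chunks.
	 */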
	for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
	     num_pages;
	     order = min_t(unsigned int, order, __fls(num_pages))) {
		bool apply_caching = false;
		struct ttm_pool_type *pt;

		pt = ttm_pool_select_type(pool, tt->caching, order);
		p = pt ? ttm_pool_type_take(pt) : NULL;
		if (p) {
			apply_caching = true;
		} else {
			p = ttm_pool_alloc_page(pool, gfp_flags, order);
			if (p && PageHighMem(p))
				apply_caching = true;
		}

		if (!p) {
			if (order) {
				--order;
				continue;
			}
			r = -ENOMEM;
			goto error_free_all;
		}

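		/* Pages from a pool already have the right caching attributes
		 * and highmem pages have no linear mapping to adjust, so
		 * flush the pending attribute change for the freshly
		 * allocated pages gathered so far and let the caching cursor
		 * skip past this chunk.
		 */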
		if (apply_caching) {
			r = ttm_pool_apply_caching(caching, pages,
						   tt->caching);
			if (r)
				goto error_free_page;
			caching = pages + (1 << order);
		}

		if (dma_addr) {
			r = ttm_pool_map(pool, order, p, &dma_addr);
			if (r)
				goto error_free_page;
		}

		num_pages -= 1 << order;
		for (i = 1 << order; i; --i)
			*(pages++) = p++;
	}

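	/* Apply the caching change still pending for the tail of freshly
	 * allocated pages.
	 */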
	r = ttm_pool_apply_caching(caching, pages, tt->caching);
	if (r)
		goto error_free_all;

	return 0;

error_free_page:
	ttm_pool_free_page(pool, tt->caching, order, p);

error_free_all:
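	/* Walk only the prefix of tt->pages that was actually populated,
	 * stepping by the order recorded in each chunk's first page.
	 */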
	num_pages = tt->num_pages - num_pages;
	for (i = 0; i < num_pages; ) {
		order = ttm_pool_page_order(pool, tt->pages[i]);
		ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
		i += 1 << order;
	}

	return r;
}
EXPORT_SYMBOL(ttm_pool_alloc);

/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them.
 */
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
	unsigned int i;

	for (i = 0; i < tt->num_pages; ) {
		struct page *p = tt->pages[i];
		unsigned int order, num_pages;
		struct ttm_pool_type *pt;

		order = ttm_pool_page_order(pool, p);
		num_pages = 1ULL << order;
		if (tt->dma_address)
			ttm_pool_unmap(pool, tt->dma_address[i], num_pages);

		pt = ttm_pool_select_type(pool, tt->caching, order);
		if (pt)
			ttm_pool_type_give(pt, tt->pages[i]);
		else
			ttm_pool_free_page(pool, tt->caching, order,
					   tt->pages[i]);

		i += num_pages;
	}

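	/* Enforce the pool size limit, shrinking until the global page count
	 * drops below page_pool_size again.
	 */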
	while (atomic_long_read(&allocated_pages) > page_pool_size)
		ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);

/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @use_dma_alloc: true if coherent DMA alloc should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types.
 */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
		   bool use_dma_alloc, bool use_dma32)
{
	unsigned int i, j;

	WARN_ON(!dev && use_dma_alloc);

	pool->dev = dev;
	pool->use_dma_alloc = use_dma_alloc;
	pool->use_dma32 = use_dma32;

	if (use_dma_alloc) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j < MAX_ORDER; ++j)
				ttm_pool_type_init(&pool->caching[i].orders[j],
						   pool, i, j);
	}
}

/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */
void ttm_pool_fini(struct ttm_pool *pool)
{
	unsigned int i, j;

	if (pool->use_dma_alloc) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j < MAX_ORDER; ++j)
				ttm_pool_type_fini(&pool->caching[i].orders[j]);
	}
}

/* As long as pages are available make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	unsigned long num_freed = 0;

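	/* Keep shrinking until at least one page was freed or all pools are
	 * empty, so a scan always makes progress when it can.
	 */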
	do
		num_freed += ttm_pool_shrink();
	while (!num_freed && atomic_long_read(&allocated_pages));

	return num_freed;
}

/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	unsigned long num_pages = atomic_long_read(&allocated_pages);

	return num_pages ? num_pages : SHRINK_EMPTY;
}

#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
	unsigned int count = 0;
	struct page *p;

	spin_lock(&pt->lock);
	/* Only used for debugfs, the overhead doesn't matter */
	list_for_each_entry(p, &pt->pages, lru)
		++count;
	spin_unlock(&pt->lock);

	return count;
}

/* Print a nice header for the order */
static void ttm_pool_debugfs_header(struct seq_file *m)
{
	unsigned int i;

	seq_puts(m, "\t ");
	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " ---%2u---", i);
	seq_puts(m, "\n");
}

/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
				    struct seq_file *m)
{
	unsigned int i;

	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
	seq_puts(m, "\n");
}

/* Dump the total amount of allocated pages */
static void ttm_pool_debugfs_footer(struct seq_file *m)
{
	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
		   atomic_long_read(&allocated_pages), page_pool_size);
}

/* Dump the information for the global pools */
static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
	ttm_pool_debugfs_header(m);

	mutex_lock(&shrinker_lock);
	seq_puts(m, "wc\t:");
	ttm_pool_debugfs_orders(global_write_combined, m);
	seq_puts(m, "uc\t:");
	ttm_pool_debugfs_orders(global_uncached, m);
	seq_puts(m, "wc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
	seq_puts(m, "uc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_uncached, m);
	mutex_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);

/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
	unsigned int i;

	if (!pool->use_dma_alloc) {
		seq_puts(m, "unused\n");
		return 0;
	}

	ttm_pool_debugfs_header(m);

	mutex_lock(&shrinker_lock);
	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		seq_puts(m, "DMA ");
		switch (i) {
		case ttm_cached:
			seq_puts(m, "\t:");
			break;
		case ttm_write_combined:
			seq_puts(m, "wc\t:");
			break;
		case ttm_uncached:
			seq_puts(m, "uc\t:");
			break;
		}
		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
	}
	mutex_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);
	return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);

/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct shrink_control sc = { .gfp_mask = GFP_NOFS };

	fs_reclaim_acquire(GFP_KERNEL);
	seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc),
		   ttm_pool_shrinker_scan(&mm_shrinker, &sc));
	fs_reclaim_release(GFP_KERNEL);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);

#endif

/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */
int ttm_pool_mgr_init(unsigned long num_pages)
{
	unsigned int i;

	if (!page_pool_size)
		page_pool_size = num_pages;

	mutex_init(&shrinker_lock);
	INIT_LIST_HEAD(&shrinker_list);

	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_init(&global_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);

		ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_dma32_uncached[i], NULL,
				   ttm_uncached, i);
	}

#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_globals_fops);
	debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_shrink_fops);
#endif

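	/* Register with the MM shrinker so pooled pages can be reclaimed
	 * under memory pressure; seeks = 1 (half of DEFAULT_SEEKS) marks
	 * them as cheap to free and recreate.
	 */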
	mm_shrinker.count_objects = ttm_pool_shrinker_count;
	mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
	mm_shrinker.seeks = 1;
	return register_shrinker(&mm_shrinker);
}

/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */
void ttm_pool_mgr_fini(void)
{
	unsigned int i;

	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_fini(&global_write_combined[i]);
		ttm_pool_type_fini(&global_uncached[i]);

		ttm_pool_type_fini(&global_dma32_write_combined[i]);
		ttm_pool_type_fini(&global_dma32_uncached[i]);
	}

	unregister_shrinker(&mm_shrinker);
	WARN_ON(!list_empty(&shrinker_list));
}