// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

/* Pooling of allocated pages is necessary because changing the caching
 * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
 * invalidate for those addresses.
 *
 * In addition to that, allocations from the DMA coherent API are pooled as
 * well because they are rather slow compared to alloc_pages+map.
 */

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_tt.h>

#include "ttm_module.h"

/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr returned for the mapping, with the allocation
 *	order stored in the lower bits
 */
struct ttm_pool_dma {
	dma_addr_t addr;
	unsigned long vaddr;
};

static unsigned long page_pool_size;

MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[MAX_ORDER];
static struct ttm_pool_type global_uncached[MAX_ORDER];

static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];

static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;

/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
					unsigned int order)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	struct page *p;
	void *vaddr;

	/* Don't set the __GFP_COMP flag for higher order allocations.
	 * Mapping pages directly into a userspace process and calling
	 * put_page() on a TTM allocated page is illegal.
	 */
	if (order)
		gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
			__GFP_KSWAPD_RECLAIM;

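	/* For system memory allocations the order is stashed in page->private
	 * so that ttm_pool_page_order() can recover it when the page is freed.
	 */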
	if (!pool->use_dma_alloc) {
		p = alloc_pages(gfp_flags, order);
		if (p)
			p->private = order;
		return p;
	}

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
				&dma->addr, gfp_flags, attr);
	if (!vaddr)
		goto error_free;

	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
	 * TTM page fault handling and extend the DMA API to clean this up.
	 */
	if (is_vmalloc_addr(vaddr))
		p = vmalloc_to_page(vaddr);
	else
		p = virt_to_page(vaddr);

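	/* vaddr is at least PAGE_SIZE aligned, so the low bits are free to
	 * carry the allocation order for ttm_pool_page_order().
	 */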
	dma->vaddr = (unsigned long)vaddr | order;
	p->private = (unsigned long)dma;
	return p;

error_free:
	kfree(dma);
	return NULL;
}

/* Reset the caching and pages of size 1 << order */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct page *p)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	void *vaddr;

#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant then.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif

	if (!pool || !pool->use_dma_alloc) {
		__free_pages(p, order);
		return;
	}

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	dma = (void *)p->private;
	vaddr = (void *)(dma->vaddr & PAGE_MASK);
	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
		       attr);
	kfree(dma);
}

/* Apply a new caching to an array of pages */
static int ttm_pool_apply_caching(struct page **first, struct page **last,
				  enum ttm_caching caching)
{
#ifdef CONFIG_X86
	unsigned int num_pages = last - first;

	if (!num_pages)
		return 0;

	switch (caching) {
	case ttm_cached:
		break;
	case ttm_write_combined:
		return set_pages_array_wc(first, num_pages);
	case ttm_uncached:
		return set_pages_array_uc(first, num_pages);
	}
#endif
	return 0;
}

/* Map pages of 1 << order size and fill the DMA address array */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct page *p, dma_addr_t **dma_addr)
{
	dma_addr_t addr;
	unsigned int i;

	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		addr = dma->addr;
	} else {
		size_t size = (1ULL << order) * PAGE_SIZE;

		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(pool->dev, addr))
			return -EFAULT;
	}

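	/* The 1 << order allocation is contiguous, so the DMA addresses of the
	 * individual pages simply follow each other at PAGE_SIZE steps.
	 */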
	for (i = 1 << order; i ; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}

	return 0;
}

/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
	/* Unmapped while freeing the page */
	if (pool->use_dma_alloc)
		return;

	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
		       DMA_BIDIRECTIONAL);
}

/* Give pages into a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
	unsigned int i, num_pages = 1 << pt->order;

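	/* Clear the pages before putting them back into the pool so that stale
	 * data is not handed to the next user of these pages.
	 */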
	for (i = 0; i < num_pages; ++i) {
		if (PageHighMem(p))
			clear_highpage(p + i);
		else
			clear_page(page_address(p + i));
	}

	spin_lock(&pt->lock);
	list_add(&p->lru, &pt->pages);
	spin_unlock(&pt->lock);
	atomic_long_add(1 << pt->order, &allocated_pages);
}

/* Take pages from a specific pool_type, return NULL when nothing available */
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&pt->lock);
	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
	if (p) {
		atomic_long_sub(1 << pt->order, &allocated_pages);
		list_del(&p->lru);
	}
	spin_unlock(&pt->lock);

	return p;
}

/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
			       enum ttm_caching caching, unsigned int order)
{
	pt->pool = pool;
	pt->caching = caching;
	pt->order = order;
	spin_lock_init(&pt->lock);
	INIT_LIST_HEAD(&pt->pages);

	spin_lock(&shrinker_lock);
	list_add_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);
}

/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&shrinker_lock);
	list_del(&pt->shrinker_list);
	spin_unlock(&shrinker_lock);

	while ((p = ttm_pool_type_take(pt)))
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
}

/* Return the pool_type to use for the given caching and order */
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
						  enum ttm_caching caching,
						  unsigned int order)
{
	if (pool->use_dma_alloc)
		return &pool->caching[caching].orders[order];

#ifdef CONFIG_X86
	switch (caching) {
	case ttm_write_combined:
		if (pool->use_dma32)
			return &global_dma32_write_combined[order];

		return &global_write_combined[order];
	case ttm_uncached:
		if (pool->use_dma32)
			return &global_dma32_uncached[order];

		return &global_uncached[order];
	default:
		break;
	}
#endif

	return NULL;
}

/* Free pages using the global shrinker list */
static unsigned int ttm_pool_shrink(void)
{
	struct ttm_pool_type *pt;
	unsigned int num_pages;
	struct page *p;

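	/* Pick the pool type at the head of the list and rotate it to the
	 * tail, so successive shrink calls cycle through all pool types.
	 */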
	spin_lock(&shrinker_lock);
	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
	list_move_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);

	p = ttm_pool_type_take(pt);
	if (p) {
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
		num_pages = 1 << pt->order;
	} else {
		num_pages = 0;
	}

	return num_pages;
}

/* Return the allocation order of a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		return dma->vaddr & ~PAGE_MASK;
	}

	return p->private;
}

/* Called when we got a page, either from a pool or newly allocated */
static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
				   struct page *p, dma_addr_t **dma_addr,
				   unsigned long *num_pages,
				   struct page ***pages)
{
	unsigned int i;
	int r;

	if (*dma_addr) {
		r = ttm_pool_map(pool, order, p, dma_addr);
		if (r)
			return r;
	}

	*num_pages -= 1 << order;
	for (i = 1 << order; i; --i, ++(*pages), ++p)
		**pages = p;

	return 0;
}

/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
		   struct ttm_operation_ctx *ctx)
{
	unsigned long num_pages = tt->num_pages;
	dma_addr_t *dma_addr = tt->dma_address;
	struct page **caching = tt->pages;
	struct page **pages = tt->pages;
	gfp_t gfp_flags = GFP_USER;
	unsigned int i, order;
	struct page *p;
	int r;

	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
	WARN_ON(dma_addr && !pool->dev);

	if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (ctx->gfp_retry_mayfail)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	if (pool->use_dma32)
		gfp_flags |= GFP_DMA32;
	else
		gfp_flags |= GFP_HIGHUSER;

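	/* Pages in the range [caching, pages) are filled in but still need
	 * their caching attributes applied; ttm_pool_apply_caching() is called
	 * in batches to keep the number of expensive set_memory calls low.
	 */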
	for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
	     num_pages;
	     order = min_t(unsigned int, order, __fls(num_pages))) {
		struct ttm_pool_type *pt;

		pt = ttm_pool_select_type(pool, tt->caching, order);
		p = pt ? ttm_pool_type_take(pt) : NULL;
		if (p) {
			r = ttm_pool_apply_caching(caching, pages,
						   tt->caching);
			if (r)
				goto error_free_page;

			do {
				r = ttm_pool_page_allocated(pool, order, p,
							    &dma_addr,
							    &num_pages,
							    &pages);
				if (r)
					goto error_free_page;

				if (num_pages < (1 << order))
					break;

				p = ttm_pool_type_take(pt);
			} while (p);
			caching = pages;
		}

		while (num_pages >= (1 << order) &&
		       (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {

			if (PageHighMem(p)) {
				r = ttm_pool_apply_caching(caching, pages,
							   tt->caching);
				if (r)
					goto error_free_page;
			}
			r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
						    &num_pages, &pages);
			if (r)
				goto error_free_page;
			if (PageHighMem(p))
				caching = pages;
		}

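		/* Neither the pool nor the page allocator could provide a page
		 * of this order; fall back to the next smaller order before
		 * giving up.
		 */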
		if (!p) {
			if (order) {
				--order;
				continue;
			}
			r = -ENOMEM;
			goto error_free_all;
		}
	}

	r = ttm_pool_apply_caching(caching, pages, tt->caching);
	if (r)
		goto error_free_all;

	return 0;

error_free_page:
	ttm_pool_free_page(pool, tt->caching, order, p);

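	/* Release everything that was handed out before the failure; the per
	 * page order tells us how far to advance through the pages array.
	 */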
error_free_all:
	num_pages = tt->num_pages - num_pages;
	for (i = 0; i < num_pages; ) {
		order = ttm_pool_page_order(pool, tt->pages[i]);
		ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
		i += 1 << order;
	}

	return r;
}
EXPORT_SYMBOL(ttm_pool_alloc);

/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them.
 */
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
	unsigned int i;

	for (i = 0; i < tt->num_pages; ) {
		struct page *p = tt->pages[i];
		unsigned int order, num_pages;
		struct ttm_pool_type *pt;

		order = ttm_pool_page_order(pool, p);
		num_pages = 1ULL << order;
		if (tt->dma_address)
			ttm_pool_unmap(pool, tt->dma_address[i], num_pages);

		pt = ttm_pool_select_type(pool, tt->caching, order);
		if (pt)
			ttm_pool_type_give(pt, tt->pages[i]);
		else
			ttm_pool_free_page(pool, tt->caching, order,
					   tt->pages[i]);

		i += num_pages;
	}

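	/* Shrink the pools until the total number of cached pages is below the
	 * page_pool_size limit again.
	 */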
	while (atomic_long_read(&allocated_pages) > page_pool_size)
		ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);

/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @use_dma_alloc: true if coherent DMA alloc should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types.
 */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
		   bool use_dma_alloc, bool use_dma32)
{
	unsigned int i, j;

	WARN_ON(!dev && use_dma_alloc);

	pool->dev = dev;
	pool->use_dma_alloc = use_dma_alloc;
	pool->use_dma32 = use_dma32;

	if (use_dma_alloc) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j < MAX_ORDER; ++j)
				ttm_pool_type_init(&pool->caching[i].orders[j],
						   pool, i, j);
	}
}

/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */
void ttm_pool_fini(struct ttm_pool *pool)
{
	unsigned int i, j;

	if (pool->use_dma_alloc) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j < MAX_ORDER; ++j)
				ttm_pool_type_fini(&pool->caching[i].orders[j]);
	}

	/* We removed the pool types from the LRU, but we need to also make sure
	 * that no shrinker is concurrently freeing pages from the pool.
	 */
	synchronize_shrinkers();
}

/* As long as pages are available make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	unsigned long num_freed = 0;

	do
		num_freed += ttm_pool_shrink();
	while (!num_freed && atomic_long_read(&allocated_pages));

	return num_freed;
}

/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	unsigned long num_pages = atomic_long_read(&allocated_pages);

	return num_pages ? num_pages : SHRINK_EMPTY;
}

#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
	unsigned int count = 0;
	struct page *p;

	spin_lock(&pt->lock);
	/* Only used for debugfs, the overhead doesn't matter */
	list_for_each_entry(p, &pt->pages, lru)
		++count;
	spin_unlock(&pt->lock);

	return count;
}

/* Print a nice header for the order */
static void ttm_pool_debugfs_header(struct seq_file *m)
{
	unsigned int i;

	seq_puts(m, "\t ");
	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " ---%2u---", i);
	seq_puts(m, "\n");
}

/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
				    struct seq_file *m)
{
	unsigned int i;

	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
	seq_puts(m, "\n");
}

/* Dump the total amount of allocated pages */
static void ttm_pool_debugfs_footer(struct seq_file *m)
{
	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
		   atomic_long_read(&allocated_pages), page_pool_size);
}

/* Dump the information for the global pools */
static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	seq_puts(m, "wc\t:");
	ttm_pool_debugfs_orders(global_write_combined, m);
	seq_puts(m, "uc\t:");
	ttm_pool_debugfs_orders(global_uncached, m);
	seq_puts(m, "wc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
	seq_puts(m, "uc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_uncached, m);
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);

/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
	unsigned int i;

	if (!pool->use_dma_alloc) {
		seq_puts(m, "unused\n");
		return 0;
	}

	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		seq_puts(m, "DMA ");
		switch (i) {
		case ttm_cached:
			seq_puts(m, "\t:");
			break;
		case ttm_write_combined:
			seq_puts(m, "wc\t:");
			break;
		case ttm_uncached:
			seq_puts(m, "uc\t:");
			break;
		}
		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
	}
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);
	return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);

/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct shrink_control sc = { .gfp_mask = GFP_NOFS };

	fs_reclaim_acquire(GFP_KERNEL);
	seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc),
		   ttm_pool_shrinker_scan(&mm_shrinker, &sc));
	fs_reclaim_release(GFP_KERNEL);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);

#endif

/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */
int ttm_pool_mgr_init(unsigned long num_pages)
{
	unsigned int i;

	if (!page_pool_size)
		page_pool_size = num_pages;

	spin_lock_init(&shrinker_lock);
	INIT_LIST_HEAD(&shrinker_list);

	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_init(&global_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);

		ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_dma32_uncached[i], NULL,
				   ttm_uncached, i);
	}

#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_globals_fops);
	debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_shrink_fops);
#endif

	mm_shrinker.count_objects = ttm_pool_shrinker_count;
	mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
	mm_shrinker.seeks = 1;
	return register_shrinker(&mm_shrinker, "drm-ttm_pool");
}

/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */
void ttm_pool_mgr_fini(void)
{
	unsigned int i;

	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_fini(&global_write_combined[i]);
		ttm_pool_type_fini(&global_uncached[i]);

		ttm_pool_type_fini(&global_dma32_write_combined[i]);
		ttm_pool_type_fini(&global_dma32_uncached[i]);
	}

	unregister_shrinker(&mm_shrinker);
	WARN_ON(!list_empty(&shrinker_list));
}