1/*
2 * Copyright 2011 (c) Oracle Corp.
3
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
24 */
25
26/*
27 * A simple DMA pool loosely based on dmapool.c. It has certain advantages
28 * over the DMA pools:
29 * - Pool collects recently freed pages for reuse (and hooks up to
30 * the shrinker).
31 * - Tracks currently in-use pages.
32 * - Tracks whether the page is UC, WC or cached (and reverts to WB
33 * when freed).
34 */
35
36#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
37#define pr_fmt(fmt) "[TTM] " fmt
38
39#include <linux/dma-mapping.h>
40#include <linux/list.h>
41#include <linux/seq_file.h> /* for seq_printf */
42#include <linux/slab.h>
43#include <linux/spinlock.h>
44#include <linux/highmem.h>
45#include <linux/mm_types.h>
46#include <linux/module.h>
47#include <linux/mm.h>
48#include <linux/atomic.h>
49#include <linux/device.h>
50#include <linux/kthread.h>
51#include <drm/ttm/ttm_bo_driver.h>
52#include <drm/ttm/ttm_page_alloc.h>
53#ifdef TTM_HAS_AGP
54#include <asm/agp.h>
55#endif
56
57#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
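/* A rough sketch of what this batch size works out to, assuming 4 KiB pages
 * and 8-byte pointers (a typical 64-bit build):
 *	NUM_PAGES_TO_ALLOC = 4096 / 8 = 512 pages, i.e. 2 MiB per batch.
 * The exact value depends on PAGE_SIZE and the pointer size of the build.
 */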
58#define SMALL_ALLOCATION 4
59#define FREE_ALL_PAGES (~0U)
60/* times are in msecs */
61#define IS_UNDEFINED (0)
62#define IS_WC (1<<1)
63#define IS_UC (1<<2)
64#define IS_CACHED (1<<3)
65#define IS_DMA32 (1<<4)
66
67enum pool_type {
68 POOL_IS_UNDEFINED,
69 POOL_IS_WC = IS_WC,
70 POOL_IS_UC = IS_UC,
71 POOL_IS_CACHED = IS_CACHED,
72 POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
73 POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
74 POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
75};
76/*
77 * The pool structure. There are usually six pools:
78 * - generic (not restricted to DMA32):
79 * - write combined, uncached, cached.
80 * - dma32 (up to 2^32 - so up 4GB):
81 * - write combined, uncached, cached.
82 * for each 'struct device'. The 'cached' is for pages that are actively used.
83 * The other ones can be shrunk by the shrinker API if necessary.
84 * @pools: The 'struct device->dma_pools' link.
85 * @type: Type of the pool
86 * @lock: Protects the inuse_list and free_list from concurrent access. Must be
87 * used with the irqsave/irqrestore variants because the pool allocator may be
88 * called from delayed work.
89 * @inuse_list: Pool of pages that are in use. The order is important: it
90 * matches the order in which the TTM pages were put back.
91 * @free_list: Pool of pages that are free to be used. No order requirements.
92 * @dev: The device that is associated with these pools.
93 * @size: Size used during DMA allocation.
94 * @npages_free: Count of available pages for re-use.
95 * @npages_in_use: Count of pages that are in use.
96 * @nfrees: Stats when pool is shrinking.
97 * @nrefills: Stats when the pool is grown.
98 * @gfp_flags: Flags to pass for alloc_page.
99 * @name: Name of the pool.
100 * @dev_name: Name derived from dev - similar to how dev_info works.
101 * Used during shutdown as dev_info is unavailable during release.
102 */
103struct dma_pool {
104 struct list_head pools; /* The 'struct device->dma_pools link */
105 enum pool_type type;
106 spinlock_t lock;
107 struct list_head inuse_list;
108 struct list_head free_list;
109 struct device *dev;
110 unsigned size;
111 unsigned npages_free;
112 unsigned npages_in_use;
113 unsigned long nfrees; /* Stats when shrunk. */
114 unsigned long nrefills; /* Stats when grown. */
115 gfp_t gfp_flags;
116 char name[13]; /* "cached dma32" */
117 char dev_name[64]; /* Constructed from dev */
118};
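/* Note on sizing: the longest name that can be composed from the flag names
 * used in ttm_dma_pool_init() is "cached dma32" - 12 characters plus the
 * terminating NUL - which is exactly what name[13] holds.
 */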
119
120/*
121 * The accounting page keeping track of the allocated page along with
122 * the DMA address.
123 * @page_list: The link to the 'page_list' in 'struct dma_pool'.
124 * @vaddr: The virtual address of the page
125 * @dma: The bus address of the page. If the page is not allocated
126 * via the DMA API, it will be -1.
127 */
128struct dma_page {
129 struct list_head page_list;
130 void *vaddr;
131 struct page *p;
132 dma_addr_t dma;
133};
134
135/*
136 * Limits for the pool. They are handled without locks because the only place
137 * where they may change is the sysfs store. They won't have an immediate effect
138 * anyway, so forcing serialization to access them is pointless.
139 */
140
141struct ttm_pool_opts {
142 unsigned alloc_size;
143 unsigned max_size;
144 unsigned small;
145};
146
147/*
148 * Contains the list of all of the 'struct device' and their corresponding
149 * DMA pools. Guarded by _manager->lock.
150 * @pools: The link to 'struct ttm_pool_manager->pools'
151 * @dev: The 'struct device' associated with the 'pool'
152 * @pool: The 'struct dma_pool' associated with the 'dev'
153 */
154struct device_pools {
155 struct list_head pools;
156 struct device *dev;
157 struct dma_pool *pool;
158};
159
160/*
161 * struct ttm_pool_manager - Holds memory pools for fast allocation
162 *
163 * @lock: Lock used when adding/removing from pools
164 * @pools: List of 'struct device' and 'struct dma_pool' tuples.
165 * @options: Limits for the pool.
166 * @npools: Total number of pools in existence.
167 * @mm_shrink: The structure used by [un]register_shrinker
168 */
169struct ttm_pool_manager {
170 struct mutex lock;
171 struct list_head pools;
172 struct ttm_pool_opts options;
173 unsigned npools;
174 struct shrinker mm_shrink;
175 struct kobject kobj;
176};
177
178static struct ttm_pool_manager *_manager;
179
180static struct attribute ttm_page_pool_max = {
181 .name = "pool_max_size",
182 .mode = S_IRUGO | S_IWUSR
183};
184static struct attribute ttm_page_pool_small = {
185 .name = "pool_small_allocation",
186 .mode = S_IRUGO | S_IWUSR
187};
188static struct attribute ttm_page_pool_alloc_size = {
189 .name = "pool_allocation_size",
190 .mode = S_IRUGO | S_IWUSR
191};
192
193static struct attribute *ttm_pool_attrs[] = {
194 &ttm_page_pool_max,
195 &ttm_page_pool_small,
196 &ttm_page_pool_alloc_size,
197 NULL
198};
199
200static void ttm_pool_kobj_release(struct kobject *kobj)
201{
202 struct ttm_pool_manager *m =
203 container_of(kobj, struct ttm_pool_manager, kobj);
204 kfree(m);
205}
206
207static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
208 const char *buffer, size_t size)
209{
210 struct ttm_pool_manager *m =
211 container_of(kobj, struct ttm_pool_manager, kobj);
212 int chars;
213 unsigned val;
214 chars = sscanf(buffer, "%u", &val);
215 if (chars == 0)
216 return size;
217
218 /* Convert kb to number of pages */
219 val = val / (PAGE_SIZE >> 10);
220
221 if (attr == &ttm_page_pool_max)
222 m->options.max_size = val;
223 else if (attr == &ttm_page_pool_small)
224 m->options.small = val;
225 else if (attr == &ttm_page_pool_alloc_size) {
226 if (val > NUM_PAGES_TO_ALLOC*8) {
227 pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
228 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
229 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
230 return size;
231 } else if (val > NUM_PAGES_TO_ALLOC) {
232 pr_warn("Setting allocation size to larger than %lu is not recommended\n",
233 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
234 }
235 m->options.alloc_size = val;
236 }
237
238 return size;
239}
240
241static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
242 char *buffer)
243{
244 struct ttm_pool_manager *m =
245 container_of(kobj, struct ttm_pool_manager, kobj);
246 unsigned val = 0;
247
248 if (attr == &ttm_page_pool_max)
249 val = m->options.max_size;
250 else if (attr == &ttm_page_pool_small)
251 val = m->options.small;
252 else if (attr == &ttm_page_pool_alloc_size)
253 val = m->options.alloc_size;
254
255 val = val * (PAGE_SIZE >> 10);
256
257 return snprintf(buffer, PAGE_SIZE, "%u\n", val);
258}
259
260static const struct sysfs_ops ttm_pool_sysfs_ops = {
261 .show = &ttm_pool_show,
262 .store = &ttm_pool_store,
263};
264
265static struct kobj_type ttm_pool_kobj_type = {
266 .release = &ttm_pool_kobj_release,
267 .sysfs_ops = &ttm_pool_sysfs_ops,
268 .default_attrs = ttm_pool_attrs,
269};
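/* Usage sketch for the knobs above (the kobject is registered as "dma_pool"
 * in ttm_dma_page_alloc_init(); the parent directory depends on where the TTM
 * memory-accounting kobject lives, so the full path varies between systems).
 * Values are in KiB:
 *
 *	# from within the dma_pool sysfs directory
 *	cat pool_max_size
 *	echo 16384 > pool_max_size	# 16384 KiB -> 4096 pages with 4 KiB pages
 *
 * ttm_pool_store() converts KiB to pages with val / (PAGE_SIZE >> 10);
 * with 4 KiB pages PAGE_SIZE >> 10 == 4, so 16384 KiB becomes 4096 pages.
 */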
270
271#ifndef CONFIG_X86
272static int set_pages_array_wb(struct page **pages, int addrinarray)
273{
274#ifdef TTM_HAS_AGP
275 int i;
276
277 for (i = 0; i < addrinarray; i++)
278 unmap_page_from_agp(pages[i]);
279#endif
280 return 0;
281}
282
283static int set_pages_array_wc(struct page **pages, int addrinarray)
284{
285#ifdef TTM_HAS_AGP
286 int i;
287
288 for (i = 0; i < addrinarray; i++)
289 map_page_into_agp(pages[i]);
290#endif
291 return 0;
292}
293
294static int set_pages_array_uc(struct page **pages, int addrinarray)
295{
296#ifdef TTM_HAS_AGP
297 int i;
298
299 for (i = 0; i < addrinarray; i++)
300 map_page_into_agp(pages[i]);
301#endif
302 return 0;
303}
304#endif /* for !CONFIG_X86 */
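/* On non-x86 there is no set_memory_*()/PAT machinery, so the fallbacks above
 * only route the pages through the AGP helpers when TTM_HAS_AGP is defined
 * (note that both the WC and the UC variant end up in map_page_into_agp())
 * and are no-ops otherwise.
 */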
305
306static int ttm_set_pages_caching(struct dma_pool *pool,
307 struct page **pages, unsigned cpages)
308{
309 int r = 0;
310 /* Set page caching */
311 if (pool->type & IS_UC) {
312 r = set_pages_array_uc(pages, cpages);
313 if (r)
314 pr_err("%s: Failed to set %d pages to uc!\n",
315 pool->dev_name, cpages);
316 }
317 if (pool->type & IS_WC) {
318 r = set_pages_array_wc(pages, cpages);
319 if (r)
320 pr_err("%s: Failed to set %d pages to wc!\n",
321 pool->dev_name, cpages);
322 }
323 return r;
324}
325
326static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
327{
328 dma_addr_t dma = d_page->dma;
329 dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
330
331 kfree(d_page);
332 d_page = NULL;
333}
334static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
335{
336 struct dma_page *d_page;
337
338 d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
339 if (!d_page)
340 return NULL;
341
342 d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
343 &d_page->dma,
344 pool->gfp_flags);
345 if (d_page->vaddr) {
346 if (is_vmalloc_addr(d_page->vaddr))
347 d_page->p = vmalloc_to_page(d_page->vaddr);
348 else
349 d_page->p = virt_to_page(d_page->vaddr);
350 } else {
351 kfree(d_page);
352 d_page = NULL;
353 }
354 return d_page;
355}
356static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
357{
358 enum pool_type type = IS_UNDEFINED;
359
360 if (flags & TTM_PAGE_FLAG_DMA32)
361 type |= IS_DMA32;
362 if (cstate == tt_cached)
363 type |= IS_CACHED;
364 else if (cstate == tt_uncached)
365 type |= IS_UC;
366 else
367 type |= IS_WC;
368
369 return type;
370}
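/* Example of the mapping above (derived from the flag definitions):
 *	ttm_to_type(TTM_PAGE_FLAG_DMA32, tt_uncached) == IS_UC | IS_DMA32
 *	ttm_to_type(0, tt_cached)                     == IS_CACHED
 * i.e. POOL_IS_UC_DMA32 and POOL_IS_CACHED respectively.
 */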
371
372static void ttm_pool_update_free_locked(struct dma_pool *pool,
373 unsigned freed_pages)
374{
375 pool->npages_free -= freed_pages;
376 pool->nfrees += freed_pages;
377
378}
379
380/* set memory back to wb and free the pages. */
381static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
382 struct page *pages[], unsigned npages)
383{
384 struct dma_page *d_page, *tmp;
385
386 /* Don't set WB on WB page pool. */
387 if (npages && !(pool->type & IS_CACHED) &&
388 set_pages_array_wb(pages, npages))
389 pr_err("%s: Failed to set %d pages to wb!\n",
390 pool->dev_name, npages);
391
392 list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
393 list_del(&d_page->page_list);
394 __ttm_dma_free_page(pool, d_page);
395 }
396}
397
398static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
399{
400 /* Don't set WB on WB page pool. */
401 if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
402 pr_err("%s: Failed to set %d pages to wb!\n",
403 pool->dev_name, 1);
404
405 list_del(&d_page->page_list);
406 __ttm_dma_free_page(pool, d_page);
407}
408
409/*
410 * Free pages from pool.
411 *
412 * To prevent hogging the ttm_swap process we only free at most
413 * NUM_PAGES_TO_ALLOC pages in one go.
414 *
415 * @pool: to free the pages from
416 * @nr_free: number of pages to free; FREE_ALL_PAGES frees every page in the pool
417 * @use_static: Safe to use static buffer
418 **/
419static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
420 bool use_static)
421{
422 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
423 unsigned long irq_flags;
424 struct dma_page *dma_p, *tmp;
425 struct page **pages_to_free;
426 struct list_head d_pages;
427 unsigned freed_pages = 0,
428 npages_to_free = nr_free;
429
430 if (NUM_PAGES_TO_ALLOC < nr_free)
431 npages_to_free = NUM_PAGES_TO_ALLOC;
432#if 0
433 if (nr_free > 1) {
434 pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
435 pool->dev_name, pool->name, current->pid,
436 npages_to_free, nr_free);
437 }
438#endif
439 if (use_static)
440 pages_to_free = static_buf;
441 else
442 pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
443 GFP_KERNEL);
444
445 if (!pages_to_free) {
446 pr_err("%s: Failed to allocate memory for pool free operation\n",
447 pool->dev_name);
448 return 0;
449 }
450 INIT_LIST_HEAD(&d_pages);
451restart:
452 spin_lock_irqsave(&pool->lock, irq_flags);
453
454 /* We're picking the oldest ones off the list */
455 list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
456 page_list) {
457 if (freed_pages >= npages_to_free)
458 break;
459
460 /* Move the dma_page from one list to another. */
461 list_move(&dma_p->page_list, &d_pages);
462
463 pages_to_free[freed_pages++] = dma_p->p;
464 /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
465 if (freed_pages >= NUM_PAGES_TO_ALLOC) {
466
467 ttm_pool_update_free_locked(pool, freed_pages);
468 /**
469 * Because changing page caching is costly
470 * we unlock the pool to prevent stalling.
471 */
472 spin_unlock_irqrestore(&pool->lock, irq_flags);
473
474 ttm_dma_pages_put(pool, &d_pages, pages_to_free,
475 freed_pages);
476
477 INIT_LIST_HEAD(&d_pages);
478
479 if (likely(nr_free != FREE_ALL_PAGES))
480 nr_free -= freed_pages;
481
482 if (NUM_PAGES_TO_ALLOC >= nr_free)
483 npages_to_free = nr_free;
484 else
485 npages_to_free = NUM_PAGES_TO_ALLOC;
486
487 freed_pages = 0;
488
489 /* free all so restart the processing */
490 if (nr_free)
491 goto restart;
492
493 /* Not allowed to fall through or break because
494 * following context is inside spinlock while we are
495 * outside here.
496 */
497 goto out;
498
499 }
500 }
501
502 /* remove range of pages from the pool */
503 if (freed_pages) {
504 ttm_pool_update_free_locked(pool, freed_pages);
505 nr_free -= freed_pages;
506 }
507
508 spin_unlock_irqrestore(&pool->lock, irq_flags);
509
510 if (freed_pages)
511 ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
512out:
513 if (pages_to_free != static_buf)
514 kfree(pages_to_free);
515 return nr_free;
516}
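/* Note: the value returned above is what is left of 'nr_free' after the
 * freeing loop, i.e. the number of requested pages that were not freed.
 * The shrinker callback below relies on this to work out how many pages it
 * actually released.
 */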
517
518static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
519{
520 struct device_pools *p;
521 struct dma_pool *pool;
522
523 if (!dev)
524 return;
525
526 mutex_lock(&_manager->lock);
527 list_for_each_entry_reverse(p, &_manager->pools, pools) {
528 if (p->dev != dev)
529 continue;
530 pool = p->pool;
531 if (pool->type != type)
532 continue;
533
534 list_del(&p->pools);
535 kfree(p);
536 _manager->npools--;
537 break;
538 }
539 list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
540 if (pool->type != type)
541 continue;
542 /* Takes a spinlock.. */
543 /* OK to use static buffer since global mutex is held. */
544 ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
545 WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
546 /* This code path is called after _all_ references to the
547 * struct device have been dropped - so nobody should be
548 * touching it. In case somebody is trying to _add_ we are
549 * guarded by the mutex. */
550 list_del(&pool->pools);
551 kfree(pool);
552 break;
553 }
554 mutex_unlock(&_manager->lock);
555}
556
557/*
558 * When the 'struct device' is freed this destructor is run,
559 * although the pool might have already been freed earlier.
560 */
561static void ttm_dma_pool_release(struct device *dev, void *res)
562{
563 struct dma_pool *pool = *(struct dma_pool **)res;
564
565 if (pool)
566 ttm_dma_free_pool(dev, pool->type);
567}
568
569static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
570{
571 return *(struct dma_pool **)res == match_data;
572}
573
574static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
575 enum pool_type type)
576{
577 char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
578 enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
579 struct device_pools *sec_pool = NULL;
580 struct dma_pool *pool = NULL, **ptr;
581 unsigned i;
582 int ret = -ENODEV;
583 char *p;
584
585 if (!dev)
586 return NULL;
587
588 ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
589 if (!ptr)
590 return NULL;
591
592 ret = -ENOMEM;
593
594 pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
595 dev_to_node(dev));
596 if (!pool)
597 goto err_mem;
598
599 sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
600 dev_to_node(dev));
601 if (!sec_pool)
602 goto err_mem;
603
604 INIT_LIST_HEAD(&sec_pool->pools);
605 sec_pool->dev = dev;
606 sec_pool->pool = pool;
607
608 INIT_LIST_HEAD(&pool->free_list);
609 INIT_LIST_HEAD(&pool->inuse_list);
610 INIT_LIST_HEAD(&pool->pools);
611 spin_lock_init(&pool->lock);
612 pool->dev = dev;
613 pool->npages_free = pool->npages_in_use = 0;
614 pool->nfrees = 0;
615 pool->gfp_flags = flags;
616 pool->size = PAGE_SIZE;
617 pool->type = type;
618 pool->nrefills = 0;
619 p = pool->name;
620 for (i = 0; i < 5; i++) {
621 if (type & t[i]) {
622 p += snprintf(p, sizeof(pool->name) - (p - pool->name),
623 "%s", n[i]);
624 }
625 }
626 *p = 0;
627 /* We copy the name for pr_ calls b/c when dma_pool_destroy is called
628 * - the kobj->name has already been deallocated.*/
629 snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
630 dev_driver_string(dev), dev_name(dev));
631 mutex_lock(&_manager->lock);
632 /* You can get the dma_pool from either the global: */
633 list_add(&sec_pool->pools, &_manager->pools);
634 _manager->npools++;
635 /* or from 'struct device': */
636 list_add(&pool->pools, &dev->dma_pools);
637 mutex_unlock(&_manager->lock);
638
639 *ptr = pool;
640 devres_add(dev, ptr);
641
642 return pool;
643err_mem:
644 devres_free(ptr);
645 kfree(sec_pool);
646 kfree(pool);
647 return ERR_PTR(ret);
648}
649
650static struct dma_pool *ttm_dma_find_pool(struct device *dev,
651 enum pool_type type)
652{
653 struct dma_pool *pool, *tmp, *found = NULL;
654
655 if (type == IS_UNDEFINED)
656 return found;
657
658 /* NB: We iterate on the 'struct dev' which has no spinlock, but
659 * it does have a kref which we have taken. The kref is taken during
660 * graphic driver loading - in the drm_pci_init it calls either
661 * pci_dev_get or pci_register_driver which both end up taking a kref
662 * on 'struct device'.
663 *
664 * On teardown, the graphic drivers end up quiescing the TTM (put_pages)
665 * and calls the devres destructors: ttm_dma_pool_release. The nice
666 * thing is that at that point there are no pages associated with the
667 * driver so this function will not be called.
668 */
669 list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
670 if (pool->type != type)
671 continue;
672 found = pool;
673 break;
674 }
675 return found;
676}
677
678/*
679 * Free the pages that failed to change the caching state. Pages whose
680 * caching state was already changed are put back into the
681 * pool.
682 */
683static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
684 struct list_head *d_pages,
685 struct page **failed_pages,
686 unsigned cpages)
687{
688 struct dma_page *d_page, *tmp;
689 struct page *p;
690 unsigned i = 0;
691
692 p = failed_pages[0];
693 if (!p)
694 return;
695 /* Find the failed page. */
696 list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
697 if (d_page->p != p)
698 continue;
699 /* .. and then progress over the full list. */
700 list_del(&d_page->page_list);
701 __ttm_dma_free_page(pool, d_page);
702 if (++i < cpages)
703 p = failed_pages[i];
704 else
705 break;
706 }
707
708}
709
710/*
711 * Allocate 'count' pages and set their caching state. Every allocated
712 * page is wrapped in a 'struct dma_page' and linked onto the
713 * 'd_pages' list.
714 * We return zero for success, and negative numbers as errors.
715 */
716static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
717 struct list_head *d_pages,
718 unsigned count)
719{
720 struct page **caching_array;
721 struct dma_page *dma_p;
722 struct page *p;
723 int r = 0;
724 unsigned i, cpages;
725 unsigned max_cpages = min(count,
726 (unsigned)(PAGE_SIZE/sizeof(struct page *)));
727
728 /* allocate array for page caching change */
729 caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
730
731 if (!caching_array) {
732 pr_err("%s: Unable to allocate table for new pages\n",
733 pool->dev_name);
734 return -ENOMEM;
735 }
736
737 if (count > 1) {
738 pr_debug("%s: (%s:%d) Getting %d pages\n",
739 pool->dev_name, pool->name, current->pid, count);
740 }
741
742 for (i = 0, cpages = 0; i < count; ++i) {
743 dma_p = __ttm_dma_alloc_page(pool);
744 if (!dma_p) {
745 pr_err("%s: Unable to get page %u\n",
746 pool->dev_name, i);
747
748 /* store already allocated pages in the pool after
749 * setting the caching state */
750 if (cpages) {
751 r = ttm_set_pages_caching(pool, caching_array,
752 cpages);
753 if (r)
754 ttm_dma_handle_caching_state_failure(
755 pool, d_pages, caching_array,
756 cpages);
757 }
758 r = -ENOMEM;
759 goto out;
760 }
761 p = dma_p->p;
762#ifdef CONFIG_HIGHMEM
763 /* gfp flags of highmem page should never be dma32 so we
764 * should be fine in such a case
765 */
766 if (!PageHighMem(p))
767#endif
768 {
769 caching_array[cpages++] = p;
770 if (cpages == max_cpages) {
771 /* Note: Cannot hold the spinlock */
772 r = ttm_set_pages_caching(pool, caching_array,
773 cpages);
774 if (r) {
775 ttm_dma_handle_caching_state_failure(
776 pool, d_pages, caching_array,
777 cpages);
778 goto out;
779 }
780 cpages = 0;
781 }
782 }
783 list_add(&dma_p->page_list, d_pages);
784 }
785
786 if (cpages) {
787 r = ttm_set_pages_caching(pool, caching_array, cpages);
788 if (r)
789 ttm_dma_handle_caching_state_failure(pool, d_pages,
790 caching_array, cpages);
791 }
792out:
793 kfree(caching_array);
794 return r;
795}
796
797/*
798 * @return count of pages still required to fulfill the request.
799 */
800static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
801 unsigned long *irq_flags)
802{
803 unsigned count = _manager->options.small;
804 int r = pool->npages_free;
805
806 if (count > pool->npages_free) {
807 struct list_head d_pages;
808
809 INIT_LIST_HEAD(&d_pages);
810
811 spin_unlock_irqrestore(&pool->lock, *irq_flags);
812
813 /* Returns how many more are necessary to fulfill the
814 * request. */
815 r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
816
817 spin_lock_irqsave(&pool->lock, *irq_flags);
818 if (!r) {
819 /* Add the fresh to the end.. */
820 list_splice(&d_pages, &pool->free_list);
821 ++pool->nrefills;
822 pool->npages_free += count;
823 r = count;
824 } else {
825 struct dma_page *d_page;
826 unsigned cpages = 0;
827
828 pr_err("%s: Failed to fill %s pool (r:%d)!\n",
829 pool->dev_name, pool->name, r);
830
831 list_for_each_entry(d_page, &d_pages, page_list) {
832 cpages++;
833 }
834 list_splice_tail(&d_pages, &pool->free_list);
835 pool->npages_free += cpages;
836 r = cpages;
837 }
838 }
839 return r;
840}
841
842/*
843 * @return zero on success, or -ENOMEM if no page could be obtained.
844 * The populate list is actually a stack (not that it matters, as TTM
845 * allocates one page at a time).
846 */
847static int ttm_dma_pool_get_pages(struct dma_pool *pool,
848 struct ttm_dma_tt *ttm_dma,
849 unsigned index)
850{
851 struct dma_page *d_page;
852 struct ttm_tt *ttm = &ttm_dma->ttm;
853 unsigned long irq_flags;
854 int count, r = -ENOMEM;
855
856 spin_lock_irqsave(&pool->lock, irq_flags);
857 count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
858 if (count) {
859 d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
860 ttm->pages[index] = d_page->p;
861 ttm_dma->cpu_address[index] = d_page->vaddr;
862 ttm_dma->dma_address[index] = d_page->dma;
863 list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
864 r = 0;
865 pool->npages_in_use += 1;
866 pool->npages_free -= 1;
867 }
868 spin_unlock_irqrestore(&pool->lock, irq_flags);
869 return r;
870}
871
872/*
873 * On success the pages list will hold 'count' correctly
874 * cached pages. On failure a negative error code (-ENOMEM, etc.) is returned.
875 */
876int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
877{
878 struct ttm_tt *ttm = &ttm_dma->ttm;
879 struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
880 struct dma_pool *pool;
881 enum pool_type type;
882 unsigned i;
883 gfp_t gfp_flags;
884 int ret;
885
886 if (ttm->state != tt_unpopulated)
887 return 0;
888
889 type = ttm_to_type(ttm->page_flags, ttm->caching_state);
890 if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
891 gfp_flags = GFP_USER | GFP_DMA32;
892 else
893 gfp_flags = GFP_HIGHUSER;
894 if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
895 gfp_flags |= __GFP_ZERO;
896
897 pool = ttm_dma_find_pool(dev, type);
898 if (!pool) {
899 pool = ttm_dma_pool_init(dev, gfp_flags, type);
900 if (IS_ERR_OR_NULL(pool)) {
901 return -ENOMEM;
902 }
903 }
904
905 INIT_LIST_HEAD(&ttm_dma->pages_list);
906 for (i = 0; i < ttm->num_pages; ++i) {
907 ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
908 if (ret != 0) {
909 ttm_dma_unpopulate(ttm_dma, dev);
910 return -ENOMEM;
911 }
912
913 ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
914 false, false);
915 if (unlikely(ret != 0)) {
916 ttm_dma_unpopulate(ttm_dma, dev);
917 return -ENOMEM;
918 }
919 }
920
921 if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
922 ret = ttm_tt_swapin(ttm);
923 if (unlikely(ret != 0)) {
924 ttm_dma_unpopulate(ttm_dma, dev);
925 return ret;
926 }
927 }
928
929 ttm->state = tt_unbound;
930 return 0;
931}
932EXPORT_SYMBOL_GPL(ttm_dma_populate);
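/* Usage sketch: drivers normally call ttm_dma_populate()/ttm_dma_unpopulate()
 * from their ttm_tt populate/unpopulate backend callbacks when the coherent
 * (swiotlb) path is active. A minimal, hypothetical example - the 'foo_*'
 * names are placeholders, not a real driver API:
 *
 *	static int foo_ttm_tt_populate(struct ttm_tt *ttm)
 *	{
 *		struct ttm_dma_tt *dma_ttm =
 *			container_of(ttm, struct ttm_dma_tt, ttm);
 *
 *		return ttm_dma_populate(dma_ttm, foo_get_dev(ttm));
 *	}
 */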
933
934/* Put all pages on the pages list back into the correct pool to wait for reuse */
935void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
936{
937 struct ttm_tt *ttm = &ttm_dma->ttm;
938 struct dma_pool *pool;
939 struct dma_page *d_page, *next;
940 enum pool_type type;
941 bool is_cached = false;
942 unsigned count = 0, i, npages = 0;
943 unsigned long irq_flags;
944
945 type = ttm_to_type(ttm->page_flags, ttm->caching_state);
946 pool = ttm_dma_find_pool(dev, type);
947 if (!pool)
948 return;
949
950 is_cached = (ttm_dma_find_pool(pool->dev,
951 ttm_to_type(ttm->page_flags, tt_cached)) == pool);
952
953 /* make sure pages array match list and count number of pages */
954 list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
955 ttm->pages[count] = d_page->p;
956 count++;
957 }
958
959 spin_lock_irqsave(&pool->lock, irq_flags);
960 pool->npages_in_use -= count;
961 if (is_cached) {
962 pool->nfrees += count;
963 } else {
964 pool->npages_free += count;
965 list_splice(&ttm_dma->pages_list, &pool->free_list);
966 /*
967 * Wait until there are at least NUM_PAGES_TO_ALLOC pages
968 * to free in order to minimize calls to set_memory_wb().
969 */
970 if (pool->npages_free >= (_manager->options.max_size +
971 NUM_PAGES_TO_ALLOC))
972 npages = pool->npages_free - _manager->options.max_size;
973 }
974 spin_unlock_irqrestore(&pool->lock, irq_flags);
975
976 if (is_cached) {
977 list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
978 ttm_mem_global_free_page(ttm->glob->mem_glob,
979 d_page->p);
980 ttm_dma_page_put(pool, d_page);
981 }
982 } else {
983 for (i = 0; i < count; i++) {
984 ttm_mem_global_free_page(ttm->glob->mem_glob,
985 ttm->pages[i]);
986 }
987 }
988
989 INIT_LIST_HEAD(&ttm_dma->pages_list);
990 for (i = 0; i < ttm->num_pages; i++) {
991 ttm->pages[i] = NULL;
992 ttm_dma->cpu_address[i] = 0;
993 ttm_dma->dma_address[i] = 0;
994 }
995
996 /* shrink pool if necessary (only on !is_cached pools)*/
997 if (npages)
998 ttm_dma_page_pool_free(pool, npages, false);
999 ttm->state = tt_unpopulated;
1000}
1001EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
1002
1003/**
1004 * Callback for mm to request the pool to reduce the number of pages held.
1005 *
1006 * XXX: (dchinner) Deadlock warning!
1007 *
1008 * I'm getting sadder as I hear more pathetic whimpers about needing per-pool
1009 * shrinkers
1010 */
1011static unsigned long
1012ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1013{
1014 static unsigned start_pool;
1015 unsigned idx = 0;
1016 unsigned pool_offset;
1017 unsigned shrink_pages = sc->nr_to_scan;
1018 struct device_pools *p;
1019 unsigned long freed = 0;
1020
1021 if (list_empty(&_manager->pools))
1022 return SHRINK_STOP;
1023
1024 if (!mutex_trylock(&_manager->lock))
1025 return SHRINK_STOP;
1026 if (!_manager->npools)
1027 goto out;
1028 pool_offset = ++start_pool % _manager->npools;
1029 list_for_each_entry(p, &_manager->pools, pools) {
1030 unsigned nr_free;
1031
1032 if (!p->dev)
1033 continue;
1034 if (shrink_pages == 0)
1035 break;
1036 /* Do it in round-robin fashion. */
1037 if (++idx < pool_offset)
1038 continue;
1039 nr_free = shrink_pages;
1040 /* OK to use static buffer since global mutex is held. */
1041 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
1042 freed += nr_free - shrink_pages;
1043
1044 pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
1045 p->pool->dev_name, p->pool->name, current->pid,
1046 nr_free, shrink_pages);
1047 }
1048out:
1049 mutex_unlock(&_manager->lock);
1050 return freed;
1051}
1052
1053static unsigned long
1054ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1055{
1056 struct device_pools *p;
1057 unsigned long count = 0;
1058
1059 if (!mutex_trylock(&_manager->lock))
1060 return 0;
1061 list_for_each_entry(p, &_manager->pools, pools)
1062 count += p->pool->npages_free;
1063 mutex_unlock(&_manager->lock);
1064 return count;
1065}
1066
1067static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
1068{
1069 manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
1070 manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
1071 manager->mm_shrink.seeks = 1;
1072 register_shrinker(&manager->mm_shrink);
1073}
1074
1075static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
1076{
1077 unregister_shrinker(&manager->mm_shrink);
1078}
1079
1080int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
1081{
1082 int ret = -ENOMEM;
1083
1084 WARN_ON(_manager);
1085
1086 pr_info("Initializing DMA pool allocator\n");
1087
1088 _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
1089 if (!_manager)
1090 goto err;
1091
1092 mutex_init(&_manager->lock);
1093 INIT_LIST_HEAD(&_manager->pools);
1094
1095 _manager->options.max_size = max_pages;
1096 _manager->options.small = SMALL_ALLOCATION;
1097 _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
1098
1099 /* This takes care of auto-freeing the _manager */
1100 ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
1101 &glob->kobj, "dma_pool");
1102 if (unlikely(ret != 0)) {
1103 kobject_put(&_manager->kobj);
1104 goto err;
1105 }
1106 ttm_dma_pool_mm_shrink_init(_manager);
1107 return 0;
1108err:
1109 return ret;
1110}
1111
1112void ttm_dma_page_alloc_fini(void)
1113{
1114 struct device_pools *p, *t;
1115
1116 pr_info("Finalizing DMA pool allocator\n");
1117 ttm_dma_pool_mm_shrink_fini(_manager);
1118
1119 list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
1120 dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
1121 current->pid);
1122 WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
1123 ttm_dma_pool_match, p->pool));
1124 ttm_dma_free_pool(p->dev, p->pool->type);
1125 }
1126 kobject_put(&_manager->kobj);
1127 _manager = NULL;
1128}
1129
1130int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
1131{
1132 struct device_pools *p;
1133 struct dma_pool *pool = NULL;
1134 char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
1135 "name", "virt", "busaddr"};
1136
1137 if (!_manager) {
1138 seq_printf(m, "No pool allocator running.\n");
1139 return 0;
1140 }
1141 seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
1142 h[0], h[1], h[2], h[3], h[4], h[5]);
1143 mutex_lock(&_manager->lock);
1144 list_for_each_entry(p, &_manager->pools, pools) {
1145 struct device *dev = p->dev;
1146 if (!dev)
1147 continue;
1148 pool = p->pool;
1149 seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
1150 pool->name, pool->nrefills,
1151 pool->nfrees, pool->npages_in_use,
1152 pool->npages_free,
1153 pool->dev_name);
1154 }
1155 mutex_unlock(&_manager->lock);
1156 return 0;
1157}
1158EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
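/* The debugfs table printed above looks roughly like this (illustrative
 * values only; the columns follow the seq_printf() format strings):
 *
 *	         pool      refills   pages freed    inuse available     name
 *	 cached dma32           18             0      512        64 radeon 0000:01:00.0
 */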
1159
1160#endif
1/*
2 * Copyright 2011 (c) Oracle Corp.
3
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
24 */
25
26/*
27 * A simple DMA pool loosely based on dmapool.c. It has certain advantages
28 * over the DMA pools:
29 * - Pool collects recently freed pages for reuse (and hooks up to
30 * the shrinker).
31 * - Tracks currently in-use pages.
32 * - Tracks whether the page is UC, WC or cached (and reverts to WB
33 * when freed).
34 */
35
36#define pr_fmt(fmt) "[TTM] " fmt
37
38#include <linux/dma-mapping.h>
39#include <linux/list.h>
40#include <linux/seq_file.h> /* for seq_printf */
41#include <linux/slab.h>
42#include <linux/spinlock.h>
43#include <linux/highmem.h>
44#include <linux/mm_types.h>
45#include <linux/module.h>
46#include <linux/mm.h>
47#include <linux/atomic.h>
48#include <linux/device.h>
49#include <linux/kthread.h>
50#include <drm/ttm/ttm_bo_driver.h>
51#include <drm/ttm/ttm_page_alloc.h>
52#include <drm/ttm/ttm_set_memory.h>
53
54#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
55#define SMALL_ALLOCATION 4
56#define FREE_ALL_PAGES (~0U)
57#define VADDR_FLAG_HUGE_POOL 1UL
58#define VADDR_FLAG_UPDATED_COUNT 2UL
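/* dma_page::vaddr is an unsigned long in this layout: it carries the
 * page-aligned CPU address returned by dma_alloc_attrs() together with the
 * flag bits above in its low bits, and the pointer is recovered by clearing
 * those bits before use (see __ttm_dma_free_page()).
 */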
59
60enum pool_type {
61 IS_UNDEFINED = 0,
62 IS_WC = 1 << 1,
63 IS_UC = 1 << 2,
64 IS_CACHED = 1 << 3,
65 IS_DMA32 = 1 << 4,
66 IS_HUGE = 1 << 5
67};
68
69/*
70 * The pool structure. There are up to nine pools:
71 * - generic (not restricted to DMA32):
72 * - write combined, uncached, cached.
73 * - dma32 (up to 2^32 - so up 4GB):
74 * - write combined, uncached, cached.
75 * - huge (not restricted to DMA32):
76 * - write combined, uncached, cached.
77 * for each 'struct device'. The 'cached' is for pages that are actively used.
78 * The other ones can be shrunk by the shrinker API if necessary.
79 * @pools: The 'struct device->dma_pools' link.
80 * @type: Type of the pool
81 * @lock: Protects the free_list from concurrent access. Must be
82 * used with the irqsave/irqrestore variants because the pool allocator may be
83 * called from delayed work.
84 * @free_list: Pool of pages that are free to be used. No order requirements.
85 * @dev: The device that is associated with these pools.
86 * @size: Size used during DMA allocation.
87 * @npages_free: Count of available pages for re-use.
88 * @npages_in_use: Count of pages that are in use.
89 * @nfrees: Stats when pool is shrinking.
90 * @nrefills: Stats when the pool is grown.
91 * @gfp_flags: Flags to pass for alloc_page.
92 * @name: Name of the pool.
93 * @dev_name: Name derived from dev - similar to how dev_info works.
94 * Used during shutdown as dev_info is unavailable during release.
95 */
96struct dma_pool {
97 struct list_head pools; /* The 'struct device->dma_pools link */
98 enum pool_type type;
99 spinlock_t lock;
100 struct list_head free_list;
101 struct device *dev;
102 unsigned size;
103 unsigned npages_free;
104 unsigned npages_in_use;
105 unsigned long nfrees; /* Stats when shrunk. */
106 unsigned long nrefills; /* Stats when grown. */
107 gfp_t gfp_flags;
108 char name[13]; /* "cached dma32" */
109 char dev_name[64]; /* Constructed from dev */
110};
111
112/*
113 * The accounting page keeping track of the allocated page along with
114 * the DMA address.
115 * @page_list: The link to the 'page_list' in 'struct dma_pool'.
116 * @vaddr: The virtual address of the page, plus a flag indicating whether the
117 * page belongs to a huge pool
118 * @dma: The bus address of the page. If the page is not allocated
119 * via the DMA API, it will be -1.
120 */
121struct dma_page {
122 struct list_head page_list;
123 unsigned long vaddr;
124 struct page *p;
125 dma_addr_t dma;
126};
127
128/*
129 * Limits for the pool. They are handled without locks because the only place
130 * where they may change is the sysfs store. They won't have an immediate effect
131 * anyway, so forcing serialization to access them is pointless.
132 */
133
134struct ttm_pool_opts {
135 unsigned alloc_size;
136 unsigned max_size;
137 unsigned small;
138};
139
140/*
141 * Contains the list of all of the 'struct device' and their corresponding
142 * DMA pools. Guarded by _manager->lock.
143 * @pools: The link to 'struct ttm_pool_manager->pools'
144 * @dev: The 'struct device' associated with the 'pool'
145 * @pool: The 'struct dma_pool' associated with the 'dev'
146 */
147struct device_pools {
148 struct list_head pools;
149 struct device *dev;
150 struct dma_pool *pool;
151};
152
153/*
154 * struct ttm_pool_manager - Holds memory pools for fast allocation
155 *
156 * @lock: Lock used when adding/removing from pools
157 * @pools: List of 'struct device' and 'struct dma_pool' tuples.
158 * @options: Limits for the pool.
159 * @npools: Total number of pools in existence.
160 * @mm_shrink: The structure used by [un]register_shrinker
161 */
162struct ttm_pool_manager {
163 struct mutex lock;
164 struct list_head pools;
165 struct ttm_pool_opts options;
166 unsigned npools;
167 struct shrinker mm_shrink;
168 struct kobject kobj;
169};
170
171static struct ttm_pool_manager *_manager;
172
173static struct attribute ttm_page_pool_max = {
174 .name = "pool_max_size",
175 .mode = S_IRUGO | S_IWUSR
176};
177static struct attribute ttm_page_pool_small = {
178 .name = "pool_small_allocation",
179 .mode = S_IRUGO | S_IWUSR
180};
181static struct attribute ttm_page_pool_alloc_size = {
182 .name = "pool_allocation_size",
183 .mode = S_IRUGO | S_IWUSR
184};
185
186static struct attribute *ttm_pool_attrs[] = {
187 &ttm_page_pool_max,
188 &ttm_page_pool_small,
189 &ttm_page_pool_alloc_size,
190 NULL
191};
192
193static void ttm_pool_kobj_release(struct kobject *kobj)
194{
195 struct ttm_pool_manager *m =
196 container_of(kobj, struct ttm_pool_manager, kobj);
197 kfree(m);
198}
199
200static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
201 const char *buffer, size_t size)
202{
203 struct ttm_pool_manager *m =
204 container_of(kobj, struct ttm_pool_manager, kobj);
205 int chars;
206 unsigned val;
207
208 chars = sscanf(buffer, "%u", &val);
209 if (chars == 0)
210 return size;
211
212 /* Convert kb to number of pages */
213 val = val / (PAGE_SIZE >> 10);
214
215 if (attr == &ttm_page_pool_max) {
216 m->options.max_size = val;
217 } else if (attr == &ttm_page_pool_small) {
218 m->options.small = val;
219 } else if (attr == &ttm_page_pool_alloc_size) {
220 if (val > NUM_PAGES_TO_ALLOC*8) {
221 pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
222 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
223 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
224 return size;
225 } else if (val > NUM_PAGES_TO_ALLOC) {
226 pr_warn("Setting allocation size to larger than %lu is not recommended\n",
227 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
228 }
229 m->options.alloc_size = val;
230 }
231
232 return size;
233}
234
235static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
236 char *buffer)
237{
238 struct ttm_pool_manager *m =
239 container_of(kobj, struct ttm_pool_manager, kobj);
240 unsigned val = 0;
241
242 if (attr == &ttm_page_pool_max)
243 val = m->options.max_size;
244 else if (attr == &ttm_page_pool_small)
245 val = m->options.small;
246 else if (attr == &ttm_page_pool_alloc_size)
247 val = m->options.alloc_size;
248
249 val = val * (PAGE_SIZE >> 10);
250
251 return snprintf(buffer, PAGE_SIZE, "%u\n", val);
252}
253
254static const struct sysfs_ops ttm_pool_sysfs_ops = {
255 .show = &ttm_pool_show,
256 .store = &ttm_pool_store,
257};
258
259static struct kobj_type ttm_pool_kobj_type = {
260 .release = &ttm_pool_kobj_release,
261 .sysfs_ops = &ttm_pool_sysfs_ops,
262 .default_attrs = ttm_pool_attrs,
263};
264
265static int ttm_set_pages_caching(struct dma_pool *pool,
266 struct page **pages, unsigned cpages)
267{
268 int r = 0;
269 /* Set page caching */
270 if (pool->type & IS_UC) {
271 r = ttm_set_pages_array_uc(pages, cpages);
272 if (r)
273 pr_err("%s: Failed to set %d pages to uc!\n",
274 pool->dev_name, cpages);
275 }
276 if (pool->type & IS_WC) {
277 r = ttm_set_pages_array_wc(pages, cpages);
278 if (r)
279 pr_err("%s: Failed to set %d pages to wc!\n",
280 pool->dev_name, cpages);
281 }
282 return r;
283}
284
285static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
286{
287 unsigned long attrs = 0;
288 dma_addr_t dma = d_page->dma;
289 d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL;
290 if (pool->type & IS_HUGE)
291 attrs = DMA_ATTR_NO_WARN;
292
293 dma_free_attrs(pool->dev, pool->size, (void *)d_page->vaddr, dma, attrs);
294
295 kfree(d_page);
296 d_page = NULL;
297}
298static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
299{
300 struct dma_page *d_page;
301 unsigned long attrs = 0;
302 void *vaddr;
303
304 d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
305 if (!d_page)
306 return NULL;
307
308 if (pool->type & IS_HUGE)
309 attrs = DMA_ATTR_NO_WARN;
310
311 vaddr = dma_alloc_attrs(pool->dev, pool->size, &d_page->dma,
312 pool->gfp_flags, attrs);
313 if (vaddr) {
314 if (is_vmalloc_addr(vaddr))
315 d_page->p = vmalloc_to_page(vaddr);
316 else
317 d_page->p = virt_to_page(vaddr);
318 d_page->vaddr = (unsigned long)vaddr;
319 if (pool->type & IS_HUGE)
320 d_page->vaddr |= VADDR_FLAG_HUGE_POOL;
321 } else {
322 kfree(d_page);
323 d_page = NULL;
324 }
325 return d_page;
326}
327static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
328{
329 enum pool_type type = IS_UNDEFINED;
330
331 if (flags & TTM_PAGE_FLAG_DMA32)
332 type |= IS_DMA32;
333 if (cstate == tt_cached)
334 type |= IS_CACHED;
335 else if (cstate == tt_uncached)
336 type |= IS_UC;
337 else
338 type |= IS_WC;
339
340 return type;
341}
342
343static void ttm_pool_update_free_locked(struct dma_pool *pool,
344 unsigned freed_pages)
345{
346 pool->npages_free -= freed_pages;
347 pool->nfrees += freed_pages;
348
349}
350
351/* set memory back to wb and free the pages. */
352static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
353{
354 struct page *page = d_page->p;
355 unsigned num_pages;
356
357 /* Don't set WB on WB page pool. */
358 if (!(pool->type & IS_CACHED)) {
359 num_pages = pool->size / PAGE_SIZE;
360 if (ttm_set_pages_wb(page, num_pages))
361 pr_err("%s: Failed to set %d pages to wb!\n",
362 pool->dev_name, num_pages);
363 }
364
365 list_del(&d_page->page_list);
366 __ttm_dma_free_page(pool, d_page);
367}
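/* A worked example for the conversion above, assuming 4 KiB PAGE_SIZE and a
 * 2 MiB HPAGE_PMD_SIZE: a huge-pool entry has pool->size == 2 MiB, so
 * num_pages == 512 and all 512 small pages backing the huge allocation are
 * switched back to write-back in a single call.
 */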
368
369static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
370 struct page *pages[], unsigned npages)
371{
372 struct dma_page *d_page, *tmp;
373
374 if (pool->type & IS_HUGE) {
375 list_for_each_entry_safe(d_page, tmp, d_pages, page_list)
376 ttm_dma_page_put(pool, d_page);
377
378 return;
379 }
380
381 /* Don't set WB on WB page pool. */
382 if (npages && !(pool->type & IS_CACHED) &&
383 ttm_set_pages_array_wb(pages, npages))
384 pr_err("%s: Failed to set %d pages to wb!\n",
385 pool->dev_name, npages);
386
387 list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
388 list_del(&d_page->page_list);
389 __ttm_dma_free_page(pool, d_page);
390 }
391}
392
393/*
394 * Free pages from pool.
395 *
396 * To prevent hogging the ttm_swap process we only free at most
397 * NUM_PAGES_TO_ALLOC pages in one go.
398 *
399 * @pool: to free the pages from
400 * @nr_free: number of pages to free; FREE_ALL_PAGES frees every page in the pool
401 * @use_static: Safe to use static buffer
402 **/
403static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
404 bool use_static)
405{
406 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
407 unsigned long irq_flags;
408 struct dma_page *dma_p, *tmp;
409 struct page **pages_to_free;
410 struct list_head d_pages;
411 unsigned freed_pages = 0,
412 npages_to_free = nr_free;
413
414 if (NUM_PAGES_TO_ALLOC < nr_free)
415 npages_to_free = NUM_PAGES_TO_ALLOC;
416
417 if (use_static)
418 pages_to_free = static_buf;
419 else
420 pages_to_free = kmalloc_array(npages_to_free,
421 sizeof(struct page *),
422 GFP_KERNEL);
423
424 if (!pages_to_free) {
425 pr_debug("%s: Failed to allocate memory for pool free operation\n",
426 pool->dev_name);
427 return 0;
428 }
429 INIT_LIST_HEAD(&d_pages);
430restart:
431 spin_lock_irqsave(&pool->lock, irq_flags);
432
433 /* We're picking the oldest ones off the list */
434 list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
435 page_list) {
436 if (freed_pages >= npages_to_free)
437 break;
438
439 /* Move the dma_page from one list to another. */
440 list_move(&dma_p->page_list, &d_pages);
441
442 pages_to_free[freed_pages++] = dma_p->p;
443 /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
444 if (freed_pages >= NUM_PAGES_TO_ALLOC) {
445
446 ttm_pool_update_free_locked(pool, freed_pages);
447 /**
448 * Because changing page caching is costly
449 * we unlock the pool to prevent stalling.
450 */
451 spin_unlock_irqrestore(&pool->lock, irq_flags);
452
453 ttm_dma_pages_put(pool, &d_pages, pages_to_free,
454 freed_pages);
455
456 INIT_LIST_HEAD(&d_pages);
457
458 if (likely(nr_free != FREE_ALL_PAGES))
459 nr_free -= freed_pages;
460
461 if (NUM_PAGES_TO_ALLOC >= nr_free)
462 npages_to_free = nr_free;
463 else
464 npages_to_free = NUM_PAGES_TO_ALLOC;
465
466 freed_pages = 0;
467
468 /* free all so restart the processing */
469 if (nr_free)
470 goto restart;
471
472 /* Not allowed to fall through or break because
473 * following context is inside spinlock while we are
474 * outside here.
475 */
476 goto out;
477
478 }
479 }
480
481 /* remove range of pages from the pool */
482 if (freed_pages) {
483 ttm_pool_update_free_locked(pool, freed_pages);
484 nr_free -= freed_pages;
485 }
486
487 spin_unlock_irqrestore(&pool->lock, irq_flags);
488
489 if (freed_pages)
490 ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
491out:
492 if (pages_to_free != static_buf)
493 kfree(pages_to_free);
494 return nr_free;
495}
496
497static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
498{
499 struct device_pools *p;
500 struct dma_pool *pool;
501
502 if (!dev)
503 return;
504
505 mutex_lock(&_manager->lock);
506 list_for_each_entry_reverse(p, &_manager->pools, pools) {
507 if (p->dev != dev)
508 continue;
509 pool = p->pool;
510 if (pool->type != type)
511 continue;
512
513 list_del(&p->pools);
514 kfree(p);
515 _manager->npools--;
516 break;
517 }
518 list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
519 if (pool->type != type)
520 continue;
521 /* Takes a spinlock.. */
522 /* OK to use static buffer since global mutex is held. */
523 ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
524 WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
525 /* This code path is called after _all_ references to the
526 * struct device have been dropped - so nobody should be
527 * touching it. In case somebody is trying to _add_ we are
528 * guarded by the mutex. */
529 list_del(&pool->pools);
530 kfree(pool);
531 break;
532 }
533 mutex_unlock(&_manager->lock);
534}
535
536/*
537 * When the 'struct device' is freed this destructor is run,
538 * although the pool might have already been freed earlier.
539 */
540static void ttm_dma_pool_release(struct device *dev, void *res)
541{
542 struct dma_pool *pool = *(struct dma_pool **)res;
543
544 if (pool)
545 ttm_dma_free_pool(dev, pool->type);
546}
547
548static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
549{
550 return *(struct dma_pool **)res == match_data;
551}
552
553static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
554 enum pool_type type)
555{
556 const char *n[] = {"wc", "uc", "cached", " dma32", "huge"};
557 enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_HUGE};
558 struct device_pools *sec_pool = NULL;
559 struct dma_pool *pool = NULL, **ptr;
560 unsigned i;
561 int ret = -ENODEV;
562 char *p;
563
564 if (!dev)
565 return NULL;
566
567 ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
568 if (!ptr)
569 return NULL;
570
571 ret = -ENOMEM;
572
573 pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
574 dev_to_node(dev));
575 if (!pool)
576 goto err_mem;
577
578 sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
579 dev_to_node(dev));
580 if (!sec_pool)
581 goto err_mem;
582
583 INIT_LIST_HEAD(&sec_pool->pools);
584 sec_pool->dev = dev;
585 sec_pool->pool = pool;
586
587 INIT_LIST_HEAD(&pool->free_list);
588 INIT_LIST_HEAD(&pool->pools);
589 spin_lock_init(&pool->lock);
590 pool->dev = dev;
591 pool->npages_free = pool->npages_in_use = 0;
592 pool->nfrees = 0;
593 pool->gfp_flags = flags;
594 if (type & IS_HUGE)
595#ifdef CONFIG_TRANSPARENT_HUGEPAGE
596 pool->size = HPAGE_PMD_SIZE;
597#else
598 BUG();
599#endif
600 else
601 pool->size = PAGE_SIZE;
602 pool->type = type;
603 pool->nrefills = 0;
604 p = pool->name;
605 for (i = 0; i < ARRAY_SIZE(t); i++) {
606 if (type & t[i]) {
607 p += scnprintf(p, sizeof(pool->name) - (p - pool->name),
608 "%s", n[i]);
609 }
610 }
611 *p = 0;
612 /* We copy the name for pr_ calls b/c when dma_pool_destroy is called
613 * - the kobj->name has already been deallocated.*/
614 snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
615 dev_driver_string(dev), dev_name(dev));
616 mutex_lock(&_manager->lock);
617 /* You can get the dma_pool from either the global: */
618 list_add(&sec_pool->pools, &_manager->pools);
619 _manager->npools++;
620 /* or from 'struct device': */
621 list_add(&pool->pools, &dev->dma_pools);
622 mutex_unlock(&_manager->lock);
623
624 *ptr = pool;
625 devres_add(dev, ptr);
626
627 return pool;
628err_mem:
629 devres_free(ptr);
630 kfree(sec_pool);
631 kfree(pool);
632 return ERR_PTR(ret);
633}
634
635static struct dma_pool *ttm_dma_find_pool(struct device *dev,
636 enum pool_type type)
637{
638 struct dma_pool *pool, *tmp;
639
640 if (type == IS_UNDEFINED)
641 return NULL;
642
643 /* NB: We iterate on the 'struct dev' which has no spinlock, but
644 * it does have a kref which we have taken. The kref is taken during
645 * graphic driver loading - in the drm_pci_init it calls either
646 * pci_dev_get or pci_register_driver which both end up taking a kref
647 * on 'struct device'.
648 *
649 * On teardown, the graphic drivers end up quiescing the TTM (put_pages)
650 * and calls the devres destructors: ttm_dma_pool_release. The nice
651 * thing is that at that point there are no pages associated with the
652 * driver so this function will not be called.
653 */
654 list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools)
655 if (pool->type == type)
656 return pool;
657 return NULL;
658}
659
660/*
661 * Free the pages that failed to change the caching state. Pages whose
662 * caching state was already changed are put back into the
663 * pool.
664 */
665static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
666 struct list_head *d_pages,
667 struct page **failed_pages,
668 unsigned cpages)
669{
670 struct dma_page *d_page, *tmp;
671 struct page *p;
672 unsigned i = 0;
673
674 p = failed_pages[0];
675 if (!p)
676 return;
677 /* Find the failed page. */
678 list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
679 if (d_page->p != p)
680 continue;
681 /* .. and then progress over the full list. */
682 list_del(&d_page->page_list);
683 __ttm_dma_free_page(pool, d_page);
684 if (++i < cpages)
685 p = failed_pages[i];
686 else
687 break;
688 }
689
690}
691
692/*
693 * Allocate 'count' pages and set their caching state. Every allocated
694 * page is wrapped in a 'struct dma_page' and linked onto the
695 * 'd_pages' list.
696 * We return zero for success, and negative numbers as errors.
697 */
698static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
699 struct list_head *d_pages,
700 unsigned count)
701{
702 struct page **caching_array;
703 struct dma_page *dma_p;
704 struct page *p;
705 int r = 0;
706 unsigned i, j, npages, cpages;
707 unsigned max_cpages = min(count,
708 (unsigned)(PAGE_SIZE/sizeof(struct page *)));
709
710 /* allocate array for page caching change */
711 caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
712 GFP_KERNEL);
713
714 if (!caching_array) {
715 pr_debug("%s: Unable to allocate table for new pages\n",
716 pool->dev_name);
717 return -ENOMEM;
718 }
719
720 if (count > 1)
721 pr_debug("%s: (%s:%d) Getting %d pages\n",
722 pool->dev_name, pool->name, current->pid, count);
723
724 for (i = 0, cpages = 0; i < count; ++i) {
725 dma_p = __ttm_dma_alloc_page(pool);
726 if (!dma_p) {
727 pr_debug("%s: Unable to get page %u\n",
728 pool->dev_name, i);
729
730 /* store already allocated pages in the pool after
731 * setting the caching state */
732 if (cpages) {
733 r = ttm_set_pages_caching(pool, caching_array,
734 cpages);
735 if (r)
736 ttm_dma_handle_caching_state_failure(
737 pool, d_pages, caching_array,
738 cpages);
739 }
740 r = -ENOMEM;
741 goto out;
742 }
743 p = dma_p->p;
744 list_add(&dma_p->page_list, d_pages);
745
746#ifdef CONFIG_HIGHMEM
747 /* gfp flags of highmem page should never be dma32 so we
748 * should be fine in such a case
749 */
750 if (PageHighMem(p))
751 continue;
752#endif
753
754 npages = pool->size / PAGE_SIZE;
755 for (j = 0; j < npages; ++j) {
756 caching_array[cpages++] = p + j;
757 if (cpages == max_cpages) {
758 /* Note: Cannot hold the spinlock */
759 r = ttm_set_pages_caching(pool, caching_array,
760 cpages);
761 if (r) {
762 ttm_dma_handle_caching_state_failure(
763 pool, d_pages, caching_array,
764 cpages);
765 goto out;
766 }
767 cpages = 0;
768 }
769 }
770 }
771
772 if (cpages) {
773 r = ttm_set_pages_caching(pool, caching_array, cpages);
774 if (r)
775 ttm_dma_handle_caching_state_failure(pool, d_pages,
776 caching_array, cpages);
777 }
778out:
779 kfree(caching_array);
780 return r;
781}
782
783/*
784 * @return count of pages still required to fulfill the request.
785 */
static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
					 unsigned long *irq_flags)
{
	unsigned count = _manager->options.small;
	int r = pool->npages_free;

	if (count > pool->npages_free) {
		struct list_head d_pages;

		INIT_LIST_HEAD(&d_pages);

		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		/* Returns zero on success, or a negative error code. */
		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);

		spin_lock_irqsave(&pool->lock, *irq_flags);
		if (!r) {
			/* Add the fresh pages to the end of the free list. */
			list_splice(&d_pages, &pool->free_list);
			++pool->nrefills;
			pool->npages_free += count;
			r = count;
		} else {
			struct dma_page *d_page;
			unsigned cpages = 0;

			pr_debug("%s: Failed to fill %s pool (r:%d)!\n",
				 pool->dev_name, pool->name, r);

			list_for_each_entry(d_page, &d_pages, page_list) {
				cpages++;
			}
			list_splice_tail(&d_pages, &pool->free_list);
			pool->npages_free += cpages;
			r = cpages;
		}
	}
	return r;
}
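
/*
 * Worked example (hypothetical numbers): with _manager->options.small == 4
 * and one page currently on the free list, the code above drops the pool
 * lock, asks ttm_dma_pool_alloc_new_pages() for four new pages, re-takes the
 * lock and, on success, splices them onto the free list (npages_free becomes
 * five) and returns four. The caller only checks that the return value is
 * non-zero before taking a page off the free list.
 */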

/*
 * The populate list is actually a stack (not that it matters, as TTM
 * allocates one page at a time).
 * Return the dma_page pointer on success, NULL otherwise.
 */
static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
					       struct ttm_dma_tt *ttm_dma,
					       unsigned index)
{
	struct dma_page *d_page = NULL;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	unsigned long irq_flags;
	int count;

	spin_lock_irqsave(&pool->lock, irq_flags);
	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
	if (count) {
		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
		ttm->pages[index] = d_page->p;
		ttm_dma->dma_address[index] = d_page->dma;
		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
		pool->npages_in_use += 1;
		pool->npages_free -= 1;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return d_page;
}

static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	gfp_t gfp_flags;

	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags = GFP_USER | GFP_DMA32;
	else
		gfp_flags = GFP_HIGHUSER;
	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (huge) {
		gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
			__GFP_KSWAPD_RECLAIM;
		gfp_flags &= ~__GFP_MOVABLE;
		gfp_flags &= ~__GFP_COMP;
	}

	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	return gfp_flags;
}
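
/*
 * For illustration, the combinations the helper above can produce:
 *   - TTM_PAGE_FLAG_DMA32 set:        GFP_USER | GFP_DMA32
 *   - otherwise:                      GFP_HIGHUSER
 *   - TTM_PAGE_FLAG_ZERO_ALLOC adds   __GFP_ZERO
 *   - huge == true adds               GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
 *                                     __GFP_KSWAPD_RECLAIM and clears
 *                                     __GFP_MOVABLE and __GFP_COMP
 *   - TTM_PAGE_FLAG_NO_RETRY adds     __GFP_RETRY_MAYFAIL
 */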

/*
 * On success the TTM's pages list holds 'num_pages' correctly cached pages
 * and zero is returned. On failure a negative error code (-ENOMEM, etc.) is
 * returned.
 */
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
		     struct ttm_operation_ctx *ctx)
{
	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	unsigned long num_pages = ttm->num_pages;
	struct dma_pool *pool;
	struct dma_page *d_page;
	enum pool_type type;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
		return -ENOMEM;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	i = 0;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		goto skip_huge;

	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
	if (!pool) {
		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true);

		pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
		if (IS_ERR_OR_NULL(pool))
			goto skip_huge;
	}

	while (num_pages >= HPAGE_PMD_NR) {
		unsigned j;

		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (!d_page)
			break;

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						pool->size, ctx);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
		for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
			ttm->pages[j] = ttm->pages[j - 1] + 1;
			ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
				PAGE_SIZE;
		}

		i += HPAGE_PMD_NR;
		num_pages -= HPAGE_PMD_NR;
	}

skip_huge:
#endif

	pool = ttm_dma_find_pool(dev, type);
	if (!pool) {
		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false);

		pool = ttm_dma_pool_init(dev, gfp_flags, type);
		if (IS_ERR_OR_NULL(pool))
			return -ENOMEM;
	}

	while (num_pages) {
		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (!d_page) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						pool->size, ctx);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
		++i;
		--num_pages;
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
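
/*
 * Worked example for the huge-page path above (illustrative, assuming 4 KiB
 * pages and HPAGE_PMD_NR == 512): a 1024-page TTM needs only two allocations
 * from the IS_HUGE pool. For each allocation, only the first page comes from
 * the pool; the remaining 511 page pointers and DMA addresses are filled in
 * by the inner loop, each one page and PAGE_SIZE bytes after its predecessor.
 */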

/* Put all pages on the pages list back into their pool to wait for reuse. */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct dma_pool *pool;
	struct dma_page *d_page, *next;
	enum pool_type type;
	bool is_cached = false;
	unsigned count, i, npages = 0;
	unsigned long irq_flags;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
	if (pool) {
		count = 0;
		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
					 page_list) {
			if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
				continue;

			count++;
			if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
				ttm_mem_global_free_page(mem_glob, d_page->p,
							 pool->size);
				d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
			}
			ttm_dma_page_put(pool, d_page);
		}

		spin_lock_irqsave(&pool->lock, irq_flags);
		pool->npages_in_use -= count;
		pool->nfrees += count;
		spin_unlock_irqrestore(&pool->lock, irq_flags);
	}
#endif

	pool = ttm_dma_find_pool(dev, type);
	if (!pool)
		return;

	is_cached = (ttm_dma_find_pool(pool->dev,
		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);

	/* Make sure the pages array matches the list and count the pages. */
	count = 0;
	list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
				 page_list) {
		ttm->pages[count] = d_page->p;
		count++;

		if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
			ttm_mem_global_free_page(mem_glob, d_page->p,
						 pool->size);
			d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
		}

		if (is_cached)
			ttm_dma_page_put(pool, d_page);
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	pool->npages_in_use -= count;
	if (is_cached) {
		pool->nfrees += count;
	} else {
		pool->npages_free += count;
		list_splice(&ttm_dma->pages_list, &pool->free_list);
		/*
		 * Wait until at least NUM_PAGES_TO_ALLOC pages are queued up
		 * to be freed in order to minimize calls to set_memory_wb().
		 */
		if (pool->npages_free >= (_manager->options.max_size +
					  NUM_PAGES_TO_ALLOC))
			npages = pool->npages_free - _manager->options.max_size;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; i++) {
		ttm->pages[i] = NULL;
		ttm_dma->dma_address[i] = 0;
	}

	/* shrink pool if necessary (only on !is_cached pools) */
	if (npages)
		ttm_dma_page_pool_free(pool, npages, false);
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
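
/*
 * A minimal sketch of how a driver could wire the two exported entry points
 * above into its ttm_tt callbacks. The my_tt/my_device names and the
 * embedding layout are hypothetical; the general idea is that the driver
 * embeds a struct ttm_dma_tt in its own tt object and forwards its
 * populate/unpopulate hooks here:
 *
 *	struct my_tt {
 *		struct ttm_dma_tt dma;
 *		struct my_device *mdev;	// hypothetical, has a struct device *dev
 *	};
 *
 *	static int my_tt_populate(struct ttm_tt *ttm,
 *				  struct ttm_operation_ctx *ctx)
 *	{
 *		struct my_tt *tt = container_of(ttm, struct my_tt, dma.ttm);
 *
 *		return ttm_dma_populate(&tt->dma, tt->mdev->dev, ctx);
 *	}
 *
 *	static void my_tt_unpopulate(struct ttm_tt *ttm)
 *	{
 *		struct my_tt *tt = container_of(ttm, struct my_tt, dma.ttm);
 *
 *		ttm_dma_unpopulate(&tt->dma, tt->mdev->dev);
 *	}
 */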

/**
 * Callback for mm to request pool to reduce number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
 * shrinkers
 */
static unsigned long
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static unsigned start_pool;
	unsigned idx = 0;
	unsigned pool_offset;
	unsigned shrink_pages = sc->nr_to_scan;
	struct device_pools *p;
	unsigned long freed = 0;

	if (list_empty(&_manager->pools))
		return SHRINK_STOP;

	if (!mutex_trylock(&_manager->lock))
		return SHRINK_STOP;
	if (!_manager->npools)
		goto out;
	pool_offset = ++start_pool % _manager->npools;
	list_for_each_entry(p, &_manager->pools, pools) {
		unsigned nr_free;

		if (!p->dev)
			continue;
		if (shrink_pages == 0)
			break;
		/* Do it in round-robin fashion. */
		if (++idx < pool_offset)
			continue;
		nr_free = shrink_pages;
		/* OK to use static buffer since global mutex is held. */
		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
		freed += nr_free - shrink_pages;

		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
			 p->pool->dev_name, p->pool->name, current->pid,
			 nr_free, shrink_pages);
	}
out:
	mutex_unlock(&_manager->lock);
	return freed;
}

static unsigned long
ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct device_pools *p;
	unsigned long count = 0;

	if (!mutex_trylock(&_manager->lock))
		return 0;
	list_for_each_entry(p, &_manager->pools, pools)
		count += p->pool->npages_free;
	mutex_unlock(&_manager->lock);
	return count;
}

static int ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
	manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
	return register_shrinker(&manager->mm_shrink);
}

static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	pr_info("Initializing DMA pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	mutex_init(&_manager->lock);
	INIT_LIST_HEAD(&_manager->pools);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	/* This takes care of auto-freeing the _manager */
	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "dma_pool");
	if (unlikely(ret != 0))
		goto error;

	ret = ttm_dma_pool_mm_shrink_init(_manager);
	if (unlikely(ret != 0))
		goto error;
	return 0;

error:
	kobject_put(&_manager->kobj);
	_manager = NULL;
	return ret;
}

void ttm_dma_page_alloc_fini(void)
{
	struct device_pools *p, *t;

	pr_info("Finalizing DMA pool allocator\n");
	ttm_dma_pool_mm_shrink_fini(_manager);

	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
			current->pid);
		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
				       ttm_dma_pool_match, p->pool));
		ttm_dma_free_pool(p->dev, p->pool->type);
	}
	kobject_put(&_manager->kobj);
	_manager = NULL;
}

int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct device_pools *p;
	struct dma_pool *pool = NULL;

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "         pool      refills   pages freed    inuse available     name\n");
	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools) {
		struct device *dev = p->dev;
		if (!dev)
			continue;
		pool = p->pool;
		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
				pool->name, pool->nrefills,
				pool->nfrees, pool->npages_in_use,
				pool->npages_free,
				pool->dev_name);
	}
	mutex_unlock(&_manager->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
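
/*
 * A sketch of how a DRM driver might expose this file through its debugfs
 * file list; the my_ttm_debugfs_list and my_debugfs_init names are
 * hypothetical, and the registration call is only indicative of the generic
 * drm_debugfs helpers some drivers use for this purpose:
 *
 *	static struct drm_info_list my_ttm_debugfs_list[] = {
 *		{ "ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL },
 *	};
 *
 *	static int my_debugfs_init(struct drm_minor *minor)
 *	{
 *		return drm_debugfs_create_files(my_ttm_debugfs_list,
 *						ARRAY_SIZE(my_ttm_debugfs_list),
 *						minor->debugfs_root, minor);
 *	}
 */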