v6.13.7
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/cc_platform.h>
#include <linux/debugfs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/shmem_fs.h>

#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include <drm/drm_util.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>

#include "ttm_module.h"

static unsigned long ttm_pages_limit;

MODULE_PARM_DESC(pages_limit, "Limit for the allocated pages");
module_param_named(pages_limit, ttm_pages_limit, ulong, 0644);

static unsigned long ttm_dma32_pages_limit;

MODULE_PARM_DESC(dma32_pages_limit, "Limit for the allocated DMA32 pages");
module_param_named(dma32_pages_limit, ttm_dma32_pages_limit, ulong, 0644);
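
/*
 * Illustrative usage note (not part of the original file): both limits are
 * ordinary module parameters, so they can be set at load time or adjusted
 * later through sysfs thanks to the 0644 permissions. A hypothetical
 * session might look like:
 *
 *	modprobe ttm pages_limit=2097152 dma32_pages_limit=524288
 *	echo 4194304 > /sys/module/ttm/parameters/pages_limit
 *
 * Values are in pages, not bytes; 2097152 pages of 4 KiB is 8 GiB. Limits
 * left at zero are replaced by the defaults passed to ttm_tt_mgr_init()
 * further down in this file.
 */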

static atomic_long_t ttm_pages_allocated;
static atomic_long_t ttm_dma32_pages_allocated;

/*
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_device *bdev = bo->bdev;
	struct drm_device *ddev = bo->base.dev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_TT_FLAG_EXTERNAL;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}
	/*
	 * When using dma_alloc_coherent with memory encryption the
	 * mapped TT pages need to be decrypted or otherwise the drivers
	 * will end up sending encrypted mem to the gpu.
	 */
	if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		page_flags |= TTM_TT_FLAG_DECRYPTED;
		drm_info_once(ddev, "TT memory decryption enabled.");
	}

	bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE &&
		!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL));

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_create);
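
/*
 * Illustrative sketch (not part of the original file): ttm_tt_create()
 * delegates the actual allocation to the driver through the ttm_tt_create
 * callback in ttm_device_funcs. A minimal callback for a hypothetical
 * "foo" driver that needs no per-page DMA bookkeeping could look like:
 *
 * struct foo_ttm_tt {
 *	struct ttm_tt ttm;
 *	// driver-private GART state would live here
 * };
 *
 * static struct ttm_tt *foo_ttm_tt_create(struct ttm_buffer_object *bo,
 *					   uint32_t page_flags)
 * {
 *	struct foo_ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *
 *	if (!tt)
 *		return NULL;
 *
 *	// ttm_tt_init() fills the fields and allocates the page directory
 *	if (ttm_tt_init(&tt->ttm, bo, page_flags, ttm_cached, 0)) {
 *		kfree(tt);
 *		return NULL;
 *	}
 *	return &tt->ttm;
 * }
 */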

/*
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(void*), GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	return 0;
}

static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +
			      sizeof(*ttm->dma_address), GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->dma_address = kvcalloc(ttm->num_pages, sizeof(*ttm->dma_address),
				    GFP_KERNEL);
	if (!ttm->dma_address)
		return -ENOMEM;

	return 0;
}
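
/*
 * Illustrative note (not part of the original file): the "dma" variant
 * makes a single kvcalloc() call sized for both arrays and points
 * dma_address at the tail, so one zeroed block of num_pages *
 * (sizeof(struct page *) + sizeof(dma_addr_t)) bytes is laid out as:
 *
 *	ttm->pages[0 .. num_pages-1]        struct page * entries
 *	ttm->dma_address[0 .. num_pages-1]  dma_addr_t entries, starting at
 *	                                    (void *)(ttm->pages + ttm->num_pages)
 *
 * Both arrays share one lifetime: freeing ttm->pages in ttm_tt_fini()
 * releases the dma_address array as well.
 */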

void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	bdev->funcs->ttm_tt_destroy(bdev, ttm);
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_destroy);

static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags,
			       enum ttm_caching caching,
			       unsigned long extra_pages)
{
	ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages;
	ttm->page_flags = page_flags;
	ttm->dma_address = NULL;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
	ttm->caching = caching;
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags, enum ttm_caching caching,
		unsigned long extra_pages)
{
	ttm_tt_init_fields(ttm, bo, page_flags, caching, extra_pages);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);
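
/*
 * Illustrative note (not part of the original file): extra_pages lets a
 * driver allocate more backing pages than the object size strictly needs,
 * for instance to keep per-BO metadata next to the payload. A hypothetical
 * caller reserving one extra page might do:
 *
 *	// bo->base.size bytes of payload plus one private metadata page
 *	ret = ttm_tt_init(&tt->ttm, bo, page_flags, ttm_cached, 1);
 *	if (ret)
 *		return ret;
 *
 * ttm_tt_init_fields() simply adds extra_pages to the page-aligned size,
 * so the extra page is populated, swapped and freed together with the
 * rest of the ttm_tt.
 */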

void ttm_tt_fini(struct ttm_tt *ttm)
{
	WARN_ON(ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);

	if (ttm->swap_storage)
		fput(ttm->swap_storage);
	ttm->swap_storage = NULL;

	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm->dma_address);
	ttm->pages = NULL;
	ttm->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		   uint32_t page_flags, enum ttm_caching caching)
{
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags, caching, 0);

	if (page_flags & TTM_TT_FLAG_EXTERNAL)
		ret = ttm_sg_tt_alloc_page_directory(ttm);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);
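
/*
 * Illustrative note (not part of the original file): drivers use
 * ttm_sg_tt_init() instead of ttm_tt_init() when the BO needs DMA
 * addresses alongside (or instead of) struct page pointers. With
 * TTM_TT_FLAG_EXTERNAL set the pages come from somewhere else, for
 * example a dma-buf import where the exporter's sg_table provides the
 * addresses, so only the dma_address array is allocated here. A
 * hypothetical driver callback might choose like this:
 *
 *	if (bo->type == ttm_bo_type_sg)
 *		ret = ttm_sg_tt_init(&tt->ttm, bo, page_flags, ttm_cached);
 *	else
 *		ret = ttm_tt_init(&tt->ttm, bo, page_flags, ttm_cached, 0);
 */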

int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	gfp_t gfp_mask;
	int i, ret;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;
	gfp_mask = mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page_gfp(swap_space, i,
							gfp_mask);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;

	return 0;

out_err:
	return ret;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapin);

/**
 * ttm_tt_swapout - swap out tt object
 *
 * @bdev: TTM device structure.
 * @ttm: The struct ttm_tt.
 * @gfp_flags: Flags to use for memory allocation.
 *
 * Swapout a TT object to a shmem_file, return number of pages swapped out or
 * negative error code.
 */
int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
		   gfp_t gfp_flags)
{
	loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i, ret;

	swap_storage = shmem_file_setup("ttm swap", size, 0);
	if (IS_ERR(swap_storage)) {
		pr_err("Failed allocating swap storage\n");
		return PTR_ERR(swap_storage);
	}

	swap_space = swap_storage->f_mapping;
	gfp_flags &= mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_flags);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_TT_FLAG_SWAPPED;

	return ttm->num_pages;

out_err:
	fput(swap_storage);

	return ret;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapout);
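
/*
 * Illustrative note (not part of the original file): swapout copies every
 * backing page into an anonymous shmem file, unpopulates the ttm_tt and
 * marks it TTM_TT_FLAG_SWAPPED; swapin reverses the copy once fresh pages
 * exist again. The round trip is driven from ttm_tt_populate() below,
 * roughly:
 *
 *	ttm_tt_swapout(bdev, ttm, GFP_KERNEL);	// pages -> shmem, unpopulate
 *	...
 *	ttm_tt_populate(bdev, ttm, &ctx);	// fresh pages, then swapin
 */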

int ttm_tt_populate(struct ttm_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_add(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
	       atomic_long_read(&ttm_dma32_pages_allocated) >
	       ttm_dma32_pages_limit) {

		ret = ttm_global_swapout(ctx, GFP_KERNEL);
		if (ret == 0)
			break;
		if (ret < 0)
			goto error;
	}

	if (bdev->funcs->ttm_tt_populate)
		ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		goto error;

	ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
	if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_tt_unpopulate(bdev, ttm);
			return ret;
		}
	}

	return 0;

error:
	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}
	return ret;
}

#if IS_ENABLED(CONFIG_DRM_TTM_KUNIT_TEST)
EXPORT_SYMBOL(ttm_tt_populate);
#endif
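
/*
 * Illustrative sketch (not part of the original file): a driver that needs
 * work beyond plain pool allocation can provide the ttm_tt_populate
 * callback and still reuse the pool. A hypothetical override that
 * post-processes the freshly allocated pages might look like:
 *
 * static int foo_ttm_tt_populate(struct ttm_device *bdev,
 *				  struct ttm_tt *ttm,
 *				  struct ttm_operation_ctx *ctx)
 * {
 *	int ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
 *
 *	if (ret)
 *		return ret;
 *
 *	// driver-specific setup of ttm->pages goes here
 *	return 0;
 * }
 */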

void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	if (bdev->funcs->ttm_tt_unpopulate)
		bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_free(&bdev->pool, ttm);

	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_unpopulate);

#ifdef CONFIG_DEBUG_FS

/* Test the shrinker functions and dump the result */
static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct ttm_operation_ctx ctx = { false, false };

	seq_printf(m, "%d\n", ttm_global_swapout(&ctx, GFP_KERNEL));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);

#endif


/*
 * ttm_tt_mgr_init - register with the MM shrinker
 *
 * Register with the MM shrinker for swapping out BOs.
 */
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_tt_debugfs_shrink_fops);
#endif

	if (!ttm_pages_limit)
		ttm_pages_limit = num_pages;

	if (!ttm_dma32_pages_limit)
		ttm_dma32_pages_limit = num_dma32_pages;
}

static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
				       struct iosys_map *dmap,
				       pgoff_t i)
{
	struct ttm_kmap_iter_tt *iter_tt =
		container_of(iter, typeof(*iter_tt), base);

	iosys_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
						       iter_tt->prot));
}

static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
					 struct iosys_map *map)
{
	kunmap_local(map->vaddr);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
	.map_local = ttm_kmap_iter_tt_map_local,
	.unmap_local = ttm_kmap_iter_tt_unmap_local,
	.maps_tt = true,
};

/**
 * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
 * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
 * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
		      struct ttm_tt *tt)
{
	iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
	iter_tt->tt = tt;
	if (tt)
		iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
	else
		iter_tt->prot = PAGE_KERNEL;

	return &iter_tt->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
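
/*
 * Illustrative sketch (not part of the original file): the kmap iterator
 * gives callers a uniform way to map one page at a time with the caching-
 * appropriate protection. A hypothetical user copying the first page of a
 * populated ttm_tt into a caller-provided buffer buf might do:
 *
 *	struct ttm_kmap_iter_tt iter_tt;
 *	struct ttm_kmap_iter *iter;
 *	struct iosys_map map;
 *
 *	iter = ttm_kmap_iter_tt_init(&iter_tt, tt);
 *	iter->ops->map_local(iter, &map, 0);	// kmap_local of page 0
 *	memcpy(buf, map.vaddr, PAGE_SIZE);
 *	iter->ops->unmap_local(iter, &map);
 */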

unsigned long ttm_tt_pages_limit(void)
{
	return ttm_pages_limit;
}
EXPORT_SYMBOL(ttm_tt_pages_limit);
v5.14.15
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>

#include "ttm_module.h"

static unsigned long ttm_pages_limit;

MODULE_PARM_DESC(pages_limit, "Limit for the allocated pages");
module_param_named(pages_limit, ttm_pages_limit, ulong, 0644);

static unsigned long ttm_dma32_pages_limit;

MODULE_PARM_DESC(dma32_pages_limit, "Limit for the allocated DMA32 pages");
module_param_named(dma32_pages_limit, ttm_dma32_pages_limit, ulong, 0644);

static atomic_long_t ttm_pages_allocated;
static atomic_long_t ttm_dma32_pages_allocated;

/*
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_PAGE_FLAG_SG;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	return 0;
}

/*
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
			GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;
	return 0;
}

static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages,
				    sizeof(*ttm->pages) +
				    sizeof(*ttm->dma_address),
				    GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;

	ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->dma_address = kvmalloc_array(ttm->num_pages,
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->dma_address)
		return -ENOMEM;
	return 0;
}

void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	ttm_tt_unpopulate(bdev, ttm);

	if (ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
}
EXPORT_SYMBOL(ttm_tt_destroy_common);
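
/*
 * Illustrative sketch (not part of the original file): in this version a
 * driver's ttm_tt_destroy callback was expected to call
 * ttm_tt_destroy_common() before tearing down its own state, roughly:
 *
 * static void foo_ttm_tt_destroy(struct ttm_device *bdev,
 *				  struct ttm_tt *ttm)
 * {
 *	struct foo_ttm_tt *tt = container_of(ttm, struct foo_ttm_tt, ttm);
 *
 *	ttm_tt_destroy_common(bdev, ttm);	// unpopulate + drop swap file
 *	ttm_tt_fini(&tt->ttm);			// free the page directory
 *	kfree(tt);
 * }
 */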

void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	bdev->funcs->ttm_tt_destroy(bdev, ttm);
}

static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags,
			       enum ttm_caching caching)
{
	ttm->num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
	ttm->caching = ttm_cached;
	ttm->page_flags = page_flags;
	ttm->dma_address = NULL;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
	ttm->caching = caching;
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags, enum ttm_caching caching)
{
	ttm_tt_init_fields(ttm, bo, page_flags, caching);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm->dma_address);
	ttm->pages = NULL;
	ttm->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		   uint32_t page_flags, enum ttm_caching caching)
{
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags, caching);

	if (page_flags & TTM_PAGE_FLAG_SG)
		ret = ttm_sg_tt_alloc_page_directory(ttm);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	gfp_t gfp_mask;
	int i, ret;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;
	gfp_mask = mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page_gfp(swap_space, i,
							gfp_mask);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;

out_err:
	return ret;
}

/**
 * ttm_tt_swapout - swap out tt object
 *
 * @bdev: TTM device structure.
 * @ttm: The struct ttm_tt.
 * @gfp_flags: Flags to use for memory allocation.
 *
 * Swapout a TT object to a shmem_file, return number of pages swapped out or
 * negative error code.
 */
int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
		   gfp_t gfp_flags)
{
	loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i, ret;

	swap_storage = shmem_file_setup("ttm swap", size, 0);
	if (IS_ERR(swap_storage)) {
		pr_err("Failed allocating swap storage\n");
		return PTR_ERR(swap_storage);
	}

	swap_space = swap_storage->f_mapping;
	gfp_flags &= mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_flags);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;

	return ttm->num_pages;

out_err:
	fput(swap_storage);

	return ret;
}

static void ttm_tt_add_mapping(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	pgoff_t i;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = bdev->dev_mapping;
}

int ttm_tt_populate(struct ttm_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_add(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
	       atomic_long_read(&ttm_dma32_pages_allocated) >
	       ttm_dma32_pages_limit) {

		ret = ttm_global_swapout(ctx, GFP_KERNEL);
		if (ret == 0)
			break;
		if (ret < 0)
			goto error;
	}

	if (bdev->funcs->ttm_tt_populate)
		ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		goto error;

	ttm_tt_add_mapping(bdev, ttm);
	ttm->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED;
	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_tt_unpopulate(bdev, ttm);
			return ret;
		}
	}

	return 0;

error:
	if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_tt_populate);

static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;
	struct page **page = ttm->pages;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		(*page)->mapping = NULL;
		(*page++)->index = 0;
	}
}

void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	ttm_tt_clear_mapping(ttm);
	if (bdev->funcs->ttm_tt_unpopulate)
		bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_free(&bdev->pool, ttm);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
}

#ifdef CONFIG_DEBUG_FS

/* Test the shrinker functions and dump the result */
static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct ttm_operation_ctx ctx = { false, false };

	seq_printf(m, "%d\n", ttm_global_swapout(&ctx, GFP_KERNEL));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);

#endif


/*
 * ttm_tt_mgr_init - register with the MM shrinker
 *
 * Register with the MM shrinker for swapping out BOs.
 */
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_tt_debugfs_shrink_fops);
#endif

	if (!ttm_pages_limit)
		ttm_pages_limit = num_pages;

	if (!ttm_dma32_pages_limit)
		ttm_dma32_pages_limit = num_dma32_pages;
}

static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
				       struct dma_buf_map *dmap,
				       pgoff_t i)
{
	struct ttm_kmap_iter_tt *iter_tt =
		container_of(iter, typeof(*iter_tt), base);

	dma_buf_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
							 iter_tt->prot));
}

static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
					 struct dma_buf_map *map)
{
	kunmap_local(map->vaddr);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
	.map_local = ttm_kmap_iter_tt_map_local,
	.unmap_local = ttm_kmap_iter_tt_unmap_local,
	.maps_tt = true,
};

/**
 * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
 * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
 * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
		      struct ttm_tt *tt)
{
	iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
	iter_tt->tt = tt;
	if (tt)
		iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
	else
		iter_tt->prot = PAGE_KERNEL;

	return &iter_tt->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_tt_init);