/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "drm_cache.h"
#include "drm_mem_util.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_page_alloc.h"

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
}

static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
	ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
					    sizeof(*ttm->dma_address));
}

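/*
 * Switch the caching attribute of a single page in the kernel linear map.
 * Only x86 needs this; the non-x86 variant below is a no-op. Highmem pages
 * have no linear-map alias and are skipped.
 */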
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

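/*
 * Translate the caching bits of a TTM placement (TTM_PL_FLAG_WC,
 * TTM_PL_FLAG_UNCACHED, default cached) into a ttm_caching_state and
 * apply it to the whole ttm.
 */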
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

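/*
 * Tear down a ttm: unbind it if necessary, let the driver release its
 * backing pages, drop any non-persistent swap storage and finally call
 * the backend destroy hook.
 */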
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (unlikely(ttm == NULL))
		return;

	if (ttm->state == tt_bound) {
		ttm_tt_unbind(ttm);
	}

	if (likely(ttm->pages != NULL)) {
		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags,
		struct page *dummy_read_page)
{
	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
		    unsigned long size, uint32_t page_flags,
		    struct page *dummy_read_page)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (!ttm->pages || !ttm_dma->dma_address) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	drm_free_large(ttm->pages);
	ttm->pages = NULL;
	drm_free_large(ttm_dma->dma_address);
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

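/*
 * Bind a ttm to the aperture/GTT region described by bo_mem. The pages are
 * populated through the driver's ttm_tt_populate hook first, then handed to
 * the backend bind hook; on success the ttm moves to the tt_bound state.
 */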
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

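/*
 * Copy the contents of the shmem swap storage back into the (already
 * populated) ttm pages and drop the swap object unless it is persistent.
 */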
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page(swap_space, i);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page);
		to_virtual = kmap_atomic(to_page);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual);
		kunmap_atomic(from_virtual);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	return ret;
}

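/*
 * Copy the ttm pages into a shmem object (a freshly created one unless the
 * caller supplies persistent storage), release the backing pages and mark
 * the ttm as swapped out.
 */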
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistent_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = shmem_read_mapping_page(swap_space, i);
		if (unlikely(IS_ERR(to_page))) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		preempt_disable();
		from_virtual = kmap_atomic(from_page);
		to_virtual = kmap_atomic(to_page);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual);
		kunmap_atomic(from_virtual);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_set_memory.h>

/**
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	if (bdev->no_retry)
		page_flags |= TTM_PAGE_FLAG_NO_RETRY;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_PAGE_FLAG_SG;
		break;
	default:
		bo->ttm = NULL;
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	return 0;
}

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
				    GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;
	return 0;
}

static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
					sizeof(*ttm->ttm.pages) +
					sizeof(*ttm->dma_address),
					GFP_KERNEL | __GFP_ZERO);
	if (!ttm->ttm.pages)
		return -ENOMEM;
	ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->dma_address)
		return -ENOMEM;
	return 0;
}

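/*
 * Switch the kernel linear-map caching attribute of a single page via the
 * ttm_set_memory helpers. Highmem pages have no linear-map alias and are
 * skipped.
 */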
static int ttm_tt_set_page_caching(struct page *p,
				   enum ttm_caching_state c_old,
				   enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = ttm_set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = ttm_set_pages_wc(p, 1);
	else if (c_new == tt_uncached)
		ret = ttm_set_pages_uc(p, 1);

	return ret;
}

/*
 * Change caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

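/*
 * Tear down a ttm: unbind it, release its backing pages, drop any
 * non-persistent swap storage and call the backend destroy hook.
 */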
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (ttm == NULL)
		return;

	ttm_tt_unbind(ttm);

	if (ttm->state == tt_unbound)
		ttm_tt_unpopulate(ttm);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}

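/*
 * Common field initialization shared by ttm_tt_init, ttm_dma_tt_init and
 * ttm_sg_tt_init; the ttm is sized after the buffer object it will back.
 */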
void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
			uint32_t page_flags)
{
	ttm->bdev = bo->bdev;
	ttm->num_pages = bo->num_pages;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags)
{
	ttm_tt_init_fields(ttm, bo, page_flags);

	if (ttm_tt_alloc_page_directory(ttm)) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	kvfree(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		    uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

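/*
 * Like ttm_dma_tt_init, but for SG ttms only the DMA address array is
 * allocated; the pages themselves are described by the buffer object's
 * sg table (ttm->sg).
 */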
int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		   uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (page_flags & TTM_PAGE_FLAG_SG)
		ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (ret) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

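/*
 * Free the page directory. The page and DMA address arrays may share a
 * single allocation (ttm_dma_tt_init) or the DMA addresses may stand
 * alone (ttm_sg_tt_init), so free whichever pointer owns the memory.
 */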
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm_dma->dma_address);
	ttm->pages = NULL;
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
		struct ttm_operation_ctx *ctx)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm_tt_populate(ttm, ctx);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

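/*
 * Copy the contents of the shmem swap storage back into the ttm pages.
 * TTM_PAGE_FLAG_NO_RETRY is honoured by adding __GFP_RETRY_MAYFAIL so the
 * shmem allocations may fail instead of triggering the OOM killer. The
 * swap storage is dropped afterwards unless it is persistent.
 */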
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		gfp_t gfp_mask = mapping_gfp_mask(swap_space);

		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
		from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);

		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL))
			goto out_err;

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	return ret;
}

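/*
 * Copy the ttm pages into a shmem object (newly created unless persistent
 * storage is supplied), unpopulate the ttm and mark it as swapped out.
 */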
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (IS_ERR(swap_storage)) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
	} else {
		swap_storage = persistent_swap_storage;
	}

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		gfp_t gfp_mask = mapping_gfp_mask(swap_space);

		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);

		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}

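/*
 * Associate the ttm pages with the device address space. Skipped for SG
 * ttms, which do not own their pages.
 */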
static void ttm_tt_add_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = ttm->bdev->dev_mapping;
}

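/*
 * Populate the ttm with backing pages. The driver's ttm_tt_populate hook is
 * preferred; drivers without one fall back to the generic TTM page pool.
 * On success the pages are tagged with the device mapping.
 */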
int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (ttm->bdev->driver->ttm_tt_populate)
		ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
	else
		ret = ttm_pool_populate(ttm, ctx);
	if (!ret)
		ttm_tt_add_mapping(ttm);
	return ret;
}

static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;
	struct page **page = ttm->pages;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		(*page)->mapping = NULL;
		(*page++)->index = 0;
	}
}

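/*
 * Release the backing pages again, clearing the mapping information first
 * and using the driver hook when available, the generic pool otherwise.
 */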
void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	if (ttm->state == tt_unpopulated)
		return;

	ttm_tt_clear_mapping(ttm);
	if (ttm->bdev->driver->ttm_tt_unpopulate)
		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	else
		ttm_pool_unpopulate(ttm);
}