/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_dmem.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include "nouveau_bo.h"
#include "nouveau_svm.h"

#include <nvif/class.h>
#include <nvif/object.h>
#include <nvif/push906f.h>
#include <nvif/if000c.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

#include <nvhw/class/cla0b5.h>

#include <linux/sched/mm.h>
#include <linux/hmm.h>

/*
 * FIXME: this is ugly. Right now we are using TTM to allocate vram and we
 * pin it in vram while it is in use. We likely want to overhaul memory
 * management for nouveau to be more page-like (not necessarily with the
 * system page size, but a bigger page size) at the lowest level, and have
 * some shim layer on top that would provide the same functionality as TTM.
 */
#define DMEM_CHUNK_SIZE (2UL << 20)
#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)

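/*
 * Memory apertures the copy engine can target: a GPU virtual address,
 * local video memory, or host (system) memory.
 */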
enum nouveau_aper {
	NOUVEAU_APER_VIRT,
	NOUVEAU_APER_VRAM,
	NOUVEAU_APER_HOST,
};

typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
				      enum nouveau_aper, u64 dst_addr,
				      enum nouveau_aper, u64 src_addr);
typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
				    enum nouveau_aper, u64 dst_addr);

struct nouveau_dmem_chunk {
	struct list_head list;
	struct nouveau_bo *bo;
	struct nouveau_drm *drm;
	unsigned long callocated;
	struct dev_pagemap pagemap;
};

struct nouveau_dmem_migrate {
	nouveau_migrate_copy_t copy_func;
	nouveau_clear_page_t clear_func;
	struct nouveau_channel *chan;
};

struct nouveau_dmem {
	struct nouveau_drm *drm;
	struct nouveau_dmem_migrate migrate;
	struct list_head chunks;
	struct mutex mutex;
	struct page *free_pages;
	spinlock_t lock;
};

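/*
 * Device-private struct pages are created by memremap_pages() on the
 * chunk's pagemap, so the owning chunk can be recovered from
 * page->pgmap with container_of().
 */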
static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
{
	return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
}

static struct nouveau_drm *page_to_drm(struct page *page)
{
	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);

	return chunk->drm;
}

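/*
 * Return the VRAM address backing a device-private page: the page's
 * offset within the chunk's reserved physical range, added to the
 * chunk's buffer-object offset in VRAM.
 */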
unsigned long nouveau_dmem_page_addr(struct page *page)
{
	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
	unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
				chunk->pagemap.res.start;

	return chunk->bo->offset + off;
}

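/*
 * page_free() callback for the chunk's dev_pagemap: push the page onto
 * the per-device free list (linked through zone_device_data) and drop
 * the chunk's allocation count.
 */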
static void nouveau_dmem_page_free(struct page *page)
{
	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
	struct nouveau_dmem *dmem = chunk->drm->dmem;

	spin_lock(&dmem->lock);
	page->zone_device_data = dmem->free_pages;
	dmem->free_pages = page;

	WARN_ON(!chunk->callocated);
	chunk->callocated--;
	/*
	 * FIXME: when chunk->callocated reaches 0 we should add the chunk to
	 * a reclaim list so that it can be freed in case of memory pressure.
	 */
	spin_unlock(&dmem->lock);
}

static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
{
	if (fence) {
		nouveau_fence_wait(*fence, true, false);
		nouveau_fence_unref(fence);
	} else {
		/*
		 * FIXME: wait for the channel to be IDLE before finalizing
		 * the hmem object.
		 */
	}
}

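/*
 * Copy a single device-private page back to system memory on CPU fault:
 * allocate and DMA-map a destination page, invalidate the SVM range so
 * the GPU stops using the old mapping, then copy VRAM -> host with the
 * copy engine.
 */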
static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
		struct vm_fault *vmf, struct migrate_vma *args,
		dma_addr_t *dma_addr)
{
	struct device *dev = drm->dev->dev;
	struct page *dpage, *spage;
	struct nouveau_svmm *svmm;

	spage = migrate_pfn_to_page(args->src[0]);
	if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
		return 0;

	dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
	if (!dpage)
		return VM_FAULT_SIGBUS;
	lock_page(dpage);

	*dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_addr))
		goto error_free_page;

	svmm = spage->zone_device_data;
	mutex_lock(&svmm->mutex);
	nouveau_svmm_invalidate(svmm, args->start, args->end);
	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
			NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage)))
		goto error_dma_unmap;
	mutex_unlock(&svmm->mutex);

	args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
	return 0;

error_dma_unmap:
	mutex_unlock(&svmm->mutex);
	dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
error_free_page:
	__free_page(dpage);
	return VM_FAULT_SIGBUS;
}

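/* migrate_to_ram() callback: handle a CPU fault on a device-private page. */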
static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
{
	struct nouveau_drm *drm = page_to_drm(vmf->page);
	struct nouveau_dmem *dmem = drm->dmem;
	struct nouveau_fence *fence;
	unsigned long src = 0, dst = 0;
	dma_addr_t dma_addr = 0;
	vm_fault_t ret;
	struct migrate_vma args = {
		.vma		= vmf->vma,
		.start		= vmf->address,
		.end		= vmf->address + PAGE_SIZE,
		.src		= &src,
		.dst		= &dst,
		.pgmap_owner	= drm->dev,
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
	};

	/*
	 * FIXME: what we really want is to find some heuristic to migrate
	 * more than just one page on CPU fault. When such a fault happens
	 * it is very likely that more of the surrounding pages will CPU
	 * fault too.
	 */
	if (migrate_vma_setup(&args) < 0)
		return VM_FAULT_SIGBUS;
	if (!args.cpages)
		return 0;

	ret = nouveau_dmem_fault_copy_one(drm, vmf, &args, &dma_addr);
	if (ret || dst == 0)
		goto done;

	nouveau_fence_new(dmem->migrate.chan, false, &fence);
	migrate_vma_pages(&args);
	nouveau_dmem_fence_done(&fence);
	dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
done:
	migrate_vma_finalize(&args);
	return ret;
}

static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
	.page_free		= nouveau_dmem_page_free,
	.migrate_to_ram		= nouveau_dmem_migrate_to_ram,
};

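/*
 * Allocate one DMEM_CHUNK_SIZE chunk: reserve a free physical address
 * range for the device-private pages, back it with a pinned VRAM buffer
 * object, register the pagemap, and seed the free-page list. The last
 * page of the chunk is handed straight back to the caller.
 */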
static int
nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
{
	struct nouveau_dmem_chunk *chunk;
	struct resource *res;
	struct page *page;
	void *ptr;
	unsigned long i, pfn_first;
	int ret;

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (chunk == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	/* Allocate unused physical address space for device private pages. */
	res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
				      "nouveau_dmem");
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto out_free;
	}

	chunk->drm = drm;
	chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
	chunk->pagemap.res = *res;
	chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
	chunk->pagemap.owner = drm->dev;

	ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
			     TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
			     &chunk->bo);
	if (ret)
		goto out_release;

	ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
	if (ret)
		goto out_bo_free;

	ptr = memremap_pages(&chunk->pagemap, numa_node_id());
	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		goto out_bo_unpin;
	}

	mutex_lock(&drm->dmem->mutex);
	list_add(&chunk->list, &drm->dmem->chunks);
	mutex_unlock(&drm->dmem->mutex);

	pfn_first = chunk->pagemap.res.start >> PAGE_SHIFT;
	page = pfn_to_page(pfn_first);
	spin_lock(&drm->dmem->lock);
	for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
		page->zone_device_data = drm->dmem->free_pages;
		drm->dmem->free_pages = page;
	}
	*ppage = page;
	chunk->callocated++;
	spin_unlock(&drm->dmem->lock);

	NV_INFO(drm, "DMEM: registered %ldMB of device memory\n",
		DMEM_CHUNK_SIZE >> 20);

	return 0;

out_bo_unpin:
	nouveau_bo_unpin(chunk->bo);
out_bo_free:
	nouveau_bo_ref(NULL, &chunk->bo);
out_release:
	release_mem_region(chunk->pagemap.res.start,
			   resource_size(&chunk->pagemap.res));
out_free:
	kfree(chunk);
out:
	return ret;
}

static struct page *
nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;
	struct page *page = NULL;
	int ret;

	spin_lock(&drm->dmem->lock);
	if (drm->dmem->free_pages) {
		page = drm->dmem->free_pages;
		drm->dmem->free_pages = page->zone_device_data;
		chunk = nouveau_page_to_chunk(page);
		chunk->callocated++;
		spin_unlock(&drm->dmem->lock);
	} else {
		spin_unlock(&drm->dmem->lock);
		ret = nouveau_dmem_chunk_alloc(drm, &page);
		if (ret)
			return NULL;
	}

	get_page(page);
	lock_page(page);
	return page;
}

static void
nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
{
	unlock_page(page);
	put_page(page);
}

void
nouveau_dmem_resume(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;
	int ret;

	if (drm->dmem == NULL)
		return;

	mutex_lock(&drm->dmem->mutex);
	list_for_each_entry(chunk, &drm->dmem->chunks, list) {
		ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
		/* FIXME handle pin failure */
		WARN_ON(ret);
	}
	mutex_unlock(&drm->dmem->mutex);
}

void
nouveau_dmem_suspend(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;

	if (drm->dmem == NULL)
		return;

	mutex_lock(&drm->dmem->mutex);
	list_for_each_entry(chunk, &drm->dmem->chunks, list)
		nouveau_bo_unpin(chunk->bo);
	mutex_unlock(&drm->dmem->mutex);
}

void
nouveau_dmem_fini(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk, *tmp;

	if (drm->dmem == NULL)
		return;

	mutex_lock(&drm->dmem->mutex);

	list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
		nouveau_bo_unpin(chunk->bo);
		nouveau_bo_ref(NULL, &chunk->bo);
		list_del(&chunk->list);
		memunmap_pages(&chunk->pagemap);
		release_mem_region(chunk->pagemap.res.start,
				   resource_size(&chunk->pagemap.res));
		kfree(chunk);
	}

	mutex_unlock(&drm->dmem->mutex);
}

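/*
 * Copy npages pages between apertures with the NVA0B5-class copy engine,
 * pushing one non-pipelined LAUNCH_DMA per call. Virtual addresses need
 * no physical-mode setup; VRAM and host addresses select LOCAL_FB or
 * COHERENT_SYSMEM respectively.
 */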
static int
nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
		    enum nouveau_aper dst_aper, u64 dst_addr,
		    enum nouveau_aper src_aper, u64 src_addr)
{
	struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
	u32 launch_dma = 0;
	int ret;

	ret = PUSH_WAIT(push, 13);
	if (ret)
		return ret;

	if (src_aper != NOUVEAU_APER_VIRT) {
		switch (src_aper) {
		case NOUVEAU_APER_VRAM:
			PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
				  NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, LOCAL_FB));
			break;
		case NOUVEAU_APER_HOST:
			PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
				  NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, COHERENT_SYSMEM));
			break;
		default:
			return -EINVAL;
		}

		launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, SRC_TYPE, PHYSICAL);
	}

	if (dst_aper != NOUVEAU_APER_VIRT) {
		switch (dst_aper) {
		case NOUVEAU_APER_VRAM:
			PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
				  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
			break;
		case NOUVEAU_APER_HOST:
			PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
				  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
			break;
		default:
			return -EINVAL;
		}

		launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);
	}

	PUSH_MTHD(push, NVA0B5, OFFSET_IN_UPPER,
		  NVVAL(NVA0B5, OFFSET_IN_UPPER, UPPER, upper_32_bits(src_addr)),

				OFFSET_IN_LOWER, lower_32_bits(src_addr),

				OFFSET_OUT_UPPER,
		  NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),

				OFFSET_OUT_LOWER, lower_32_bits(dst_addr),
				PITCH_IN, PAGE_SIZE,
				PITCH_OUT, PAGE_SIZE,
				LINE_LENGTH_IN, PAGE_SIZE,
				LINE_COUNT, npages);

	PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
		  NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
		  NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
		  NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
		  NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, TRUE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, FALSE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
	return 0;
}

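/*
 * Clear a destination range using the copy engine's remap unit: both
 * remap constants are zero, and with two 4-byte components written per
 * element the line length is presumably length >> 3 elements.
 */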
static int
nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
		     enum nouveau_aper dst_aper, u64 dst_addr)
{
	struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
	u32 launch_dma = 0;
	int ret;

	ret = PUSH_WAIT(push, 12);
	if (ret)
		return ret;

	switch (dst_aper) {
	case NOUVEAU_APER_VRAM:
		PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
			  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
		break;
	case NOUVEAU_APER_HOST:
		PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
			  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
		break;
	default:
		return -EINVAL;
	}

	launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);

	PUSH_MTHD(push, NVA0B5, SET_REMAP_CONST_A, 0,
				SET_REMAP_CONST_B, 0,

				SET_REMAP_COMPONENTS,
		  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_X, CONST_A) |
		  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_Y, CONST_B) |
		  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, COMPONENT_SIZE, FOUR) |
		  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, NUM_DST_COMPONENTS, TWO));

	PUSH_MTHD(push, NVA0B5, OFFSET_OUT_UPPER,
		  NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),

				OFFSET_OUT_LOWER, lower_32_bits(dst_addr));

	PUSH_MTHD(push, NVA0B5, LINE_LENGTH_IN, length >> 3);

	PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
		  NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
		  NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
		  NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
		  NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, FALSE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, TRUE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
	return 0;
}

static int
nouveau_dmem_migrate_init(struct nouveau_drm *drm)
{
	switch (drm->ttm.copy.oclass) {
	case PASCAL_DMA_COPY_A:
	case PASCAL_DMA_COPY_B:
	case  VOLTA_DMA_COPY_A:
	case TURING_DMA_COPY_A:
		drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
		drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;
		drm->dmem->migrate.chan = drm->ttm.chan;
		return 0;
	default:
		break;
	}
	return -ENODEV;
}

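/*
 * Set up device-private memory for a GPU: allocate the nouveau_dmem
 * tracking structure and hook up the copy-engine helpers. Chunks are
 * allocated lazily, on first page allocation.
 */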
void
nouveau_dmem_init(struct nouveau_drm *drm)
{
	int ret;

	/* This only makes sense on PASCAL or newer. */
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
		return;

	if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
		return;

	drm->dmem->drm = drm;
	mutex_init(&drm->dmem->mutex);
	INIT_LIST_HEAD(&drm->dmem->chunks);
	spin_lock_init(&drm->dmem->lock);

	/* Initialize migration dma helpers before registering memory */
	ret = nouveau_dmem_migrate_init(drm);
	if (ret) {
		kfree(drm->dmem);
		drm->dmem = NULL;
	}
}

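/*
 * Migrate one system-memory page into a freshly allocated device-private
 * page. Sources that were never populated (no struct page) are handled
 * by clearing the destination instead of copying.
 */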
static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
		struct nouveau_svmm *svmm, unsigned long src,
		dma_addr_t *dma_addr, u64 *pfn)
{
	struct device *dev = drm->dev->dev;
	struct page *dpage, *spage;
	unsigned long paddr;

	spage = migrate_pfn_to_page(src);
	if (!(src & MIGRATE_PFN_MIGRATE))
		goto out;

	dpage = nouveau_dmem_page_alloc_locked(drm);
	if (!dpage)
		goto out;

	paddr = nouveau_dmem_page_addr(dpage);
	if (spage) {
		*dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, *dma_addr))
			goto out_free_page;
		if (drm->dmem->migrate.copy_func(drm, 1,
			NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
			goto out_dma_unmap;
	} else {
		*dma_addr = DMA_MAPPING_ERROR;
		if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
			NOUVEAU_APER_VRAM, paddr))
			goto out_free_page;
	}

	dpage->zone_device_data = svmm;
	*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
	if (src & MIGRATE_PFN_WRITE)
		*pfn |= NVIF_VMM_PFNMAP_V0_W;
	return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;

out_dma_unmap:
	dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
out_free_page:
	nouveau_dmem_page_free_locked(drm, dpage);
out:
	*pfn = NVIF_VMM_PFNMAP_V0_NONE;
	return 0;
}

static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
		struct nouveau_svmm *svmm, struct migrate_vma *args,
		dma_addr_t *dma_addrs, u64 *pfns)
{
	struct nouveau_fence *fence;
	unsigned long addr = args->start, nr_dma = 0, i;

	for (i = 0; addr < args->end; i++) {
		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, svmm,
				args->src[i], dma_addrs + nr_dma, pfns + i);
		if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
			nr_dma++;
		addr += PAGE_SIZE;
	}

	nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
	migrate_vma_pages(args);
	nouveau_dmem_fence_done(&fence);
	nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);

	while (nr_dma--) {
		dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
				DMA_BIDIRECTIONAL);
	}
	migrate_vma_finalize(args);
}

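/*
 * Migrate an address range into VRAM in batches of at most
 * SG_MAX_SINGLE_ALLOC pages; each batch is copied and mapped into the
 * GPU page tables by nouveau_dmem_migrate_chunk().
 */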
int
nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
			 struct nouveau_svmm *svmm,
			 struct vm_area_struct *vma,
			 unsigned long start,
			 unsigned long end)
{
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
	dma_addr_t *dma_addrs;
	struct migrate_vma args = {
		.vma		= vma,
		.start		= start,
		.pgmap_owner	= drm->dev,
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};
	unsigned long i;
	u64 *pfns;
	int ret = -ENOMEM;

	if (drm->dmem == NULL)
		return -ENODEV;

	args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
	if (!args.src)
		goto out;
	args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
	if (!args.dst)
		goto out_free_src;

	dma_addrs = kmalloc_array(max, sizeof(*dma_addrs), GFP_KERNEL);
	if (!dma_addrs)
		goto out_free_dst;

	pfns = nouveau_pfns_alloc(max);
	if (!pfns)
		goto out_free_dma;

	for (i = 0; i < npages; i += max) {
		/* Advance in batches of at most max pages, clamped to end. */
		args.end = min(end, args.start + (max << PAGE_SHIFT));
		ret = migrate_vma_setup(&args);
		if (ret)
			goto out_free_pfns;

		if (args.cpages)
			nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
						   pfns);
		args.start = args.end;
	}

	ret = 0;
out_free_pfns:
	nouveau_pfns_free(pfns);
out_free_dma:
	kfree(dma_addrs);
out_free_dst:
	kfree(args.dst);
out_free_src:
	kfree(args.src);
out:
	return ret;
}