drivers/gpu/drm/nouveau/nouveau_dmem.c (v5.4)
  1/*
  2 * Copyright 2018 Red Hat Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 */
 22#include "nouveau_dmem.h"
 23#include "nouveau_drv.h"
 24#include "nouveau_chan.h"
 25#include "nouveau_dma.h"
 26#include "nouveau_mem.h"
 27#include "nouveau_bo.h"
 28
 29#include <nvif/class.h>
 30#include <nvif/object.h>
 31#include <nvif/if500b.h>
 32#include <nvif/if900b.h>
 33
 34#include <linux/sched/mm.h>
 35#include <linux/hmm.h>
 36
 37/*
 38 * FIXME: this is ugly; right now we are using TTM to allocate vram and we
 39 * pin it in vram while in use. We likely want to overhaul memory management
 40 * for nouveau to be more page-like (not necessarily with the system page
 41 * size but a bigger page size) at the lowest level and have some shim layer
 42 * on top that would provide the same functionality as TTM.
 43 */
 44#define DMEM_CHUNK_SIZE (2UL << 20)
 45#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)
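/*
 * Device memory is carved out in fixed 2 MiB chunks; with a typical 4 KiB
 * PAGE_SIZE that works out to DMEM_CHUNK_NPAGES = (2UL << 20) >> 12 = 512
 * device pages tracked per chunk.
 */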
 46
 47enum nouveau_aper {
 48	NOUVEAU_APER_VIRT,
 49	NOUVEAU_APER_VRAM,
 50	NOUVEAU_APER_HOST,
 51};
 52
 53typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
 54				      enum nouveau_aper, u64 dst_addr,
 55				      enum nouveau_aper, u64 src_addr);
 56
 57struct nouveau_dmem_chunk {
 58	struct list_head list;
 59	struct nouveau_bo *bo;
 60	struct nouveau_drm *drm;
 61	unsigned long pfn_first;
 62	unsigned long callocated;
 63	unsigned long bitmap[BITS_TO_LONGS(DMEM_CHUNK_NPAGES)];
 64	spinlock_t lock;
 65};
 66
 67struct nouveau_dmem_migrate {
 68	nouveau_migrate_copy_t copy_func;
 69	struct nouveau_channel *chan;
 70};
 71
 72struct nouveau_dmem {
 73	struct nouveau_drm *drm;
 74	struct dev_pagemap pagemap;
 75	struct nouveau_dmem_migrate migrate;
 76	struct list_head chunk_free;
 77	struct list_head chunk_full;
 78	struct list_head chunk_empty;
 79	struct mutex mutex;
 80};
 81
 82static inline struct nouveau_dmem *page_to_dmem(struct page *page)
 83{
 84	return container_of(page->pgmap, struct nouveau_dmem, pagemap);
 85}
 86
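/*
 * Translate a device-private struct page into the physical VRAM address
 * backing it: the page's index within its chunk, scaled by PAGE_SIZE, is
 * added to the chunk's buffer-object offset in VRAM.
 */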
 87static unsigned long nouveau_dmem_page_addr(struct page *page)
 88{
 89	struct nouveau_dmem_chunk *chunk = page->zone_device_data;
 90	unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
 91
 92	return (idx << PAGE_SHIFT) + chunk->bo->bo.offset;
 93}
 94
 95static void nouveau_dmem_page_free(struct page *page)
 96{
 97	struct nouveau_dmem_chunk *chunk = page->zone_device_data;
 98	unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
 99
100	/*
101	 * FIXME:
102	 *
103	 * This is really a bad example; we need to overhaul nouveau memory
104	 * management to be more page focused and allow a lighter locking
105	 * scheme to be used in the process.
106	 */
107	spin_lock(&chunk->lock);
108	clear_bit(idx, chunk->bitmap);
109	WARN_ON(!chunk->callocated);
110	chunk->callocated--;
111	/*
112	 * FIXME: when chunk->callocated reaches 0 we should add the chunk to
113	 * a reclaim list so that it can be freed in case of memory pressure.
114	 */
115	spin_unlock(&chunk->lock);
116}
117
118static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
119{
120	if (fence) {
121		nouveau_fence_wait(*fence, true, false);
122		nouveau_fence_unref(fence);
123	} else {
124		/*
125		 * FIXME: wait for the channel to be IDLE before finalizing
126		 * the hmem object.
127		 */
128	}
129}
130
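/*
 * CPU-fault path: allocate an anonymous destination page, DMA-map it, and
 * use the copy engine to move the data from VRAM back to host memory. The
 * caller (nouveau_dmem_migrate_to_ram) drives the surrounding migrate_vma
 * setup/pages/finalize sequence.
 */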
131static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
132		struct vm_fault *vmf, struct migrate_vma *args,
133		dma_addr_t *dma_addr)
134{
135	struct device *dev = drm->dev->dev;
136	struct page *dpage, *spage;
137
138	spage = migrate_pfn_to_page(args->src[0]);
139	if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
140		return 0;
141
142	dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
143	if (!dpage)
144		return VM_FAULT_SIGBUS;
145	lock_page(dpage);
146
147	*dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
148	if (dma_mapping_error(dev, *dma_addr))
149		goto error_free_page;
150
151	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
152			NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage)))
153		goto error_dma_unmap;
154
155	args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
156	return 0;
157
158error_dma_unmap:
159	dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
160error_free_page:
161	__free_page(dpage);
162	return VM_FAULT_SIGBUS;
163}
164
165static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
166{
167	struct nouveau_dmem *dmem = page_to_dmem(vmf->page);
168	struct nouveau_drm *drm = dmem->drm;
169	struct nouveau_fence *fence;
170	unsigned long src = 0, dst = 0;
171	dma_addr_t dma_addr = 0;
172	vm_fault_t ret;
173	struct migrate_vma args = {
174		.vma		= vmf->vma,
175		.start		= vmf->address,
176		.end		= vmf->address + PAGE_SIZE,
177		.src		= &src,
178		.dst		= &dst,
179	};
180
181	/*
182	 * FIXME: what we really want is to find some heuristic to migrate more
183	 * than just one page on CPU fault. When such a fault happens it is very
184	 * likely that more surrounding pages will CPU fault too.
185	 */
186	if (migrate_vma_setup(&args) < 0)
187		return VM_FAULT_SIGBUS;
188	if (!args.cpages)
189		return 0;
190
191	ret = nouveau_dmem_fault_copy_one(drm, vmf, &args, &dma_addr);
192	if (ret || dst == 0)
193		goto done;
194
195	nouveau_fence_new(dmem->migrate.chan, false, &fence);
196	migrate_vma_pages(&args);
197	nouveau_dmem_fence_done(&fence);
198	dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
199done:
200	migrate_vma_finalize(&args);
201	return ret;
202}
203
204static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
205	.page_free		= nouveau_dmem_page_free,
206	.migrate_to_ram		= nouveau_dmem_migrate_to_ram,
207};
208
209static int
210nouveau_dmem_chunk_alloc(struct nouveau_drm *drm)
211{
212	struct nouveau_dmem_chunk *chunk;
213	int ret;
214
215	if (drm->dmem == NULL)
216		return -EINVAL;
217
218	mutex_lock(&drm->dmem->mutex);
219	chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
220					 struct nouveau_dmem_chunk,
221					 list);
222	if (chunk == NULL) {
223		mutex_unlock(&drm->dmem->mutex);
224		return -ENOMEM;
225	}
226
227	list_del(&chunk->list);
228	mutex_unlock(&drm->dmem->mutex);
229
230	ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
231			     TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
232			     &chunk->bo);
233	if (ret)
234		goto out;
235
236	ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
237	if (ret) {
238		nouveau_bo_ref(NULL, &chunk->bo);
239		goto out;
240	}
241
242	bitmap_zero(chunk->bitmap, DMEM_CHUNK_NPAGES);
243	spin_lock_init(&chunk->lock);
244
245out:
246	mutex_lock(&drm->dmem->mutex);
247	if (chunk->bo)
248		list_add(&chunk->list, &drm->dmem->chunk_empty);
249	else
250		list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
251	mutex_unlock(&drm->dmem->mutex);
252
253	return ret;
254}
255
256static struct nouveau_dmem_chunk *
257nouveau_dmem_chunk_first_free_locked(struct nouveau_drm *drm)
258{
259	struct nouveau_dmem_chunk *chunk;
260
261	chunk = list_first_entry_or_null(&drm->dmem->chunk_free,
262					 struct nouveau_dmem_chunk,
263					 list);
264	if (chunk)
265		return chunk;
266
267	chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
268					 struct nouveau_dmem_chunk,
269					 list);
270	if (chunk->bo)
271		return chunk;
272
273	return NULL;
274}
275
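/*
 * Simple first-fit allocator over the per-chunk bitmaps: walk the free and
 * empty chunk lists, set bits for the device pages handed out, and bump
 * chunk->callocated. If at least one page was allocated before a new chunk
 * allocation fails, the partial result is still returned as success.
 */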
276static int
277nouveau_dmem_pages_alloc(struct nouveau_drm *drm,
278			 unsigned long npages,
279			 unsigned long *pages)
280{
281	struct nouveau_dmem_chunk *chunk;
282	unsigned long c;
283	int ret;
284
285	memset(pages, 0xff, npages * sizeof(*pages));
286
287	mutex_lock(&drm->dmem->mutex);
288	for (c = 0; c < npages;) {
289		unsigned long i;
290
291		chunk = nouveau_dmem_chunk_first_free_locked(drm);
292		if (chunk == NULL) {
293			mutex_unlock(&drm->dmem->mutex);
294			ret = nouveau_dmem_chunk_alloc(drm);
295			if (ret) {
296				if (c)
297					return 0;
298				return ret;
299			}
300			mutex_lock(&drm->dmem->mutex);
301			continue;
302		}
303
304		spin_lock(&chunk->lock);
305		i = find_first_zero_bit(chunk->bitmap, DMEM_CHUNK_NPAGES);
306		while (i < DMEM_CHUNK_NPAGES && c < npages) {
307			pages[c] = chunk->pfn_first + i;
308			set_bit(i, chunk->bitmap);
309			chunk->callocated++;
310			c++;
311
312			i = find_next_zero_bit(chunk->bitmap,
313					DMEM_CHUNK_NPAGES, i);
314		}
315		spin_unlock(&chunk->lock);
316	}
317	mutex_unlock(&drm->dmem->mutex);
318
319	return 0;
320}
321
322static struct page *
323nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
324{
325	unsigned long pfns[1];
326	struct page *page;
327	int ret;
328
329	/* FIXME: stop all the mismatched API ... */
330	ret = nouveau_dmem_pages_alloc(drm, 1, pfns);
331	if (ret)
332		return NULL;
333
334	page = pfn_to_page(pfns[0]);
335	get_page(page);
336	lock_page(page);
337	return page;
338}
339
340static void
341nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
342{
343	unlock_page(page);
344	put_page(page);
345}
346
347void
348nouveau_dmem_resume(struct nouveau_drm *drm)
349{
350	struct nouveau_dmem_chunk *chunk;
351	int ret;
352
353	if (drm->dmem == NULL)
354		return;
355
356	mutex_lock(&drm->dmem->mutex);
357	list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
358		ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
359		/* FIXME handle pin failure */
360		WARN_ON(ret);
361	}
362	list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
363		ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
364		/* FIXME handle pin failure */
365		WARN_ON(ret);
366	}
367	mutex_unlock(&drm->dmem->mutex);
368}
369
370void
371nouveau_dmem_suspend(struct nouveau_drm *drm)
372{
373	struct nouveau_dmem_chunk *chunk;
374
375	if (drm->dmem == NULL)
376		return;
377
378	mutex_lock(&drm->dmem->mutex);
379	list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
380		nouveau_bo_unpin(chunk->bo);
381	}
382	list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
383		nouveau_bo_unpin(chunk->bo);
384	}
385	mutex_unlock(&drm->dmem->mutex);
386}
387
388void
389nouveau_dmem_fini(struct nouveau_drm *drm)
390{
391	struct nouveau_dmem_chunk *chunk, *tmp;
392
393	if (drm->dmem == NULL)
394		return;
395
396	mutex_lock(&drm->dmem->mutex);
397
398	WARN_ON(!list_empty(&drm->dmem->chunk_free));
399	WARN_ON(!list_empty(&drm->dmem->chunk_full));
400
401	list_for_each_entry_safe (chunk, tmp, &drm->dmem->chunk_empty, list) {
402		if (chunk->bo) {
403			nouveau_bo_unpin(chunk->bo);
404			nouveau_bo_ref(NULL, &chunk->bo);
405		}
406		list_del(&chunk->list);
407		kfree(chunk);
408	}
409
410	mutex_unlock(&drm->dmem->mutex);
411}
412
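/*
 * The raw copy-engine method offsets pushed below (0x0260, 0x0264, 0x0400,
 * 0x0300) correspond to SET_SRC_PHYS_MODE, SET_DST_PHYS_MODE,
 * OFFSET_IN_UPPER and LAUNCH_DMA; the v6.8 version of this function further
 * down spells them out with the NVA0B5 symbolic names from
 * <nvhw/class/cla0b5.h>.
 */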
413static int
414nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
415		    enum nouveau_aper dst_aper, u64 dst_addr,
416		    enum nouveau_aper src_aper, u64 src_addr)
417{
418	struct nouveau_channel *chan = drm->dmem->migrate.chan;
419	u32 launch_dma = (1 << 9) /* MULTI_LINE_ENABLE. */ |
420			 (1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
421			 (1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
422			 (1 << 2) /* FLUSH_ENABLE_TRUE. */ |
423			 (2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
424	int ret;
425
426	ret = RING_SPACE(chan, 13);
427	if (ret)
428		return ret;
429
430	if (src_aper != NOUVEAU_APER_VIRT) {
431		switch (src_aper) {
432		case NOUVEAU_APER_VRAM:
433			BEGIN_IMC0(chan, NvSubCopy, 0x0260, 0);
434			break;
435		case NOUVEAU_APER_HOST:
436			BEGIN_IMC0(chan, NvSubCopy, 0x0260, 1);
437			break;
438		default:
439			return -EINVAL;
440		}
441		launch_dma |= 0x00001000; /* SRC_TYPE_PHYSICAL. */
442	}
443
444	if (dst_aper != NOUVEAU_APER_VIRT) {
445		switch (dst_aper) {
446		case NOUVEAU_APER_VRAM:
447			BEGIN_IMC0(chan, NvSubCopy, 0x0264, 0);
448			break;
449		case NOUVEAU_APER_HOST:
450			BEGIN_IMC0(chan, NvSubCopy, 0x0264, 1);
451			break;
452		default:
453			return -EINVAL;
454		}
455		launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */
456	}
457
458	BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
459	OUT_RING  (chan, upper_32_bits(src_addr));
460	OUT_RING  (chan, lower_32_bits(src_addr));
461	OUT_RING  (chan, upper_32_bits(dst_addr));
462	OUT_RING  (chan, lower_32_bits(dst_addr));
463	OUT_RING  (chan, PAGE_SIZE);
464	OUT_RING  (chan, PAGE_SIZE);
465	OUT_RING  (chan, PAGE_SIZE);
466	OUT_RING  (chan, npages);
467	BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
468	OUT_RING  (chan, launch_dma);
469	return 0;
470}
471
472static int
473nouveau_dmem_migrate_init(struct nouveau_drm *drm)
474{
475	switch (drm->ttm.copy.oclass) {
476	case PASCAL_DMA_COPY_A:
477	case PASCAL_DMA_COPY_B:
478	case  VOLTA_DMA_COPY_A:
479	case TURING_DMA_COPY_A:
480		drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
481		drm->dmem->migrate.chan = drm->ttm.chan;
482		return 0;
483	default:
484		break;
485	}
486	return -ENODEV;
487}
488
489void
490nouveau_dmem_init(struct nouveau_drm *drm)
491{
492	struct device *device = drm->dev->dev;
493	struct resource *res;
494	unsigned long i, size, pfn_first;
495	int ret;
496
497	/* This only makes sense on PASCAL or newer */
498	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
499		return;
500
501	if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
502		return;
503
504	drm->dmem->drm = drm;
505	mutex_init(&drm->dmem->mutex);
506	INIT_LIST_HEAD(&drm->dmem->chunk_free);
507	INIT_LIST_HEAD(&drm->dmem->chunk_full);
508	INIT_LIST_HEAD(&drm->dmem->chunk_empty);
509
510	size = ALIGN(drm->client.device.info.ram_user, DMEM_CHUNK_SIZE);
511
512	/* Initialize migration dma helpers before registering memory */
513	ret = nouveau_dmem_migrate_init(drm);
514	if (ret)
515		goto out_free;
516
517	/*
518	 * FIXME: we need some kind of policy to decide how much VRAM we
519	 * want to register with HMM. For now just register everything,
520	 * and later if we want to do things like overcommit then we
521	 * could revisit this.
522	 */
523	res = devm_request_free_mem_region(device, &iomem_resource, size);
524	if (IS_ERR(res))
525		goto out_free;
526	drm->dmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
527	drm->dmem->pagemap.res = *res;
528	drm->dmem->pagemap.ops = &nouveau_dmem_pagemap_ops;
529	if (IS_ERR(devm_memremap_pages(device, &drm->dmem->pagemap)))
530		goto out_free;
531
532	pfn_first = res->start >> PAGE_SHIFT;
533	for (i = 0; i < (size / DMEM_CHUNK_SIZE); ++i) {
534		struct nouveau_dmem_chunk *chunk;
535		struct page *page;
536		unsigned long j;
537
538		chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
539		if (chunk == NULL) {
540			nouveau_dmem_fini(drm);
541			return;
542		}
543
544		chunk->drm = drm;
545		chunk->pfn_first = pfn_first + (i * DMEM_CHUNK_NPAGES);
546		list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
547
548		page = pfn_to_page(chunk->pfn_first);
549		for (j = 0; j < DMEM_CHUNK_NPAGES; ++j, ++page)
550			page->zone_device_data = chunk;
551	}
552
553	NV_INFO(drm, "DMEM: registered %ldMB of device memory\n", size >> 20);
554	return;
555out_free:
556	kfree(drm->dmem);
557	drm->dmem = NULL;
558}
559
560static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
561		unsigned long src, dma_addr_t *dma_addr)
562{
563	struct device *dev = drm->dev->dev;
564	struct page *dpage, *spage;
565
566	spage = migrate_pfn_to_page(src);
567	if (!spage || !(src & MIGRATE_PFN_MIGRATE))
568		goto out;
569
570	dpage = nouveau_dmem_page_alloc_locked(drm);
571	if (!dpage)
572		return 0;
573
574	*dma_addr = dma_map_page(dev, spage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
575	if (dma_mapping_error(dev, *dma_addr))
576		goto out_free_page;
577
578	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM,
579			nouveau_dmem_page_addr(dpage), NOUVEAU_APER_HOST,
580			*dma_addr))
581		goto out_dma_unmap;
582
583	return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
584
585out_dma_unmap:
586	dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
587out_free_page:
588	nouveau_dmem_page_free_locked(drm, dpage);
589out:
590	return 0;
591}
592
593static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
594		struct migrate_vma *args, dma_addr_t *dma_addrs)
595{
596	struct nouveau_fence *fence;
597	unsigned long addr = args->start, nr_dma = 0, i;
598
599	for (i = 0; addr < args->end; i++) {
600		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->src[i],
601				dma_addrs + nr_dma);
602		if (args->dst[i])
603			nr_dma++;
604		addr += PAGE_SIZE;
605	}
606
607	nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
608	migrate_vma_pages(args);
609	nouveau_dmem_fence_done(&fence);
610
611	while (nr_dma--) {
612		dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
613				DMA_BIDIRECTIONAL);
614	}
615	/*
616	 * FIXME optimization: update the GPU page tables to point to the newly
617	 * migrated memory.
618	 */
619	migrate_vma_finalize(args);
620}
621
622int
623nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
624			 struct vm_area_struct *vma,
625			 unsigned long start,
626			 unsigned long end)
627{
628	unsigned long npages = (end - start) >> PAGE_SHIFT;
629	unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
630	dma_addr_t *dma_addrs;
631	struct migrate_vma args = {
632		.vma		= vma,
633		.start		= start,
634	};
635	unsigned long c, i;
636	int ret = -ENOMEM;
637
638	args.src = kcalloc(max, sizeof(args.src), GFP_KERNEL);
639	if (!args.src)
640		goto out;
641	args.dst = kcalloc(max, sizeof(args.dst), GFP_KERNEL);
642	if (!args.dst)
643		goto out_free_src;
644
645	dma_addrs = kmalloc_array(max, sizeof(*dma_addrs), GFP_KERNEL);
646	if (!dma_addrs)
647		goto out_free_dst;
648
649	for (i = 0; i < npages; i += c) {
650		c = min(SG_MAX_SINGLE_ALLOC, npages);
651		args.end = start + (c << PAGE_SHIFT);
652		ret = migrate_vma_setup(&args);
653		if (ret)
654			goto out_free_dma;
655
656		if (args.cpages)
657			nouveau_dmem_migrate_chunk(drm, &args, dma_addrs);
658		args.start = args.end;
659	}
660
661	ret = 0;
662out_free_dma:
663	kfree(dma_addrs);
664out_free_dst:
665	kfree(args.dst);
666out_free_src:
667	kfree(args.src);
668out:
669	return ret;
670}
671
672static inline bool
673nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
674{
675	return is_device_private_page(page) && drm->dmem == page_to_dmem(page);
676}
677
678void
679nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
680			 struct hmm_range *range)
681{
682	unsigned long i, npages;
683
684	npages = (range->end - range->start) >> PAGE_SHIFT;
685	for (i = 0; i < npages; ++i) {
686		struct page *page;
687		uint64_t addr;
688
689		page = hmm_device_entry_to_page(range, range->pfns[i]);
690		if (page == NULL)
691			continue;
692
693		if (!(range->pfns[i] & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
694			continue;
695		}
696
697		if (!nouveau_dmem_page(drm, page)) {
698			WARN(1, "Some unknown device memory !\n");
699			range->pfns[i] = 0;
700			continue;
701		}
702
703		addr = nouveau_dmem_page_addr(page);
704		range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
705		range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
706	}
707}
drivers/gpu/drm/nouveau/nouveau_dmem.c (v6.8)
  1/*
  2 * Copyright 2018 Red Hat Inc.
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice shall be included in
 12 * all copies or substantial portions of the Software.
 13 *
 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 20 * OTHER DEALINGS IN THE SOFTWARE.
 21 */
 22#include "nouveau_dmem.h"
 23#include "nouveau_drv.h"
 24#include "nouveau_chan.h"
 25#include "nouveau_dma.h"
 26#include "nouveau_mem.h"
 27#include "nouveau_bo.h"
 28#include "nouveau_svm.h"
 29
 30#include <nvif/class.h>
 31#include <nvif/object.h>
 32#include <nvif/push906f.h>
 33#include <nvif/if000c.h>
 34#include <nvif/if500b.h>
 35#include <nvif/if900b.h>
 36
 37#include <nvhw/class/cla0b5.h>
 38
 39#include <linux/sched/mm.h>
 40#include <linux/hmm.h>
 41#include <linux/memremap.h>
 42#include <linux/migrate.h>
 43
 44/*
 45 * FIXME: this is ugly; right now we are using TTM to allocate vram and we
 46 * pin it in vram while in use. We likely want to overhaul memory management
 47 * for nouveau to be more page-like (not necessarily with the system page
 48 * size but a bigger page size) at the lowest level and have some shim layer
 49 * on top that would provide the same functionality as TTM.
 50 */
 51#define DMEM_CHUNK_SIZE (2UL << 20)
 52#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)
 53
 54enum nouveau_aper {
 55	NOUVEAU_APER_VIRT,
 56	NOUVEAU_APER_VRAM,
 57	NOUVEAU_APER_HOST,
 58};
 59
 60typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
 61				      enum nouveau_aper, u64 dst_addr,
 62				      enum nouveau_aper, u64 src_addr);
 63typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
 64				      enum nouveau_aper, u64 dst_addr);
 65
 66struct nouveau_dmem_chunk {
 67	struct list_head list;
 68	struct nouveau_bo *bo;
 69	struct nouveau_drm *drm;
 70	unsigned long callocated;
 71	struct dev_pagemap pagemap;
 72};
 73
 74struct nouveau_dmem_migrate {
 75	nouveau_migrate_copy_t copy_func;
 76	nouveau_clear_page_t clear_func;
 77	struct nouveau_channel *chan;
 78};
 79
 80struct nouveau_dmem {
 81	struct nouveau_drm *drm;
 82	struct nouveau_dmem_migrate migrate;
 83	struct list_head chunks;
 84	struct mutex mutex;
 85	struct page *free_pages;
 86	spinlock_t lock;
 87};
 88
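/*
 * Unlike the v5.4 code above, each chunk now owns its own dev_pagemap, so a
 * device-private page's pgmap pointer leads straight back to its chunk via
 * container_of().
 */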
 89static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
 90{
 91	return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
 92}
 93
 94static struct nouveau_drm *page_to_drm(struct page *page)
 95{
 96	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
 97
 98	return chunk->drm;
 99}
100
101unsigned long nouveau_dmem_page_addr(struct page *page)
102{
103	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
104	unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
105				chunk->pagemap.range.start;
106
107	return chunk->bo->offset + off;
108}
109
110static void nouveau_dmem_page_free(struct page *page)
111{
112	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
113	struct nouveau_dmem *dmem = chunk->drm->dmem;
114
115	spin_lock(&dmem->lock);
116	page->zone_device_data = dmem->free_pages;
117	dmem->free_pages = page;
118
119	WARN_ON(!chunk->callocated);
120	chunk->callocated--;
121	/*
122	 * FIXME: when chunk->callocated reaches 0 we should add the chunk to
123	 * a reclaim list so that it can be freed in case of memory pressure.
124	 */
125	spin_unlock(&dmem->lock);
126}
127
128static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
129{
130	if (fence) {
131		nouveau_fence_wait(*fence, true, false);
132		nouveau_fence_unref(fence);
133	} else {
134		/*
135		 * FIXME: wait for the channel to be IDLE before finalizing
136		 * the hmem object.
137		 */
138	}
139}
140
141static int nouveau_dmem_copy_one(struct nouveau_drm *drm, struct page *spage,
142				struct page *dpage, dma_addr_t *dma_addr)
143{
144	struct device *dev = drm->dev->dev;
145
146	lock_page(dpage);
147
148	*dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
149	if (dma_mapping_error(dev, *dma_addr))
150		return -EIO;
151
152	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
153					 NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage))) {
154		dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
155		return -EIO;
156	}
157
158	return 0;
159}
160
161static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
162{
163	struct nouveau_drm *drm = page_to_drm(vmf->page);
164	struct nouveau_dmem *dmem = drm->dmem;
165	struct nouveau_fence *fence;
166	struct nouveau_svmm *svmm;
167	struct page *spage, *dpage;
168	unsigned long src = 0, dst = 0;
169	dma_addr_t dma_addr = 0;
170	vm_fault_t ret = 0;
171	struct migrate_vma args = {
172		.vma		= vmf->vma,
173		.start		= vmf->address,
174		.end		= vmf->address + PAGE_SIZE,
175		.src		= &src,
176		.dst		= &dst,
177		.pgmap_owner	= drm->dev,
178		.fault_page	= vmf->page,
179		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
180	};
181
182	/*
183	 * FIXME: what we really want is to find some heuristic to migrate more
184	 * than just one page on CPU fault. When such a fault happens it is very
185	 * likely that more surrounding pages will CPU fault too.
186	 */
187	if (migrate_vma_setup(&args) < 0)
188		return VM_FAULT_SIGBUS;
189	if (!args.cpages)
190		return 0;
191
192	spage = migrate_pfn_to_page(src);
193	if (!spage || !(src & MIGRATE_PFN_MIGRATE))
194		goto done;
195
196	dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
197	if (!dpage)
198		goto done;
199
200	dst = migrate_pfn(page_to_pfn(dpage));
201
202	svmm = spage->zone_device_data;
203	mutex_lock(&svmm->mutex);
204	nouveau_svmm_invalidate(svmm, args.start, args.end);
205	ret = nouveau_dmem_copy_one(drm, spage, dpage, &dma_addr);
206	mutex_unlock(&svmm->mutex);
207	if (ret) {
208		ret = VM_FAULT_SIGBUS;
209		goto done;
210	}
211
212	nouveau_fence_new(&fence, dmem->migrate.chan);
213	migrate_vma_pages(&args);
214	nouveau_dmem_fence_done(&fence);
215	dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
216done:
217	migrate_vma_finalize(&args);
218	return ret;
219}
220
221static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
222	.page_free		= nouveau_dmem_page_free,
223	.migrate_to_ram		= nouveau_dmem_migrate_to_ram,
224};
225
226static int
227nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
228{
229	struct nouveau_dmem_chunk *chunk;
230	struct resource *res;
231	struct page *page;
232	void *ptr;
233	unsigned long i, pfn_first;
234	int ret;
235
236	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
237	if (chunk == NULL) {
238		ret = -ENOMEM;
239		goto out;
240	}
241
242	/* Allocate unused physical address space for device private pages. */
243	res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
244				      "nouveau_dmem");
245	if (IS_ERR(res)) {
246		ret = PTR_ERR(res);
247		goto out_free;
248	}
249
250	chunk->drm = drm;
251	chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
252	chunk->pagemap.range.start = res->start;
253	chunk->pagemap.range.end = res->end;
254	chunk->pagemap.nr_range = 1;
255	chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
256	chunk->pagemap.owner = drm->dev;
257
258	ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
259			     NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL,
260			     &chunk->bo);
261	if (ret)
262		goto out_release;
263
264	ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
265	if (ret)
266		goto out_bo_free;
267
268	ptr = memremap_pages(&chunk->pagemap, numa_node_id());
269	if (IS_ERR(ptr)) {
270		ret = PTR_ERR(ptr);
271		goto out_bo_unpin;
272	}
273
274	mutex_lock(&drm->dmem->mutex);
275	list_add(&chunk->list, &drm->dmem->chunks);
276	mutex_unlock(&drm->dmem->mutex);
277
278	pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT;
279	page = pfn_to_page(pfn_first);
280	spin_lock(&drm->dmem->lock);
281	for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
282		page->zone_device_data = drm->dmem->free_pages;
283		drm->dmem->free_pages = page;
284	}
285	*ppage = page;
286	chunk->callocated++;
287	spin_unlock(&drm->dmem->lock);
288
289	NV_INFO(drm, "DMEM: registered %ldMB of device memory\n",
290		DMEM_CHUNK_SIZE >> 20);
291
292	return 0;
293
294out_bo_unpin:
295	nouveau_bo_unpin(chunk->bo);
296out_bo_free:
297	nouveau_bo_ref(NULL, &chunk->bo);
298out_release:
299	release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range));
300out_free:
301	kfree(chunk);
302out:
303	return ret;
304}
305
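/*
 * Page allocation in v6.8: pop a page from the singly linked free list
 * threaded through page->zone_device_data under dmem->lock, or fall back to
 * allocating a whole new 2 MiB chunk when the list is empty.
 */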
306static struct page *
307nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
308{
309	struct nouveau_dmem_chunk *chunk;
310	struct page *page = NULL;
311	int ret;
312
313	spin_lock(&drm->dmem->lock);
314	if (drm->dmem->free_pages) {
315		page = drm->dmem->free_pages;
316		drm->dmem->free_pages = page->zone_device_data;
317		chunk = nouveau_page_to_chunk(page);
318		chunk->callocated++;
319		spin_unlock(&drm->dmem->lock);
320	} else {
321		spin_unlock(&drm->dmem->lock);
322		ret = nouveau_dmem_chunk_alloc(drm, &page);
323		if (ret)
324			return NULL;
325	}
326
327	zone_device_page_init(page);
328	return page;
329}
330
331static void
332nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
333{
334	unlock_page(page);
335	put_page(page);
336}
337
338void
339nouveau_dmem_resume(struct nouveau_drm *drm)
340{
341	struct nouveau_dmem_chunk *chunk;
342	int ret;
343
344	if (drm->dmem == NULL)
345		return;
346
347	mutex_lock(&drm->dmem->mutex);
348	list_for_each_entry(chunk, &drm->dmem->chunks, list) {
349		ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
350		/* FIXME handle pin failure */
351		WARN_ON(ret);
352	}
353	mutex_unlock(&drm->dmem->mutex);
354}
355
356void
357nouveau_dmem_suspend(struct nouveau_drm *drm)
358{
359	struct nouveau_dmem_chunk *chunk;
360
361	if (drm->dmem == NULL)
362		return;
363
364	mutex_lock(&drm->dmem->mutex);
365	list_for_each_entry(chunk, &drm->dmem->chunks, list)
366		nouveau_bo_unpin(chunk->bo);
367	mutex_unlock(&drm->dmem->mutex);
368}
369
370/*
371 * Evict all pages mapping a chunk.
372 */
373static void
374nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
375{
376	unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT;
377	unsigned long *src_pfns, *dst_pfns;
378	dma_addr_t *dma_addrs;
379	struct nouveau_fence *fence;
380
381	src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
382	dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
383	dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL);
384
385	migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
386			npages);
387
388	for (i = 0; i < npages; i++) {
389		if (src_pfns[i] & MIGRATE_PFN_MIGRATE) {
390			struct page *dpage;
391
392			/*
393			 * __GFP_NOFAIL because the GPU is going away and there
394			 * is nothing sensible we can do if we can't copy the
395			 * data back.
396			 */
397			dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);
398			dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
399			nouveau_dmem_copy_one(chunk->drm,
400					migrate_pfn_to_page(src_pfns[i]), dpage,
401					&dma_addrs[i]);
402		}
403	}
404
405	nouveau_fence_new(&fence, chunk->drm->dmem->migrate.chan);
406	migrate_device_pages(src_pfns, dst_pfns, npages);
407	nouveau_dmem_fence_done(&fence);
408	migrate_device_finalize(src_pfns, dst_pfns, npages);
409	kfree(src_pfns);
410	kfree(dst_pfns);
411	for (i = 0; i < npages; i++)
412		dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
413	kfree(dma_addrs);
414}
415
416void
417nouveau_dmem_fini(struct nouveau_drm *drm)
418{
419	struct nouveau_dmem_chunk *chunk, *tmp;
420
421	if (drm->dmem == NULL)
422		return;
423
424	mutex_lock(&drm->dmem->mutex);
425
426	list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
427		nouveau_dmem_evict_chunk(chunk);
428		nouveau_bo_unpin(chunk->bo);
429		nouveau_bo_ref(NULL, &chunk->bo);
430		WARN_ON(chunk->callocated);
431		list_del(&chunk->list);
432		memunmap_pages(&chunk->pagemap);
433		release_mem_region(chunk->pagemap.range.start,
434				   range_len(&chunk->pagemap.range));
435		kfree(chunk);
436	}
437
438	mutex_unlock(&drm->dmem->mutex);
439}
440
441static int
442nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
443		    enum nouveau_aper dst_aper, u64 dst_addr,
444		    enum nouveau_aper src_aper, u64 src_addr)
445{
446	struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
447	u32 launch_dma = 0;
448	int ret;
449
450	ret = PUSH_WAIT(push, 13);
451	if (ret)
452		return ret;
453
454	if (src_aper != NOUVEAU_APER_VIRT) {
455		switch (src_aper) {
456		case NOUVEAU_APER_VRAM:
457			PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
458				  NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, LOCAL_FB));
459			break;
460		case NOUVEAU_APER_HOST:
461			PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
462				  NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, COHERENT_SYSMEM));
463			break;
464		default:
465			return -EINVAL;
466		}
467
468		launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, SRC_TYPE, PHYSICAL);
469	}
470
471	if (dst_aper != NOUVEAU_APER_VIRT) {
472		switch (dst_aper) {
473		case NOUVEAU_APER_VRAM:
474			PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
475				  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
476			break;
477		case NOUVEAU_APER_HOST:
478			PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
479				  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
480			break;
481		default:
482			return -EINVAL;
483		}
484
485		launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);
486	}
487
488	PUSH_MTHD(push, NVA0B5, OFFSET_IN_UPPER,
489		  NVVAL(NVA0B5, OFFSET_IN_UPPER, UPPER, upper_32_bits(src_addr)),
490
491				OFFSET_IN_LOWER, lower_32_bits(src_addr),
492
493				OFFSET_OUT_UPPER,
494		  NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),
495
496				OFFSET_OUT_LOWER, lower_32_bits(dst_addr),
497				PITCH_IN, PAGE_SIZE,
498				PITCH_OUT, PAGE_SIZE,
499				LINE_LENGTH_IN, PAGE_SIZE,
500				LINE_COUNT, npages);
501
502	PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
503		  NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
504		  NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
505		  NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
506		  NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
507		  NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
508		  NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
509		  NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, TRUE) |
510		  NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, FALSE) |
511		  NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
512	return 0;
513}
514
515static int
516nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
517		     enum nouveau_aper dst_aper, u64 dst_addr)
518{
519	struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
520	u32 launch_dma = 0;
521	int ret;
522
523	ret = PUSH_WAIT(push, 12);
524	if (ret)
525		return ret;
526
527	switch (dst_aper) {
528	case NOUVEAU_APER_VRAM:
529		PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
530			  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
531		break;
532	case NOUVEAU_APER_HOST:
533		PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
534			  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
535		break;
536	default:
537		return -EINVAL;
538	}
539
540	launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);
541
542	PUSH_MTHD(push, NVA0B5, SET_REMAP_CONST_A, 0,
543				SET_REMAP_CONST_B, 0,
544
545				SET_REMAP_COMPONENTS,
546		  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_X, CONST_A) |
547		  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_Y, CONST_B) |
548		  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, COMPONENT_SIZE, FOUR) |
549		  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, NUM_DST_COMPONENTS, TWO));
550
551	PUSH_MTHD(push, NVA0B5, OFFSET_OUT_UPPER,
552		  NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),
553
554				OFFSET_OUT_LOWER, lower_32_bits(dst_addr));
555
556	PUSH_MTHD(push, NVA0B5, LINE_LENGTH_IN, length >> 3);
557
558	PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
559		  NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
560		  NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
561		  NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
562		  NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
563		  NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
564		  NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
565		  NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, FALSE) |
566		  NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, TRUE) |
567		  NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
568	return 0;
569}
570
571static int
572nouveau_dmem_migrate_init(struct nouveau_drm *drm)
573{
574	switch (drm->ttm.copy.oclass) {
575	case PASCAL_DMA_COPY_A:
576	case PASCAL_DMA_COPY_B:
577	case  VOLTA_DMA_COPY_A:
578	case TURING_DMA_COPY_A:
579		drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
580		drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;
581		drm->dmem->migrate.chan = drm->ttm.chan;
582		return 0;
583	default:
584		break;
585	}
586	return -ENODEV;
587}
588
589void
590nouveau_dmem_init(struct nouveau_drm *drm)
591{
592	int ret;
593
594	/* This only makes sense on PASCAL or newer */
595	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
596		return;
597
598	if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
599		return;
600
601	drm->dmem->drm = drm;
602	mutex_init(&drm->dmem->mutex);
603	INIT_LIST_HEAD(&drm->dmem->chunks);
604	mutex_init(&drm->dmem->mutex);
605	spin_lock_init(&drm->dmem->lock);
606
607	/* Initialize migration dma helpers before registering memory */
608	ret = nouveau_dmem_migrate_init(drm);
609	if (ret) {
610		kfree(drm->dmem);
611		drm->dmem = NULL;
612	}
613}
614
615static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
616		struct nouveau_svmm *svmm, unsigned long src,
617		dma_addr_t *dma_addr, u64 *pfn)
618{
619	struct device *dev = drm->dev->dev;
620	struct page *dpage, *spage;
621	unsigned long paddr;
622
623	spage = migrate_pfn_to_page(src);
624	if (!(src & MIGRATE_PFN_MIGRATE))
625		goto out;
626
627	dpage = nouveau_dmem_page_alloc_locked(drm);
628	if (!dpage)
629		goto out;
630
631	paddr = nouveau_dmem_page_addr(dpage);
632	if (spage) {
633		*dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
634					 DMA_BIDIRECTIONAL);
635		if (dma_mapping_error(dev, *dma_addr))
636			goto out_free_page;
637		if (drm->dmem->migrate.copy_func(drm, 1,
638			NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
639			goto out_dma_unmap;
640	} else {
641		*dma_addr = DMA_MAPPING_ERROR;
642		if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
643			NOUVEAU_APER_VRAM, paddr))
644			goto out_free_page;
645	}
646
647	dpage->zone_device_data = svmm;
648	*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
649		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
650	if (src & MIGRATE_PFN_WRITE)
651		*pfn |= NVIF_VMM_PFNMAP_V0_W;
652	return migrate_pfn(page_to_pfn(dpage));
653
654out_dma_unmap:
655	dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
656out_free_page:
657	nouveau_dmem_page_free_locked(drm, dpage);
658out:
659	*pfn = NVIF_VMM_PFNMAP_V0_NONE;
660	return 0;
661}
662
663static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
664		struct nouveau_svmm *svmm, struct migrate_vma *args,
665		dma_addr_t *dma_addrs, u64 *pfns)
666{
667	struct nouveau_fence *fence;
668	unsigned long addr = args->start, nr_dma = 0, i;
669
670	for (i = 0; addr < args->end; i++) {
671		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, svmm,
672				args->src[i], dma_addrs + nr_dma, pfns + i);
673		if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
674			nr_dma++;
675		addr += PAGE_SIZE;
676	}
677
678	nouveau_fence_new(&fence, drm->dmem->migrate.chan);
679	migrate_vma_pages(args);
680	nouveau_dmem_fence_done(&fence);
681	nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);
682
683	while (nr_dma--) {
684		dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
685				DMA_BIDIRECTIONAL);
686	}
687	migrate_vma_finalize(args);
688}
689
690int
691nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
692			 struct nouveau_svmm *svmm,
693			 struct vm_area_struct *vma,
694			 unsigned long start,
695			 unsigned long end)
696{
697	unsigned long npages = (end - start) >> PAGE_SHIFT;
698	unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
699	dma_addr_t *dma_addrs;
700	struct migrate_vma args = {
701		.vma		= vma,
702		.start		= start,
703		.pgmap_owner	= drm->dev,
704		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
705	};
706	unsigned long i;
707	u64 *pfns;
708	int ret = -ENOMEM;
709
710	if (drm->dmem == NULL)
711		return -ENODEV;
712
713	args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
714	if (!args.src)
715		goto out;
716	args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
717	if (!args.dst)
718		goto out_free_src;
719
720	dma_addrs = kmalloc_array(max, sizeof(*dma_addrs), GFP_KERNEL);
721	if (!dma_addrs)
722		goto out_free_dst;
723
724	pfns = nouveau_pfns_alloc(max);
725	if (!pfns)
726		goto out_free_dma;
727
728	for (i = 0; i < npages; i += max) {
729		if (args.start + (max << PAGE_SHIFT) > end)
730			args.end = end;
731		else
732			args.end = args.start + (max << PAGE_SHIFT);
733
734		ret = migrate_vma_setup(&args);
735		if (ret)
736			goto out_free_pfns;
737
738		if (args.cpages)
739			nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
740						   pfns);
741		args.start = args.end;
742	}
743
744	ret = 0;
745out_free_pfns:
746	nouveau_pfns_free(pfns);
747out_free_dma:
748	kfree(dma_addrs);
749out_free_dst:
750	kfree(args.dst);
751out_free_src:
752	kfree(args.src);
753out:
754	return ret;
755}
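
Both versions above are built around the same three-phase migrate_vma contract from the core mm: collect and isolate the source pages, fill args.dst with destination pfns after copying the data, then commit the new mappings and release everything. The condensed sketch below illustrates that pattern for a single page; sketch_migrate_one() and copy_one() are illustrative names only, error handling is trimmed, and version-specific details (MIGRATE_PFN_LOCKED in v5.4, pgmap_owner/flags in v6.8) are omitted.

/*
 * Illustrative only: a condensed, hypothetical helper showing the shared
 * migrate_vma pattern; not part of the driver.
 */
static int sketch_migrate_one(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long src = 0, dst = 0;
	struct migrate_vma args = {
		.vma	= vma,
		.start	= addr,
		.end	= addr + PAGE_SIZE,
		.src	= &src,
		.dst	= &dst,
	};
	struct page *spage, *dpage;

	/* Phase 1: collect and isolate the source page. */
	if (migrate_vma_setup(&args) < 0)
		return -EFAULT;
	if (!args.cpages)
		return 0;

	spage = migrate_pfn_to_page(src);
	if (spage && (src & MIGRATE_PFN_MIGRATE)) {
		/* Phase 2: allocate a destination page and copy the data. */
		dpage = alloc_page_vma(GFP_HIGHUSER, vma, addr);
		if (dpage) {
			lock_page(dpage);
			copy_one(spage, dpage);	/* hypothetical copy helper */
			dst = migrate_pfn(page_to_pfn(dpage));
		}
	}

	/* Phase 3: commit the new mapping and release the pages. */
	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	return 0;
}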