/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

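/* Map the list of physical regions in 'node' into the address range backed
 * by 'vma', starting 'delta' bytes into that range.  Regions are written
 * one page table at a time, splitting at page-table (PDE) boundaries.
 */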
void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_mm_node *r;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	/* reuse 'delta' as the running byte offset passed to the backend */
	delta = 0;
	list_for_each_entry(r, &node->regions, rl_entry) {
		u64 phys = (u64)r->offset << 12;
		u32 num  = r->length >> bits;

		while (num) {
			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

			/* clamp the write to the end of the current page table */
			end = (pte + num);
			if (unlikely(end >= max))
				end = max;
			len = end - pte;

			vm->map(vma, pgt, node, pte, len, phys, delta);

			num -= len;
			pte += len;
			if (unlikely(end >= max)) {
				/* crossed into the next page table */
				phys += len << (bits + 12);
				pde++;
				pte = 0;
			}

			delta += (u64)len << vma->node->type;
		}
	}

	vm->flush(vm);
}

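/* Map an entire memory node at the start of 'vma'. */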
void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
{
	nouveau_vm_map_at(vma, 0, node);
}

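/* Map a scatter-gather table into 'vma': walk the scatterlist and write
 * one PTE per page, splitting segments that cross a page-table boundary.
 */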
void
nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
			struct nouveau_mem *mem)
{
	struct nouveau_vm *vm = vma->vm;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vm->pgt_bits - bits);
	unsigned m, sglen;
	u32 end, len;
	int i;
	struct scatterlist *sg;

	for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
		sglen = sg_dma_len(sg) >> PAGE_SHIFT;

		/* clamp this segment to the end of the current page table */
		end = pte + sglen;
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

			vm->map_sg(vma, pgt, mem, pte, 1, &addr);
			num--;
			pte++;

			if (num == 0)
				goto finish;
		}
		if (unlikely(end >= max)) {
			/* crossed into the next page table; refresh the pgt
			 * pointer so the remainder of this segment lands in
			 * the right table
			 */
			pde++;
			pte = 0;
			pgt = vm->pgt[pde].obj[big];
		}
		/* map whatever remains of this segment in the new table */
		if (m < sglen) {
			for (; m < sglen; m++) {
				dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

				vm->map_sg(vma, pgt, mem, pte, 1, &addr);
				num--;
				pte++;
				if (num == 0)
					goto finish;
			}
		}
	}
finish:
	vm->flush(vm);
}

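/* Map an array of DMA addresses (mem->pages) into 'vma', covering 'length'
 * bytes starting 'delta' bytes into the mapping.
 */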
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
		  struct nouveau_mem *mem)
{
	struct nouveau_vm *vm = vma->vm;
	dma_addr_t *list = mem->pages;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->map_sg(vma, pgt, mem, pte, len, list);

		num  -= len;
		pte  += len;
		list += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}

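/* Clear the PTEs covering 'length' bytes starting 'delta' bytes into the
 * range backed by 'vma', one page table at a time.
 */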
void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
	struct nouveau_vm *vm = vma->vm;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->unmap(pgt, pte, len);

		num -= len;
		pte += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}

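/* Unmap the entire range covered by 'vma'. */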
void
nouveau_vm_unmap(struct nouveau_vma *vma)
{
	nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}

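/* Drop one reference on each page table from 'fpde' to 'lpde'.  Tables
 * whose refcount reaches zero are cleared from every page directory
 * attached to the VM and then released.
 */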
static void
nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
{
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_vm_pgt *vpgt;
	struct nouveau_gpuobj *pgt;
	u32 pde;

	for (pde = fpde; pde <= lpde; pde++) {
		vpgt = &vm->pgt[pde - vm->fpde];
		if (--vpgt->refcount[big])
			continue;

		pgt = vpgt->obj[big];
		vpgt->obj[big] = NULL;

		list_for_each_entry(vpgd, &vm->pgd_list, head) {
			vm->map_pgt(vpgd->obj, pde, vpgt->obj);
		}

		/* dropping the final reference may destroy the object,
		 * so do it outside the mm mutex
		 */
		mutex_unlock(&vm->mm.mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm.mutex);
	}
}

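/* Allocate the page table for 'pde' and point the corresponding PDE at it
 * in every attached page directory.  Callers hold the mm mutex.
 */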
static int
nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
{
	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_gpuobj *pgt;
	int big = (type != vm->spg_shift);
	u32 pgt_size;
	int ret;

	/* one 8-byte PTE per page of this size within the PDE's range */
	pgt_size  = (1 << (vm->pgt_bits + 12)) >> type;
	pgt_size *= 8;

	mutex_unlock(&vm->mm.mutex);
	ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
	mutex_lock(&vm->mm.mutex);
	if (unlikely(ret))
		return ret;

	/* someone beat us to filling the PDE while we didn't have the lock */
	if (unlikely(vpgt->refcount[big]++)) {
		mutex_unlock(&vm->mm.mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm.mutex);
		return 0;
	}

	vpgt->obj[big] = pgt;
	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		vm->map_pgt(vpgd->obj, pde, vpgt->obj);
	}

	return 0;
}

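/* Allocate a 'size'-byte range of address space from 'vm', taking a
 * reference on (and allocating, if necessary) each page table backing it.
 * On success, 'vma' describes the new allocation.
 */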
int
nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
	       u32 access, struct nouveau_vma *vma)
{
	u32 align = (1 << page_shift) >> 12;
	u32 msize = size >> 12;
	u32 fpde, lpde, pde;
	int ret;

	mutex_lock(&vm->mm.mutex);
	ret = nouveau_mm_get(&vm->mm, page_shift, msize, 0, align, &vma->node);
	if (unlikely(ret != 0)) {
		mutex_unlock(&vm->mm.mutex);
		return ret;
	}

	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
	for (pde = fpde; pde <= lpde; pde++) {
		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
		int big = (vma->node->type != vm->spg_shift);

		if (likely(vpgt->refcount[big])) {
			vpgt->refcount[big]++;
			continue;
		}

		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
		if (ret) {
			/* unwind the page table references taken so far */
			if (pde != fpde)
				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
			nouveau_mm_put(&vm->mm, vma->node);
			mutex_unlock(&vm->mm.mutex);
			vma->node = NULL;
			return ret;
		}
	}
	mutex_unlock(&vm->mm.mutex);

	vma->vm     = vm;
	vma->offset = (u64)vma->node->offset << 12;
	vma->access = access;
	return 0;
}

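/* Release the address range held by 'vma' and drop its page table
 * references.
 */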
void
nouveau_vm_put(struct nouveau_vma *vma)
{
	struct nouveau_vm *vm = vma->vm;
	u32 fpde, lpde;

	if (unlikely(vma->node == NULL))
		return;
	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;

	mutex_lock(&vm->mm.mutex);
	nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
	nouveau_mm_put(&vm->mm, vma->node);
	vma->node = NULL;
	mutex_unlock(&vm->mm.mutex);
}

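/* Create an address space covering 'length' bytes at 'offset', with the
 * range usable for allocations beginning at 'mm_offset'.  Page sizes and
 * page table geometry are selected by chipset generation.
 */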
int
nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
	       struct nouveau_vm **pvm)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_vm *vm;
	u64 mm_length = (offset + length) - mm_offset;
	u32 block, pgt_bits;
	int ret;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	if (dev_priv->card_type == NV_50) {
		vm->map_pgt = nv50_vm_map_pgt;
		vm->map = nv50_vm_map;
		vm->map_sg = nv50_vm_map_sg;
		vm->unmap = nv50_vm_unmap;
		vm->flush = nv50_vm_flush;
		vm->spg_shift = 12;
		vm->lpg_shift = 16;

		pgt_bits = 29;
		block = (1 << pgt_bits);
		if (length < block)
			block = length;
	} else if (dev_priv->card_type >= NV_C0) {
		vm->map_pgt = nvc0_vm_map_pgt;
		vm->map = nvc0_vm_map;
		vm->map_sg = nvc0_vm_map_sg;
		vm->unmap = nvc0_vm_unmap;
		vm->flush = nvc0_vm_flush;
		vm->spg_shift = 12;
		vm->lpg_shift = 17;
		pgt_bits = 27;
		block = 4096;
	} else {
		kfree(vm);
		return -ENOSYS;
	}

	vm->fpde   = offset >> pgt_bits;
	vm->lpde   = (offset + length - 1) >> pgt_bits;
	vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
	if (!vm->pgt) {
		kfree(vm);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&vm->pgd_list);
	vm->dev = dev;
	vm->refcount = 1;
	vm->pgt_bits = pgt_bits - 12;	/* log2(4KiB pages per page table) */

	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
			      block >> 12);
	if (ret) {
		kfree(vm->pgt);
		kfree(vm);
		return ret;
	}

	*pvm = vm;
	return 0;
}

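/* Attach a page directory to the VM, filling its PDEs with the current
 * set of page tables.
 */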
static int
nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm_pgd *vpgd;
	int i;

	if (!pgd)
		return 0;

	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
	if (!vpgd)
		return -ENOMEM;

	nouveau_gpuobj_ref(pgd, &vpgd->obj);

	mutex_lock(&vm->mm.mutex);
	for (i = vm->fpde; i <= vm->lpde; i++)
		vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
	list_add(&vpgd->head, &vm->pgd_list);
	mutex_unlock(&vm->mm.mutex);
	return 0;
}

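/* Detach a page directory from the VM and drop the reference that
 * nouveau_vm_link() took on it.
 */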
static void
nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
{
	struct nouveau_vm_pgd *vpgd, *tmp;
	struct nouveau_gpuobj *pgd = NULL;

	if (!mpgd)
		return;

	mutex_lock(&vm->mm.mutex);
	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		if (vpgd->obj == mpgd) {
			pgd = vpgd->obj;
			list_del(&vpgd->head);
			kfree(vpgd);
			break;
		}
	}
	mutex_unlock(&vm->mm.mutex);

	nouveau_gpuobj_ref(NULL, &pgd);
}

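/* Final teardown: detach any remaining page directories, then free the
 * allocator and the VM itself.
 */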
static void
nouveau_vm_del(struct nouveau_vm *vm)
{
	struct nouveau_vm_pgd *vpgd, *tmp;

	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		nouveau_vm_unlink(vm, vpgd->obj);
	}

	nouveau_mm_fini(&vm->mm);
	kfree(vm->pgt);
	kfree(vm);
}

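/* Point '*ptr' at 'ref' (either may be NULL), adjusting VM reference
 * counts and attaching/detaching 'pgd' as needed.  The previously
 * referenced VM is destroyed when its last reference goes away.
 */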
int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
	       struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm *vm;
	int ret;

	vm = ref;
	if (vm) {
		ret = nouveau_vm_link(vm, pgd);
		if (ret)
			return ret;

		vm->refcount++;
	}

	vm = *ptr;
	*ptr = ref;

	if (vm) {
		nouveau_vm_unlink(vm, pgd);

		if (--vm->refcount == 0)
			nouveau_vm_del(vm);
	}

	return 0;
}