v5.9
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include "lima_device.h"
#include "lima_vm.h"
#include "lima_gem.h"
#include "lima_regs.h"

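/*
 * Per-VM bookkeeping for a buffer object: one lima_bo_va exists for
 * each (vm, bo) pair, linked on the BO's va list, and records the
 * drm_mm node holding the buffer's GPU virtual address range.
 */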
struct lima_bo_va {
	struct list_head list;
	unsigned int ref_count;

	struct drm_mm_node node;

	struct lima_vm *vm;
};

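/*
 * The GPU uses a two-level page table: a page directory (PD) whose
 * entries each cover 4 MiB (22-bit shift) and point at 4 KiB page
 * tables (12-bit shift). Page tables are allocated in blocks (BT) of
 * LIMA_VM_NUM_PT_PER_BT tables each; LIMA_PBE/LIMA_BTE select the
 * block and the entry within that block for a given virtual address.
 */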
#define LIMA_VM_PD_SHIFT 22
#define LIMA_VM_PT_SHIFT 12
#define LIMA_VM_PB_SHIFT (LIMA_VM_PD_SHIFT + LIMA_VM_NUM_PT_PER_BT_SHIFT)
#define LIMA_VM_BT_SHIFT LIMA_VM_PT_SHIFT

#define LIMA_VM_PT_MASK ((1 << LIMA_VM_PD_SHIFT) - 1)
#define LIMA_VM_BT_MASK ((1 << LIMA_VM_PB_SHIFT) - 1)

#define LIMA_PDE(va) (va >> LIMA_VM_PD_SHIFT)
#define LIMA_PTE(va) ((va & LIMA_VM_PT_MASK) >> LIMA_VM_PT_SHIFT)
#define LIMA_PBE(va) (va >> LIMA_VM_PB_SHIFT)
#define LIMA_BTE(va) ((va & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT)

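/* Clear every page-table entry covering the VA range [start, end]. */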
static void lima_vm_unmap_range(struct lima_vm *vm, u32 start, u32 end)
{
	u32 addr;

	for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
		u32 pbe = LIMA_PBE(addr);
		u32 bte = LIMA_BTE(addr);

		vm->bts[pbe].cpu[bte] = 0;
	}
}

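/*
 * Map one page at DMA address pa to virtual address va, allocating
 * the backing block of page tables on demand and wiring the new
 * block's tables into the page directory.
 */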
static int lima_vm_map_page(struct lima_vm *vm, dma_addr_t pa, u32 va)
{
	u32 pbe = LIMA_PBE(va);
	u32 bte = LIMA_BTE(va);

	if (!vm->bts[pbe].cpu) {
		dma_addr_t pts;
		u32 *pd;
		int j;

		vm->bts[pbe].cpu = dma_alloc_wc(
			vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
			&vm->bts[pbe].dma, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
		if (!vm->bts[pbe].cpu)
			return -ENOMEM;

		pts = vm->bts[pbe].dma;
		pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
		for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
			pd[j] = pts | LIMA_VM_FLAG_PRESENT;
			pts += LIMA_PAGE_SIZE;
		}
	}

	vm->bts[pbe].cpu[bte] = pa | LIMA_VM_FLAGS_CACHE;

	return 0;
}

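/* Find the lima_bo_va for this VM in the BO's per-VM mapping list. */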
static struct lima_bo_va *
lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo)
{
	struct lima_bo_va *bo_va, *ret = NULL;

	list_for_each_entry(bo_va, &bo->va, list) {
		if (bo_va->vm == vm) {
			ret = bo_va;
			break;
		}
	}

	return ret;
}

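/*
 * Take a reference on the BO's mapping in this VM. On the first
 * reference (when "create" allows it), reserve a VA range from the
 * VM's allocator and map every backing page into the GPU page table.
 */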
int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
{
	struct lima_bo_va *bo_va;
	struct sg_dma_page_iter sg_iter;
	int offset = 0, err;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	if (bo_va) {
		bo_va->ref_count++;
		mutex_unlock(&bo->lock);
		return 0;
	}

	/* should not create new bo_va if not asked by caller */
	if (!create) {
		mutex_unlock(&bo->lock);
		return -ENOENT;
	}

	bo_va = kzalloc(sizeof(*bo_va), GFP_KERNEL);
	if (!bo_va) {
		err = -ENOMEM;
		goto err_out0;
	}

	bo_va->vm = vm;
	bo_va->ref_count = 1;

	mutex_lock(&vm->lock);

	err = drm_mm_insert_node(&vm->mm, &bo_va->node, lima_bo_size(bo));
	if (err)
		goto err_out1;

	for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter, bo->base.sgt->nents, 0) {
		err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
				       bo_va->node.start + offset);
		if (err)
			goto err_out2;

		offset += PAGE_SIZE;
	}

	mutex_unlock(&vm->lock);

	list_add_tail(&bo_va->list, &bo->va);

	mutex_unlock(&bo->lock);
	return 0;

err_out2:
	if (offset)
		lima_vm_unmap_range(vm, bo_va->node.start, bo_va->node.start + offset - 1);
	drm_mm_remove_node(&bo_va->node);
err_out1:
	mutex_unlock(&vm->lock);
	kfree(bo_va);
err_out0:
	mutex_unlock(&bo->lock);
	return err;
}

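/*
 * Drop a reference on the BO's mapping in this VM; on the last
 * reference, unmap the VA range and release it back to the allocator.
 */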
void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
{
	struct lima_bo_va *bo_va;
	u32 size;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	if (--bo_va->ref_count > 0) {
		mutex_unlock(&bo->lock);
		return;
	}

	mutex_lock(&vm->lock);

	size = bo->heap_size ? bo->heap_size : bo_va->node.size;
	lima_vm_unmap_range(vm, bo_va->node.start,
			    bo_va->node.start + size - 1);

	drm_mm_remove_node(&bo_va->node);

	mutex_unlock(&vm->lock);

	list_del(&bo_va->list);

	mutex_unlock(&bo->lock);

	kfree(bo_va);
}

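/* Return the GPU virtual address of a BO previously added to this VM. */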
u32 lima_vm_get_va(struct lima_vm *vm, struct lima_bo *bo)
{
	struct lima_bo_va *bo_va;
	u32 ret;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	ret = bo_va->node.start;

	mutex_unlock(&bo->lock);

	return ret;
}

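/*
 * Create a new VM: allocate its page directory and, when the device
 * has a DLBU, map the DLBU page at the reserved virtual address
 * before initializing the VA range allocator.
 */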
struct lima_vm *lima_vm_create(struct lima_device *dev)
{
	struct lima_vm *vm;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return NULL;

	vm->dev = dev;
	mutex_init(&vm->lock);
	kref_init(&vm->refcount);

	vm->pd.cpu = dma_alloc_wc(dev->dev, LIMA_PAGE_SIZE, &vm->pd.dma,
				  GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
	if (!vm->pd.cpu)
		goto err_out0;

	if (dev->dlbu_cpu) {
		int err = lima_vm_map_page(
			vm, dev->dlbu_dma, LIMA_VA_RESERVE_DLBU);
		if (err)
			goto err_out1;
	}

	drm_mm_init(&vm->mm, dev->va_start, dev->va_end - dev->va_start);

	return vm;

err_out1:
	dma_free_wc(dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);
err_out0:
	kfree(vm);
	return NULL;
}

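/* kref release callback: tear down the allocator and free the page directory and all page-table blocks. */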
void lima_vm_release(struct kref *kref)
{
	struct lima_vm *vm = container_of(kref, struct lima_vm, refcount);
	int i;

	drm_mm_takedown(&vm->mm);

	for (i = 0; i < LIMA_VM_NUM_BT; i++) {
		if (vm->bts[i].cpu)
			dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
				    vm->bts[i].cpu, vm->bts[i].dma);
	}

	if (vm->pd.cpu)
		dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);

	kfree(vm);
}

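/* Debug helper: dump every present page-directory and page-table entry. */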
void lima_vm_print(struct lima_vm *vm)
{
	int i, j, k;
	u32 *pd, *pt;

	if (!vm->pd.cpu)
		return;

	pd = vm->pd.cpu;
	for (i = 0; i < LIMA_VM_NUM_BT; i++) {
		if (!vm->bts[i].cpu)
			continue;

		pt = vm->bts[i].cpu;
		for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
			int idx = (i << LIMA_VM_NUM_PT_PER_BT_SHIFT) + j;

			printk(KERN_INFO "lima vm pd %03x:%08x\n", idx, pd[idx]);

			for (k = 0; k < LIMA_PAGE_ENT_NUM; k++) {
				u32 pte = *pt++;

				if (pte)
					printk(KERN_INFO "  pt %03x:%08x\n", k, pte);
			}
		}
	}
}

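/*
 * Map a BO's backing pages, starting at page offset "pageoff", into
 * the VA range already reserved for it in this VM.
 */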
int lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff)
{
	struct lima_bo_va *bo_va;
	struct sg_dma_page_iter sg_iter;
	int offset = 0, err;
	u32 base;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	if (!bo_va) {
		err = -ENOENT;
		goto err_out0;
	}

	mutex_lock(&vm->lock);

	base = bo_va->node.start + (pageoff << PAGE_SHIFT);
	for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter,
			     bo->base.sgt->nents, pageoff) {
		err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
				       base + offset);
		if (err)
			goto err_out1;

		offset += PAGE_SIZE;
	}

	mutex_unlock(&vm->lock);

	mutex_unlock(&bo->lock);
	return 0;

err_out1:
	if (offset)
		lima_vm_unmap_range(vm, base, base + offset - 1);
	mutex_unlock(&vm->lock);
err_out0:
	mutex_unlock(&bo->lock);
	return err;
}
v5.4
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include "lima_device.h"
#include "lima_vm.h"
#include "lima_object.h"
#include "lima_regs.h"

struct lima_bo_va {
	struct list_head list;
	unsigned int ref_count;

	struct drm_mm_node node;

	struct lima_vm *vm;
};

#define LIMA_VM_PD_SHIFT 22
#define LIMA_VM_PT_SHIFT 12
#define LIMA_VM_PB_SHIFT (LIMA_VM_PD_SHIFT + LIMA_VM_NUM_PT_PER_BT_SHIFT)
#define LIMA_VM_BT_SHIFT LIMA_VM_PT_SHIFT

#define LIMA_VM_PT_MASK ((1 << LIMA_VM_PD_SHIFT) - 1)
#define LIMA_VM_BT_MASK ((1 << LIMA_VM_PB_SHIFT) - 1)

#define LIMA_PDE(va) (va >> LIMA_VM_PD_SHIFT)
#define LIMA_PTE(va) ((va & LIMA_VM_PT_MASK) >> LIMA_VM_PT_SHIFT)
#define LIMA_PBE(va) (va >> LIMA_VM_PB_SHIFT)
#define LIMA_BTE(va) ((va & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT)

static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end)
{
	u32 addr;

	for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
		u32 pbe = LIMA_PBE(addr);
		u32 bte = LIMA_BTE(addr);

		vm->bts[pbe].cpu[bte] = 0;
	}
}

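/*
 * Map the pages whose DMA addresses are given in "dma" to the VA
 * range [start, end], allocating blocks of page tables on demand and
 * rolling back already-written entries on allocation failure.
 */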
static int lima_vm_map_page_table(struct lima_vm *vm, dma_addr_t *dma,
				  u32 start, u32 end)
{
	u64 addr;
	int i = 0;

	for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
		u32 pbe = LIMA_PBE(addr);
		u32 bte = LIMA_BTE(addr);

		if (!vm->bts[pbe].cpu) {
			dma_addr_t pts;
			u32 *pd;
			int j;

			vm->bts[pbe].cpu = dma_alloc_wc(
				vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
				&vm->bts[pbe].dma, GFP_KERNEL | __GFP_ZERO);
			if (!vm->bts[pbe].cpu) {
				if (addr != start)
					lima_vm_unmap_page_table(vm, start, addr - 1);
				return -ENOMEM;
			}

			pts = vm->bts[pbe].dma;
			pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
			for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
				pd[j] = pts | LIMA_VM_FLAG_PRESENT;
				pts += LIMA_PAGE_SIZE;
			}
		}

		vm->bts[pbe].cpu[bte] = dma[i++] | LIMA_VM_FLAGS_CACHE;
	}

	return 0;
}

static struct lima_bo_va *
lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo)
{
	struct lima_bo_va *bo_va, *ret = NULL;

	list_for_each_entry(bo_va, &bo->va, list) {
		if (bo_va->vm == vm) {
			ret = bo_va;
			break;
		}
	}

	return ret;
}

int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
{
	struct lima_bo_va *bo_va;
	int err;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	if (bo_va) {
		bo_va->ref_count++;
		mutex_unlock(&bo->lock);
		return 0;
	}

	/* should not create new bo_va if not asked by caller */
	if (!create) {
		mutex_unlock(&bo->lock);
		return -ENOENT;
	}

	bo_va = kzalloc(sizeof(*bo_va), GFP_KERNEL);
	if (!bo_va) {
		err = -ENOMEM;
		goto err_out0;
	}

	bo_va->vm = vm;
	bo_va->ref_count = 1;

	mutex_lock(&vm->lock);

	err = drm_mm_insert_node(&vm->mm, &bo_va->node, bo->gem.size);
	if (err)
		goto err_out1;

	err = lima_vm_map_page_table(vm, bo->pages_dma_addr, bo_va->node.start,
				     bo_va->node.start + bo_va->node.size - 1);
	if (err)
		goto err_out2;

	mutex_unlock(&vm->lock);

	list_add_tail(&bo_va->list, &bo->va);

	mutex_unlock(&bo->lock);
	return 0;

err_out2:
	drm_mm_remove_node(&bo_va->node);
err_out1:
	mutex_unlock(&vm->lock);
	kfree(bo_va);
err_out0:
	mutex_unlock(&bo->lock);
	return err;
}

void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
{
	struct lima_bo_va *bo_va;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	if (--bo_va->ref_count > 0) {
		mutex_unlock(&bo->lock);
		return;
	}

	mutex_lock(&vm->lock);

	lima_vm_unmap_page_table(vm, bo_va->node.start,
				 bo_va->node.start + bo_va->node.size - 1);

	drm_mm_remove_node(&bo_va->node);

	mutex_unlock(&vm->lock);

	list_del(&bo_va->list);

	mutex_unlock(&bo->lock);

	kfree(bo_va);
}

u32 lima_vm_get_va(struct lima_vm *vm, struct lima_bo *bo)
{
	struct lima_bo_va *bo_va;
	u32 ret;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	ret = bo_va->node.start;

	mutex_unlock(&bo->lock);

	return ret;
}

struct lima_vm *lima_vm_create(struct lima_device *dev)
{
	struct lima_vm *vm;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return NULL;

	vm->dev = dev;
	mutex_init(&vm->lock);
	kref_init(&vm->refcount);

	vm->pd.cpu = dma_alloc_wc(dev->dev, LIMA_PAGE_SIZE, &vm->pd.dma,
				  GFP_KERNEL | __GFP_ZERO);
	if (!vm->pd.cpu)
		goto err_out0;

	if (dev->dlbu_cpu) {
		int err = lima_vm_map_page_table(
			vm, &dev->dlbu_dma, LIMA_VA_RESERVE_DLBU,
			LIMA_VA_RESERVE_DLBU + LIMA_PAGE_SIZE - 1);
		if (err)
			goto err_out1;
	}

	drm_mm_init(&vm->mm, dev->va_start, dev->va_end - dev->va_start);

	return vm;

err_out1:
	dma_free_wc(dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);
err_out0:
	kfree(vm);
	return NULL;
}

void lima_vm_release(struct kref *kref)
{
	struct lima_vm *vm = container_of(kref, struct lima_vm, refcount);
	int i;

	drm_mm_takedown(&vm->mm);

	for (i = 0; i < LIMA_VM_NUM_BT; i++) {
		if (vm->bts[i].cpu)
			dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
				    vm->bts[i].cpu, vm->bts[i].dma);
	}

	if (vm->pd.cpu)
		dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);

	kfree(vm);
}

void lima_vm_print(struct lima_vm *vm)
{
	int i, j, k;
	u32 *pd, *pt;

	if (!vm->pd.cpu)
		return;

	pd = vm->pd.cpu;
	for (i = 0; i < LIMA_VM_NUM_BT; i++) {
		if (!vm->bts[i].cpu)
			continue;

		pt = vm->bts[i].cpu;
		for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
			int idx = (i << LIMA_VM_NUM_PT_PER_BT_SHIFT) + j;

			printk(KERN_INFO "lima vm pd %03x:%08x\n", idx, pd[idx]);

			for (k = 0; k < LIMA_PAGE_ENT_NUM; k++) {
				u32 pte = *pt++;

				if (pte)
					printk(KERN_INFO "  pt %03x:%08x\n", k, pte);
			}
		}
	}
}