v3.1: drivers/gpu/drm/nouveau/nouveau_ttm.c
/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"

#include "nouveau_drv.h"
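/*
 * mmap() offsets below DRM_FILE_PAGE_OFFSET address legacy DRM maps;
 * anything above that is a TTM buffer object and is handed to ttm_bo_mmap().
 */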
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_nouveau_private *dev_priv =
		file_priv->minor->dev->dev_private;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_mmap(filp, vma);

	return ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
}

static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}
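/*
 * TTM's memory-accounting and BO-global state are shared across drivers;
 * drm_global_item_ref() refcounts them and only runs the init hook for the
 * first user. mem_global_ref.release doubles as an "initialised" flag for
 * nouveau_ttm_global_release() below.
 */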
int
nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
{
	struct drm_global_reference *global_ref;
	int ret;

	global_ref = &dev_priv->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &nouveau_ttm_mem_global_init;
	global_ref->release = &nouveau_ttm_mem_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
		dev_priv->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	dev_priv->ttm.bo_global_ref.mem_glob = global_ref->object;
	global_ref = &dev_priv->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
		drm_global_item_unref(&dev_priv->ttm.mem_global_ref);
		dev_priv->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	return 0;
}

void
nouveau_ttm_global_release(struct drm_nouveau_private *dev_priv)
{
	if (dev_priv->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref);
	drm_global_item_unref(&dev_priv->ttm.mem_global_ref);
	dev_priv->ttm.mem_global_ref.release = NULL;
}
v4.6: drivers/gpu/drm/nouveau/nouveau_ttm.c
/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "nouveau_drm.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

#include "drm_legacy.h"

#include <core/tegra.h>
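/*
 * TTM memory-type manager for on-board VRAM: instead of TTM's default
 * range manager, allocations are delegated to the nvkm RAM backend.
 */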
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *fb = nvxx_fb(&drm->device);
	man->priv = fb;
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	man->priv = NULL;
	return 0;
}
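/* Unmap and release both of the node's GPU virtual-address ranges, if allocated. */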
static inline void
nvkm_mem_node_cleanup(struct nvkm_mem *node)
{
	if (node->vma[0].node) {
		nvkm_vm_unmap(&node->vma[0]);
		nvkm_vm_put(&node->vma[0]);
	}

	if (node->vma[1].node) {
		nvkm_vm_unmap(&node->vma[1]);
		nvkm_vm_put(&node->vma[1]);
	}
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
	nvkm_mem_node_cleanup(mem->mm_node);
	ram->func->put(ram, (struct nvkm_mem **)&mem->mm_node);
}
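/*
 * Allocate VRAM for a buffer object. On -ENOSPC this returns 0 with
 * mem->mm_node left NULL, which tells TTM that space ran out (so it may
 * evict or try another placement) rather than that a hard error occurred.
 */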
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;
	u32 size_nc = 0;
	int ret;

	if (drm->device.info.ram_size == 0)
		return -ENOMEM;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = ram->func->get(ram, mem->num_pages << PAGE_SHIFT,
			     mem->page_alignment << PAGE_SHIFT, size_nc,
			     (nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start   = node->offset >> PAGE_SHIFT;
	return 0;
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	nouveau_vram_manager_init,
	nouveau_vram_manager_fini,
	nouveau_vram_manager_new,
	nouveau_vram_manager_del,
};
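/*
 * Manager for GPU-accessible system memory (the "GART" heap). Nothing is
 * reserved up front: _new() only creates the per-BO bookkeeping node and
 * leaves mem->start at 0.
 */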
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nvkm_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}
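/*
 * The storage/memory type is carried in the GEM tile_flags; how many of
 * those bits are meaningful depends on the GPU family.
 */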
static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	switch (drm->device.info.family) {
	case NV_DEVICE_INFO_V0_TNT:
	case NV_DEVICE_INFO_V0_CELSIUS:
	case NV_DEVICE_INFO_V0_KELVIN:
	case NV_DEVICE_INFO_V0_RANKINE:
	case NV_DEVICE_INFO_V0_CURIE:
		break;
	case NV_DEVICE_INFO_V0_TESLA:
		if (drm->device.info.chipset != 0x50)
			node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
		break;
	case NV_DEVICE_INFO_V0_FERMI:
	case NV_DEVICE_INFO_V0_KEPLER:
	case NV_DEVICE_INFO_V0_MAXWELL:
		node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
		break;
	default:
		NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
			drm->device.info.family);
		break;
	}

	mem->mm_node = node;
	mem->start   = 0;
	return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	nouveau_gart_manager_init,
	nouveau_gart_manager_fini,
	nouveau_gart_manager_new,
	nouveau_gart_manager_del,
	nouveau_gart_manager_debug
};

/*XXX*/
#include <subdev/mmu/nv04.h>
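/*
 * The nv04 GART manager reaches directly into the nv04 MMU internals
 * (hence the XXX above): it holds a reference on that MMU's single VM
 * and carves each object's GART range out of it with nvkm_vm_get().
 */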
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
	struct nv04_mmu *priv = (void *)mmu;
	struct nvkm_vm *vm = NULL;
	nvkm_vm_ref(priv->vm, &vm, NULL);
	man->priv = vm;
	return 0;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	struct nvkm_vm *vm = man->priv;
	nvkm_vm_ref(NULL, &vm, NULL);
	man->priv = NULL;
	return 0;
}

static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node = mem->mm_node;
	if (node->vma[0].node)
		nvkm_vm_put(&node->vma[0]);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      const struct ttm_place *place,
		      struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
			  NV_MEM_ACCESS_RW, &node->vma[0]);
	if (ret) {
		kfree(node);
		return ret;
	}

	mem->mm_node = node;
	mem->start   = node->vma[0].offset >> PAGE_SHIFT;
	return 0;
}

static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
	nv04_gart_manager_init,
	nv04_gart_manager_fini,
	nv04_gart_manager_new,
	nv04_gart_manager_del,
	nv04_gart_manager_debug
};
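/*
 * Same split as in v3.1: low offsets are legacy DRM maps (now routed via
 * drm_legacy_mmap()), everything else is a TTM buffer object.
 */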
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_legacy_mmap(filp, vma);

	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
	struct drm_global_reference *global_ref;
	int ret;

	global_ref = &drm->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &nouveau_ttm_mem_global_init;
	global_ref->release = &nouveau_ttm_mem_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
	global_ref = &drm->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
		drm_global_item_unref(&drm->ttm.mem_global_ref);
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
	if (drm->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
	drm_global_item_unref(&drm->ttm.mem_global_ref);
	drm->ttm.mem_global_ref.release = NULL;
}
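/*
 * Pick the widest DMA mask the device supports: AGP-driven cards are
 * limited to 32 bits, Tegra may be constrained further by its IOMMU, and
 * if the platform rejects the wide mask we retry with 32 bits.
 */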
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct nvkm_device *device = nvxx_device(&drm->device);
	struct nvkm_pci *pci = device->pci;
	struct drm_device *dev = drm->dev;
	u8 bits;
	int ret;

	if (pci && pci->agp.bridge) {
		drm->agp.bridge = pci->agp.bridge;
		drm->agp.base = pci->agp.base;
		drm->agp.size = pci->agp.size;
		drm->agp.cma = pci->agp.cma;
	}

	bits = nvxx_mmu(&drm->device)->dma_bits;
	if (nvxx_device(&drm->device)->func->pci) {
		if (drm->agp.bridge)
			bits = 32;
	} else if (device->func->tegra) {
		struct nvkm_device_tegra *tegra = device->func->tegra(device);

		/*
		 * If the platform can use an IOMMU, then the addressable DMA
		 * space is constrained by the IOMMU bit.
		 */
		if (tegra->func->iommu_bit)
			bits = min(bits, tegra->func->iommu_bit);
	}

	ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
	if (ret && bits != 32) {
		bits = 32;
		ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
	}
	if (ret)
		return ret;

	ret = dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(bits));
	if (ret)
		dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(32));

	ret = nouveau_ttm_global_init(drm);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&drm->ttm.bdev,
				  drm->ttm.bo_global_ref.ref.object,
				  &nouveau_bo_driver,
				  dev->anon_inode->i_mapping,
				  DRM_FILE_PAGE_OFFSET,
				  bits <= 32);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

	/* VRAM init */
	drm->gem.vram_available = drm->device.info.ram_user;

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
			      drm->gem.vram_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
					 device->func->resource_size(device, 1));

	/* GART init */
	if (!drm->agp.bridge) {
		drm->gem.gart_available = nvxx_mmu(&drm->device)->limit;
	} else {
		drm->gem.gart_available = drm->agp.size;
	}

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
			      drm->gem.gart_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}
428
429void
430nouveau_ttm_fini(struct nouveau_drm *drm)
431{
432	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
433	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
434
435	ttm_bo_device_release(&drm->ttm.bdev);
436
437	nouveau_ttm_global_release(drm);
438
439	arch_phys_wc_del(drm->ttm.mtrr);
440	drm->ttm.mtrr = 0;
441}