v4.6: drivers/gpu/drm/nouveau/nouveau_prime.c
/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include <drm/drmP.h>
#include <linux/dma-buf.h>

#include "nouveau_drm.h"
#include "nouveau_gem.h"

struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
	int npages = nvbo->bo.num_pages;

	return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
}

void *nouveau_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
	int ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
			  &nvbo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return nvbo->dma_buf_vmap.virtual;
}

void nouveau_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);

	ttm_bo_kunmap(&nvbo->dma_buf_vmap);
}

struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
							 struct dma_buf_attachment *attach,
							 struct sg_table *sg)
{
	struct nouveau_bo *nvbo;
	struct reservation_object *robj = attach->dmabuf->resv;
	u32 flags = 0;
	int ret;

	flags = TTM_PL_FLAG_TT;

	ww_mutex_lock(&robj->lock, NULL);
	ret = nouveau_bo_new(dev, attach->dmabuf->size, 0, flags, 0, 0,
			     sg, robj, &nvbo);
	ww_mutex_unlock(&robj->lock);
	if (ret)
		return ERR_PTR(ret);

	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
	if (ret) {
		nouveau_bo_ref(NULL, &nvbo);
		return ERR_PTR(-ENOMEM);
	}

	return &nvbo->gem;
}

int nouveau_gem_prime_pin(struct drm_gem_object *obj)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
	int ret;

	/* pin buffer into GTT */
	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT, false);
	if (ret)
		return -EINVAL;

	return 0;
}

void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);

	nouveau_bo_unpin(nvbo);
}

struct reservation_object *nouveau_gem_prime_res_obj(struct drm_gem_object *obj)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);

	return nvbo->bo.resv;
}
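
None of the functions above are called directly; the DRM PRIME core reaches them through per-driver hooks. For context, here is a minimal sketch of how these callbacks plug into nouveau's struct drm_driver in the v4.6 era. The hook names come from the struct drm_driver of that period, but the variable name nouveau_driver_sketch and the exact field set shown are illustrative, not a verbatim copy of nouveau_drm.c.

#include <drm/drmP.h>
#include "nouveau_gem.h"	/* declares the nouveau_gem_prime_*() hooks above */

/* Illustrative sketch only: just the PRIME-relevant fields are shown;
 * designated initializers leave the remaining members zeroed.
 * drm_gem_prime_export()/drm_gem_prime_import() are the generic DRM
 * helpers that call back into the nouveau_gem_prime_*() functions. */
static struct drm_driver nouveau_driver_sketch = {
	.driver_features = DRIVER_GEM | DRIVER_PRIME,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_pin = nouveau_gem_prime_pin,
	.gem_prime_unpin = nouveau_gem_prime_unpin,
	.gem_prime_res_obj = nouveau_gem_prime_res_obj,
	.gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
	.gem_prime_vmap = nouveau_gem_prime_vmap,
	.gem_prime_vunmap = nouveau_gem_prime_vunmap,
};

The older version of the same file, below, predates these fine-grained hooks and shows how much boilerplate each driver had to carry before the PRIME core absorbed it.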
v3.5.6: drivers/gpu/drm/nouveau/nouveau_prime.c
/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

#include <linux/dma-buf.h>

static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					  enum dma_data_direction dir)
{
	struct nouveau_bo *nvbo = attachment->dmabuf->priv;
	struct drm_device *dev = nvbo->gem->dev;
	int npages = nvbo->bo.num_pages;
	struct sg_table *sg;
	int nents;

	mutex_lock(&dev->struct_mutex);
	sg = drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
	mutex_unlock(&dev->struct_mutex);
	return sg;
}

static void nouveau_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *sg, enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);
}

static void nouveau_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct nouveau_bo *nvbo = dma_buf->priv;

	if (nvbo->gem->export_dma_buf == dma_buf) {
		nvbo->gem->export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}

static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void nouveau_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *nouveau_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
{
	struct nouveau_bo *nvbo = dma_buf->priv;
	struct drm_device *dev = nvbo->gem->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	if (nvbo->vmapping_count) {
		nvbo->vmapping_count++;
		goto out_unlock;
	}

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
			  &nvbo->dma_buf_vmap);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ERR_PTR(ret);
	}
	nvbo->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return nvbo->dma_buf_vmap.virtual;
}

static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct nouveau_bo *nvbo = dma_buf->priv;
	struct drm_device *dev = nvbo->gem->dev;

	mutex_lock(&dev->struct_mutex);
	nvbo->vmapping_count--;
	if (nvbo->vmapping_count == 0) {
		ttm_bo_kunmap(&nvbo->dma_buf_vmap);
	}
	mutex_unlock(&dev->struct_mutex);
}

static const struct dma_buf_ops nouveau_dmabuf_ops = {
	.map_dma_buf = nouveau_gem_map_dma_buf,
	.unmap_dma_buf = nouveau_gem_unmap_dma_buf,
	.release = nouveau_gem_dmabuf_release,
	.kmap = nouveau_gem_kmap,
	.kmap_atomic = nouveau_gem_kmap_atomic,
	.kunmap = nouveau_gem_kunmap,
	.kunmap_atomic = nouveau_gem_kunmap_atomic,
	.mmap = nouveau_gem_prime_mmap,
	.vmap = nouveau_gem_prime_vmap,
	.vunmap = nouveau_gem_prime_vunmap,
};

static int
nouveau_prime_new(struct drm_device *dev,
		  size_t size,
		  struct sg_table *sg,
		  struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	flags = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
			     sg, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->gem->driver_private = nvbo;
	return 0;
}

struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
				struct drm_gem_object *obj, int flags)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
	int ret = 0;

	/* pin buffer into GTT */
	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
	if (ret)
		return ERR_PTR(-EINVAL);

	return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
}

struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct nouveau_bo *nvbo;
	int ret;

	if (dma_buf->ops == &nouveau_dmabuf_ops) {
		nvbo = dma_buf->priv;
		if (nvbo->gem) {
			if (nvbo->gem->dev == dev) {
				drm_gem_object_reference(nvbo->gem);
				return nvbo->gem;
			}
		}
	}
	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(PTR_ERR(attach));

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
	if (ret)
		goto fail_unmap;

	nvbo->gem->import_attach = attach;

	return nvbo->gem;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
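
In this older version the driver still carries its own dma_buf_ops and hands the DRM core only two entry points; the map/unmap callbacks, the kmap stubs, and the struct_mutex-protected vmap refcounting seen above were later absorbed into the generic PRIME helpers, which is why the v4.6 file is so much smaller. The following is a minimal sketch of the 3.5-era wiring; the hook names come from the struct drm_driver of that period, while the variable name nouveau_driver_sketch is illustrative, not a verbatim copy of nouveau_drv.c.

#include "drmP.h"
#include "nouveau_drv.h"	/* declares nouveau_gem_prime_export()/import() */

/* Illustrative sketch only: the 3.5-era drm_driver exposes export and
 * import directly, and the driver functions above do all the work,
 * including building the dma_buf around nouveau_dmabuf_ops. */
static struct drm_driver nouveau_driver_sketch = {
	.driver_features = DRIVER_GEM | DRIVER_PRIME,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = nouveau_gem_prime_export,
	.gem_prime_import = nouveau_gem_prime_import,
};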