#ifndef __DRM_GEM_H__
#define __DRM_GEM_H__

/*
 * GEM Graphics Execution Manager Driver Interfaces
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009-2010, Code Aurora Forum.
 * All rights reserved.
 * Copyright © 2014 Intel Corporation
 *   Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * struct drm_gem_object - GEM buffer object
 *
 * This structure defines the generic parts for GEM buffer objects, which are
 * mostly around handling mmap and userspace handles.
 *
 * Buffer objects are often abbreviated to BO.
 */
struct drm_gem_object {
	/**
	 * @refcount:
	 *
	 * Reference count of this object
	 *
	 * Please use drm_gem_object_reference() to acquire and
	 * drm_gem_object_unreference() or drm_gem_object_unreference_unlocked()
	 * to release a reference to a GEM buffer object.
	 */
	struct kref refcount;

	/**
	 * @handle_count:
	 *
	 * This is the GEM file_priv handle count of this object.
	 *
	 * Each handle also holds a reference. Note that when the handle_count
	 * drops to 0 any global names (e.g. the id in the flink namespace) will
	 * be cleared.
	 *
	 * Protected by dev->object_name_lock.
	 */
	unsigned handle_count;

	/**
	 * @dev: DRM dev this object belongs to.
	 */
	struct drm_device *dev;

	/**
	 * @filp:
	 *
	 * SHMEM file node used as backing storage for swappable buffer objects.
	 * GEM also supports driver private objects with driver-specific backing
	 * storage (contiguous CMA memory, special reserved blocks). In this
	 * case @filp is NULL.
	 */
	struct file *filp;

	/**
	 * @vma_node:
	 *
	 * Mapping info for this object to support mmap. Drivers are supposed to
	 * allocate the mmap offset using drm_gem_create_mmap_offset(). The
	 * offset itself can be retrieved using drm_vma_node_offset_addr().
	 *
	 * Memory mapping itself is handled by drm_gem_mmap(), which also checks
	 * that userspace is allowed to access the object.
	 */
	struct drm_vma_offset_node vma_node;

	/**
	 * @size:
	 *
	 * Size of the object, in bytes.  Immutable over the object's
	 * lifetime.
	 */
	size_t size;

	/**
	 * @name:
	 *
	 * Global name for this object, starts at 1. 0 means unnamed.
	 * Access is covered by dev->object_name_lock. This is used by the
	 * GEM_FLINK and GEM_OPEN ioctls.
	 */
	int name;

	/**
	 * @read_domains:
	 *
	 * Read memory domains. These monitor which caches contain read/write
	 * data related to the object. When transitioning from one set of
	 * domains to another, the driver is called to ensure that caches are
	 * suitably flushed and invalidated.
	 */
	uint32_t read_domains;

	/**
	 * @write_domain: Corresponding unique write memory domain.
	 */
	uint32_t write_domain;

	/**
	 * @pending_read_domains:
	 *
	 * While validating an exec operation, the new read/write domain values
	 * are computed here. They will be transferred to the above values at
	 * the point that any cache flushing occurs.
	 */
	uint32_t pending_read_domains;

	/**
	 * @pending_write_domain: Write domain similar to @pending_read_domains.
	 */
	uint32_t pending_write_domain;

	/**
	 * @dma_buf:
	 *
	 * dma-buf associated with this GEM object.
	 *
	 * Pointer to the dma-buf associated with this gem object (either
	 * through importing or exporting). We break the resulting reference
	 * loop when the last gem handle for this object is released.
	 *
	 * Protected by dev->object_name_lock.
	 */
	struct dma_buf *dma_buf;

	/**
	 * @import_attach:
	 *
	 * dma-buf attachment backing this object.
	 *
	 * Any foreign dma_buf imported as a gem object has this set to the
	 * attachment point for the device. This is invariant over the lifetime
	 * of a gem object.
	 *
	 * The driver's ->gem_free_object callback is responsible for cleaning
	 * up the dma_buf attachment and references acquired at import time.
	 *
	 * Note that the drm gem/prime core does not depend upon drivers setting
	 * this field any more, so drivers for which it doesn't make sense
	 * (e.g. virtual devices or a DisplayLink device behind a USB bus) can
	 * simply leave it as NULL.
	 */
	struct dma_buf_attachment *import_attach;
};

void drm_gem_object_release(struct drm_gem_object *obj);
void drm_gem_object_free(struct kref *kref);
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size);
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size);
void drm_gem_vm_open(struct vm_area_struct *vma);
void drm_gem_vm_close(struct vm_area_struct *vma);
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma);
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);

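/*
 * Example (not part of the original header): a minimal sketch of how a driver
 * might embed struct drm_gem_object and initialize it with shmem backing via
 * drm_gem_object_init(). "struct foo_bo" and foo_bo_create() are hypothetical
 * names; a real driver would also pull in <linux/slab.h> and <linux/err.h>
 * and have its own locking and cleanup paths.
 */
struct foo_bo {
	struct drm_gem_object base;
	/* driver-private state (page lists, GPU mappings, ...) goes here */
};

static inline struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
{
	struct foo_bo *bo;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	/* GEM object sizes are page-granular, so round up. */
	ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
	if (ret) {
		kfree(bo);
		return ERR_PTR(ret);
	}

	return bo;
}
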
/**
 * drm_gem_object_reference - acquire a GEM BO reference
 * @obj: GEM buffer object
 *
 * This acquires an additional reference to @obj. It is illegal to call this
 * without already holding a reference. No locks required.
 */
static inline void
drm_gem_object_reference(struct drm_gem_object *obj)
{
	kref_get(&obj->refcount);
}

/**
 * drm_gem_object_unreference - release a GEM BO reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the dev->struct_mutex
 * lock when calling this function, even when the driver doesn't use
 * dev->struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_unreference_unlocked() instead.
 */
static inline void
drm_gem_object_unreference(struct drm_gem_object *obj)
{
	if (obj != NULL) {
		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

		kref_put(&obj->refcount, drm_gem_object_free);
	}
}

/**
 * drm_gem_object_unreference_unlocked - release a GEM BO reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must not hold the
 * dev->struct_mutex lock when calling this function.
 */
static inline void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev;

	if (!obj)
		return;

	dev = obj->dev;
	if (kref_put_mutex(&obj->refcount, drm_gem_object_free, &dev->struct_mutex))
		mutex_unlock(&dev->struct_mutex);
	else
		might_lock(&dev->struct_mutex);
}

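/*
 * Example (not part of the original header): a hedged sketch of typical
 * reference handling around asynchronous work. foo_schedule_work() and
 * foo_work_done() are hypothetical driver functions; the point is that the BO
 * must be kept alive by an extra reference for as long as the worker may
 * touch it.
 */
static inline void foo_schedule_work(struct drm_gem_object *obj)
{
	/* Caller already holds a reference; take one more for the worker. */
	drm_gem_object_reference(obj);
	/* ... hand obj off to a workqueue or fence callback here ... */
}

static inline void foo_work_done(struct drm_gem_object *obj)
{
	/*
	 * Drop the worker's reference without holding dev->struct_mutex;
	 * the unlocked variant acquires the mutex itself only when this
	 * turns out to be the last reference.
	 */
	drm_gem_object_unreference_unlocked(obj);
}
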
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep);
int drm_gem_handle_delete(struct drm_file *filp, u32 handle);

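/*
 * Example (not part of the original header): a hedged sketch of the usual
 * handle-creation pattern, e.g. in a driver's dumb_create implementation.
 * foo_dumb_create() is hypothetical and builds on the foo_bo_create() sketch
 * above; struct drm_mode_create_dumb comes from the DRM uapi headers.
 */
static inline int foo_dumb_create(struct drm_file *file_priv,
				  struct drm_device *dev,
				  struct drm_mode_create_dumb *args)
{
	struct foo_bo *bo;
	int ret;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	bo = foo_bo_create(dev, args->size);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	/* On success the handle holds a reference of its own. */
	ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);

	/* Drop the creation reference; the object stays alive via the handle. */
	drm_gem_object_unreference_unlocked(&bo->base);

	return ret;
}
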
void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);

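/*
 * Example (not part of the original header): a hedged sketch of allocating
 * the fake mmap offset for a BO and reporting it to userspace.
 * foo_bo_mmap_offset() is hypothetical; drm_vma_node_offset_addr() lives in
 * <drm/drm_vma_manager.h>.
 */
static inline int foo_bo_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Allocate (or reuse) the fake offset userspace passes to mmap(). */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		return ret;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	return 0;
}
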
struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed);

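/*
 * Example (not part of the original header): a hedged sketch of pinning and
 * releasing the shmem backing pages of a BO. This only applies to objects
 * initialized with drm_gem_object_init() (i.e. with a non-NULL @filp);
 * foo_bo_pin_pages() and foo_bo_unpin_pages() are hypothetical helpers.
 */
static inline struct page **foo_bo_pin_pages(struct drm_gem_object *obj)
{
	/* Pins all backing pages; returns an ERR_PTR() value on failure. */
	return drm_gem_get_pages(obj);
}

static inline void foo_bo_unpin_pages(struct drm_gem_object *obj,
				       struct page **pages, bool wrote)
{
	/* Mark the pages dirty if the device wrote to them, and as accessed. */
	drm_gem_put_pages(obj, pages, wrote, true);
}
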
struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
					     struct drm_file *filp,
					     u32 handle);
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle);

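/*
 * Example (not part of the original header): a hedged sketch of resolving a
 * userspace handle to a BO, e.g. in a dumb_map_offset implementation.
 * foo_dumb_map_offset() is hypothetical and builds on the mmap-offset sketch
 * above; drm_gem_object_lookup() returns the object with an extra reference
 * that the caller must drop.
 */
static inline int foo_dumb_map_offset(struct drm_file *file_priv,
				      struct drm_device *dev,
				      u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj)
		return -ENOENT;

	ret = foo_bo_mmap_offset(obj, offset);

	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
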
#endif /* __DRM_GEM_H__ */