v4.6
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__

#include <linux/reservation.h>
#include "msm_drv.h"

/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */

struct msm_gem_object {
	struct drm_gem_object base;

	uint32_t flags;

	/* An object is either:
	 *  inactive - on priv->inactive_list
	 *  active   - on one of the gpu's active_lists..  well, at
	 *     least for now we don't have (I don't think) hw sync between
	 *     2d and 3d on devices which have both, meaning we need to
	 *     block on submit if a bo is already on another ring
	 */
	struct list_head mm_list;
	struct msm_gpu *gpu;     /* non-null if active */
	uint32_t read_fence, write_fence;

	/* Transiently in the process of submit ioctl, objects associated
	 * with the submit are on submit->bo_list.. this only lasts for
	 * the duration of the ioctl, so one bo can never be on multiple
	 * submit lists.
	 */
	struct list_head submit_entry;

	struct page **pages;
	struct sg_table *sgt;
	void *vaddr;

	struct {
		// XXX
		uint32_t iova;
	} domain[NUM_DOMAINS];

	/* normally (resv == &_resv) except for imported bo's */
	struct reservation_object *resv;
	struct reservation_object _resv;

	/* For physically contiguous buffers.  Used when we don't have
	 * an IOMMU.  Also used for stolen/splashscreen buffer.
	 */
	struct drm_mm_node *vram_node;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
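
The to_msm_bo() macro is the usual container_of() downcast from the embedded
struct drm_gem_object back to the driver-private object. A minimal sketch of
the pattern (the example function name is illustrative, not part of this
header):

static uint32_t example_bo_flags(struct drm_gem_object *obj)
{
	/* Recover the msm_gem_object wrapping this base GEM object. */
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	return msm_obj->flags;
}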

static inline bool is_active(struct msm_gem_object *msm_obj)
{
	return msm_obj->gpu != NULL;
}

static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
		uint32_t op)
{
	uint32_t fence = 0;

	if (op & MSM_PREP_READ)
		fence = msm_obj->write_fence;
	if (op & MSM_PREP_WRITE)
		fence = max(fence, msm_obj->read_fence);

	return fence;
}
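
Reads must wait behind the last write, while writes must additionally wait
behind the last read, so the helper returns the newest fence the caller has
to wait on for a given op. A hedged usage sketch (example_wait_fence() is a
hypothetical stand-in for the driver's fence wait, not part of this header):

static int example_cpu_prep(struct msm_gem_object *msm_obj, uint32_t op)
{
	/* Pick the fence this CPU access must wait behind. */
	uint32_t fence = msm_gem_fence(msm_obj, op);

	/* Hypothetical helper: block until the GPU passes 'fence'. */
	return example_wait_fence(fence);
}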

#define MAX_CMDS 4

/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
 * associated with the cmdstream submission for synchronization (and
 * make it easier to unwind when things go wrong, etc).  This only
 * lasts for the duration of the submit-ioctl.
 */
struct msm_gem_submit {
	struct drm_device *dev;
	struct msm_gpu *gpu;
	struct list_head node;   /* node in gpu submit_list */
	struct list_head bo_list;
	struct ww_acquire_ctx ticket;
	uint32_t fence;
	bool valid;
	unsigned int nr_cmds;
	unsigned int nr_bos;
	struct {
		uint32_t type;
		uint32_t size;  /* in dwords */
		uint32_t iova;
		uint32_t idx;   /* cmdstream buffer idx in bos[] */
	} cmd[MAX_CMDS];
	struct {
		uint32_t flags;
		struct msm_gem_object *obj;
		uint32_t iova;
	} bos[0];
};
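
Since bos[0] is a flexible array member, a single allocation sizes the struct
for nr_bos entries. A hedged sketch of that allocation (illustrative only,
assuming GFP_KERNEL context; the real submit setup does more work than this):

static struct msm_gem_submit *example_submit_alloc(struct drm_device *dev,
		unsigned int nr_bos)
{
	struct msm_gem_submit *submit;
	size_t sz = sizeof(*submit) + nr_bos * sizeof(submit->bos[0]);

	submit = kzalloc(sz, GFP_KERNEL);
	if (!submit)
		return NULL;

	submit->dev = dev;
	submit->nr_bos = nr_bos;
	INIT_LIST_HEAD(&submit->bo_list);

	return submit;
}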

#endif /* __MSM_GEM_H__ */

v5.14.15
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__

#include <linux/kref.h>
#include <linux/dma-resv.h>
#include "msm_drv.h"

/* Make all GEM related WARN_ON()s ratelimited.. when things go wrong they
 * tend to go wrong 1000s of times in a short timespan.
 */
#define GEM_WARN_ON(x)  WARN_RATELIMIT(x, "%s", __stringify(x))

/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
#define MSM_BO_MAP_PRIV      0x20000000    /* use IOMMU_PRIV when mapping */

struct msm_gem_address_space {
	const char *name;
	/* NOTE: mm managed at the page level, size is in # of pages
	 * and position mm_node->start is in # of pages:
	 */
	struct drm_mm mm;
	spinlock_t lock; /* Protects drm_mm node allocation/removal */
	struct msm_mmu *mmu;
	struct kref kref;

	/* For address spaces associated with a specific process, this
	 * will be non-NULL:
	 */
	struct pid *pid;
};

struct msm_gem_vma {
	struct drm_mm_node node;
	uint64_t iova;
	struct msm_gem_address_space *aspace;
	struct list_head list;    /* node in msm_gem_object::vmas */
	bool mapped;
	int inuse;
};

struct msm_gem_object {
	struct drm_gem_object base;

	uint32_t flags;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	uint8_t madv;

	/**
	 * Is object on inactive_dontneed list (ie. counted in priv->shrinkable_count)?
	 */
	bool dontneed : 1;

	/**
	 * Is object evictable (ie. counted in priv->evictable_count)?
	 */
	bool evictable : 1;

	/**
	 * count of active vmap'ing
	 */
	uint8_t vmap_count;

	/**
	 * Node in list of all objects (mainly for debugfs, protected by
	 * priv->obj_lock)
	 */
	struct list_head node;

	/**
	 * An object is either:
	 *  inactive - on priv->inactive_dontneed or priv->inactive_willneed
	 *     (depending on purgeability status)
	 *  active   - on one of the gpu's active_lists..  well, at
	 *     least for now we don't have (I don't think) hw sync between
	 *     2d and 3d on devices which have both, meaning we need to
	 *     block on submit if a bo is already on another ring
	 */
	struct list_head mm_list;

	/* Transiently in the process of submit ioctl, objects associated
	 * with the submit are on submit->bo_list.. this only lasts for
	 * the duration of the ioctl, so one bo can never be on multiple
	 * submit lists.
	 */
	struct list_head submit_entry;

	struct page **pages;
	struct sg_table *sgt;
	void *vaddr;

	struct list_head vmas;    /* list of msm_gem_vma */

	/* For physically contiguous buffers.  Used when we don't have
	 * an IOMMU.  Also used for stolen/splashscreen buffer.
	 */
	struct drm_mm_node *vram_node;

	char name[32]; /* Identifier to print for the debugfs files */

	int active_count;
	int pin_count;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)

int msm_gem_mmap_obj(struct drm_gem_object *obj,
			struct vm_area_struct *vma);
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova);
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end);
int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova);
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova);
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace);
void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace);
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset);
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
void *msm_gem_get_vaddr(struct drm_gem_object *obj);
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
void msm_gem_put_vaddr(struct drm_gem_object *obj);
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive);
void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu);
void msm_gem_active_put(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
void msm_gem_free_object(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle, char *name);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags);
struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags);
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova);
void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova);
void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt);
__printf(2, 3)
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
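
As context for the allocation API above, a hedged sketch of pairing
msm_gem_kernel_new() with msm_gem_kernel_put(); the wrapper name is
illustrative and error handling is kept minimal:

static void *example_kernel_bo(struct drm_device *dev,
		struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	/* Allocate a kernel-owned buffer, pin it into 'aspace', and get
	 * back both the GEM object and its GPU address.  Returns the CPU
	 * side vaddr, or an ERR_PTR() on failure.
	 */
	void *vaddr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, aspace,
			bo, iova);

	if (IS_ERR(vaddr))
		return vaddr;

	/* When done: msm_gem_kernel_put(*bo, aspace, false); */
	return vaddr;
}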

#ifdef CONFIG_DEBUG_FS
struct msm_gem_stats {
	struct {
		unsigned count;
		size_t size;
	} all, active, resident, purgeable, purged;
};

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats);
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
#endif

static inline void
msm_gem_lock(struct drm_gem_object *obj)
{
	dma_resv_lock(obj->resv, NULL);
}

static inline bool __must_check
msm_gem_trylock(struct drm_gem_object *obj)
{
	return dma_resv_trylock(obj->resv);
}

static inline int
msm_gem_lock_interruptible(struct drm_gem_object *obj)
{
	return dma_resv_lock_interruptible(obj->resv, NULL);
}

static inline void
msm_gem_unlock(struct drm_gem_object *obj)
{
	dma_resv_unlock(obj->resv);
}

static inline bool
msm_gem_is_locked(struct drm_gem_object *obj)
{
	return dma_resv_is_locked(obj->resv);
}

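These helpers are thin wrappers around the dma-resv lock embedded in the GEM
object. A hedged usage sketch (the function name is illustrative, not part of
this header):

static int example_with_obj_locked(struct drm_gem_object *obj)
{
	int ret;

	ret = msm_gem_lock_interruptible(obj);
	if (ret)
		return ret;

	/* ... operate on the object while the dma-resv lock is held ... */

	msm_gem_unlock(obj);

	return 0;
}
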
static inline bool is_active(struct msm_gem_object *msm_obj)
{
	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
	return msm_obj->active_count;
}

/* imported/exported objects are not purgeable: */
static inline bool is_unpurgeable(struct msm_gem_object *msm_obj)
{
	return msm_obj->base.import_attach || msm_obj->pin_count;
}

static inline bool is_purgeable(struct msm_gem_object *msm_obj)
{
	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
			!is_unpurgeable(msm_obj);
}

static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
{
	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
}

static inline void mark_purgeable(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));

	if (is_unpurgeable(msm_obj))
		return;

	if (GEM_WARN_ON(msm_obj->dontneed))
		return;

	priv->shrinkable_count += msm_obj->base.size >> PAGE_SHIFT;
	msm_obj->dontneed = true;
}

static inline void mark_unpurgeable(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));

	if (is_unpurgeable(msm_obj))
		return;

	if (GEM_WARN_ON(!msm_obj->dontneed))
		return;

	priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT;
	GEM_WARN_ON(priv->shrinkable_count < 0);
	msm_obj->dontneed = false;
}

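The mark_*() pairs keep priv->shrinkable_count consistent with each object's
dontneed flag. A hedged sketch of how a madvise transition could drive them
under priv->mm_lock (function name illustrative; the real driver does this as
part of its madvise/update paths):

static void example_update_purgeable(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	mutex_lock(&priv->mm_lock);

	/* Only transition when the flag and advice actually disagree. */
	if (msm_obj->madv == MSM_MADV_DONTNEED && !msm_obj->dontneed)
		mark_purgeable(msm_obj);
	else if (msm_obj->madv != MSM_MADV_DONTNEED && msm_obj->dontneed)
		mark_unpurgeable(msm_obj);

	mutex_unlock(&priv->mm_lock);
}
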
static inline bool is_unevictable(struct msm_gem_object *msm_obj)
{
	return is_unpurgeable(msm_obj) || msm_obj->vaddr;
}

static inline void mark_evictable(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	WARN_ON(!mutex_is_locked(&priv->mm_lock));

	if (is_unevictable(msm_obj))
		return;

	if (WARN_ON(msm_obj->evictable))
		return;

	priv->evictable_count += msm_obj->base.size >> PAGE_SHIFT;
	msm_obj->evictable = true;
}

static inline void mark_unevictable(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	WARN_ON(!mutex_is_locked(&priv->mm_lock));

	if (is_unevictable(msm_obj))
		return;

	if (WARN_ON(!msm_obj->evictable))
		return;

	priv->evictable_count -= msm_obj->base.size >> PAGE_SHIFT;
	WARN_ON(priv->evictable_count < 0);
	msm_obj->evictable = false;
}

void msm_gem_purge(struct drm_gem_object *obj);
void msm_gem_evict(struct drm_gem_object *obj);
void msm_gem_vunmap(struct drm_gem_object *obj);

/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
 * associated with the cmdstream submission for synchronization (and
 * make it easier to unwind when things go wrong, etc).  This only
 * lasts for the duration of the submit-ioctl.
 */
struct msm_gem_submit {
	struct kref ref;
	struct drm_device *dev;
	struct msm_gpu *gpu;
	struct msm_gem_address_space *aspace;
	struct list_head node;   /* node in ring submit list */
	struct list_head bo_list;
	struct ww_acquire_ctx ticket;
	uint32_t seqno;		/* Sequence number of the submit on the ring */
	struct dma_fence *fence;
	struct msm_gpu_submitqueue *queue;
	struct pid *pid;    /* submitting process */
	bool fault_dumped;  /* Limit devcoredump dumping to one per submit */
	bool valid;         /* true if no cmdstream patching needed */
	bool in_rb;         /* "sudo" mode, copy cmds into RB */
	struct msm_ringbuffer *ring;
	struct msm_file_private *ctx;
	unsigned int nr_cmds;
	unsigned int nr_bos;
	u32 ident;	   /* An identifier for the submit, for logging */
	struct {
		uint32_t type;
		uint32_t size;  /* in dwords */
		uint64_t iova;
		uint32_t offset;/* in dwords */
		uint32_t idx;   /* cmdstream buffer idx in bos[] */
		uint32_t nr_relocs;
		struct drm_msm_gem_submit_reloc *relocs;
	} *cmd;  /* array of size nr_cmds */
	struct {
		uint32_t flags;
		union {
			struct msm_gem_object *obj;
			uint32_t handle;
		};
		uint64_t iova;
	} bos[];
};

void __msm_gem_submit_destroy(struct kref *kref);

static inline void msm_gem_submit_get(struct msm_gem_submit *submit)
{
	kref_get(&submit->ref);
}

static inline void msm_gem_submit_put(struct msm_gem_submit *submit)
{
	kref_put(&submit->ref, __msm_gem_submit_destroy);
}
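
Submits are reference-counted via the embedded kref, so any context that holds
a submit past the ioctl (e.g. the retire path) takes and drops its own
reference. A hedged sketch (the function name is illustrative):

static void example_track_submit(struct msm_gem_submit *submit)
{
	/* Hold a reference while the submit is in flight... */
	msm_gem_submit_get(submit);

	/* ... async GPU/scheduler work would happen here ... */

	/* ...and drop it; the final put calls __msm_gem_submit_destroy(). */
	msm_gem_submit_put(submit);
}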

/* helper to determine if a buffer in submit should be dumped, used for both
 * devcoredump and debugfs cmdstream dumping:
 */
static inline bool
should_dump(struct msm_gem_submit *submit, int idx)
{
	extern bool rd_full;
	return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
}

#endif /* __MSM_GEM_H__ */