/* msm_gem.h — GEM (graphics memory) header of the MSM DRM driver
 * (drivers/gpu/drm/msm).  Two kernel releases are shown below for
 * comparison.
 *
 * ==== Version: Linux v4.6 ====
 */
  1/*
  2 * Copyright (C) 2013 Red Hat
  3 * Author: Rob Clark <robdclark@gmail.com>
  4 *
  5 * This program is free software; you can redistribute it and/or modify it
  6 * under the terms of the GNU General Public License version 2 as published by
  7 * the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it will be useful, but WITHOUT
 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 12 * more details.
 13 *
 14 * You should have received a copy of the GNU General Public License along with
 15 * this program.  If not, see <http://www.gnu.org/licenses/>.
 16 */
 17
 18#ifndef __MSM_GEM_H__
 19#define __MSM_GEM_H__
 20
 21#include <linux/reservation.h>
 
 22#include "msm_drv.h"
 23
 24/* Additional internal-use only BO flags: */
 25#define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 26
/* Driver-private state for one GEM buffer object (bo). */
struct msm_gem_object {
	struct drm_gem_object base;

	uint32_t flags;          /* MSM_BO_x flags, incl. internal-use
				  * MSM_BO_STOLEN defined above */

	/* An object is either:
	 *  inactive - on priv->inactive_list
	 *  active   - on one of the gpu's active_list..  well, at
	 *     least for now we don't have (I don't think) hw sync between
	 *     2d and 3d one devices which have both, meaning we need to
	 *     block on submit if a bo is already on other ring
	 */
	struct list_head mm_list;
	struct msm_gpu *gpu;     /* non-null if active */

	/* Fence seqnos of the last submits that read/wrote this bo;
	 * consumed by msm_gem_fence() below to pick what to wait on.
	 */
	uint32_t read_fence, write_fence;

	/* Transiently in the process of submit ioctl, objects associated
	 * with the submit are on submit->bo_list.. this only lasts for
	 * the duration of the ioctl, so one bo can never be on multiple
	 * submit lists.
	 */
	struct list_head submit_entry;

	struct page **pages;     /* backing pages */
	struct sg_table *sgt;    /* sg table of the backing pages */
	void *vaddr;             /* kernel mapping, when vmap'd */

	struct {
		/* XXX — presumably the bo's iova in this MMU domain;
		 * confirm against the iova users in msm_gem.c */
		uint32_t iova;
	} domain[NUM_DOMAINS];

	/* normally (resv == &_resv) except for imported bo's */
	struct reservation_object *resv;
	struct reservation_object _resv;

	/* For physically contiguous buffers.  Used when we don't have
	 * an IOMMU.  Also used for stolen/splashscreen buffer.
	 */
	struct drm_mm_node *vram_node;
};
/* Upcast from the embedded drm_gem_object back to the msm bo. */
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 70
 71static inline bool is_active(struct msm_gem_object *msm_obj)
 72{
 73	return msm_obj->gpu != NULL;
 74}
 75
 76static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
 77		uint32_t op)
 78{
 79	uint32_t fence = 0;
 80
 81	if (op & MSM_PREP_READ)
 82		fence = msm_obj->write_fence;
 83	if (op & MSM_PREP_WRITE)
 84		fence = max(fence, msm_obj->read_fence);
 85
 86	return fence;
 
 
 87}
 88
 89#define MAX_CMDS 4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 90
 91/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
 92 * associated with the cmdstream submission for synchronization (and
 93 * make it easier to unwind when things go wrong, etc).  This only
 94 * lasts for the duration of the submit-ioctl.
 95 */
 96struct msm_gem_submit {
 97	struct drm_device *dev;
 98	struct msm_gpu *gpu;
 99	struct list_head node;   /* node in gpu submit_list */
 
100	struct list_head bo_list;
101	struct ww_acquire_ctx ticket;
102	uint32_t fence;
103	bool valid;
 
 
 
 
 
104	unsigned int nr_cmds;
105	unsigned int nr_bos;
 
106	struct {
107		uint32_t type;
108		uint32_t size;  /* in dwords */
109		uint32_t iova;
110		uint32_t idx;   /* cmdstream buffer idx in bos[] */
111	} cmd[MAX_CMDS];
112	struct {
113		uint32_t flags;
114		struct msm_gem_object *obj;
115		uint32_t iova;
116	} bos[0];
 
 
 
117};
 
 
 
 
 
 
 
 
 
 
118
119#endif /* __MSM_GEM_H__ */
/* ==== Version: Linux v5.9 ==== */
  1/* SPDX-License-Identifier: GPL-2.0-only */
  2/*
  3 * Copyright (C) 2013 Red Hat
  4 * Author: Rob Clark <robdclark@gmail.com>
 
 
 
 
 
 
 
 
 
 
 
 
  5 */
  6
  7#ifndef __MSM_GEM_H__
  8#define __MSM_GEM_H__
  9
 10#include <linux/kref.h>
 11#include <linux/dma-resv.h>
 12#include "msm_drv.h"
 13
 14/* Additional internal-use only BO flags: */
 15#define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
 16#define MSM_BO_MAP_PRIV      0x20000000    /* use IOMMU_PRIV when mapping */
 17
/* A GPU address space: an iova range allocator ('mm') plus the mmu
 * that maps into it.  Refcounted via 'kref'.
 */
struct msm_gem_address_space {
	const char *name;
	/* NOTE: mm managed at the page level, size is in # of pages
	 * and position mm_node->start is in # of pages:
	 */
	struct drm_mm mm;
	spinlock_t lock; /* Protects drm_mm node allocation/removal */
	struct msm_mmu *mmu;
	struct kref kref;
};
 28
/* One mapping of a gem object into one msm_gem_address_space. */
struct msm_gem_vma {
	struct drm_mm_node node;  /* range allocated from aspace->mm */
	uint64_t iova;
	struct msm_gem_address_space *aspace;
	struct list_head list;    /* node in msm_gem_object::vmas */
	bool mapped;              /* currently mapped in aspace->mmu? */
	int inuse;                /* NOTE(review): looks like a pin/use
				   * count — confirm against msm_gem_vma.c */
};
 37
/* Driver-private state for one GEM buffer object (bo). */
struct msm_gem_object {
	struct drm_gem_object base;

	uint32_t flags;          /* MSM_BO_x flags, incl. internal-use
				  * MSM_BO_STOLEN/MSM_BO_MAP_PRIV above */

	/**
	 * Advice: are the backing pages purgeable?  (MSM_MADV_x,
	 * checked by is_purgeable() below.)
	 */
	uint8_t madv;

	/**
	 * count of active vmap'ing (see is_vunmapable() below)
	 */
	uint8_t vmap_count;

	/* An object is either:
	 *  inactive - on priv->inactive_list
	 *  active   - on one of the gpu's active_list..  well, at
	 *     least for now we don't have (I don't think) hw sync between
	 *     2d and 3d one devices which have both, meaning we need to
	 *     block on submit if a bo is already on other ring
	 */
	struct list_head mm_list;
	struct msm_gpu *gpu;     /* non-null if active */

	/* Transiently in the process of submit ioctl, objects associated
	 * with the submit are on submit->bo_list.. this only lasts for
	 * the duration of the ioctl, so one bo can never be on multiple
	 * submit lists.
	 */
	struct list_head submit_entry;

	struct page **pages;     /* backing pages */
	struct sg_table *sgt;    /* sg table of the backing pages */
	void *vaddr;             /* kernel mapping, when vmap'd */

	struct list_head vmas;    /* list of msm_gem_vma */

	/* node in deferred-free list; presumably drained by
	 * msm_gem_free_work() — confirm in msm_gem.c */
	struct llist_node freed;

	/* For physically contiguous buffers.  Used when we don't have
	 * an IOMMU.  Also used for stolen/splashscreen buffer.
	 */
	struct drm_mm_node *vram_node;
	struct mutex lock; /* Protects resources associated with bo */

	char name[32]; /* Identifier to print for the debugfs files */
};
/* Upcast from the embedded drm_gem_object back to the msm bo. */
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 88
 89static inline bool is_active(struct msm_gem_object *msm_obj)
 90{
 91	return msm_obj->gpu != NULL;
 92}
 93
 94static inline bool is_purgeable(struct msm_gem_object *msm_obj)
 
 95{
 96	WARN_ON(!mutex_is_locked(&msm_obj->base.dev->struct_mutex));
 97	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
 98			!msm_obj->base.dma_buf && !msm_obj->base.import_attach;
 99}
 
 
100
101static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
102{
103	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
104}
105
/* The shrinker can be triggered while we hold objA->lock, and need
 * to grab objB->lock to purge it.  Lockdep just sees these as a single
 * class of lock, so we use subclasses to teach it the difference.
 *
 * OBJ_LOCK_NORMAL is implicit (ie. normal mutex_lock() call), and
 * OBJ_LOCK_SHRINKER is used by shrinker.
 *
 * It is *essential* that we never go down paths that could trigger the
 * shrinker for a purgable object.  This is ensured by checking that
 * msm_obj->madv == MSM_MADV_WILLNEED.
 */
enum msm_gem_lock {
	OBJ_LOCK_NORMAL,
	OBJ_LOCK_SHRINKER,
};

/* Shrinker/teardown helpers; 'subclass' selects the lockdep subclass
 * per the note above:
 */
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass);
void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
/* Deferred-free worker (bo's queued via msm_gem_object::freed): */
void msm_gem_free_work(struct work_struct *work);
125
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
 * associated with the cmdstream submission for synchronization (and
 * make it easier to unwind when things go wrong, etc).  This only
 * lasts for the duration of the submit-ioctl.
 */
struct msm_gem_submit {
	struct drm_device *dev;
	struct msm_gpu *gpu;
	struct msm_gem_address_space *aspace;
	struct list_head node;   /* node in ring submit list */
	struct list_head bo_list;      /* bo's linked via submit_entry */
	struct ww_acquire_ctx ticket;  /* ww ctx for locking the bo's */
	uint32_t seqno;		/* Sequence number of the submit on the ring */
	struct dma_fence *fence;
	struct msm_gpu_submitqueue *queue;
	struct pid *pid;    /* submitting process */
	bool valid;         /* true if no cmdstream patching needed */
	bool in_rb;         /* "sudo" mode, copy cmds into RB */
	struct msm_ringbuffer *ring;
	unsigned int nr_cmds;    /* # of entries in cmd[] */
	unsigned int nr_bos;     /* # of entries in bos[] */
	u32 ident;	   /* An "identifier" for the submit for logging */
	struct {
		uint32_t type;
		uint32_t size;  /* in dwords */
		uint64_t iova;
		uint32_t idx;   /* cmdstream buffer idx in bos[] */
	} *cmd;  /* array of size nr_cmds */
	struct {
		uint32_t flags;
		union {
			struct msm_gem_object *obj;
			uint32_t handle;
		};
		uint64_t iova;
	} bos[];
};
163
164/* helper to determine of a buffer in submit should be dumped, used for both
165 * devcoredump and debugfs cmdstream dumping:
166 */
167static inline bool
168should_dump(struct msm_gem_submit *submit, int idx)
169{
170	extern bool rd_full;
171	return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
172}
173
174#endif /* __MSM_GEM_H__ */