Linux Audio

Check our new training course

Linux kernel drivers training

May 6-19, 2025
Register
Loading...
v4.6
 
  1/*
  2 * Copyright (C) 2013 Red Hat
  3 * Author: Rob Clark <robdclark@gmail.com>
  4 *
  5 * This program is free software; you can redistribute it and/or modify it
  6 * under the terms of the GNU General Public License version 2 as published by
  7 * the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it will be useful, but WITHOUT
 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 12 * more details.
 13 *
 14 * You should have received a copy of the GNU General Public License along with
 15 * this program.  If not, see <http://www.gnu.org/licenses/>.
 16 */
 17
 18#ifndef __MSM_GEM_H__
 19#define __MSM_GEM_H__
 20
 21#include <linux/reservation.h>
 
 22#include "msm_drv.h"
 23
 24/* Additional internal-use only BO flags: */
 25#define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
 26
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 27struct msm_gem_object {
 28	struct drm_gem_object base;
 29
 30	uint32_t flags;
 31
 
 
 
 
 
 
 
 
 
 
 32	/* And object is either:
 33	 *  inactive - on priv->inactive_list
 34	 *  active   - on one one of the gpu's active_list..  well, at
 35	 *     least for now we don't have (I don't think) hw sync between
 36	 *     2d and 3d one devices which have both, meaning we need to
 37	 *     block on submit if a bo is already on other ring
 38	 *
 39	 */
 40	struct list_head mm_list;
 41	struct msm_gpu *gpu;     /* non-null if active */
 42	uint32_t read_fence, write_fence;
 43
 44	/* Transiently in the process of submit ioctl, objects associated
 45	 * with the submit are on submit->bo_list.. this only lasts for
 46	 * the duration of the ioctl, so one bo can never be on multiple
 47	 * submit lists.
 48	 */
 49	struct list_head submit_entry;
 50
 51	struct page **pages;
 52	struct sg_table *sgt;
 53	void *vaddr;
 54
 55	struct {
 56		// XXX
 57		uint32_t iova;
 58	} domain[NUM_DOMAINS];
 59
 60	/* normally (resv == &_resv) except for imported bo's */
 61	struct reservation_object *resv;
 62	struct reservation_object _resv;
 63
 64	/* For physically contiguous buffers.  Used when we don't have
 65	 * an IOMMU.  Also used for stolen/splashscreen buffer.
 66	 */
 67	struct drm_mm_node *vram_node;
 
 
 
 68};
 69#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 70
 71static inline bool is_active(struct msm_gem_object *msm_obj)
 72{
 73	return msm_obj->gpu != NULL;
 74}
 75
 76static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
 77		uint32_t op)
 78{
 79	uint32_t fence = 0;
 80
 81	if (op & MSM_PREP_READ)
 82		fence = msm_obj->write_fence;
 83	if (op & MSM_PREP_WRITE)
 84		fence = max(fence, msm_obj->read_fence);
 85
 86	return fence;
 
 
 87}
 88
 89#define MAX_CMDS 4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 90
 91/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
 92 * associated with the cmdstream submission for synchronization (and
 93 * make it easier to unwind when things go wrong, etc).  This only
 94 * lasts for the duration of the submit-ioctl.
 95 */
 96struct msm_gem_submit {
 97	struct drm_device *dev;
 98	struct msm_gpu *gpu;
 99	struct list_head node;   /* node in gpu submit_list */
 
100	struct list_head bo_list;
101	struct ww_acquire_ctx ticket;
102	uint32_t fence;
103	bool valid;
 
 
 
 
 
104	unsigned int nr_cmds;
105	unsigned int nr_bos;
 
106	struct {
107		uint32_t type;
108		uint32_t size;  /* in dwords */
109		uint32_t iova;
110		uint32_t idx;   /* cmdstream buffer idx in bos[] */
111	} cmd[MAX_CMDS];
112	struct {
113		uint32_t flags;
114		struct msm_gem_object *obj;
115		uint32_t iova;
 
 
 
116	} bos[0];
117};
118
119#endif /* __MSM_GEM_H__ */
v5.4
  1/* SPDX-License-Identifier: GPL-2.0-only */
  2/*
  3 * Copyright (C) 2013 Red Hat
  4 * Author: Rob Clark <robdclark@gmail.com>
 
 
 
 
 
 
 
 
 
 
 
 
  5 */
  6
  7#ifndef __MSM_GEM_H__
  8#define __MSM_GEM_H__
  9
 10#include <linux/kref.h>
 11#include <linux/dma-resv.h>
 12#include "msm_drv.h"
 13
 14/* Additional internal-use only BO flags: */
 15#define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
 16
 17struct msm_gem_address_space {
 18	const char *name;
 19	/* NOTE: mm managed at the page level, size is in # of pages
 20	 * and position mm_node->start is in # of pages:
 21	 */
 22	struct drm_mm mm;
 23	spinlock_t lock; /* Protects drm_mm node allocation/removal */
 24	struct msm_mmu *mmu;
 25	struct kref kref;
 26};
 27
 28struct msm_gem_vma {
 29	struct drm_mm_node node;
 30	uint64_t iova;
 31	struct msm_gem_address_space *aspace;
 32	struct list_head list;    /* node in msm_gem_object::vmas */
 33	bool mapped;
 34	int inuse;
 35};
 36
 37struct msm_gem_object {
 38	struct drm_gem_object base;
 39
 40	uint32_t flags;
 41
 42	/**
 43	 * Advice: are the backing pages purgeable?
 44	 */
 45	uint8_t madv;
 46
 47	/**
 48	 * count of active vmap'ing
 49	 */
 50	uint8_t vmap_count;
 51
 52	/* And object is either:
 53	 *  inactive - on priv->inactive_list
 54	 *  active   - on one one of the gpu's active_list..  well, at
 55	 *     least for now we don't have (I don't think) hw sync between
 56	 *     2d and 3d one devices which have both, meaning we need to
 57	 *     block on submit if a bo is already on other ring
 58	 *
 59	 */
 60	struct list_head mm_list;
 61	struct msm_gpu *gpu;     /* non-null if active */
 
 62
 63	/* Transiently in the process of submit ioctl, objects associated
 64	 * with the submit are on submit->bo_list.. this only lasts for
 65	 * the duration of the ioctl, so one bo can never be on multiple
 66	 * submit lists.
 67	 */
 68	struct list_head submit_entry;
 69
 70	struct page **pages;
 71	struct sg_table *sgt;
 72	void *vaddr;
 73
 74	struct list_head vmas;    /* list of msm_gem_vma */
 75
 76	struct llist_node freed;
 
 
 
 
 
 77
 78	/* For physically contiguous buffers.  Used when we don't have
 79	 * an IOMMU.  Also used for stolen/splashscreen buffer.
 80	 */
 81	struct drm_mm_node *vram_node;
 82	struct mutex lock; /* Protects resources associated with bo */
 83
 84	char name[32]; /* Identifier to print for the debugfs files */
 85};
 86#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 87
 88static inline bool is_active(struct msm_gem_object *msm_obj)
 89{
 90	return msm_obj->gpu != NULL;
 91}
 92
 93static inline bool is_purgeable(struct msm_gem_object *msm_obj)
 
 94{
 95	WARN_ON(!mutex_is_locked(&msm_obj->base.dev->struct_mutex));
 96	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
 97			!msm_obj->base.dma_buf && !msm_obj->base.import_attach;
 98}
 
 
 99
100static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
101{
102	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
103}
104
/* The shrinker can be triggered while we hold objA->lock, and need
 * to grab objB->lock to purge it.  Lockdep just sees these as a single
 * class of lock, so we use subclasses to teach it the difference.
 *
 * OBJ_LOCK_NORMAL is implicit (ie. normal mutex_lock() call), and
 * OBJ_LOCK_SHRINKER is used by the shrinker.
 *
 * It is *essential* that we never go down paths that could trigger the
 * shrinker for a purgeable object.  This is ensured by checking that
 * msm_obj->madv == MSM_MADV_WILLNEED.
 */
enum msm_gem_lock {
	OBJ_LOCK_NORMAL,
	OBJ_LOCK_SHRINKER,
};
120
/* Release the backing pages of a MSM_MADV_DONTNEED bo (subclass selects
 * the lockdep class — OBJ_LOCK_SHRINKER when called from the shrinker).
 */
121void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass);
/* Tear down the kernel vmap'ing of the bo; see is_vunmapable(). */
122void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
/* Deferred-free worker — presumably drains msm_gem_object::freed; verify
 * against the implementation in msm_gem.c.
 */
123void msm_gem_free_work(struct work_struct *work);
124
125/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
126 * associated with the cmdstream submission for synchronization (and
127 * make it easier to unwind when things go wrong, etc).  This only
128 * lasts for the duration of the submit-ioctl.
129 */
130struct msm_gem_submit {
131	struct drm_device *dev;
132	struct msm_gpu *gpu;
133	struct msm_gem_address_space *aspace;
134	struct list_head node;   /* node in ring submit list */
135	struct list_head bo_list;
136	struct ww_acquire_ctx ticket;
137	uint32_t seqno;		/* Sequence number of the submit on the ring */
138	struct dma_fence *fence;
139	struct msm_gpu_submitqueue *queue;
140	struct pid *pid;    /* submitting process */
141	bool valid;         /* true if no cmdstream patching needed */
142	bool in_rb;         /* "sudo" mode, copy cmds into RB */
143	struct msm_ringbuffer *ring;
144	unsigned int nr_cmds;
145	unsigned int nr_bos;
146	u32 ident;	   /* A "identifier" for the submit for logging */
147	struct {
148		uint32_t type;
149		uint32_t size;  /* in dwords */
150		uint64_t iova;
151		uint32_t idx;   /* cmdstream buffer idx in bos[] */
152	} *cmd;  /* array of size nr_cmds */
153	struct {
154		uint32_t flags;
155		union {
156			struct msm_gem_object *obj;
157			uint32_t handle;
158		};
159		uint64_t iova;
160	} bos[0];
161};
162
163#endif /* __MSM_GEM_H__ */